diff --git a/.circleci/.gitattributes b/.circleci/.gitattributes deleted file mode 100644 index 2dd06ee5f7cd..000000000000 --- a/.circleci/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -config.yml linguist-generated diff --git a/.circleci/.gitignore b/.circleci/.gitignore deleted file mode 100644 index 3018b3a68132..000000000000 --- a/.circleci/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.tmp/ diff --git a/.circleci/Makefile b/.circleci/Makefile deleted file mode 100644 index dc75ea5f1f19..000000000000 --- a/.circleci/Makefile +++ /dev/null @@ -1,100 +0,0 @@ -# Set SHELL to 'strict mode' without using .SHELLFLAGS for max compatibility. -# See https://fieldnotes.tech/how-to-shell-for-compatible-makefiles/ -SHELL := /usr/bin/env bash -euo pipefail -c - -# CONFIG is the name of the make target someone -# would invoke to update the main config file (config.yml). -CONFIG ?= ci-config -# VERIFY is the name of the make target someone -# would invoke to verify the config file. -VERIFY ?= ci-verify - -CIRCLECI := circleci --skip-update-check -ifeq ($(DEBUG_CIRCLECI_CLI),YES) -CIRCLECI += --debug -endif - -# For config processing, always refer to circleci.com not self-hosted circleci, -# because self-hosted does not currently support the necessary API. -CIRCLECI_CLI_HOST := https://circleci.com -export CIRCLECI_CLI_HOST - -# Set up some documentation/help message variables. -# We do not attempt to install the CircleCI CLI from this Makefile. -CCI_INSTALL_LINK := https://circleci.com/docs/2.0/local-cli/\#installation -CCI_INSTALL_MSG := Please install CircleCI CLI. See $(CCI_INSTALL_LINK) -CCI_VERSION := $(shell $(CIRCLECI) version 2> /dev/null) -ifeq ($(CCI_VERSION),) -# Attempting to use the CLI fails with installation instructions. -CIRCLECI := echo '$(CCI_INSTALL_MSG)'; exit 1; \# -endif - -SOURCE_DIR := config -SOURCE_YML := $(shell [ ! -d $(SOURCE_DIR) ] || find $(SOURCE_DIR) -name '*.yml') -CONFIG_SOURCE := Makefile $(SOURCE_YML) | $(SOURCE_DIR) -OUT := config.yml -TMP := .tmp/config-processed -CONFIG_PACKED := .tmp/config-packed -GO_VERSION_FILE := ../.go-version -GO_VERSION := $(shell cat $(GO_VERSION_FILE)) - -default: help - -help: - @echo "Usage:" - @echo "  make $(CONFIG): recompile config.yml from $(SOURCE_DIR)/" - @echo "  make $(VERIFY): verify that config.yml is a true mapping from $(SOURCE_DIR)/" - @echo - @echo "Diagnostics:" - @[ -z "$(CCI_VERSION)" ] || echo "  circleci-cli version $(CCI_VERSION)" - @[ -n "$(CCI_VERSION)" ] || echo "  $(CCI_INSTALL_MSG)" - -$(SOURCE_DIR): - @echo No source directory $(SOURCE_DIR) found.; exit 1 - -# Make sure our .tmp dir exists. -$(shell [ -d .tmp ] || mkdir .tmp) - -.PHONY: $(CONFIG) -$(CONFIG): $(OUT) $(GO_VERSION_FILE) - -.PHONY: $(VERIFY) -$(VERIFY): config-up-to-date - @$(CIRCLECI) config validate $(OUT) - -define GENERATED_FILE_HEADER -### *** -### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make $(CONFIG)'. -### INSTEAD: Edit or merge the source in $(SOURCE_DIR)/ then run 'make $(CONFIG)'. -### *** -endef -export GENERATED_FILE_HEADER - -# GEN_CONFIG writes the config to a temporary file. If the whole process succeeds, -# it then moves that file to $@. This makes it an atomic operation, so if it fails, -# make doesn't consider a half-baked file up to date.
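The `GEN_CONFIG` recipe that this comment introduces (next) relies on the classic write-to-a-temp-file-then-rename idiom. As a standalone sketch of that idiom, with hypothetical names (`generate_config` is not a real command here):

```sh
# Build the output under a temporary name; only rename it into place once
# every step has succeeded, so a failed run never leaves a half-written
# file that make would consider up to date.
generate_config > config.yml.tmp || { rm -f config.yml.tmp; exit 1; }
mv -f config.yml.tmp config.yml  # rename within one filesystem is atomic
```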
-define GEN_CONFIG - - @yq -i ".references.environment.GO_IMAGE = \"docker.mirror.hashicorp.services/cimg/go:$(GO_VERSION)\"" $(SOURCE_DIR)/executors/\@executors.yml - - @$(CIRCLECI) config pack $(SOURCE_DIR) > $(CONFIG_PACKED) - @echo "$$GENERATED_FILE_HEADER" > $@.tmp || { rm -f $@; exit 1; } - @$(CIRCLECI) config process $(CONFIG_PACKED) >> $@.tmp || { rm -f $@.tmp; exit 1; } - @mv -f $@.tmp $@ -endef - -.PHONY: $(OUT) -$(OUT): $(CONFIG_SOURCE) - $(GEN_CONFIG) - @echo "$@ updated" - -$(TMP): $(CONFIG_SOURCE) - $(GEN_CONFIG) - -.PHONY: config-up-to-date -config-up-to-date: $(TMP) # Note this must not depend on $(OUT)! - @if diff -w $(OUT) $<; then \ - echo "Generated $(OUT) is up to date!"; \ - else \ - echo "Generated $(OUT) is out of date, run make $(CONFIG) to update."; \ - exit 1; \ - fi diff --git a/.circleci/README.md b/.circleci/README.md deleted file mode 100644 index 1ec75cafade9..000000000000 --- a/.circleci/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# How to use CircleCI multi-file config - -This README and the Makefile should be in your `.circleci` directory, -in the root of your repository. -All path references in this README assume we are in this `.circleci` directory. - -The `Makefile` in this directory generates `./config.yml` in CircleCI 2.0 syntax, -from the tree rooted at `./config/`, which contains files in CircleCI 2.0 or 2.1 syntax. - - -## Quickstart - -The basic workflow is: - -- Edit source files in `./config/` -- When you are done, run `make ci-config` to update `./config.yml` -- Commit this entire `.circleci` directory, including the generated file, together. -- Run `make ci-verify` to ensure the current `./config.yml` is up to date with the source. - -When merging this `.circleci` directory: - -- Do not merge the generated `./config.yml` file, instead: -- Merge the source files under `./config/`, and then -- Run `make ci-config` to re-generate the merged `./config.yml` - -And that's it! For more detail, read on. - - -## How does it work, roughly? - -CircleCI supports [generating a single config file from many], -using the `$ circleci config pack` command. -It also supports [expanding 2.1 syntax to 2.0 syntax] -using the `$ circleci config process` command. -We use these two commands, stitched together by the `Makefile`, -to implement the workflow. - -[generating a single config file from many]: https://circleci.com/docs/2.0/local-cli/#packing-a-config -[expanding 2.1 syntax to 2.0 syntax]: https://circleci.com/docs/2.0/local-cli/#processing-a-config - - -## Prerequisites - -You will need the [CircleCI CLI tool] installed and working, -at least version `0.1.5607`. -You can [download this tool directly from GitHub Releases]. - -``` -$ circleci version -0.1.5607+f705856 ``` - -[CircleCI CLI tool]: https://circleci.com/docs/2.0/local-cli/ -[download this tool directly from GitHub Releases]: https://github.com/CircleCI-Public/circleci-cli/releases - - -## Updating the config source - -Before making changes, be sure to understand the layout -of the `./config/` file tree, as well as CircleCI 2.1 syntax. -See the [Syntax and layout] section below. - -To update the config, you should edit, add, or remove files -in the `./config/` directory, -and then run `make ci-config`. -If that's successful, -you should then commit every `*.yml` file in the tree rooted in this directory. -That is: you should commit both the source under `./config/` -and the generated file `./config.yml` at the same time, in the same commit. -The included git pre-commit hook will help with this.
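For orientation, the `GEN_CONFIG` recipe shown above boils down to roughly this manual session (omitting the yq image pin and the generated-file header):

```sh
$ circleci config pack config > .tmp/config-packed          # merge the config/ tree into one document
$ circleci config process .tmp/config-packed > config.yml   # expand 2.1 syntax to 2.0
$ circleci config validate config.yml                       # the same check `make ci-verify` runs
```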
-Do not edit the `./config.yml` file directly, as you will lose your changes -next time `make ci-config` is run. - -[Syntax and layout]: #syntax-and-layout - - -### Verifying `./config.yml` - -To check whether or not the current `./config.yml` is up to date with the source -and valid, run `$ make ci-verify`. -Note that `$ make ci-verify` should be run in CI, -in case not everyone has the git pre-commit hook set up correctly. - - -#### Example shell session - -```sh -$ make ci-config -config.yml updated -$ git add -A . # The -A makes sure to include deletions/renames etc. -$ git commit -m "ci: blah blah blah" -Changes detected in .circleci/, running 'make -C .circleci ci-verify' ---> Generated config.yml is up to date! ---> Config file at config.yml is valid. -``` - - -### Syntax and layout - -It is important to understand the layout of the config directory. -Read the documentation on [packing a config] for a full understanding -of how multiple YAML files are merged by the circleci CLI tool. - -[packing a config]: https://circleci.com/docs/2.0/local-cli/#packing-a-config - -Here is an example file tree (with comments added afterwards): - -```sh -$ tree . -. -├── Makefile -├── README.md # This file. -├── config # The source code for config.yml is rooted here. -│   ├── @config.yml # Files beginning with @ are treated specially by `circleci config pack` -│   ├── commands # Subdirectories of config become top-level keys. -│   │   └── go_test.yml # Filenames (minus .yml) become top-level keys under -│   │   └── go_build.yml # their parent (in this case "commands"). -│ │ # The contents of go_test.yml therefore are placed at: .commands.go_test: -│   └── jobs # jobs also becomes a top-level key under config... -│   ├── build.yml # ...and likewise filenames become keys under their parent. -│   └── test.yml -└── config.yml # The generated file in 2.0 syntax. -``` - -About those `@` files... Preceding a filename with `@` -indicates to `$ circleci config pack` that the contents of this YAML file -should be at the top-level, rather than underneath a key named after their filename. -This naming convention is unfortunate as it breaks autocompletion in bash, -but there we go. - diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index fc0cd8900648..000000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,1216 +0,0 @@ -### *** -### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make ci-config'. -### INSTEAD: Edit or merge the source in config/ then run 'make ci-config'. 
-### *** -# Orb 'circleci/slack@3.2.0' resolved to 'circleci/slack@3.2.0' -version: 2 -jobs: - install-ui-dependencies: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - run: - command: | - cd ui - yarn install - npm rebuild node-sass - name: Install UI dependencies - - save_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Save yarn cache - paths: - - ui/node_modules - test-ui: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - resource_class: xlarge - steps: - - run: - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job. - circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - attach_workspace: - at: . - - run: - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - name: Test UI - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results - build-go-dev: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=off" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - attach_workspace: - at: . - - run: - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - name: Build dev binary - - persist_to_workspace: - paths: - - bin - root: . 
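One detail worth noting in the `Setup Go` step above: CircleCI sources the file named by `$BASH_ENV` before running each subsequent `run` step, which is why the script appends its exports there rather than exporting them directly. A minimal sketch of the pattern:

```sh
# Exports appended to $BASH_ENV persist into later steps, because CircleCI's
# default bash invocation sources $BASH_ENV before each step's command runs.
echo "export GOPATH='$HOME/go'" >> "$BASH_ENV"
echo "export PATH='$PATH:$HOME/go/bin:/usr/local/go/bin'" >> "$BASH_ENV"
```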
- environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.4 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. 
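To make the jq selection above concrete: `go list -test -json` emits one JSON object per package, and the docker-dependent branch of the filter keeps `.ForTest` only when `.Deps` includes the docker test-helper package. A toy record run through that branch (package names illustrative):

```sh
echo '{"ForTest":"github.com/hashicorp/vault/http",
  "Deps":["github.com/hashicorp/vault/helper/testhelpers/docker"]}' |
  jq -r 'select(.Deps != null) |
    select(any(.Deps[]; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) |
    .ForTest | select(. != null)'
# prints: github.com/hashicorp/vault/http
```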
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - fmt: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. 
Run 'make fmt' to fix" - exit 1 - fi - name: make fmt - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.4 - resource_class: xlarge - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. 
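The `circleci tests split --split-by=timings` call in the pipeline above is what fans the package list out across this job's parallel containers (`parallelism: 8`). Conceptually it behaves like this (package names illustrative):

```sh
# Every parallel container runs the same command, but each receives only its
# share of the input lines, balanced by recorded historical timings.
printf '%s\n' \
  github.com/hashicorp/vault/http \
  github.com/hashicorp/vault/audit \
  github.com/hashicorp/vault/command |
  circleci tests split --split-by=timings --timings-type=classname
```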
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.4 - resource_class: large - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. 
- # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. 
- # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - attach_workspace: - at: . - - run: - command: "# Alpine images can't run the make file due to a bash requirement. Run\n# semgrep explicitly here. 
\nexport PATH=\"$HOME/.local/bin:$PATH\" \necho -n 'Semgrep Version: '\nsemgrep --version\nsemgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .\n" - name: Run Semgrep Rules - pre-flight-checks: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - name: Install CircleCI CLI - - run: - command: | - set -x - . $BASH_ENV - make ci-verify - name: Verify CircleCI - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} - name: Restore closest matching go modules cache - - run: - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - name: go mod download - - run: - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above." 
- exit 1 - } - name: Verify downloading modules did not modify any files - - save_cache: - key: v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Save go modules cache - paths: - - /home/circleci/go/pkg/mod - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.19.4 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. 
!= null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. 
- if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . 
- docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.19.4 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 -workflows: - ci: - jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - - test-go: - requires: - - pre-flight-checks - - test-go-remote-docker: - requires: - - pre-flight-checks - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks - version: 2 diff --git a/.circleci/config/@config.yml b/.circleci/config/@config.yml deleted file mode 100644 index 38fbc6831210..000000000000 --- a/.circleci/config/@config.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -version: 2.1 - -orbs: - slack: circleci/slack@3.2.0 diff --git a/.circleci/config/commands/@caches.yml b/.circleci/config/commands/@caches.yml deleted file mode 100644 index 7ce217f074d6..000000000000 --- a/.circleci/config/commands/@caches.yml +++ /dev/null @@ -1,59 +0,0 @@ -restore_yarn_cache: - steps: - - restore_cache: - name: Restore yarn cache - key: &YARN_LOCK_CACHE_KEY yarn-lock-v7-{{ checksum "ui/yarn.lock" }} -save_yarn_cache: - steps: - - save_cache: - name: Save yarn cache - key: *YARN_LOCK_CACHE_KEY - paths: - - ui/node_modules -# allows restoring go mod caches by incomplete prefix. This is useful when re-generating -# cache, but not when running builds and tests that require an exact match. -# TODO should we be including arch in cache key? -restore_go_mod_cache_permissive: - steps: - - restore_cache: - name: Restore closest matching go modules cache - keys: - - &gocachekey v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} -restore_go_mod_cache: - steps: - - restore_cache: - name: Restore exact go modules cache - keys: - - *gocachekey -save_go_mod_cache: - steps: - - save_cache: - name: Save go modules cache - key: *gocachekey - paths: - - /home/circleci/go/pkg/mod -refresh_go_mod_cache: - steps: - - restore_go_mod_cache_permissive - - run: - name: go mod download - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - - run: - name: Verify downloading modules did not modify any files - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above." 
- exit 1 - } - - save_go_mod_cache diff --git a/.circleci/config/commands/configure-git.yml b/.circleci/config/commands/configure-git.yml deleted file mode 100644 index a725ab97e7b9..000000000000 --- a/.circleci/config/commands/configure-git.yml +++ /dev/null @@ -1,7 +0,0 @@ -steps: - - add_ssh_keys: - fingerprints: - # "CircleCI Additional SSH Key" associated with hc-github-team-secure-vault-core GitHub user - - "b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9" - - run: | - git config --global url."git@github.com:".insteadOf https://github.com/ diff --git a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml b/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml deleted file mode 100644 index 771ef4d925f8..000000000000 --- a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml +++ /dev/null @@ -1,17 +0,0 @@ -description: > - Halt the job unless the branch needs the UI tests (main, ui/*, backport/ui/*, release/*, or merge* branches). -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job. - circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml b/.circleci/config/commands/exit-if-ui-or-docs-branch.yml deleted file mode 100644 index 322091f70ba7..000000000000 --- a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml +++ /dev/null @@ -1,14 +0,0 @@ -description: > - Check if branch name starts with ui/ or docs/ and if so, exit. -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml deleted file mode 100644 index 9e4b4daa9da4..000000000000 --- a/.circleci/config/commands/go_test.yml +++ /dev/null @@ -1,226 +0,0 @@ -description: run go tests -parameters: - extra_flags: - type: string - default: "" - log_dir: - type: string - default: "/tmp/testlogs" - cache_dir: - type: string - default: /tmp/go-cache - save_cache: - type: boolean - default: false - use_docker: - type: boolean - default: false - arch: - type: string - # Only supported for use_docker=false, and only other value allowed is 386 - default: amd64 # must be 386 or amd64 -steps: - - configure-git - - run: - name: Compute test cache key - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_go_mod_cache - - run: - name: Run Go tests - no_output_timeout: 60m - environment: - GOPRIVATE: 'github.com/hashicorp/*' - command: | - set -exo pipefail - - EXTRA_TAGS= - case "<< parameters.extra_flags >>" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - <<#
parameters.use_docker >> - USE_DOCKER=1 - <</ parameters.use_docker >> - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running the tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. 
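- # Pinning the client's API version to that reported maximum (the export below) sidesteps the mismatch.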
- export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=<< parameters.log_dir >> \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d << parameters.cache_dir >> && docker cp << parameters.cache_dir >> ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
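- # Note: the test container runs with GOPROXY=off (see the docker exec below), so the module cache copied here must already be complete.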
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=<< parameters.arch >> \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - else - GOARCH=<< parameters.arch >> \ - GOCACHE=<< parameters.cache_dir >> \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - fi - - - when: - condition: << parameters.use_docker >> - steps: - - run: - name: Copy test results - when: always - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache << parameters.cache_dir >> - - when: - condition: << parameters.save_cache >> - steps: - - save_cache: - when: always - key: go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - paths: - - << parameters.cache_dir >> diff --git a/.circleci/config/commands/setup-go.yml b/.circleci/config/commands/setup-go.yml deleted file mode 100644 index 5aec0087e9da..000000000000 --- a/.circleci/config/commands/setup-go.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: > - Ensure the right version of Go is installed and set GOPATH to $HOME/go. -parameters: - GOPROXY: - description: > - Set GOPROXY. By default this is set to "off" meaning you have to have all modules pre-downloaded. 
- type: string - default: "off" - GOPRIVATE: - description: Set GOPRIVATE, defaults to github.com/hashicorp/* - type: string - default: github.com/hashicorp/* -steps: - - run: - name: Setup Go - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=<< parameters.GOPROXY >>" >> "$BASH_ENV" - echo "export GOPRIVATE=<< parameters.GOPRIVATE >>" >> "$BASH_ENV" - - echo "$ go version" - go version diff --git a/.circleci/config/executors/@executors.yml b/.circleci/config/executors/@executors.yml deleted file mode 100644 index edf763921ca6..000000000000 --- a/.circleci/config/executors/@executors.yml +++ /dev/null @@ -1,49 +0,0 @@ -references: - environment: &ENVIRONMENT - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) - GOTESTSUM_VERSION: 0.5.2 # Pin gotestsum to patch version (ex: 1.2.3) - GOFUMPT_VERSION: 0.3.1 # Pin gofumpt to patch version (ex: 1.2.3) - GO_TAGS: "" - GO_IMAGE: &GO_IMAGE "docker.mirror.hashicorp.services/cimg/go:1.19.4" -go-machine: - machine: - image: ubuntu-2004:2022.10.1 - environment: *ENVIRONMENT - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -node: - docker: - - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - environment: - # See https://git.io/vdao3 for details. - JOBS: 2 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -python: - docker: - - image: docker.mirror.hashicorp.services/python:3-alpine - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-remote-docker: - resource_class: medium - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test: - resource_class: large - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-race: - resource_class: xlarge - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault diff --git a/.circleci/config/jobs/build-go-dev.yml b/.circleci/config/jobs/build-go-dev.yml deleted file mode 100644 index cce6d3f61edd..000000000000 --- a/.circleci/config/jobs/build-go-dev.yml +++ /dev/null @@ -1,20 +0,0 @@ -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - attach_workspace: - at: . - - run: - name: Build dev binary - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - - persist_to_workspace: - root: . 
- paths: - - bin diff --git a/.circleci/config/jobs/fmt.yml b/.circleci/config/jobs/fmt.yml deleted file mode 100644 index 7d9a08dcebd1..000000000000 --- a/.circleci/config/jobs/fmt.yml +++ /dev/null @@ -1,17 +0,0 @@ -description: Ensure go formatting is correct. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: make fmt - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. Run 'make fmt' to fix" - exit 1 - fi diff --git a/.circleci/config/jobs/install-ui-dependencies.yml b/.circleci/config/jobs/install-ui-dependencies.yml deleted file mode 100644 index 845e0c7770e2..000000000000 --- a/.circleci/config/jobs/install-ui-dependencies.yml +++ /dev/null @@ -1,11 +0,0 @@ -executor: node -steps: - - checkout - - restore_yarn_cache - - run: - name: Install UI dependencies - command: | - cd ui - yarn install - npm rebuild node-sass - - save_yarn_cache diff --git a/.circleci/config/jobs/pre-flight-checks.yml b/.circleci/config/jobs/pre-flight-checks.yml deleted file mode 100644 index 924b451b51d2..000000000000 --- a/.circleci/config/jobs/pre-flight-checks.yml +++ /dev/null @@ -1,34 +0,0 @@ -description: Ensure nothing obvious is broken, and pre-cache Go modules. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: Install CircleCI CLI - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - - run: - name: Verify CircleCI - command: | - set -x - . $BASH_ENV - make ci-verify - - configure-git - - refresh_go_mod_cache diff --git a/.circleci/config/jobs/semgrep.yml b/.circleci/config/jobs/semgrep.yml deleted file mode 100644 index c5cf749e129d..000000000000 --- a/.circleci/config/jobs/semgrep.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -executor: semgrep -steps: - - checkout - - attach_workspace: - at: . - - run: - name: Run Semgrep Rules - command: | - # Alpine images can't run the make file due to a bash requirement. Run - # semgrep explicitly here. - export PATH="$HOME/.local/bin:$PATH" - echo -n 'Semgrep Version: ' - semgrep --version - semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci . 
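The semgrep step above can be reproduced outside CI; a minimal sketch, assuming the same pinned release (0.113.0, matching the executor image, though CI runs it from a Docker image rather than pip) and assuming it is run from the repository root where the tools/semgrep/ci rules live:

    # Install the pinned semgrep version, then run the CI rule set against Go
    # sources, excluding vendored code and failing on any finding.
    pip install semgrep==0.113.0
    semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .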
diff --git a/.circleci/config/jobs/test-go-nightly.yml b/.circleci/config/jobs/test-go-nightly.yml deleted file mode 100644 index 502cdfa4e185..000000000000 --- a/.circleci/config/jobs/test-go-nightly.yml +++ /dev/null @@ -1,14 +0,0 @@ -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - go_test: - log_dir: "/tmp/testlogs" - save_cache: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race-remote-docker.yml b/.circleci/config/jobs/test-go-race-remote-docker.yml deleted file mode 100644 index 6780c60366e4..000000000000 --- a/.circleci/config/jobs/test-go-race-remote-docker.yml +++ /dev/null @@ -1,18 +0,0 @@ -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race.yml b/.circleci/config/jobs/test-go-race.yml deleted file mode 100644 index fcda05e9ceda..000000000000 --- a/.circleci/config/jobs/test-go-race.yml +++ /dev/null @@ -1,14 +0,0 @@ -executor: docker-env-go-test-race -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-remote-docker.yml b/.circleci/config/jobs/test-go-remote-docker.yml deleted file mode 100644 index f51003f09445..000000000000 --- a/.circleci/config/jobs/test-go-remote-docker.yml +++ /dev/null @@ -1,17 +0,0 @@ -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go.yml b/.circleci/config/jobs/test-go.yml deleted file mode 100644 index c1674de870d2..000000000000 --- a/.circleci/config/jobs/test-go.yml +++ /dev/null @@ -1,13 +0,0 @@ -executor: docker-env-go-test -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-ui.yml b/.circleci/config/jobs/test-ui.yml deleted file mode 100644 index f2aa19b0508d..000000000000 --- a/.circleci/config/jobs/test-ui.yml +++ /dev/null @@ -1,22 +0,0 @@ -executor: node -resource_class: xlarge -steps: - - exit-if-branch-does-not-need-test-ui - - checkout - - restore_yarn_cache - - attach_workspace: - at: . 
- - run: - name: Test UI - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml deleted file mode 100644 index 5e99293d7ea3..000000000000 --- a/.circleci/config/workflows/ci.yml +++ /dev/null @@ -1,35 +0,0 @@ -jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - # Only main, UI, release and merge branches need to run UI tests. - # We don't filter here, however, because test-ui is configured in github as - # required, so it must run; instead we short-circuit within test-ui. - - test-go: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-remote-docker: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks diff --git a/.copywrite.hcl b/.copywrite.hcl index c779cce68070..de148843b44e 100644 --- a/.copywrite.hcl +++ b/.copywrite.hcl @@ -1,14 +1,16 @@ schema_version = 1 project { - license = "MPL-2.0" - copyright_year = 2015 + license = "BUSL-1.1" + copyright_year = 2024 # (OPTIONAL) A list of globs that should not have copyright/license headers. # Supports doublestar glob patterns for more flexibility in defining which # files or folders should be ignored header_ignore = [ - "builtin/credentials/aws/pkcs7/**", + "helper/pkcs7/**", "ui/node_modules/**", + "enos/modules/k8s_deploy_vault/raft-config.hcl", + "plugins/database/postgresql/scram/**", ] } diff --git a/.github/.secret_scanning.yml b/.github/.secret_scanning.yml new file mode 100644 index 000000000000..470059630471 --- /dev/null +++ b/.github/.secret_scanning.yml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +paths-ignore: + - '**/*.mdx' # any file ending in .mdx + - '**/*.md' # any file ending in .md + - '**/*_test.go' # any file ending in _test.go diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 9399e9b5dba9..d313527dcd8b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + contact_links: - name: Ask a question url: https://discuss.hashicorp.com/c/vault diff --git a/.github/ISSUE_TEMPLATE/plugin-submission.md b/.github/ISSUE_TEMPLATE/plugin-submission.md index 8bed55a04c66..54becc1c9e79 100644 --- a/.github/ISSUE_TEMPLATE/plugin-submission.md +++ b/.github/ISSUE_TEMPLATE/plugin-submission.md @@ -7,7 +7,7 @@ assignees: '' --- -Please provide details for the plugin to be listed. All fields are required for a submission to be included in the [Plugin Portal](https://www.vaultproject.io/docs/plugin-portal) page. +Please provide details for the plugin to be listed. 
All fields are required for a submission to be included in the [Vault Integrations](https://developer.hashicorp.com/vault/integrations) page. **Plugin Information** Name as it would appear listed: diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 000000000000..281951c170bc --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +self-hosted-runner: + # Labels of self-hosted runner in array of string + labels: + - small + - medium + - large + - ondemand + - disk_gb=64 + - os=linux + - type=m5.2xlarge + - type=c6a.xlarge + - type=c6a.4xlarge + - ubuntu-20.04 + - custom-linux-small-vault-latest + - custom-linux-medium-vault-latest + - custom-linux-xl-vault-latest diff --git a/.github/actions/build-vault/action.yml b/.github/actions/build-vault/action.yml new file mode 100644 index 000000000000..8fc228415a02 --- /dev/null +++ b/.github/actions/build-vault/action.yml @@ -0,0 +1,202 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Build Vault +description: | + Build various Vault binaries and package them into Zip bundles, Deb and RPM packages, + and various container images. Upload the resulting artifacts to Github Actions artifact storage. + This composite action is used across both CE and Ent, thus it should maintain compatibility with + both repositories. + +inputs: + github-token: + type: string + description: An elevated Github token to access private Go modules if necessary. + default: "" + cgo-enabled: + type: number + description: Enable or disable CGO during the build. + default: 0 + create-docker-container: + type: boolean + description: Package the binary into a Docker/AWS container. + default: true + create-redhat-container: + type: boolean + description: Package the binary into a Redhat container. + default: false + create-packages: + type: boolean + description: Package the binaries into deb and rpm formats. + default: true + goos: + type: string + description: The Go GOOS value environment variable to set during the build. + goarch: + type: string + description: The Go GOARCH value environment variable to set during the build. + goarm: + type: string + description: The Go GOARM value environment variable to set during the build. + default: "" + goexperiment: + type: string + description: Which Go experiments to enable. + default: "" + go-tags: + type: string + description: A comma separated list of tags to pass to the Go compiler during build. + default: "" + package-name: + type: string + description: The name to use for the linux packages. + default: ${{ github.event.repository.name }} + vault-binary-name: + type: string + description: The name of the vault binary. + default: vault + vault-edition: + type: string + description: The edition of vault to build. + vault-version: + type: string + description: The version metadata to inject into the build via the linker. + web-ui-cache-key: + type: string + description: The cache key for restoring the pre-built web UI artifact. + +outputs: + vault-binary-path: + description: The location of the built binary. 
+ value: ${{ steps.containerize.outputs.vault-binary-path != '' && steps.containerize.outputs.vault-binary-path || steps.metadata.outputs.binary-path }} + +runs: + using: composite + steps: + - name: Ensure zstd is available for actions/cache + # actions/cache restores based on cache key and "cache version", the former is unique to the + # build job or web UI, the latter is a hash which is based on the runner OS, the paths being + # cached, and the program used to compress it. Most of our workflows will use zstd to compress + # the cached artifact so we have to have it around for our machines to get both a version match + # and to decompress it. Most runners include zstd by default but there are exceptions, like + # our Ubuntu 20.04 compatibility runners, which do not. + shell: bash + run: which zstd || (sudo apt update && sudo apt install -y zstd) + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ inputs.github-token }} + - uses: ./.github/actions/install-external-tools + - if: inputs.vault-edition != 'ce' + name: Configure Git + shell: bash + run: git config --global url."https://${{ inputs.github-token }}:@github.com".insteadOf "https://github.com" + - name: Restore UI from cache + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + # Restore the UI asset from the UI build workflow. Never use a partial restore key. + enableCrossOsArchive: true + fail-on-cache-miss: true + path: http/web_ui + key: ${{ inputs.web-ui-cache-key }} + - name: Metadata + id: metadata + env: + # We need these for the artifact basename helper + GOARCH: ${{ inputs.goarch }} + GOOS: ${{ inputs.goos }} + VERSION: ${{ inputs.vault-version }} + VERSION_METADATA: ${{ inputs.vault-edition != 'ce' && inputs.vault-edition || '' }} + shell: bash + run: | + if [[ '${{ inputs.vault-edition }}' =~ 'ce' ]]; then + build_step_name='Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }}' + package_version='${{ inputs.vault-version }}' + else + build_step_name='Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + package_version='${{ inputs.vault-version }}+ent' # this should always be +ent here regardless of enterprise edition + fi + { + echo "artifact-basename=$(make ci-get-artifact-basename)" + echo "binary-path=dist/${{ inputs.vault-binary-name }}" + echo "build-step-name=${build_step_name}" + echo "package-version=${package_version}" + } | tee -a "$GITHUB_OUTPUT" + - name: ${{ steps.metadata.outputs.build-step-name }} + env: + CGO_ENABLED: ${{ inputs.cgo-enabled }} + GO_TAGS: ${{ inputs.go-tags }} + GOARCH: ${{ inputs.goarch }} + GOARM: ${{ inputs.goarm }} + GOOS: ${{ inputs.goos }} + GOEXPERIMENT: ${{ inputs.goexperiment }} + GOPRIVATE: github.com/hashicorp + VERSION: ${{ inputs.vault-version }} + VERSION_METADATA: ${{ inputs.vault-edition != 'ce' && inputs.vault-edition || '' }} + shell: bash + run: make ci-build + - if: inputs.vault-edition != 'ce' + shell: bash + run: make ci-prepare-ent-legal + - if: inputs.vault-edition == 'ce' + shell: bash + run: make ci-prepare-ce-legal + - name: Bundle Vault + env: + BUNDLE_PATH: out/${{ steps.metadata.outputs.artifact-basename }}.zip + shell: bash + run: make ci-bundle + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.metadata.outputs.artifact-basename }}.zip + path: out/${{ steps.metadata.outputs.artifact-basename }}.zip + if-no-files-found: error + - if: inputs.create-packages == 'true' + uses: 
hashicorp/actions-packaging-linux@33f7d23b14f24e6a7b7d9948cb7f5caca2045ee3 + with: + name: ${{ inputs.package-name }} + description: Vault is a tool for secrets management, encryption as a service, and privileged access management. + arch: ${{ inputs.goarch }} + version: ${{ steps.metadata.outputs.package-version }} + maintainer: HashiCorp + homepage: https://github.com/hashicorp/vault + license: BUSL-1.1 + binary: ${{ steps.metadata.outputs.binary-path }} + deb_depends: openssl + rpm_depends: openssl + config_dir: .release/linux/package/ + preinstall: .release/linux/preinst + postinstall: .release/linux/postinst + postremove: .release/linux/postrm + - if: inputs.create-packages == 'true' + id: package-files + name: Determine package file names + shell: bash + run: | + { + echo "rpm-files=$(basename out/*.rpm)" + echo "deb-files=$(basename out/*.deb)" + } | tee -a "$GITHUB_OUTPUT" + - if: inputs.create-packages == 'true' + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.package-files.outputs.rpm-files }} + path: out/${{ steps.package-files.outputs.rpm-files }} + if-no-files-found: error + - if: inputs.create-packages == 'true' + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.package-files.outputs.deb-files }} + path: out/${{ steps.package-files.outputs.deb-files }} + if-no-files-found: error + # Do our containerization last as it will move the binary location if we create containers. + - uses: ./.github/actions/containerize + id: containerize + with: + docker: ${{ inputs.create-docker-container }} + redhat: ${{ inputs.create-redhat-container }} + goarch: ${{ inputs.goarch }} + goos: ${{ inputs.goos }} + vault-binary-path: ${{ steps.metadata.outputs.binary-path }} + vault-edition: ${{ inputs.vault-edition }} + vault-version: ${{ inputs.vault-version }} diff --git a/.github/actions/changed-files/action.yml b/.github/actions/changed-files/action.yml new file mode 100644 index 000000000000..3d0a1efacfc7 --- /dev/null +++ b/.github/actions/changed-files/action.yml @@ -0,0 +1,73 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Determine what files changed between two git references. +description: | + Determine what files have changed between two git references. If the github.event_name is + pull_request we'll compare the github.base_ref (merge target) and pull request head SHA. + For other event types we'll gather the changed files from the most recent commit. This allows + us to support PR and merge workflows. + +outputs: + app-changed: + description: Whether or not the vault Go app was modified. + value: ${{ steps.changed-files.outputs.app-changed }} + docs-changed: + description: Whether or not the documentation was modified. + value: ${{ steps.changed-files.outputs.docs-changed }} + ui-changed: + description: Whether or not the web UI was modified. + value: ${{ steps.changed-files.outputs.ui-changed }} + files: + description: All of the file names that changed. + value: ${{ steps.changed-files.outputs.files }} + +runs: + using: composite + steps: + - id: ref + shell: bash + name: ref + run: | + # Determine our desired checkout ref. + # + # * If the trigger event is pull_request we will default to a magical merge SHA that Github + # creates. This SHA is the product of merging our PR into the merge target branch + # at the point in time when we created the PR. 
When you push a change to a PR branch, + # Github updates this branch if it can. When you rebase a PR it updates this branch. + # + # * If the trigger event is pull_request and a `checkout-head` tag is present or the + # checkout-head input is set, we'll use HEAD of the PR branch instead of the magical + # merge SHA. + # + # * If the trigger event is a push (merge) then we'll get the latest commit that was pushed. + # + # * For any other event type we'll default to whatever is default in Github. + if [ '${{ github.event_name }}' = 'pull_request' ]; then + checkout_ref='${{ github.event.pull_request.head.sha }}' + elif [ '${{ github.event_name }}' = 'push' ]; then + # Our checkout ref for any other event type should default to the github ref. + checkout_ref='${{ github.event.after && github.event.after || github.event.push.after }}' + else + checkout_ref='${{ github.ref }}' + fi + echo "ref=${checkout_ref}" | tee -a "$GITHUB_OUTPUT" + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + with: + repository: ${{ github.repository }} + path: "changed-files" + # The fetch-depth could probably be optimized at some point. It's currently set to zero to + # ensure that we have a successful diff, regardless of how many commits might be + # present between the two references we're comparing. It would be nice to change this + # depending on the number of commits by using the push.commits and/or pull_request.commits + # payload fields, however, they have different behavior and limitations. For now we'll do + # the slow but sure thing of getting the whole repository. + fetch-depth: 0 + ref: ${{ steps.ref.outputs.ref }} + - id: changed-files + name: changed-files + # This script writes output values to $GITHUB_OUTPUT and STDOUT + shell: bash + run: ./.github/scripts/changed-files.sh ${{ github.event_name }} ${{ github.ref_name }} ${{ github.base_ref }} + working-directory: changed-files diff --git a/.github/actions/checkout/action.yml b/.github/actions/checkout/action.yml new file mode 100644 index 000000000000..312763f5c141 --- /dev/null +++ b/.github/actions/checkout/action.yml @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Check out the correct git reference. +description: | + Determine and checkout the correct Git reference depending on the actions event type and tags. + +inputs: + checkout-head: + description: | + Whether or not to check out HEAD on a pull request. This can also be triggered with a + `checkout-head` tag. + default: 'false' + path: + description: Relative path to $GITHUB_WORKSPACE to check out to + default: "" + +outputs: + ref: + description: The git reference that was checked out. + value: ${{ steps.ref.outputs.ref }} + depth: + description: The fetch depth that was checked out. + value: ${{ steps.ref.outputs.depth }} + +runs: + using: composite + steps: + - id: ref + shell: bash + run: | + # Determine our desired checkout ref and fetch depth. Depending on our workflow event + # trigger, inputs, and tags, we'll check out different references at different depths. + # + # * If the trigger event is a pull request we will default to a magical merge SHA that Github + # creates. Essentially, this SHA is the product of merging our PR into the merge target + # branch at some point in time. When you push a change to a PR branch Github updates this + # branch if it can. 
+ # * If the trigger event is a pull request and a `checkout-head` tag is present or the + # checkout-head input is set, we'll use HEAD of the PR branch instead of the magical + # merge SHA. + # * If the trigger event is a push (merge) then we'll get the latest commit that was pushed. + # * For any other event type we'll default to whatever is default in Github. + # + # Our fetch depth will vary depending on what our chosen SHA is. We normally want to do + # the most shallow clone possible for speed, but we also need to support getting history + # for determining what files have changed, etc. We'll always check out one level deep for + # merges or standard pull requests. If checking out HEAD is requested we'll fetch a deeper + # history because we need all commits on the branch. + # + if [ '${{ github.event_name }}' = 'pull_request' ]; then + if [ '${{ contains(github.event.pull_request.labels.*.name, 'checkout-head') || inputs.checkout-head == 'true' }}' = 'true' ]; then + checkout_ref='${{ github.event.pull_request.head.sha }}' + fetch_depth=0 + else + checkout_ref='${{ github.ref }}' + fetch_depth=1 + fi + elif [ '${{ github.event_name }}' = 'push' ]; then + # Our checkout ref for any other event type should default to the github ref. + checkout_ref='${{ github.event.push.after }}' + fetch_depth=1 + else + checkout_ref='${{ github.ref }}' + fetch_depth=0 + fi + + { + echo "ref=${checkout_ref}" + echo "depth=${fetch_depth}" + } | tee -a "$GITHUB_OUTPUT" + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + with: + path: ${{ inputs.path }} + fetch-depth: ${{ steps.ref.outputs.depth }} + ref: ${{ steps.ref.outputs.ref }} diff --git a/.github/actions/containerize/action.yml b/.github/actions/containerize/action.yml new file mode 100644 index 000000000000..e269298e52b7 --- /dev/null +++ b/.github/actions/containerize/action.yml @@ -0,0 +1,109 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Containerize Binary +description: | + Containerize vault binaries and annotate them with the correct registry tags. Artifacts will be + uploaded to the Github artifact store. This action is used for both CE and Ent and thus needs to + stay compatible with both repository contexts. + +inputs: + docker: + type: boolean + description: | + Package the binary into a Docker container suitable for the Docker and AWS registries. We'll + automatically determine the correct tags and target depending on the vault edition. + default: true + goarch: + type: string + description: The Go GOARCH value environment variable to set during the build. + goos: + type: string + description: The Go GOOS value environment variable to set during the build. + redhat: + type: boolean + description: Package the binary into a UBI container suitable for the Redhat Quay registry. + default: false + vault-binary-path: + type: string + description: The path to the vault binary. + default: dist/vault + vault-edition: + type: string + description: The edition of vault to build. + default: ce + vault-version: + type: string + description: The vault version. 
+ +outputs: + vault-binary-path: + description: The location of the binary after containerization + value: ${{ inputs.vault-binary-path }} + +runs: + using: composite + steps: + - id: vars + shell: bash + run: | + if [[ '${{ inputs.vault-edition }}' =~ 'ce' ]]; then + # CE containers + container_version='${{ inputs.vault-version }}' + docker_container_tags='docker.io/hashicorp/vault:${{ inputs.vault-version }} public.ecr.aws/hashicorp/vault:${{ inputs.vault-version }}' + docker_container_target='default' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ inputs.vault-version }}-ubi' + redhat_container_target='ubi' + else + # Ent containers + container_version='${{ inputs.vault-version }}+${{ inputs.vault-edition }}' + + if [[ '${{ inputs.vault-edition }}' =~ 'fips' ]]; then + # Ent FIPS 140-2 containers + docker_container_tags='docker.io/hashicorp/vault-enterprise-fips:${{ inputs.vault-version }}-${{ inputs.vault-edition }} public.ecr.aws/hashicorp/vault-enterprise-fips:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='ubi-fips' + redhat_container_tags='quay.io/redhat-isv-containers/6283f645d02c6b16d9caeb8e:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi-fips' + else + # All other Ent containers + docker_container_tags='docker.io/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition}} public.ecr.aws/hashicorp/vault-enterprise:${{ inputs.vault-version }}-${{ inputs.vault-edition }}' + docker_container_target='default' + redhat_container_tags='quay.io/redhat-isv-containers/5f89bb9242e382c85087dce2:${{ inputs.vault-version }}-${{ inputs.vault-edition }}-ubi' + redhat_container_target='ubi' + fi + fi + { + echo "container-version=${container_version}" + echo "docker-container-tags=${docker_container_tags}" + echo "docker-container-target=${docker_container_target}" + echo "redhat-container-tags=${redhat_container_tags}" + echo "redhat-container-target=${redhat_container_target}" + echo "revision=$(make ci-get-revision)" + } | tee -a "$GITHUB_OUTPUT" + - if: inputs.docker == 'true' || inputs.redhat == 'true' + id: copy-binary + shell: bash + run: | + dest_path='dist/${{ inputs.goos }}/${{ inputs.goarch }}/vault' + dest_dir=$(dirname "$dest_path") + [[ ! -d "$dest_dir" ]] && mkdir -p "$dest_dir" + [[ ! 
-f "$dest_path" ]] && cp ${{ inputs.vault-binary-path }} "${dest_path}" + - if: inputs.docker == 'true' + uses: hashicorp/actions-docker-build@v2 + with: + arch: ${{ inputs.goarch }} + do_zip_extract_step: 'false' # Don't download and extract an already present binary + target: ${{ steps.vars.outputs.docker-container-target }} + tags: ${{ steps.vars.outputs.docker-container-tags }} + revision: ${{ steps.vars.outputs.revision }} + version: ${{ steps.vars.outputs.container-version }} + - if: inputs.redhat == 'true' + uses: hashicorp/actions-docker-build@v2 + with: + arch: ${{ inputs.goarch }} + do_zip_extract_step: 'false' # Don't download and extract an already present binary + redhat_tag: ${{ steps.vars.outputs.redhat-container-tags }} + target: ${{ steps.vars.outputs.redhat-container-target }} + revision: ${{ steps.vars.outputs.revision }} + version: ${{ steps.vars.outputs.container-version }} diff --git a/.github/actions/install-external-tools/action.yml b/.github/actions/install-external-tools/action.yml new file mode 100644 index 000000000000..1b9b2babb5a8 --- /dev/null +++ b/.github/actions/install-external-tools/action.yml @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Install external tools for CI +description: Install external tools CI + +# When possible, prefer installing pre-built external tools for speed. This allows us to avoid +# downloading modules and compiling external tools on CI runners. + +runs: + using: composite + steps: + - uses: ./.github/actions/set-up-buf + with: + version: v1.25.0 # This should match the version in tools/tool.sh + - uses: ./.github/actions/set-up-gofumpt + - uses: ./.github/actions/set-up-gotestsum + - uses: ./.github/actions/set-up-misspell + - uses: ./.github/actions/set-up-staticcheck + # We assume that the Go toolchain will be managed by the caller workflow so we don't set one + # up here. + - run: go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + shell: bash + - run: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + shell: bash + - run: go install github.com/favadi/protoc-go-inject-tag@latest + shell: bash + - run: go install golang.org/x/tools/cmd/goimports@latest + shell: bash + - run: go install github.com/golangci/revgrep/cmd/revgrep@latest + shell: bash + - run: go install github.com/loggerhead/enumer@latest + shell: bash diff --git a/.github/actions/metadata/action.yml b/.github/actions/metadata/action.yml new file mode 100644 index 000000000000..53eff3d62c96 --- /dev/null +++ b/.github/actions/metadata/action.yml @@ -0,0 +1,156 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Gather and export useful workflow metadata information. +description: | + Gather and export metadata about the repository, Github, and any other variable information we + might want for variables or flow control in our various workflows. We centralize it here so as + to have a single point of truth. This workflow also handles checking out the correct Git reference + depending on workflow trigger and tags. This workflow is used in both CE and Ent and thus needs + to maintain compatibility in both execution contexts. + +inputs: + vault-version: + description: | + The version of vault from hashicorp/action-set-product-version. If set we'll utilize this + base version of vault to output complex vault version metadata. If unset those outputs will + not be populated. 
+ default: "" + +outputs: + compute-build: + description: A JSON encoded "runs-on" for App build worfkflows. + value: ${{ steps.workflow-metadata.outputs.compute-build }} + compute-build-compat: + description: A JSON encoded "runs-on" for App build workflows that need an older glibc to link against. + value: ${{ steps.workflow-metadata.outputs.compute-build-compat }} + compute-build-ui: + description: A JSON encoded "runs-on" for web UI build workflows. + value: ${{ steps.workflow-metadata.outputs.compute-build-ui }} + compute-test-go: + description: A JSON encoded "runs-on" for Go test workflows. + value: ${{ steps.workflow-metadata.outputs.compute-test-go }} + compute-test-ui: + description: A JSON encoded "runs-on" for web UI test workflows. + value: ${{ steps.workflow-metadata.outputs.compute-test-ui }} + compute-small: + description: A JSON encoded "runs-on" workflows that don't require optimized runners for resource usage. + value: ${{ steps.workflow-metadata.outputs.compute-small }} + go-tags: + description: The minimal set of Go tags required to build the correct edition of Vault. + value: ${{ steps.workflow-metadata.outputs.go-tags }} + is-draft: + description: Whether or not the workflow is executing in the context of a pull request draft. + value: ${{ steps.workflow-metadata.outputs.is-draft }} + is-enterprise: + description: Whether or not the workflow is executing in the context of Vault enterprise. + value: ${{ steps.workflow-metadata.outputs.is-enterprise }} + is-fork: + description: Whether or not the workflow is being triggered on a pull request that is a fork. + value: ${{ steps.workflow-metadata.outputs.is-fork }} + labels: + description: | + A JSON encoded array of pull request labels names associated with a commit SHA. If the workflow + is triggerd by a pull_request event then we'll get the label names of the pull request. If + it's triggered by any other event type we'll search for a pull request associated with the + commit SHA and return its label names. + value: ${{ steps.workflow-metadata.outputs.labels }} + vault-build-date: + description: The most recent Git commit date. + value: ${{ steps.vault-metadata.outputs.build-date }} + vault-binary-name: + description: The name of the Vault binary. + value: vault + vault-revision: + description: The most recent Git commit SHA. + value: ${{ steps.vault-metadata.outputs.vault-revision }} + vault-version: + description: The version of vault. + value: ${{ inputs.vault-version }} + vault-version-metadata: + description: The version of vault includiting edition and other metadata. + value: ${{ steps.workflow-metadata.outputs.vault-version-metadata }} + vault-version-package: + description: The version of vault formatted for Linux distro packages. + value: ${{ steps.vault-metadata.outputs.vault-version-package }} + workflow-trigger: + description: The github event type that triggered the workflow. 
+ value: ${{ steps.workflow-metadata.outputs.workflow-trigger }} + +runs: + using: composite + steps: + - if: inputs.vault-version != '' + id: vault-metadata + name: vault-metadata + env: + VAULT_VERSION: ${{ inputs.vault-version }} + shell: bash + run: | + { + echo "build-date=$(make ci-get-date)" + echo "vault-revision=$(make ci-get-revision)" + echo "vault-version-package=$(make ci-get-version-package)" + } | tee -a "$GITHUB_OUTPUT" + - id: workflow-metadata + name: workflow-metadata + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + if [ '${{ github.event_name }}' = 'pull_request' ]; then + is_draft='${{ github.event.pull_request.draft }}' + + # Determine our labels. If our event type is pull_request this is rather straightforward. If + # our event type is push (merge) we'll need to look up the pull request associated with the + # commit and get the labels. This will return the label names as an array. + labels=$(jq -rc <<< '${{ toJSON(github.event.pull_request.labels.*.name) }}') + else + is_draft='false' + + # Look up the labels for the pull request that is associated with the last commit. If + # there are none set it as a JSON encoded empty array. + if pr_number=$(gh api "/repos/${{ github.repository }}/commits/${{ github.ref }}/pulls" | jq -erc '.[0].number'); then + if ! labels=$(gh api "/repos/${{ github.repository }}/issues/${pr_number}/labels" | jq -erc '. | map(.name)'); then + labels='[]' + fi + else + labels='[]' + fi + fi + + { + echo "is-draft=${is_draft}" + echo 'is-fork=${{ github.event.pull_request.head.repo.fork && 'true' || 'false' }}' + echo "labels=${labels}" + echo "workflow-trigger=${{ github.event_name }}" + } | tee -a "$GITHUB_OUTPUT" + + # Set CE and Ent specific workflow metadata + is_enterprise='${{ contains(github.repository, 'vault-enterprise') }}' + if [ "$is_enterprise" = 'true' ]; then + { + echo 'compute-build=["self-hosted","ondemand","os=linux","disk_gb=64","type=c6a.4xlarge"]' + echo 'compute-build-compat=["self-hosted","ubuntu-20.04"]' # for older glibc compatibility, m6a.4xlarge + echo 'compute-build-ui=["self-hosted","ondemand","os=linux", "disk_gb=64", "type=c6a.2xlarge"]' + echo 'compute-test-go=["self-hosted","ondemand","os=linux","disk_gb=64","type=c6a.2xlarge"]' + echo 'compute-test-ui=["self-hosted","ondemand","os=linux","type=m6a.2xlarge"]' + echo 'compute-small=["self-hosted","linux","small"]' + echo 'go-tags=ent,enterprise' + echo 'is-enterprise=true' + echo 'vault-version-metadata=${{ inputs.vault-version }}+ent' + } | tee -a "$GITHUB_OUTPUT" + else + { + echo 'compute-build="custom-linux-medium-vault-latest"' + echo 'compute-build-compat="custom-linux-medium-vault-latest"' + echo 'compute-build-ui="custom-linux-xl-vault-latest"' + echo 'compute-test-go="custom-linux-medium-vault-latest"' + echo 'compute-test-ui="custom-linux-medium-vault-latest"' + echo 'compute-small="ubuntu-latest"' + echo 'go-tags=' + echo 'is-enterprise=false' + echo 'vault-version-metadata=${{ inputs.vault-version }}' + } | tee -a "$GITHUB_OUTPUT" + fi diff --git a/.github/actions/set-up-buf/action.yml b/.github/actions/set-up-buf/action.yml new file mode 100644 index 000000000000..a9cd3656a104 --- /dev/null +++ b/.github/actions/set-up-buf/action.yml @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up buf from Github releases +description: Set up buf from Github releases + +inputs: + destination: + description: "Where to install the buf binary (default: $HOME/bin/buf)" + type: string + default: "$HOME/bin/buf" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed buf binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed buf binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of buf + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(gh release list -R bufbuild/buf --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$RUNNER_OS" + if [ "$ARCH" = "x64" ]; then + export ARCH="x86_64" + fi + if [ "$ARCH" = "arm64" ] && [ "$OS" = "Linux" ]; then + export ARCH="aarch64" + fi + if [ "$OS" = "macOS" ]; then + export OS="Darwin" + fi + + mkdir -p tmp + gh release download "$VERSION" -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf + pushd tmp && tar -xvf buf.tgz && popd + mv tmp/buf/bin/buf "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-go/action.yml b/.github/actions/set-up-go/action.yml new file mode 100644 index 000000000000..9a80bf32f497 --- /dev/null +++ b/.github/actions/set-up-go/action.yml @@ -0,0 +1,82 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up Go with a shared module cache. +description: Set up Go with a shared module cache. + +inputs: + github-token: + description: An elevated Github token to access private modules if necessary. 
+ type: string + no-restore: + description: Whether or not to restore the Go module cache on a cache hit + type: boolean + default: false + go-version: + description: "Override .go-version" + type: string + default: "" + +outputs: + cache-key: + description: The Go modules cache key + value: ${{ steps.metadata.outputs.cache-key }} + cache-path: + description: The GOMODCACHE path + value: ${{ steps.metadata.outputs.cache-path }} + go-version: + description: "The version of Go used" + value: ${{ steps.go-version.outputs.go-version }} + +runs: + using: composite + steps: + - id: go-version + shell: bash + run: | + if [ "${{ inputs.go-version }}" = "" ]; then + echo "go-version=$(cat ./.go-version)" | tee -a "$GITHUB_OUTPUT" + else + echo "go-version=${{ inputs.go-version }}" | tee -a "$GITHUB_OUTPUT" + fi + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + with: + go-version: ${{ steps.go-version.outputs.go-version }} + cache: false # We use our own caching strategy + - id: metadata + shell: bash + run: | + { + echo "cache-path=$(go env GOMODCACHE)" + echo "cache-key=go-modules-${{ hashFiles('**/go.sum') }}" + } | tee -a "$GITHUB_OUTPUT" + - id: cache-modules + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + enableCrossOsArchive: true + lookup-only: ${{ inputs.no-restore }} + # We need to be very considerate of our caching strategy because Github only allows 10gb + # of caches per repository before it starts to evict older caches. This is usually fine + # if you only use the actions cache for caching, but we also use it for Go test time results. + # These results are used to balance our Go test groups, without which we could have + # painfully unbalanced Go test execution times. We have to ensure current caches for all + # active release branches and main do not exceed 10gb. Ideally we'd cache Go modules + # and Go build cache on a per version/platform/architecture/tag/module basis, but that + # would result in several hundred gb over all of our build workflows and release branches. + # Instead, we've chosen a middle ground approach where we share Go modules between build + # workflows but lose the Go build cache. + # We intentionally do not use partial restore keys. If we don't get an exact cache hit + # we only want to download the latest modules, not append them to a prior cache. This + # keeps cache upload time, download time, and storage size to a minimum. + path: ${{ steps.metadata.outputs.cache-path }} + key: ${{ steps.metadata.outputs.cache-key }} + - if: steps.cache-modules.outputs.cache-hit != 'true' + name: Download go modules + shell: bash + env: + GOPRIVATE: github.com/hashicorp/* + run: | + git config --global url."https://${{ inputs.github-token }}@github.com".insteadOf https://github.com + make go-mod-download + du -h -d 1 ${{ steps.metadata.outputs.cache-path }} diff --git a/.github/actions/set-up-gofumpt/action.yml b/.github/actions/set-up-gofumpt/action.yml new file mode 100644 index 000000000000..d763656fc49a --- /dev/null +++ b/.github/actions/set-up-gofumpt/action.yml @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up gofumpt from Github releases +description: Set up gofumpt from Github releases + +inputs: + destination: + description: "Where to install the gofumpt binary (default: $HOME/bin/gofumpt)" + type: string + default: "$HOME/bin/gofumpt" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed gofumpt binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed gofumpt binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of gofumpt + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(gh release list -R mvdan/gofumpt --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + gh release download "$VERSION" -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt + chmod +x gofumpt + mv gofumpt "$DESTINATION" diff --git a/.github/actions/set-up-gotestsum/action.yml b/.github/actions/set-up-gotestsum/action.yml new file mode 100644 index 000000000000..9d0147313aef --- /dev/null +++ b/.github/actions/set-up-gotestsum/action.yml @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up gotestsum from Github releases +description: Set up gotestsum from Github releases + +inputs: + destination: + description: "Where to install the gotestsum binary (default: $HOME/bin/gotestsum)" + type: string + default: "$HOME/bin/gotestsum" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed gotestsum binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed gotestsum binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of gotestsum + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(gh release list -R gotestyourself/gotestsum --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + + mkdir -p tmp + gh release download "$VERSION" -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum + pushd tmp && tar -xvf gotestsum.tgz && popd + mv tmp/gotestsum "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-misspell/action.yml b/.github/actions/set-up-misspell/action.yml new file mode 100644 index 000000000000..d6101bf07313 --- /dev/null +++ b/.github/actions/set-up-misspell/action.yml @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up misspell from Github releases +description: Set up misspell from Github releases + +inputs: + destination: + description: "Where to install the misspell binary (default: $HOME/bin/misspell)" + type: string + default: "$HOME/bin/misspell" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed misspell binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed misspell binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of misspell + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(gh release list -R golangci/misspell --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -f1) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "version=$VERSION" + echo "destination-dir=$DESTINATION_DIR" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + mkdir -p tmp + gh release download "$VERSION" -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell + pushd tmp && tar -xvf misspell.tgz && popd + mv tmp/misspell "$DESTINATION" + rm -rf tmp diff --git a/.github/actions/set-up-staticcheck/action.yml b/.github/actions/set-up-staticcheck/action.yml new file mode 100644 index 000000000000..cce079618fe2 --- /dev/null +++ b/.github/actions/set-up-staticcheck/action.yml @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +--- +name: Set up staticcheck from Github releases +description: Set up staticcheck from Github releases + +inputs: + destination: + description: "Where to install the staticcheck binary (default: $HOME/bin/staticcheck)" + type: string + default: "$HOME/bin/staticcheck" + version: + description: "The version to install (default: latest)" + type: string + default: Latest + +outputs: + destination: + description: Where the installed staticcheck binary is + value: ${{ steps.install.outputs.destination }} + destination-dir: + description: The directory where the installed staticcheck binary is + value: ${{ steps.install.outputs.destination-dir }} + version: + description: The installed version of staticcheck + value: ${{ steps.install.outputs.version }} + +runs: + using: composite + steps: + - id: install + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + VERSION=$(gh release list -R dominikh/go-tools --exclude-drafts --exclude-pre-releases | grep ${{ inputs.version }} | cut -d " " -f2) + + mkdir -p $(dirname ${{ inputs.destination }}) + DESTINATION="$(readlink -f "${{ inputs.destination }}")" + DESTINATION_DIR="$(dirname "$DESTINATION")" + echo "$DESTINATION_DIR" >> "$GITHUB_PATH" + + { + echo "destination=$DESTINATION" + echo "destination-dir=$DESTINATION_DIR" + echo "version=$VERSION" + } | tee -a "$GITHUB_OUTPUT" + + ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')" + OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')" + if [ "$ARCH" = "x64" ]; then + export ARCH="amd64" + fi + if [ "$OS" = "macos" ]; then + export OS="darwin" + fi + + mkdir -p tmp + gh release download "$VERSION" -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools + pushd tmp && tar -xvf staticcheck.tgz && popd + mv tmp/staticcheck/staticcheck "$DESTINATION" + rm -rf tmp diff --git a/.github/configs/milestone-check.json b/.github/configs/milestone-check.json deleted file mode 100644 index a06049b15398..000000000000 --- a/.github/configs/milestone-check.json +++ /dev/null @@ -1,8 +0,0 @@ -[ - { - "type": "check-milestone", - "title": "Milestone Check", - "success": "Milestone set", - "failure": "Milestone not set" - } - ] \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..81bae9acd600 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 + +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json deleted file mode 100644 index ab09a413bad3..000000000000 --- a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 3 - }, - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms
arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 5 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 3 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 5 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - } - ] -} diff --git a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json deleted file mode 100644 index ec951fdd0a18..000000000000 --- a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 3 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 4 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 5 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 3 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 4 - }, - { - "scenario": "upgrade 
backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 5 - } - ] -} diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json deleted file mode 100644 index 70e5ea1c3c24..000000000000 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - } - ] -} diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json deleted file mode 100644 index e6e9edb10f28..000000000000 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "include": [ - { - "scenario": "smoke backend:raft consul_version:1.13.4 distro:rhel seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke 
backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "smoke backend:consul consul_version:1.13.4 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 2 - }, - { - "scenario": "upgrade backend:raft consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - }, - { - "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", - "test_group": 1 - }, - { - "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-2", - "test_group": 2 - } - ] -} diff --git a/.github/scripts/changed-files.sh b/.github/scripts/changed-files.sh new file mode 100755 index 000000000000..f44c6fc26b9d --- /dev/null +++ b/.github/scripts/changed-files.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Determine what files have changed between two git references. +# +# * For pull_request event types we'll compare the merge target (base_ref) with the pull request's +# reference (ref_name), which is usually a branch name. +# * For other event types (push, workflow_call) we don't have a base_ref target to merge into, so +# instead we'll compare the two most recent commits on the branch. +# +# Write the resulting metadata to STDOUT and $GITHUB_OUTPUT if it's defined. + +event_type=$1 # GH event type (pull_request, push, workflow_call) +ref_name=$2 # branch reference that triggered the workflow +base_ref=$3 # PR branch base ref + +if [[ "$event_type" == "pull_request" ]]; then + git fetch --no-tags --prune origin "$base_ref" + head_commit="HEAD" + base_commit="origin/$base_ref" +else + git fetch --no-tags --prune origin "$ref_name" + head_commit=$(git log "origin/$ref_name" --oneline | head -1 | awk '{print $1}') + base_commit=$(git log "origin/$ref_name" --oneline | head -2 | awk 'NR==2 {print $1}') +fi + +docs_changed=false +ui_changed=false +app_changed=false + +if ! files="$(git diff "${base_commit}...${head_commit}" --name-only)"; then + echo "failed to get changed files from git" + exit 1 +fi + +for file in $(awk -F "/" '{ print $1}' <<< "$files" | uniq); do + if [[ "$file" == "changelog" ]]; then + continue + fi + + if [[ "$file" == "website" ]]; then + docs_changed=true + continue + fi + + if [[ "$file" == "ui" ]]; then + ui_changed=true + continue + fi + + # Anything that isn't either a changelog, ui, or docs change we'll consider an app change.
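+  # For example, a PR touching only website/ sets docs-changed=true above, while a change under
+  # a top-level directory like command/ or vault/ falls through to this catch-all.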
+  app_changed=true +done + +echo "app-changed=${app_changed}" +echo "docs-changed=${docs_changed}" +echo "ui-changed=${ui_changed}" +echo "files='${files}'" +if [ -n "$GITHUB_OUTPUT" ]; then + { + echo "app-changed=${app_changed}" + echo "docs-changed=${docs_changed}" + echo "ui-changed=${ui_changed}" + # Use a random delimiter for multiline strings. + # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings + delimiter="$(openssl rand -hex 8)" + echo "files<<${delimiter}" + echo "${files}" + echo "${delimiter}" + } >> "$GITHUB_OUTPUT" +fi diff --git a/.github/scripts/gh-comment.sh b/.github/scripts/gh-comment.sh new file mode 100644 index 000000000000..111ed97e0306 --- /dev/null +++ b/.github/scripts/gh-comment.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +function update_or_create_comment { + REPO=$1 + PR_NUMBER=$2 + SEARCH_KEY=$3 + BODY=$4 + + # We only want the GH bot to place one comment to report build failures, + # and if we rerun a job, that comment needs to be updated. + # Let's try to find out if the GH bot has already placed a similar comment + comment_id=$(gh api \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + --paginate \ + /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments | + jq -r --arg SEARCH_KEY "$SEARCH_KEY" '.[] | select (.body | startswith($SEARCH_KEY)) | .id') + + if [[ "$comment_id" != "" ]]; then + # update the comment with the new body + gh api \ + --method PATCH \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/hashicorp/"$REPO"/issues/comments/"$comment_id" \ + -f body="$BODY" + else + # create a comment with the new body + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments \ + -f body="$BODY" + fi +} diff --git a/.github/scripts/report-build-status.sh b/.github/scripts/report-build-status.sh new file mode 100755 index 000000000000..8b92534a2c9d --- /dev/null +++ b/.github/scripts/report-build-status.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +# All of these environment variables are required or an error will be returned. +[ "${GITHUB_TOKEN:?}" ] +[ "${PR_NUMBER:?}" ] +[ "${REPO:?}" ] +[ "${RUN_ID:?}" ] + +# Results of the build jobs +[ "${ARTIFACTS:?}" ] +[ "${TEST:?}" ] +[ "${TEST_CONTAINERS:?}" ] +[ "${UI:?}" ] + +# Build jobs +jobs=("artifacts:$ARTIFACTS" "test:$TEST" "test-containers:$TEST_CONTAINERS" "ui:$UI") + +# Sometimes failed jobs can have a result of "cancelled". Handle both. +failed_jobs=() +for job in "${jobs[@]}";do + if [[ "$job" == *"failure"* || "$job" == *"cancelled"* ]]; then + failed_jobs+=("$job") + fi +done + +# Create a comment body to set on the pull request which reports failed jobs with a URL to the +# failed workflow. +if [ ${#failed_jobs[@]} -eq 0 ]; then + new_body="Build Results: +All builds succeeded! :white_check_mark:" +else + new_body="Build Results: +Build failed for these jobs: ${failed_jobs[*]}.
Please refer to this workflow to learn more: https://github.com/hashicorp/vault/actions/runs/$RUN_ID" fi + +source ./.github/scripts/gh-comment.sh + +update_or_create_comment "$REPO" "$PR_NUMBER" "Build Results:" "$new_body" diff --git a/.github/scripts/report-ci-status.sh b/.github/scripts/report-ci-status.sh new file mode 100755 index 000000000000..39a9ca7aee87 --- /dev/null +++ b/.github/scripts/report-ci-status.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e +MAX_TESTS=10 + +# All of these environment variables are required or an error will be returned. +[ "${GITHUB_TOKEN:?}" ] +[ "${RUN_ID:?}" ] +[ "${REPO:?}" ] +[ "${PR_NUMBER:?}" ] +[ "${RESULT:?}" ] + +table_data() { + if [ -z "$TABLE_DATA" ]; then + return 0 + fi + + # Remove any rows that don't have a test name + # Only keep the test type, test package, test name, and logs column + # Remove the scroll emoji + # Remove "github.com/hashicorp/vault" from the package name + TABLE_DATA=$(echo "$TABLE_DATA" | awk -F\| '{if ($4 != " - ") { print "|" $2 "|" $3 "|" $4 "|" $7 }}' | sed -r 's/ :scroll://' | sed -r 's/github.com\/hashicorp\/vault\///') + NUM_FAILURES=$(wc -l <<< "$TABLE_DATA") + + # Check if the number of failures is greater than the maximum tests to display + # If so, limit the table to MAX_TESTS number of results + if [ "$NUM_FAILURES" -gt "$MAX_TESTS" ]; then + TABLE_DATA=$(echo "$TABLE_DATA" | head -n "$MAX_TESTS") + NUM_OTHER=$(( NUM_FAILURES - MAX_TESTS )) + TABLE_DATA="${TABLE_DATA} + +and ${NUM_OTHER} other tests" + fi + + # Add the header for the table + printf "%s" "Failures: +| Test Type | Package | Test | Logs | +| --------- | ------- | ---- | ---- | +${TABLE_DATA}" +} + +td="$(table_data)" + +case "$RESULT" in + success) + if [ -z "$td" ]; then + BODY="CI Results: +All Go tests succeeded!
:white_check_mark:" + else + BODY="CI Results: +All required Go tests succeeded but failures were detected :warning: +${td}" + fi + ;; + *) + BODY="CI Results: ${RESULT} :x: +${td}" + ;; +esac + +source ./.github/scripts/gh-comment.sh + +update_or_create_comment "$REPO" "$PR_NUMBER" "CI Results:" "$BODY" diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml new file mode 100644 index 000000000000..38d2b167ff96 --- /dev/null +++ b/.github/workflows/actionlint.yml @@ -0,0 +1,22 @@ +name: Lint GitHub Actions Workflows +on: + pull_request: + paths: + - '.github/**' + types: [opened, synchronize, reopened, ready_for_review] + +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: "Check workflow files" + uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8 + with: + # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation, so actionlint complains about them + args: "-ignore=\"invalid activity type \\\"demilestoned\\\" for \\\"pull_request\\\" Webhook event\" -ignore=\"invalid activity type \\\"milestoned\\\" for \\\"pull_request\\\" Webhook event\"" diff --git a/.github/workflows/add-hashicorp-contributed-label.yml b/.github/workflows/add-hashicorp-contributed-label.yml new file mode 100644 index 000000000000..379b8cc9c8ca --- /dev/null +++ b/.github/workflows/add-hashicorp-contributed-label.yml @@ -0,0 +1,26 @@ +name: Add HashiCorp contributed label + +# The purpose of this job is to label all HashiCorp contributed PRs, so that +# we can more easily identify community contributed PRs (anything that doesn't +# have this label). +# While it might seem like this is the 'reverse' of what we should do, GitHub +# (rightly) does not allow branches from forks to have write permissions, so +# making PRs from forks self-label themselves as community-contributed is not +# possible. 
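+# Community contributions can then be found by searching for the absence of this label, e.g.
+# with something like: gh pr list --search '-label:hashicorp-contributed-pr'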
+ +on: + # On every pull request, on every branch + pull_request: + types: [opened, synchronize, reopened] + +jobs: + add-hashicorp-contributed-label: + # Only run if this is NOT coming from a fork of hashicorp/vault (if this is not true, it's community contributed) + if: ${{ github.repository == 'hashicorp/vault' && (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) }} + runs-on: ubuntu-latest + steps: + - name: "Add label to PR" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR: ${{ github.event.pull_request.html_url }} + run: gh pr edit "$PR" --add-label 'hashicorp-contributed-pr' diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index c2f347e57081..f78464a8c8b9 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -11,7 +11,7 @@ jobs: backport-targeted-release-branch: if: github.event.pull_request.merged runs-on: ubuntu-latest - container: hashicorpdev/backport-assistant:0.3.0 + container: hashicorpdev/backport-assistant:0.3.3 steps: - name: Backport changes to targeted release branch run: | diff --git a/.github/workflows/build-artifacts-ce.yml b/.github/workflows/build-artifacts-ce.yml new file mode 100644 index 000000000000..8e6233a4036b --- /dev/null +++ b/.github/workflows/build-artifacts-ce.yml @@ -0,0 +1,241 @@ +name: ce + +# The inputs and outputs for this workflow have been carefully defined as a sort of workflow +# interface, as defined in the build.yml workflow. The inputs and outputs here must be consistent +# across the build-artifacts-ce workflow and the build-artifacts-ent workflow. + +on: + workflow_dispatch: + inputs: + build-all: + type: boolean + default: false + build-date: + type: string + required: true + checkout-ref: + type: string + default: "" + compute-build: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for build workflows + required: true + compute-build-compat: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for build workflows that need older glibc + required: true + compute-small: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for non-resource-intensive workflows + required: true + vault-revision: + type: string + required: true + vault-version: + type: string + required: true + vault-version-package: + type: string + required: true + web-ui-cache-key: + type: string + required: true + workflow_call: + inputs: + build-all: + type: boolean + default: false + build-date: + type: string + required: true + checkout-ref: + type: string + default: "" + compute-build: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for build workflows + required: true + compute-build-compat: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for build workflows that need older glibc + required: true + compute-small: + type: string # JSON encoded to support passing arrays + description: A JSON encoded "runs-on" for non-resource-intensive workflows + required: true + vault-revision: + type: string + required: true + vault-version: + type: string + required: true + vault-version-package: + type: string + required: true + web-ui-cache-key: + type: string + required: true + outputs: + testable-containers: + value: ${{ jobs.core.outputs.testable-containers }} + testable-packages: + value: ${{ jobs.core.outputs.testable-packages }} + +jobs: + # Core are the Linux builds that are officially supported and tested as part of the normal + # CI/CD pipeline. + core: + strategy: + matrix: + include: + - goos: linux + goarch: amd64 + redhat: true + - goos: linux + goarch: arm64 + redhat: false + fail-fast: true + runs-on: ${{ fromJSON(inputs.compute-build) }} + name: (${{ matrix.goos }}, ${{ matrix.goarch }}) + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/build-vault + with: + cgo-enabled: 0 + create-docker-container: true + create-packages: true + create-redhat-container: ${{ matrix.redhat }} + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + go-tags: ui + vault-binary-name: vault + vault-edition: ce + vault-version: ${{ inputs.vault-version }} + web-ui-cache-key: ${{ inputs.web-ui-cache-key }} + outputs: + # Outputs are strings so we need to encode our collection outputs as JSON. + testable-containers: | + [ + { "artifact": "${{ github.event.repository.name }}_default_linux_amd64_${{ inputs.vault-version }}_${{ inputs.vault-revision }}.docker.tar" } + ] + testable-packages: | + [ + { "sample": "build_ce_linux_amd64_deb", + "artifact": "vault_${{ inputs.vault-version-package }}-1_amd64.deb", + "edition": "ce" + }, + { "sample": "build_ce_linux_arm64_deb", + "artifact": "vault_${{ inputs.vault-version-package }}-1_arm64.deb", + "edition": "ce" + }, + { "sample": "build_ce_linux_amd64_rpm", + "artifact": "vault-${{ inputs.vault-version-package }}-1.x86_64.rpm", + "edition": "ce" + }, + { "sample": "build_ce_linux_arm64_rpm", + "artifact": "vault-${{ inputs.vault-version-package }}-1.aarch64.rpm", + "edition": "ce" + }, + { "sample": "build_ce_linux_amd64_zip", + "artifact": "vault_${{ inputs.vault-version }}_linux_amd64.zip", + "edition": "ce" + }, + { "sample": "build_ce_linux_arm64_zip", + "artifact": "vault_${{ inputs.vault-version }}_linux_arm64.zip", + "edition": "ce" + } + ] + + # Extended build targets are best-effort builds for non-Linux platforms that we create for + # convenience but are not built or tested as part of our normal CI pipeline.
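+  # To give a concrete sense of the matrix below: the goos/goarch cross product, minus the
+  # excludes and plus the includes, yields targets such as freebsd/amd64, windows/386,
+  # darwin/arm64, and linux/arm with GOARM=6.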
+ extended: + if: inputs.build-all == true + strategy: + matrix: + docker: + - false + packages: + - false + goos: + - freebsd + - netbsd + - openbsd + - solaris + - windows + goarch: + - 386 + - amd64 + - arm + exclude: + - goos: solaris + goarch: 386 + - goos: solaris + goarch: arm + - goos: windows + goarch: arm + include: + - goos: darwin + goarch: amd64 + go-tags: ui netcgo + docker: false + packages: false + - goos: darwin + goarch: arm64 + go-tags: ui netcgo + docker: false + packages: false + - goos: linux + goarch: 386 + docker: true + packages: true + - goos: linux + docker: true + goarch: arm + goarm: 6 + packages: true + fail-fast: true + name: (${{ matrix.goos }}, ${{ matrix.goarch }}${{ matrix.goarm && ' ' || '' }}${{ matrix.goarm }}) + runs-on: ${{ fromJSON(inputs.compute-build) }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/build-vault + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + create-docker-container: ${{ matrix.docker }} + create-packages: ${{ matrix.packages }} + create-redhat-container: false + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + goarm: ${{ matrix.goarm }} + go-tags: ${{ matrix.go-tags != '' && matrix.go-tags || 'ui' }} + vault-binary-name: vault + vault-edition: ce + vault-version: ${{ inputs.vault-version }} + web-ui-cache-key: ${{ inputs.web-ui-cache-key }} + + status: + if: always() + runs-on: ${{ fromJSON(inputs.compute-small) }} + permissions: + id-token: write + contents: read + needs: + - core + - extended + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.checkout-ref }} + - name: Determine status + run: | + results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}') + if ! grep -q -v -E '(failure|cancelled)' <<< "$results"; then + echo "One or more required build workflows failed: ${results}" + exit 1 + fi + exit 0 diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-oss.yml deleted file mode 100644 index c7d8dc1e6d7a..000000000000 --- a/.github/workflows/build-vault-oss.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -name: build_vault - -# This workflow is intended to be called by the build workflow for each Vault -# binary that needs to be built and packaged. The ci make targets that are -# utilized automatically determine build metadata and handle building and -# packing vault. 
- -on: - workflow_call: - inputs: - bundle-path: - required: false - type: string - cgo-enabled: - type: string - default: 0 - create-packages: - type: boolean - default: true - goos: - required: true - type: string - goarch: - required: true - type: string - go-tags: - type: string - go-version: - type: string - package-name: - type: string - default: vault - vault-version: - type: string - required: true - -jobs: - build: - runs-on: ubuntu-latest - name: Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }} - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 - with: - go-version: ${{ inputs.go-version }} - - name: Set up node and yarn - uses: actions/setup-node@v3 - with: - node-version: 14 - cache: yarn - cache-dependency-path: ui/yarn.lock - - name: Build UI - run: make ci-build-ui - - name: Build Vault - env: - CGO_ENABLED: ${{ inputs.cgo-enabled }} - GOARCH: ${{ inputs.goarch }} - GOOS: ${{ inputs.goos }} - GO_TAGS: ${{ inputs.go-tags }} - run: make ci-build - - name: Determine artifact basename - env: - GOARCH: ${{ inputs.goarch }} - GOOS: ${{ inputs.goos }} - run: echo "ARTIFACT_BASENAME=$(make ci-get-artifact-basename)" >> $GITHUB_ENV - - name: Bundle Vault - env: - BUNDLE_PATH: out/${{ env.ARTIFACT_BASENAME }}.zip - run: make ci-bundle - - uses: actions/upload-artifact@v3 - with: - name: ${{ env.ARTIFACT_BASENAME }}.zip - path: out/${{ env.ARTIFACT_BASENAME }}.zip - if-no-files-found: error - - if: ${{ inputs.create-packages }} - uses: hashicorp/actions-packaging-linux@v1 - with: - name: ${{ github.event.repository.name }} - description: Vault is a tool for secrets management, encryption as a service, and privileged access management. - arch: ${{ inputs.goarch }} - version: ${{ inputs.vault-version }} - maintainer: HashiCorp - homepage: https://github.com/hashicorp/vault - license: MPL-2.0 - binary: dist/${{ inputs.package-name }} - deb_depends: openssl - rpm_depends: openssl - config_dir: .release/linux/package/ - preinstall: .release/linux/preinst - postinstall: .release/linux/postinst - postremove: .release/linux/postrm - - if: ${{ inputs.create-packages }} - name: Determine package file names - run: | - echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV - echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV - - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.RPM_PACKAGE }} - path: out/${{ env.RPM_PACKAGE }} - if-no-files-found: error - - if: ${{ inputs.create-packages }} - uses: actions/upload-artifact@v3 - with: - name: ${{ env.DEB_PACKAGE }} - path: out/${{ env.DEB_PACKAGE }} - if-no-files-found: error diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 68c46b22eb2a..10c720ac2293 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,224 +1,400 @@ ---- name: build +# Some words of caution before modifying this workflow: + +# This file and workflow have been carefully architected to meet the following requirements: +# * Builds and tests the correct artifacts in both CE and Ent while maintaining a merge-conflict-free +# build.yml between the two repos +# * Supports multiple Github event triggers +# * Is highly optimized for cost and speed +# * Supports a variety of complex use cases + +# If you wish to modify this file/workflow, please consider: +# * That the workflow must work when triggered by pull_request, push, schedule, and +# workflow_dispatch events. +# * Merge-conflict-free compatibility between CE and Ent.
Any changes that you make here must work + # in both repository contexts. +# * There are many workflow flow control modifiers. Further details below. +# * The total number of workers and the runner size. Further details below. + +# Further details: +# * The workflow is used by the CRT system for building, notarizing, signing, and releasing +# artifacts. Whatever we do in this workflow must support building all artifacts and uploading +# them to Github in order to fulfill the CRT requirement, while also maintaining a smaller +# default build matrix for the pull requests. +# * CRT is designed to trigger a workflow called build in a workflow file called build.yml. This +# file must build the correct artifacts in CE and Ent, depending on the repository context. +# We've gone to great lengths to architect this file and workflow so that we can build and test +# the correct artifacts in each context while maintaining a merge-conflict-free file between CE +# and Ent. Any changes that you make here must work in both repository contexts. +# * The workflow must support multiple event triggers, all of which have varying event payloads +# which must be considered. If you make changes you must ensure that the workflow still works +# under normal pull_request, push, schedule, and workflow_dispatch trigger events. +# * The workflow has been highly optimized for cost and speed. If possible, it's better to add a +# step to an existing job than to create another job. Over a long time horizon a new job is often +# much more expensive than a single step in an existing job; new jobs also take up a limited number +# of our available runners. +# * Flow control in the workflow is complex in order to support many different use cases, including: +# * Only building our tier 1 supported "core" artifacts by default. +# * Only building the UI if the Go application or UI has been modified. +# * Skipping builds entirely if the commit or PR only modifies changelog or website documentation. +# * The ability to check out the HEAD reference instead of a Github merge branch reference. +# * The ability to control building all of our tier 2 supported "extended" artifacts via a +# build/all label, even if the event trigger is pull_request or, more importantly, a push. +# It's important to note that we must maintain support for building all artifacts on push +# via a pull request, even though push events aren't directly tied to pull requests. Our +# label metadata helpers are designed to handle this complexity. +# * The ability to build all of our artifacts on a scheduled cadence to ensure we don't +# accidentally regress. +# * All of these considerations, and many others, have led to the modular design we have here. +# * If you're doing something in more than one place, try to use small composite actions +# whenever possible. + on: workflow_dispatch: pull_request: + types: + - opened + - ready_for_review + - reopened + - synchronize push: branches: - main - release/** + schedule: + - cron: '05 02 * * *' # * is a special character in YAML so you have to quote this string -env: - PKG_NAME: vault +concurrency: + group: ${{ github.head_ref || github.run_id }}-build + cancel-in-progress: true jobs: - product-metadata: - runs-on: ubuntu-latest + setup: + # Setup is our entrypoint into the entire workflow. Here we gather metadata and export useful + # outputs for further use as inputs or for flow control.
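+    # As an illustration of how these outputs get consumed, downstream jobs gate themselves on
+    # them via the needs context, e.g.: if: needs.setup.outputs.app-changed == 'true'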
+ # + # Trigger the setup workflow if any of the following conditions are true: + # * The workflow was triggered by a push (merge) to the main or release branch. + # * The workflow was triggered by a pull request and the pull request is not a draft. + # * The workflow was triggered on a schedule to test building all artifacts. + if: | + github.event_name == 'push' || + github.event_name == 'schedule' || + (github.event_name == 'pull_request' && github.event.pull_request.draft == false) + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} outputs: - build-date: ${{ steps.get-metadata.outputs.build-date }} - filepath: ${{ steps.generate-metadata-file.outputs.filepath }} - go-version: ${{ steps.get-metadata.outputs.go-version }} - matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }} - package-name: ${{ steps.get-metadata.outputs.package-name }} - vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} - vault-version: ${{ steps.get-metadata.outputs.vault-version }} - vault-base-version: ${{ steps.get-metadata.outputs.vault-base-version }} + app-changed: ${{ steps.changed-files.outputs.app-changed }} + build-date: ${{ steps.metadata.outputs.vault-build-date }} + checkout-ref: ${{ steps.checkout.outputs.ref }} + compute-build: ${{ steps.metadata.outputs.compute-build }} + compute-build-compat: ${{ steps.metadata.outputs.compute-build-compat }} + compute-build-ui: ${{ steps.metadata.outputs.compute-build-ui }} + compute-small: ${{ steps.metadata.outputs.compute-small }} + docs-changed: ${{ steps.changed-files.outputs.docs-changed }} + is-draft: ${{ steps.metadata.outputs.is-draft }} + is-enterprise: ${{ steps.metadata.outputs.is-enterprise }} + is-fork: ${{ steps.metadata.outputs.is-fork }} + labels: ${{ steps.metadata.outputs.labels }} + ui-changed: ${{ steps.changed-files.outputs.ui-changed }} + vault-binary-name: ${{ steps.metadata.outputs.vault-binary-name }} + vault-revision: ${{ steps.metadata.outputs.vault-revision }} + vault-version: ${{ steps.metadata.outputs.vault-version }} + vault-version-metadata: ${{ steps.metadata.outputs.vault-version-metadata }} + vault-version-package: ${{ steps.metadata.outputs.vault-version-package }} + workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }} steps: - - uses: actions/checkout@v3 - - name: Get metadata - id: get-metadata - env: - # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected - # test group. It should be set to the highest test_group used in the - # enos-run-matrices.
- MATRIX_MAX_TEST_GROUPS: 5 - run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "package-name=${{ env.PKG_NAME }}" >> $GITHUB_OUTPUT - echo "vault-base-version=$(make ci-get-version-base)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT - - uses: hashicorp/actions-generate-metadata@v1 - id: generate-metadata-file + # Run the changed-files action to determine what Git reference we should check out + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: ./.github/actions/changed-files + id: changed-files + - uses: ./.github/actions/checkout + id: checkout # Make sure we check out correct ref after checking changed files + # Get the vault version metadata + - uses: hashicorp/actions-set-product-version@v2 + id: set-product-version with: - version: ${{ steps.get-metadata.outputs.vault-version }} - product: ${{ steps.get-metadata.outputs.package-name }} - - uses: actions/upload-artifact@v3 + checkout: false # don't override the reference we've checked out + # Gather additional metadata about our execution context + - uses: ./.github/actions/metadata + id: metadata with: - name: metadata.json - path: ${{ steps.generate-metadata-file.outputs.filepath }} - if-no-files-found: error - - build-other: - name: Build Vault Other - needs: product-metadata - strategy: - matrix: - goos: [freebsd, windows, netbsd, openbsd, solaris] - goarch: [386, amd64, arm] - exclude: - - goos: solaris - goarch: 386 - - goos: solaris - goarch: arm - - goos: windows - goarch: arm - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - create-packages: false - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit - - build-linux: - name: Build Vault Linux - needs: product-metadata - strategy: - matrix: - goos: [linux] - goarch: [arm, arm64, 386, amd64] - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit - - build-darwin: - name: Build Vault Darwin - needs: product-metadata - strategy: - matrix: - goos: [darwin] - goarch: [amd64, arm64] - fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml - with: - create-packages: false - goarch: ${{ matrix.goarch }} - goos: ${{ matrix.goos }} - go-tags: ui netcgo - go-version: ${{ needs.product-metadata.outputs.go-version }} - package-name: ${{ needs.product-metadata.outputs.package-name }} - vault-version: ${{ needs.product-metadata.outputs.vault-version }} - secrets: inherit + vault-version: ${{ steps.set-product-version.outputs.product-version }} + - uses: ./.github/actions/set-up-go + # Make sure all required Go modules are cached at this point. We don't want all of the Go + # tests and build jobs to download modules and race to upload them to the cache. 
+ name: Ensure Go modules are cached + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Don't download them on a cache hit during setup, just make sure they're cached before + # subsequent workflows are run. + no-restore: true - build-docker: - name: Build Vault Docker - needs: - - product-metadata - - build-linux - runs-on: ubuntu-latest - strategy: - matrix: - arch: [arm, arm64, 386, amd64] - env: - repo: ${{ github.event.repository.name }} - version: ${{ needs.product-metadata.outputs.vault-version }} + ui: + # The Web UI workflow is a prerequisite workflow for building our artifacts. If the application + # or UI changed, we'll trigger this workflow but only build it if we don't already have the asset + # in our Github cache. + # + # Ensure the Web UI is built if any of the following conditions are true: + # * The workflow was triggered by a push (merge) to the main or release branch. + # * The workflow was triggered on a schedule to test building all artifacts. + # * The `build/all` label is present on either a pull request or on the pull request that created + # a merge commit + # * The workflow was triggered by a pull request, the pull request is not a draft, and the UI + # or app changed. + if: | + needs.setup.outputs.workflow-trigger == 'push' || + needs.setup.outputs.workflow-trigger == 'schedule' || + contains(fromJSON(needs.setup.outputs.labels), 'build/all') || + ( + needs.setup.outputs.workflow-trigger == 'pull_request' && + needs.setup.outputs.is-draft == 'false' && + ( + needs.setup.outputs.ui-changed == 'true' || + needs.setup.outputs.app-changed == 'true' + ) + ) + needs: setup + runs-on: ${{ fromJSON(needs.setup.outputs.compute-build-ui) }} + outputs: + cache-key: ui-${{ steps.ui-hash.outputs.ui-hash }} steps: - - uses: actions/checkout@v3 - - uses: hashicorp/actions-docker-build@v1 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ needs.setup.outputs.checkout-ref }} + - name: Get UI hash + id: ui-hash + run: echo "ui-hash=$(git ls-tree HEAD ui --object-only)" | tee -a "$GITHUB_OUTPUT" + - name: Set up UI asset cache + id: cache-ui-assets + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - version: ${{ env.version }} - target: default - arch: ${{ matrix.arch }} - zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.vault-version }}_linux_${{ matrix.arch }}.zip - tags: | - docker.io/hashicorp/${{ env.repo }}:${{ env.version }} - public.ecr.aws/hashicorp/${{ env.repo }}:${{ env.version }} + enableCrossOsArchive: true + lookup-only: true + path: http/web_ui + # Only restore the UI asset cache if we haven't modified anything in the ui directory. + # Never do a partial restore of the web_ui if we don't get a cache hit. + key: ui-${{ steps.ui-hash.outputs.ui-hash }} + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Set up node and yarn + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version-file: ui/package.json + cache: yarn + cache-dependency-path: ui/yarn.lock + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Build UI + run: make ci-build-ui - build-ubi: - name: Build Vault Red Hat UBI + artifacts: + # Artifacts is where we'll build the various Vault binaries and package them into their respective + # Zip bundles, RPM and Deb packages, and container images. After we've packaged them we upload + # them to the Github Actions artifacts storage and execute our Enos test scenarios.
If the + # workflow is triggered by a push to main, CRT will take these artifacts from Github and perform + # all of the necessary notarizing and signing before uploading them to Artifactory. + # + # Build the artifacts if any of the following conditions are true: + # + # * The workflow was triggered on a schedule to test building all artifacts. + # * The Go app was changed. + # * The build/all label is present on a pull request or push. + if: | + needs.setup.outputs.workflow-trigger == 'schedule' || + needs.setup.outputs.app-changed == 'true' || + contains(fromJSON(needs.setup.outputs.labels), 'build/all') needs: - - product-metadata - - build-linux - runs-on: ubuntu-latest - strategy: - matrix: - arch: [amd64] - env: - repo: ${{ github.event.repository.name }} - version: ${{ needs.product-metadata.outputs.vault-version }} - steps: - - uses: actions/checkout@v2 - - uses: hashicorp/actions-docker-build@v1 - with: - version: ${{ env.version }} - target: ubi - arch: ${{ matrix.arch }} - zip_artifact_name: ${{ env.PKG_NAME }}_${{ needs.product-metadata.outputs.vault-version }}_linux_${{ matrix.arch }}.zip - redhat_tag: quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ env.version }}-ubi + - setup + - ui # Don't build and test artifacts unless the UI build was triggered. + # The following is the only line that should be different between CE and Ent. + uses: ./.github/workflows/build-artifacts-ce.yml # Make sure we use the correct workflow. + with: + # The inputs defined here must be supported in both the build-artifacts-ce and + # build-artifacts-ent workflows. The implementations should seek to keep a compatible interface. + build-all: ${{ contains(fromJSON(needs.setup.outputs.labels), 'build/all') || needs.setup.outputs.workflow-trigger == 'schedule' }} + build-date: ${{ needs.setup.outputs.build-date }} + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + compute-build: ${{ needs.setup.outputs.compute-build }} + compute-build-compat: ${{ needs.setup.outputs.compute-build-compat }} + compute-small: ${{ needs.setup.outputs.compute-small }} + vault-revision: ${{ needs.setup.outputs.vault-revision }} + vault-version: ${{ needs.setup.outputs.vault-version }} + vault-version-package: ${{ needs.setup.outputs.vault-version-package }} + web-ui-cache-key: ${{ needs.ui.outputs.cache-key }} + secrets: inherit test: - name: Test ${{ matrix.build-artifact-name }} - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" + # Test all of the testable artifacts if our repo isn't a fork. We don't test when the PR is + # created from a fork because secrets are not passed in and they are required.
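+  # Each matrix entry below comes from the artifacts workflow's testable-packages output, e.g.
+  # { "sample": "build_ce_linux_amd64_zip", "artifact": "vault_<version>_linux_amd64.zip", "edition": "ce" }
+  # where <version> stands in for the computed Vault version.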
+ if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: test ${{ matrix.artifact }} needs: - - product-metadata - - build-linux - uses: ./.github/workflows/enos-run.yml + - setup + - ui + - artifacts + uses: ./.github/workflows/test-run-enos-scenario-matrix.yml strategy: fail-fast: false matrix: - include: - - matrix-file-name: build-github-oss-linux-amd64-zip - build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - matrix-file-name: build-github-oss-linux-arm64-zip - build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip + include: ${{ fromJSON(needs.artifacts.outputs.testable-packages) }} with: - build-artifact-name: ${{ matrix.build-artifact-name }} - matrix-file-name: ${{ matrix.matrix-file-name }} - matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }} - vault-edition: oss - vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + build-artifact-name: ${{ matrix.artifact }} + sample-max: 1 + sample-name: ${{ matrix.sample }} + ssh-key-name: ${{ github.event.repository.name }}-ci-ssh-key + vault-edition: ${{ matrix.edition }} + vault-revision: ${{ needs.setup.outputs.vault-revision }} + vault-version: ${{ needs.setup.outputs.vault-version-metadata }} secrets: inherit - test-docker-k8s: - name: Test Docker K8s - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" + test-containers: + # Test all of the testable containers if our repo isn't a fork. We don't test when the PR is + # created from a fork because secrets are not passed in and they are required (for now). + if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: test ${{ matrix.artifact }} needs: - - product-metadata - - build-docker + - setup + - ui + - artifacts uses: ./.github/workflows/enos-run-k8s.yml + strategy: + fail-fast: false + matrix: + include: ${{ fromJSON(needs.artifacts.outputs.testable-containers) }} with: - artifact-build-date: ${{ needs.product-metadata.outputs.build-date }} - artifact-name: ${{ github.event.repository.name }}_default_linux_amd64_${{ needs.product-metadata.outputs.vault-version }}_${{ needs.product-metadata.outputs.vault-revision }}.docker.tar - artifact-revision: ${{ needs.product-metadata.outputs.vault-revision }} - artifact-version: ${{ needs.product-metadata.outputs.vault-version }} + artifact-build-date: ${{ needs.setup.outputs.build-date }} + artifact-name: ${{ matrix.artifact }} + artifact-revision: ${{ needs.setup.outputs.vault-revision }} + artifact-version: ${{ needs.setup.outputs.vault-version-metadata }} secrets: inherit completed-successfully: - runs-on: ubuntu-latest + # build/completed-successfully is the only build workflow that must pass in order to merge + # a pull request. This workflow is used to determine the overall status of all the prior + # workflows and to notify various different channels of success or failure. As part of this + # workflow we create the necessary build metadata that is required for the CRT build system. 
+ # + # Our logic here mirrors that of the setup job, as setup and this job are the only two + # that must always be triggered together. + if: | + always() && + ( + github.event_name == 'push' || + github.event_name == 'schedule' || + (github.event_name == 'pull_request' && github.event.pull_request.draft == false) + ) + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} + permissions: write-all # Ensure we have id-token:write access for vault-auth. needs: - - build-other - - build-linux - - build-darwin - - build-docker - - build-ubi + - setup + - ui + - artifacts - test - - test-docker-k8s + - test-containers steps: - - run: echo "All build and test workflows have succeeded!" + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - id: status + name: Determine status + run: | + results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}') + if ! grep -q -v -E '(failure|cancelled)' <<< "$results"; then + result="failed" + else + result="success" + fi + { + echo "result=${result}" + echo "results=${results}" + } | tee -a "$GITHUB_OUTPUT" + - if: needs.setup.outputs.is-enterprise == 'true' + id: vault-auth + name: Vault Authenticate + run: vault-auth + - if: needs.setup.outputs.is-enterprise == 'true' + id: secrets + name: Fetch Vault Secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/github_actions_notifications_bot token | SLACK_BOT_TOKEN; + - id: slackbot-token + run: + echo "slackbot-token=${{ needs.setup.outputs.is-enterprise != 'true' && secrets.SLACK_BOT_TOKEN || steps.secrets.outputs.SLACK_BOT_TOKEN }}" >> "$GITHUB_OUTPUT" + - if: | + needs.setup.outputs.workflow-trigger == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name && + (github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') + name: Create or update a build status comment on the pull request + env: + ARTIFACTS: ${{ needs.artifacts.result }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + REPO: ${{ github.event.repository.name }} + RUN_ID: ${{ github.run_id }} + TEST: ${{ needs.test.result }} + TEST_CONTAINERS: ${{ needs.test-containers.result }} + UI: ${{ needs.ui.result }} + run: ./.github/scripts/report-build-status.sh + - name: Notify build failures in Slack + if: | + always() && + steps.status.outputs.result != 'success' && + (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) + uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 + env: + SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }} + with: + channel-id: "C05AABYEA9Y" # Notify #feed-vault-ci-official + # channel-id: "C05Q4D5V89W" # Notify #test-vault-ci-slack-integration + payload: | + { + "text": "${{ github.repository }} build failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: ${{ github.repository }} build failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ needs.setup.result != 'failure' && ':white_check_mark:' || ':x:' }} Setup\n${{ needs.ui.result
!= 'failure' && ':white_check_mark:' || ':x:' }} Build UI\n${{ needs.artifacts.result != 'failure' && ':white_check_mark:' || ':x:' }} Build Vault Artifacts\n${{ needs.test.result != 'failure' && ':white_check_mark:' || ':x:' }} Enos package test scenarios\n${{ needs.test-containers.result != 'failure' && ':white_check_mark:' || ':x:' }} Enos container test scenarios" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - uses: hashicorp/actions-generate-metadata@v1 + if: needs.artifacts.result == 'success' # create build metadata if we successfully created artifacts + id: generate-metadata-file + with: + version: ${{ needs.setup.outputs.vault-version-metadata }} + product: ${{ needs.setup.outputs.vault-binary-name }} + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + if: steps.generate-metadata-file.outcome == 'success' # upload our metadata if we created it + with: + name: metadata.json + path: ${{ steps.generate-metadata-file.outputs.filepath }} + if-no-files-found: error + - if: always() && steps.status.outputs.result != 'success' + name: Check for failed status + run: | + echo "One or more required build workflows failed: ${{ steps.status.outputs.results }}" + exit 1 diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index d8a380270b26..034a8657ee28 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -18,7 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # by default the checkout action doesn't checkout all branches @@ -56,9 +56,9 @@ jobs: # Else, we found some toolchain files. Let's make sure the contents are correct. if ! grep -q 'release-note:change' "$toolchain_files" || ! grep -q '^core: Bump Go version to' "$toolchain_files"; then echo "Invalid format for changelog. Expected format:" - echo "```release-note:change" + echo '```release-note:change' echo "core: Bump Go version to x.y.z." - echo "```" + echo '```' exit 1 else echo "Found Go toolchain changelog entry in PR!" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000000..cd3b8fe544e3 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,380 @@ +name: CI +on: + pull_request: + # The default types for pull_request are [opened, synchronize, reopened]. This is insufficient + # for our needs, since we're skipping stuff on PRs in draft mode. By adding the ready_for_review + # type, when a draft PR is marked ready, we run everything, including the stuff we'd have + # skipped up until now.
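The two completion jobs this patch adds (the build workflow's `Determine status` step above and ci.yml's `tests-completed` below) both reduce the `needs` context to a single pass/fail signal. Here is a minimal local sketch of the `grep`-based variant, assuming the single-line JSON array shape that `toJSON(needs.*.result)` produces; nothing here is wired to CI:

```sh
# Sample input standing in for toJSON(needs.*.result).
results='["success", "skipped", "failure"]'
results=$(tr -d '\n' <<< "$results") # collapse to one line so grep sees one record
# grep -v keeps lines WITHOUT failure/cancelled; if nothing survives (grep -q
# exits non-zero), at least one needed job failed or was cancelled.
if ! grep -q -v -E '(failure|cancelled)' <<< "$results"; then
  result="failed"
else
  result="success"
fi
echo "result=${result}" # prints result=failed for the sample input
```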
+ types: [opened, synchronize, reopened, ready_for_review] + push: + branches: + - main + - release/** + workflow_dispatch: + +concurrency: + group: ${{ github.head_ref || github.run_id }}-ci + cancel-in-progress: true + +jobs: + setup: + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} + outputs: + app-changed: ${{ steps.changed-files.outputs.app-changed }} + checkout-ref: ${{ steps.checkout.outputs.ref }} + compute-small: ${{ steps.metadata.outputs.compute-small }} + compute-test-go: ${{ steps.metadata.outputs.compute-test-go }} + compute-test-ui: ${{ steps.metadata.outputs.compute-test-ui }} + go-tags: ${{ steps.metadata.outputs.go-tags }} + is-draft: ${{ steps.metadata.outputs.is-draft }} + is-enterprise: ${{ steps.metadata.outputs.is-enterprise }} + is-fork: ${{ steps.metadata.outputs.is-fork }} + labels: ${{ steps.metadata.outputs.labels }} + ui-changed: ${{ steps.changed-files.outputs.ui-changed }} + workflow-trigger: ${{ steps.metadata.outputs.workflow-trigger }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: ./.github/actions/changed-files + id: changed-files + - uses: ./.github/actions/checkout + id: checkout # make sure we check out correct ref after checking changed files + - uses: ./.github/actions/metadata + id: metadata + - name: Ensure Go modules are cached + uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + no-restore: true # don't download them on a cache hit + + test-go: + # Run Go tests if the vault app changed + if: needs.setup.outputs.app-changed == 'true' + name: Run Go tests + needs: setup + uses: ./.github/workflows/test-go.yml + with: + # The regular Go tests use an extra runner to execute the binary-dependent tests. We isolate + # them there so that the other tests aren't slowed down waiting for a binary build. + binary-tests: true + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock' + name: standard + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-standard + total-runners: 16 + secrets: inherit + + test-go-testonly: + # Run Go tests tagged with "testonly" if the vault app changed + if: needs.setup.outputs.app-changed == 'true' + name: Run Go tests tagged with testonly + needs: setup + uses: ./.github/workflows/test-go.yml + with: + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,testonly' + name: testonly + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + testonly: true + test-timing-cache-enabled: false + total-runners: 2 # test runners cannot be less than 2 + secrets: inherit + + test-go-race: + # Run Go tests with the data race detector enabled if the vault app changed and the PR is out of + # draft mode.
+ if: needs.setup.outputs.app-changed == 'true' && needs.setup.outputs.is-draft == 'false' + name: Run Go tests with data race detection + needs: setup + uses: ./.github/workflows/test-go.yml + with: + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + env-vars: | + { + "VAULT_CI_GO_TEST_RACE": 1 + } + extra-flags: '-race' + name: race + go-arch: amd64 + go-tags: ${{ needs.setup.outputs.go-tags }} + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-race + total-runners: 16 + secrets: inherit + + test-go-fips: + name: Run Go tests with FIPS configuration + # Run the Go tests with fips if the vault app changed, we're in the context of Vault Enterprise, + # and the trigger is a merge to main or releases/* or the 'fips' label is present on a PR. + if: | + needs.setup.outputs.app-changed == 'true' && + needs.setup.outputs.is-enterprise == 'true' && + (needs.setup.outputs.workflow-trigger == 'push' || contains(needs.setup.outputs.labels, 'fips')) + needs: setup + uses: ./.github/workflows/test-go.yml + with: + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + env-vars: | + { + "GOEXPERIMENT": "boringcrypto" + } + name: fips + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,cgo,fips,fips_140_2' + runs-on: ${{ needs.setup.outputs.compute-test-go }} + runs-on-small: ${{ needs.setup.outputs.compute-small }} + test-timing-cache-key: go-test-timing-fips + total-runners: 16 + secrets: inherit + + test-ui: + name: Test UI + # Run the UI tests if our UI has changed, or a 'ui' label is present, or the workflow + # was triggered by a merge to main or releases/*. + if: | + needs.setup.outputs.ui-changed == 'true' || + needs.setup.outputs.workflow-trigger == 'push' || + contains(github.event.pull_request.labels.*.name, 'ui') + needs: setup + permissions: + id-token: write + contents: read + runs-on: ${{ fromJSON(needs.setup.outputs.compute-test-ui) }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ needs.setup.outputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Setup node.js without caching to allow running npm install -g yarn (next step) + - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version-file: './ui/package.json' + - run: npm install -g yarn + # Setup node.js with caching using the yarn.lock file + - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version-file: './ui/package.json' + cache: yarn + cache-dependency-path: ui/yarn.lock + - uses: browser-actions/setup-chrome@9683066f53b47e92c4104e1bd5535aff208c3530 # v1.6.2 + - name: ui-dependencies + working-directory: ./ui + run: | + yarn install --frozen-lockfile + npm rebuild node-sass + - if: needs.setup.outputs.is-enterprise == 'true' + id: vault-auth + name: Authenticate to Vault + run: vault-auth + - if: needs.setup.outputs.is-enterprise == 'true' + id: secrets + name: Fetch secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/hashicorp/vault-enterprise/github-token username-and-token | PRIVATE_REPO_GITHUB_TOKEN; + kv/data/github/hashicorp/vault-enterprise/license license_1 | VAULT_LICENSE; + - if:
needs.setup.outputs.is-enterprise == 'true' + name: Set up Git + run: git config --global url."https://${{ steps.secrets.outputs.PRIVATE_REPO_GITHUB_TOKEN }}@github.com".insteadOf https://github.com + - uses: ./.github/actions/install-external-tools + - name: build-go-dev + run: | + rm -rf ./pkg + mkdir ./pkg + make prep dev + - name: test-ui + env: + VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }} + run: | + export PATH="${PWD}/bin:${PATH}" + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn ${{ needs.setup.outputs.is-enterprise == 'true' && 'test' || 'test:oss' }} + - if: always() + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: test-results-ui + path: ui/test-results + - if: always() + uses: test-summary/action@032c8a9cec6aaa3c20228112cae6ca10a3b29336 # v2.3 + with: + paths: "ui/test-results/qunit/results.xml" + show: "fail" + + tests-completed: + needs: + - setup + - test-go + - test-go-testonly + - test-go-race + - test-go-fips + - test-ui + if: always() + runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }} + permissions: write-all # Ensure we have id-token:write access for vault-auth. + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + # Determine the overall status of our required test jobs. + - name: Determine status + id: status + run: | + # Determine the overall status of the job. We allow fips and race tests to fail so we + # don't consider their result here. + # + # Encode the needs context into JSON, filter out unrequired workflows, shape the result + # into a more useful schema. Determine the overall status by comparing the total number of + # successful results with the number of required jobs. + if results=$(jq -rec 'del(.["test-go-fips"], .["test-go-race"]) as $required + | $required | keys as $jobs + | reduce $jobs[] as $job ([]; . + [{job: $job}+$required[$job]])' <<< '${{ toJSON(needs) }}' + ); then + # Determine if all of our required jobs have succeeded. + if jq -rec 'length as $expected + | [.[] | select((.result == "success") or (.result == "skipped"))] | length as $got + | $expected == $got' <<< "$results"; then + msg="All required test jobs succeeded!" + result="success" + else + msg="One or more required test jobs failed!" 
+ result="failed" + fi + else + msg="Failed to decode and filter test results" + result="failed" + results="''" + fi + { + echo "msg=${msg}" + echo "result=${result}" + echo "results<> "$GITHUB_OUTPUT" + - if: | + always() && + needs.setup.outputs.workflow-trigger == 'push' && + ( + needs.test-go.result == 'failure' || + needs.test-go-race.result == 'failure' || + needs.test-go-race.outputs.data-race-result == 'failure' || + needs.test-go-testonly.result == 'failure' || + needs.test-ui.result == 'failure' + ) + name: Notify build failures in Slack + uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 + env: + SLACK_BOT_TOKEN: ${{ steps.slackbot-token.outputs.slackbot-token }} + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing + payload: | + { + "text": "CE test failures on ${{ github.ref_name }}", + "text": "${{ github.repository }} build failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: ${{ github.repository }} test failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ needs.test-go.result != 'failure' && ':white_check_mark:' || ':x:' }} Go tests\n${{ needs.test-go-race.result != 'failure' && ':white_check_mark:' || ':x:' }} Go race tests\n\t\t${{ needs.test-go-race.outputs.data-race-result != 'success' && ':x: Data race detected' || ':white_check_mark: No race detected' }}\n${{ needs.test-go-testonly.result != 'failure' && ':white_check_mark:' || ':x:' }} Go testonly tests\n${{ needs.test-ui.result != 'failure' && ':white_check_mark:' || ':x:' }} UI tests" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + # Only create the PR summary if it's a pull request and it is not a fork as we need access + # to secrets. + - if: ${{ needs.setup.outputs.is-fork == 'false' }} + name: Download failure summaries + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + with: + pattern: failure-summary-*.md + path: failure-summaries + merge-multiple: true + - if: ${{ needs.setup.outputs.is-fork == 'false' }} + id: prepare-failure-summary + name: Prepare failure summary + run: | + # Sort all of the summary table rows and push them to a temp file. + temp_file_name=temp-$(date +%s) + cat failure-summaries/*.md | sort >> "$temp_file_name" + + # If there are test failures, present them in a format of a GitHub Markdown table. + if [ -s "$temp_file_name" ]; then + # Here we create the headings for the summary table + { + echo "| Test Type | Package | Test | Elapsed | Runner Index | Logs |" + echo "| --------- | ------- | ---- | ------- | ------------ | ---- |" + cat "$temp_file_name" + } >> "$GITHUB_STEP_SUMMARY" + else + if [ "${{ steps.status.outputs.result }}" == 'success' ]; then + echo "### All required Go tests passed! 
:white_check_mark:" >> "$GITHUB_STEP_SUMMARY" + fi + fi + { + echo 'table-test-results<> $GITHUB_OUTPUT - echo "aws role set to 'arn:aws:iam::505811019928:role/github_actions-vault_enterprise_ci'" - else - echo "aws_role=arn:aws:iam::040730498200:role/github_actions-vault_ci" >> $GITHUB_OUTPUT - echo "aws role set to 'arn:aws:iam::040730498200:role/github_actions-vault_ci'" - fi - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} - aws-region: us-east-1 - role-to-assume: ${{ steps.prepare_for_terraform.outputs.aws_role }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - - name: Init Terraform - id: tf_init - run: | - terraform -chdir=enos/ci/bootstrap init - - name: Plan Terraform - id: tf_plan - run: | - terraform -chdir=enos/ci/bootstrap plan - - name: Apply Terraform - if: ${{ github.ref == 'refs/heads/main' }} - id: tf_apply - run: | - terraform -chdir=enos/ci/bootstrap apply -auto-approve diff --git a/.github/workflows/enos-fmt.yml b/.github/workflows/enos-fmt.yml deleted file mode 100644 index 298b2dc185f1..000000000000 --- a/.github/workflows/enos-fmt.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: enos_fmt - -on: - pull_request: - paths: - - enos/** - -jobs: - fmt_check: - # Only run this workflow on pull requests from hashicorp/vault branches - # as we need secrets to install enos. - if: "! github.event.pull_request.head.repo.fork" - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - steps: - - uses: actions/checkout@v3 - - uses: hashicorp/setup-terraform@v2 - with: - terraform_wrapper: false - - uses: hashicorp/action-setup-enos@v1 - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: check formatting - working-directory: ./enos - run: make check-fmt diff --git a/.github/workflows/enos-lint.yml b/.github/workflows/enos-lint.yml new file mode 100644 index 000000000000..39d4a620f377 --- /dev/null +++ b/.github/workflows/enos-lint.yml @@ -0,0 +1,54 @@ +--- +name: lint-enos + +on: + pull_request: + paths: + - enos/** + +jobs: + metadata: + # Only run this workflow on pull requests from hashicorp/vault branches + # as we need secrets to install enos. + if: "! 
github.event.pull_request.head.repo.fork" + name: metadata + runs-on: ubuntu-latest + outputs: + runs-on: ${{ steps.metadata.outputs.runs-on }} + version: ${{ steps.metadata.outputs.version }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - id: set-product-version + uses: hashicorp/actions-set-product-version@v2 + - id: metadata + run: | + echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT" + github_repository="${{ github.repository }}" + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" + else + echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" + fi + + lint: + needs: metadata + runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }} + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + terraform_version: "1.7.5" # Pin until 1.8.x crash has been resolved + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Ensure shellcheck is available for linting + run: which shellcheck || (sudo apt update && sudo apt install -y shellcheck) + - name: lint + working-directory: ./enos + env: + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} + run: make lint diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml index 5f655b750faa..3ce6b6ab5372 100644 --- a/.github/workflows/enos-release-testing-oss.yml +++ b/.github/workflows/enos-release-testing-oss.yml @@ -11,50 +11,62 @@ jobs: if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }} runs-on: ubuntu-latest outputs: - matrix-test-group: ${{ steps.get-metadata.outputs.matrix-test-group }} - vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} - vault-version: ${{ steps.get-metadata.outputs.vault-version }} + vault-revision: ${{ github.event.client_payload.payload.sha }} + vault-version: ${{ github.event.client_payload.payload.version }} + vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: # Check out the repository at the same Git SHA that was used to create # the artifacts to get the correct metadata. ref: ${{ github.event.client_payload.payload.sha }} - id: get-metadata env: - # MATRIX_MAX_TEST_GROUPS is required to determine the randomly selected - # test group. It should be set to the highest test_group used in the - # enos-run-matrices. 
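The lint workflow's `metadata` job above picks a runner pool from the repository name with a shell parameter expansion before emitting it as a JSON-encoded output. A standalone sketch of that selection, runnable outside Actions (the repository value is a stand-in):

```sh
# GITHUB_REPOSITORY is assumed to look like "owner/name" in Actions.
GITHUB_REPOSITORY="hashicorp/vault"
# ${VAR##*/} strips everything up to and including the last slash.
if [ "${GITHUB_REPOSITORY##*/}" == "vault-enterprise" ]; then
  # A self-hosted pool is addressed with a JSON array of labels...
  echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]'
else
  # ...while a single hosted runner label is a JSON string; fromJSON() accepts both.
  echo 'runs-on="custom-linux-xl-vault-latest"'
fi
```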
- MATRIX_MAX_TEST_GROUPS: 2 + VAULT_VERSION: ${{ github.event.client_payload.payload.version }} run: | - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT + echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT" + - name: Release Artifact Info + run: | + # shellcheck disable=SC2129 + echo "__Product:__ ${{ github.event.client_payload.payload.product }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Version:__ ${{ github.event.client_payload.payload.version }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Commit:__ ${{ github.event.client_payload.payload.sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Build Workflow](https://github.com/${{github.event.client_payload.payload.org}}/${{github.event.client_payload.payload.repo}}/actions/runs/${{github.event.client_payload.payload.buildworkflowid}})" >> "$GITHUB_STEP_SUMMARY" test: name: Test ${{ matrix.build-artifact-name }} if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }} needs: product-metadata - uses: ./.github/workflows/enos-run.yml + uses: ./.github/workflows/test-run-enos-scenario-matrix.yml strategy: fail-fast: false matrix: include: - - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-amd64-zip + - sample-name: release_ce_linux_amd64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb + - sample-name: release_ce_linux_arm64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb + - sample-name: release_ce_linux_amd64_rpm + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm + - sample-name: release_ce_linux_arm64_rpm + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm + - sample-name: release_ce_linux_amd64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - matrix-file-name: enos_release_testing_oss-artifactory-oss-linux-arm64-zip + - sample-name: release_ce_linux_arm64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip with: build-artifact-name: ${{ matrix.build-artifact-name }} - matrix-file-name: ${{ matrix.matrix-file-name }} - matrix-test-group: ${{ needs.product-metadata.outputs.matrix-test-group }} - vault-edition: oss + sample-max: 2 + sample-name: ${{ matrix.sample-name }} vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} secrets: inherit save-metadata: runs-on: linux + if: always() needs: test steps: - name: Persist metadata - uses: hashicorp/actions-persist-metadata@v1 + uses: hashicorp/actions-persist-metadata@v2 diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml index e306966c1abe..440eb87ad930 100644 --- a/.github/workflows/enos-run-k8s.yml +++ b/.github/workflows/enos-run-k8s.yml @@ -31,38 +31,39 @@ jobs: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set up Terraform - uses: hashicorp/setup-terraform@v2 + uses: hashicorp/setup-terraform@v3 with: # the Terraform wrapper will break Terraform execution in Enos because # it changes the 
output to text when we expect it to be JSON. terraform_wrapper: false + terraform_version: "1.7.5" # Pin until 1.8.x crash has been resolved - name: Set up Enos uses: hashicorp/action-setup-enos@v1 with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Download Docker Image id: download - uses: actions/download-artifact@v3 + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 with: name: ${{ inputs.artifact-name }} path: ./enos/support/downloads - name: Prepare for scenario execution env: - IS_ENT: ${{ startsWith(env.ARTIFACT_NAME, 'vault-enterprise' ) }} + IS_ENT: ${{ contains(env.ARTIFACT_NAME, 'vault-enterprise' ) }} run: | mkdir -p ./enos/support/terraform-plugin-cache - if ${IS_ENT} == true; then + if [ "$IS_ENT" == true ]; then echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - echo "edition=ent" >> $GITHUB_ENV + echo "edition=ent" >> "$GITHUB_ENV" echo "edition set to 'ent'" - echo "image_repo=hashicorp/vault-enterprise" >> $GITHUB_ENV + echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault-enterprise'" else - echo "edition=oss" >> $GITHUB_ENV - echo "edition set to 'oss'" - echo "image_repo=hashicorp/vault" >> $GITHUB_ENV + echo "edition=ce" >> "$GITHUB_ENV" + echo "edition set to 'ce'" + echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault'" fi - name: Run Enos scenario diff --git a/.github/workflows/enos-run.yml b/.github/workflows/enos-run.yml deleted file mode 100644 index 476d1ebd3c04..000000000000 --- a/.github/workflows/enos-run.yml +++ /dev/null @@ -1,159 +0,0 @@ ---- -name: enos - -on: - # Only trigger this working using workflow_call. This workflow requires many - # secrets that must be inherited from the caller workflow. - workflow_call: - inputs: - # The name of the artifact that we're going to use for testing. This should - # match exactly to build artifacts uploaded to Github and Artifactory. - build-artifact-name: - required: true - type: string - # The base name of the file in ./github/enos-run-matrices that we use to - # determine which scenarios to run for the build artifact. - # - # They are named in the format of: - # $caller_workflow_name-$artifact_source-$vault_edition-$platform-$arch-$packing_type - # - # Where each are: - # caller_workflow_name: the Github Actions workflow that is calling - # this one - # artifact_source: where we're getting the artifact from. Either - # "github" or "artifactory" - # vault_edition: which edition of vault that we're testing. e.g. "oss" - # or "ent" - # platform: the vault binary target platform, e.g. "linux" or "macos" - # arch: the vault binary target architecture, e.g. "arm64" or "amd64" - # packing_type: how vault binary is packaged, e.g. "zip", "deb", "rpm" - # - # Examples: - # build-github-oss-linux-amd64-zip - matrix-file-name: - required: true - type: string - # The test group we want to run. This corresponds to the test_group attribute - # defined in the enos-run-matrices files. - matrix-test-group: - default: 0 - type: string - runs-on: - # NOTE: The value should be JSON encoded as that's the only way we can - # pass arrays with workflow_call. - type: string - required: false - default: '"ubuntu-latest"' - ssh-key-name: - type: string - default: enos-ci-ssh-key - # Which edition of Vault we're using. e.g. 
"oss", "ent", "ent.hsm.fips1402" - vault-edition: - required: true - type: string - # The Git commit SHA used as the revision when building vault - vault-revision: - required: true - type: string - -jobs: - metadata: - runs-on: ${{ fromJSON(inputs.runs-on) }} - outputs: - build-date: ${{ steps.metadata.outputs.build-date }} - matrix: ${{ steps.metadata.outputs.matrix }} - version: ${{ steps.metadata.outputs.version }} - version-minor: ${{ steps.metadata.outputs.matrix }} - env: - # Pass the vault edition as VAULT_METADATA so the CI make targets can create - # values that consider the edition. - VAULT_METADATA: ${{ inputs.vault-edition }} - # Pass in the matrix and matrix group for filtering - MATRIX_FILE: ./.github/enos-run-matrices/${{ inputs.matrix-file-name }}.json - MATRIX_TEST_GROUP: ${{ inputs.matrix-test-group }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ inputs.vault-revision }} - - id: metadata - run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "version=$(make ci-get-version)" >> $GITHUB_OUTPUT - filtered=$(make ci-filter-matrix) - echo "matrix=$(echo $filtered)}" >> $GITHUB_OUTPUT - - # Run the Enos test scenarios - run: - needs: metadata - strategy: - fail-fast: false # don't fail as that can skip required cleanup steps for jobs - matrix: ${{ fromJson(needs.metadata.outputs.matrix) }} - runs-on: ubuntu-latest - env: - GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - # Pass in enos variables - ENOS_VAR_aws_region: ${{ matrix.aws_region }} - ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }} - ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem - ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} - ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }} - ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }} - ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache - ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} - ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} - ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} - ENOS_VAR_vault_bundle_path: ./support/downloads/${{ inputs.build-artifact-name }} - ENOS_VAR_vault_license_path: ./support/vault.hclic - steps: - - uses: actions/checkout@v3 - - uses: hashicorp/setup-terraform@v2 - with: - # the Terraform wrapper will break Terraform execution in Enos because - # it changes the output to text when we expect it to be JSON. 
- terraform_wrapper: false - - uses: aws-actions/configure-aws-credentials@v1-node16 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ matrix.aws_region }} - role-to-assume: ${{ secrets.AWS_ROLE_ARN }} - role-skip-session-tagging: true - role-duration-seconds: 3600 - - uses: hashicorp/action-setup-enos@v1 - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: Prepare scenario dependencies - run: | - mkdir -p ./enos/support/terraform-plugin-cache - echo "${{ secrets.ENOS_CI_SSH_KEY }}" > ./enos/support/private_key.pem - chmod 600 ./enos/support/private_key.pem - - if: contains(inputs.matrix-file-name, 'github') - uses: actions/download-artifact@v3 - with: - name: ${{ inputs.build-artifact-name }} - path: ./enos/support/downloads - - if: contains(inputs.matrix-file-name, 'ent') - name: Configure Vault license - run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - - name: Run Enos scenario - id: run - # Continue once and retry to handle occasional blips when creating - # infrastructure. - continue-on-error: true - run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Retry Enos scenario if necessary - id: run_retry - if: steps.run.outcome == 'failure' - run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Ensure scenario has been destroyed - if: ${{ always() }} - # With Enos version 0.0.11 the destroy step returns an error if the infrastructure - # is already destroyed by enos run. So temporarily setting it to continue on error in GHA - continue-on-error: true - run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} - - name: Clean up Enos runtime directories - if: ${{ always() }} - run: | - rm -rf /tmp/enos* - rm -rf ./enos/support - rm -rf ./enos/.enos diff --git a/.github/workflows/goversion-checker.yml b/.github/workflows/goversion-checker.yml deleted file mode 100644 index 71ed31b65e5f..000000000000 --- a/.github/workflows/goversion-checker.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check Go version - -on: - pull_request: - types: [opened, synchronize] - -jobs: - go-version-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - - name: Verify go versions in tree are consistent with one another - run: | - GOVER=$(cat .go-version) - EXPECTED="docker.mirror.hashicorp.services/cimg/go:$GOVER" - GOT=$(yq .references.environment.GO_IMAGE .circleci/config/executors/@executors.yml) - if [ "$EXPECTED" != "$GOT" ]; then - echo "version mismatch, .go-version has '$GOVER' and circleci config uses '$GOT'" - exit 1 - fi \ No newline at end of file diff --git a/.github/workflows/milestone-checker.yml b/.github/workflows/milestone-checker.yml index 77ff50b8cf1f..294b58576492 100644 --- a/.github/workflows/milestone-checker.yml +++ b/.github/workflows/milestone-checker.yml @@ -5,7 +5,8 @@ name: Check Milestone on: pull_request: - types: [opened, synchronize, labeled, unlabeled] + # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation + types: [opened, synchronize, labeled, unlabeled, milestoned, demilestoned] # Runs on PRs to main and release branches branches: - main @@ -14,20 +15,11 @@ on: jobs: # checks that a milestone entry is present for a PR milestone-check: - # If there a `pr/no-milestone` label we ignore 
this check - if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-milestone')" + # If there is a `pr/no-milestone` label, or this comes from a fork (community contributor) we ignore this check + if: ${{ ((github.repository == 'hashicorp/vault' || github.repository == 'hashicorp/vault-enterprise') + && (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name)) + && (!contains(github.event.pull_request.labels.*.name, 'pr/no-milestone')) }} runs-on: ubuntu-latest steps: - - name: Checkout Actions - uses: actions/checkout@v2 - with: - repository: "grafana/grafana-github-actions" - path: ./actions - ref: main - - name: Install Actions - run: npm install --production --prefix ./actions - - name: Run PR Checks - uses: ./actions/pr-checks - with: - token: ${{secrets.GITHUB_TOKEN}} - configPath: configs/milestone-check \ No newline at end of file + - name: Check milestone + run: ${{ github.event.pull_request.milestone != null }} diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml index 4e03b9761ba4..9dedca7fbc28 100644 --- a/.github/workflows/oss.yml +++ b/.github/workflows/oss.yml @@ -19,9 +19,9 @@ jobs: runs-on: ubuntu-latest steps: - if: github.event.pull_request != null - uses: actions/checkout@v3 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - if: github.event.pull_request != null - uses: dorny/paths-filter@v2 + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: changes with: # derived from CODEOWNERS @@ -58,17 +58,17 @@ jobs: - 'ui/**' - name: "Default to core board" - run: echo "PROJECT=170" >> $GITHUB_ENV + run: echo "PROJECT=170" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.cryptosec == 'true' - run: echo "PROJECT=172" >> $GITHUB_ENV + run: echo "PROJECT=172" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ecosystem == 'true' - run: echo "PROJECT=169" >> $GITHUB_ENV + run: echo "PROJECT=169" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.devex == 'true' - run: echo "PROJECT=176" >> $GITHUB_ENV + run: echo "PROJECT=176" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' - run: echo "PROJECT=171" >> $GITHUB_ENV + run: echo "PROJECT=171" >> "$GITHUB_ENV" - - uses: actions/add-to-project@v0.3.0 + - uses: actions/add-to-project@9bfe908f2eaa7ba10340b31e314148fcfe6a2458 # v1.0.1 with: project-url: https://github.com/orgs/hashicorp/projects/${{ env.PROJECT }} github-token: ${{ secrets.TRIAGE_GITHUB_TOKEN }} @@ -125,4 +125,4 @@ jobs: # ) { # deletedItemId # } - # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true \ No newline at end of file + # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true diff --git a/.github/workflows/plugin-update-check.yml b/.github/workflows/plugin-update-check.yml new file mode 100644 index 000000000000..c1a083af4c98 --- /dev/null +++ b/.github/workflows/plugin-update-check.yml @@ -0,0 +1,115 @@ +name: Plugin update check +run-name: ${{ inputs.repo }} update check + +on: + workflow_dispatch: + inputs: + repo: + type: string + description: 'The owner and repository name as per the github.repository context property.' + required: true + plugin_branch: + type: string + description: 'The name of the plugin branch.' 
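Looking back at the milestone-checker hunk above: the replacement step is a one-liner whose entire body is an expression. A local model of why that works, with a hypothetical stand-in variable; Actions interpolates `${{ github.event.pull_request.milestone != null }}` to the literal word `true` or `false` before the shell starts, so the step simply executes the `true`/`false` builtin and the builtin's exit status passes or fails the check:

```sh
milestone="" # hypothetical stand-in: empty means the PR has no milestone
if [ -n "$milestone" ]; then step_body="true"; else step_body="false"; fi
if "$step_body"; then
  echo "milestone present: check passes"
else
  echo "no milestone: check fails" # exits non-zero in CI, like `run: false`
fi
```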
+ required: true + +jobs: + plugin-update-check: + runs-on: ubuntu-latest + env: + PLUGIN_REPO: "${{inputs.repo}}" + PLUGIN_BRANCH: "${{inputs.plugin_branch}}" + VAULT_BRANCH: "auto-plugin-update/${{inputs.repo}}/${{inputs.plugin_branch}}" + RUN_ID: "${{github.run_id}}" + steps: + - run: echo "Branch $PLUGIN_BRANCH of $PLUGIN_REPO" + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + # We don't use the default token so that checks are executed on the resulting PR + # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + + - name: update plugin + run: | + go get "github.com/$PLUGIN_REPO@$PLUGIN_BRANCH" + go mod tidy + + - name: detect changes + id: changes + run: | + echo "count=$(git status --porcelain=v1 2>/dev/null | wc -l)" >> "$GITHUB_OUTPUT" + + - name: commit/push + if: steps.changes.outputs.count > 0 + run: | + git config user.name hc-github-team-secure-vault-ecosystem + git config user.email hc-github-team-secure-vault-ecosystem@users.noreply.github.com + git add . + git commit -m "Automated dependency upgrades" + git push -f origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Open pull request if needed + id: pr + if: steps.changes.outputs.count > 0 + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + # Only open a PR if the branch is not attached to an existing one + run: | + PR=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number') + + if [ -z "$PR" ]; then + gh pr create \ + --head "$VAULT_BRANCH" \ + --title "[DO NOT MERGE]: $PLUGIN_REPO Automated plugin update check" \ + --body "Updates $PLUGIN_REPO to verify vault CI. Full log: https://github.com/hashicorp/vault/actions/runs/$RUN_ID" + + echo "vault_pr_num=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number')" >> "$GITHUB_OUTPUT" + echo "vault_pr_url=$(gh pr list --head "$VAULT_BRANCH" --json url -q '.[0].url')" >> "$GITHUB_OUTPUT" + else + echo "Pull request already exists, won't create a new one." 
+ fi + + - name: Add labels to Vault CI check PR + if: steps.changes.outputs.count > 0 + env: + # this is a different token to the one we have been using that should + # allow us to add labels + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + continue-on-error: true + run: | + if [ -z "${{ steps.pr.outputs.vault_pr_url }}" ]; then + echo "error: no vault PR found" + exit 1 + fi + + gh pr edit "${{ steps.pr.outputs.vault_pr_num }}" \ + --add-label "dependencies,pr/no-changelog,pr/no-milestone" \ + --repo hashicorp/vault + + - name: Comment on plugin PR + if: steps.changes.outputs.count > 0 + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + run: | + # get Plugin PR number + plugin_pr_num=$(gh pr list --head "$PLUGIN_BRANCH" --json number --repo "$PLUGIN_REPO" -q '.[0].number') + + if [ -z "$plugin_pr_num" ]; then + echo "error: no plugin PR found" + exit 1 + fi + + if [ -z "${{ steps.pr.outputs.vault_pr_url }}" ]; then + echo "error: no vault PR found" + exit 1 + fi + + # make a comment on the plugin repo's PR + gh pr comment "$plugin_pr_num" \ + --body "Vault CI check PR: ${{ steps.pr.outputs.vault_pr_url }}" \ + --repo "$PLUGIN_REPO" diff --git a/.github/workflows/plugin-update.yml b/.github/workflows/plugin-update.yml new file mode 100644 index 000000000000..e2ea8c9ab0d3 --- /dev/null +++ b/.github/workflows/plugin-update.yml @@ -0,0 +1,106 @@ +name: Plugin update +run-name: Update ${{ inputs.plugin }} to v${{ inputs.version }} + +on: + workflow_dispatch: + inputs: + plugin: + description: 'Full name of the plugin, e.g., vault-plugin-auth-kubernetes' + required: true + type: string + version: + description: 'Version of the plugin with *NO* "v", e.g., 1.2.3' + required: true + type: string + +jobs: + plugin-update: + runs-on: ubuntu-latest + env: + VAULT_BRANCH: "update/${{ inputs.plugin }}/v${{ inputs.version }}" + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + # We don't use the default token so that checks are executed on the resulting PR + # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow + token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + + - name: update plugin + run: | + go get "github.com/hashicorp/${{ inputs.plugin }}@v${{ inputs.version }}" + go mod tidy + + - name: detect changes + run: | + count=$(git status --porcelain=v1 2>/dev/null | wc -l) + if [ "$count" -eq 0 ]; then + echo "::error::no updates were made for ${{ inputs.plugin }} with tag v${{ inputs.version }}" + exit 1 + fi + + - name: commit/push + run: | + git config user.name hc-github-team-secure-vault-ecosystem + git config user.email hc-github-team-secure-vault-ecosystem@users.noreply.github.com + git add go.mod go.sum + git commit -m "Automated dependency upgrades" + git push -f origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Open pull request if needed + id: pr + env: + GITHUB_TOKEN: ${{secrets.ELEVATED_GITHUB_TOKEN}} + # Only open a PR if the branch is not attached to an existing one + run: | + PR=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number') + + if [ -z "$PR" ]; then + gh pr create \ + --head "$VAULT_BRANCH" \ + --reviewer "${{ github.actor }}" \ + --title "Update ${{ inputs.plugin }} to v${{ inputs.version }}" \ + --body "This PR was generated by a GitHub 
Action. Full log: https://github.com/hashicorp/vault/actions/runs/${{ github.run_id }}" + + echo "vault_pr_num=$(gh pr list --head "$VAULT_BRANCH" --json number -q '.[0].number')" >> "$GITHUB_OUTPUT" + echo "vault_pr_url=$(gh pr list --head "$VAULT_BRANCH" --json url -q '.[0].url')" >> "$GITHUB_OUTPUT" + else + echo "::notice::Pull request $PR already exists, won't create a new one." + fi + + - name: Add changelog + if: steps.pr.outputs.vault_pr_num != '' + run: | + PLUGIN="${{ inputs.plugin }}" + + # plugin type is one of auth/secrets/database + PLUGIN_TYPE=$(echo "$PLUGIN" | awk -F- '{print $3}') + echo "::debug::plugin type: $PLUGIN_TYPE" + + # plugin service is the rest of the repo name + PLUGIN_SERVICE=$(echo "$PLUGIN" | cut -d- -f 4-) + echo "::debug::plugin service: $PLUGIN_SERVICE" + + echo "\`\`\`release-note:change + ${PLUGIN_TYPE}/${PLUGIN_SERVICE}: Update plugin to v${{ inputs.version }} + \`\`\`" > "changelog/${{ steps.pr.outputs.vault_pr_num }}.txt" + + git add changelog/ + git commit -m "Add changelog" + git push origin ${{ github.ref_name }}:"$VAULT_BRANCH" + + - name: Add labels to Vault PR + if: steps.pr.outputs.vault_pr_num != '' + env: + # this is a different token to the one we have been using that should + # allow us to add labels + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + continue-on-error: true + run: | + gh pr edit "${{ steps.pr.outputs.vault_pr_num }}" \ + --add-label "dependencies" \ + --repo hashicorp/vault diff --git a/.github/workflows/remove-labels.yml b/.github/workflows/remove-labels.yml index 7531e9fdacb9..014b6752af7a 100644 --- a/.github/workflows/remove-labels.yml +++ b/.github/workflows/remove-labels.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Remove triaging labels from closed issues and PRs - uses: actions-ecosystem/action-remove-labels@v1 + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 with: labels: | waiting-for-response \ No newline at end of file diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 5d61d8af333a..9b8872e8aeb9 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -1,5 +1,10 @@ name: Security Scan +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + on: push: branches: [main] @@ -10,55 +15,59 @@ on: jobs: scan: - runs-on: - labels: ['linux', 'large'] - if: ${{ github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core' }} + runs-on: ${{ fromJSON(vars.RUNNER_XL) }} + # The first check ensures this doesn't run on community-contributed PRs, whose authors + # won't have the permissions to run this job.
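Worth noting on the deleted guard above: with two distinct names, `actor != A || actor != B` is a tautology (every actor differs from at least one of the two), so the OR form never skipped anybody; the rewrite below combines its checks with `&&`. A quick shell demonstration:

```sh
for actor in 'dependabot[bot]' 'hc-github-team-secure-vault-core' 'regular-user'; do
  if [ "$actor" != 'dependabot[bot]' ] || [ "$actor" != 'hc-github-team-secure-vault-core' ]; then
    echo "OR-guard admits  $actor" # prints for every actor, including the bots
  fi
  if [ "$actor" != 'dependabot[bot]' ] && [ "$actor" != 'hc-github-team-secure-vault-core' ]; then
    echo "AND-guard admits $actor" # only regular-user prints this
  fi
done
```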
+ if: ${{ (github.repository != 'hashicorp/vault' || (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name)) + && (github.actor != 'dependabot[bot]') && ( github.actor != 'hc-github-team-secure-vault-core') }} + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 with: - go-version: 1.18 + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0 with: python-version: 3.x - name: Clone Security Scanner repo - uses: actions/checkout@v3 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: repository: hashicorp/security-scanner token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }} path: security-scanner - ref: 2526c196a28bb367b1ac6c997ff48e9ebf06834f + ref: main - name: Install dependencies shell: bash env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - mkdir $HOME/.bin - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep + mkdir "$HOME/.bin" + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep" go build -o scan-plugin-semgrep . - mv scan-plugin-semgrep $HOME/.bin - - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql + mv scan-plugin-semgrep "$HOME/.bin" + + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql" go build -o scan-plugin-codeql . - mv scan-plugin-codeql $HOME/.bin - + mv scan-plugin-codeql "$HOME/.bin" + # Semgrep - python3 -m pip install semgrep - + python3 -m pip install semgrep==1.45.0 + # CodeQL LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1) gh release download --repo https://github.com/github/codeql-action --pattern codeql-bundle-linux64.tar.gz "$LATEST" - tar xf codeql-bundle-linux64.tar.gz -C $HOME/.bin - + tar xf codeql-bundle-linux64.tar.gz -C "$HOME/.bin" + # Add to PATH - echo "$HOME/.bin" >> $GITHUB_PATH - echo "$HOME/.bin/codeql" >> $GITHUB_PATH + echo "$HOME/.bin" >> "$GITHUB_PATH" + echo "$HOME/.bin/codeql" >> "$GITHUB_PATH" - name: Scan id: scan @@ -69,15 +78,15 @@ jobs: #SEMGREP_BASELINE_REF: ${{ github.base_ref }} with: repository: "$PWD" + cache-build: true + cache-go-modules: false - name: SARIF Output shell: bash - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cat results.sarif - name: Upload SARIF file - uses: github/codeql-action/upload-sarif@v2 + uses: github/codeql-action/upload-sarif@c4fb451437765abf5018c6fbf22cce1a7da1e5cc # codeql-bundle-v2.17.1 with: sarif_file: results.sarif diff --git a/.github/workflows/stable-website.yaml b/.github/workflows/stable-website.yaml index fdd6da27f9d6..e18a02ffbbff 100644 --- a/.github/workflows/stable-website.yaml +++ b/.github/workflows/stable-website.yaml @@ -3,6 +3,11 @@ on: types: - closed +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + jobs: stable_website_cherry_pick: if: github.event.pull_request.merged && contains(github.event.pull_request.labels.*.name, 'docs-cherrypick') @@ -10,7 +15,7 @@ jobs: name: Cherry pick to stable-website branch steps: - name: Checkout - uses: 
actions/checkout@v2 + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 with: ref: stable-website - run: | diff --git a/.github/workflows/test-acc-dockeronly-nightly.yml b/.github/workflows/test-acc-dockeronly-nightly.yml new file mode 100644 index 000000000000..a3c6e484d072 --- /dev/null +++ b/.github/workflows/test-acc-dockeronly-nightly.yml @@ -0,0 +1,36 @@ +name: test-go-acceptance-nightly + +on: + # Change to nightly cadence once API-credential-requiring tests are added to the jobs + workflow_dispatch: + +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +# Currently the jobs here are only for acceptance tests that have no dependencies except for Docker +jobs: + plugins-database: + uses: ./.github/workflows/test-run-acc-tests-for-path.yml + strategy: + matrix: + name: [mongodb, mysql, postgresql] + with: + name: plugins-database-${{ matrix.name }} + path: plugins/database/${{ matrix.name }} + + external: + uses: ./.github/workflows/test-run-acc-tests-for-path.yml + strategy: + matrix: + name: [api, identity, token] + with: + name: external-${{ matrix.name }} + path: vault/external_tests/${{ matrix.name }} + + # Suggestions and tips for adding more acceptance test jobs: + # - the job name is up to you, but it should be derived from the path that the tests are found in + # - for instance, "plugins-database" is a job for acceptance tests in the plugins/database path + # - the path will be used with go test wildcards, but don't include the preceding "./" or following "/..." + # - the name parameter is used to construct the log artifact's name, make it something that is related to the path diff --git a/.github/workflows/test-ci-bootstrap.yml b/.github/workflows/test-ci-bootstrap.yml new file mode 100644 index 000000000000..0a0222b56799 --- /dev/null +++ b/.github/workflows/test-ci-bootstrap.yml @@ -0,0 +1,58 @@ +name: test-ci-bootstrap + +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + branches: + - main + paths: + - enos/ci/** + - .github/workflows/test-ci-bootstrap.yml + push: + branches: + - main + paths: + - enos/ci/** + - .github/workflows/test-ci-bootstrap.yml + +jobs: + bootstrap-ci: + runs-on: ubuntu-latest + env: + TF_WORKSPACE: "${{ github.event.repository.name }}-ci-enos-bootstrap" + TF_VAR_repository: ${{ github.event.repository.name }} + TF_VAR_aws_ssh_public_key: ${{ secrets.SSH_KEY_PUBLIC_CI }} + TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Set up Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: "1.7.5" # Pin until 1.8.x crash has been resolved + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Init Terraform + id: tf_init + run: | + terraform -chdir=enos/ci/bootstrap init + - name: Plan Terraform + id: tf_plan + run: | + terraform -chdir=enos/ci/bootstrap plan + - name: Apply Terraform 
+ if: ${{ github.ref == 'refs/heads/main' }} + id: tf_apply + run: | + terraform -chdir=enos/ci/bootstrap apply -auto-approve diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml new file mode 100644 index 000000000000..c94d28fb4a53 --- /dev/null +++ b/.github/workflows/test-ci-cleanup.yml @@ -0,0 +1,88 @@ +name: test-ci-cleanup +on: + schedule: + # * is a special character in YAML so you have to quote this string + - cron: '05 02 * * *' + +jobs: + setup: + runs-on: ubuntu-latest + outputs: + regions: ${{steps.setup.outputs.regions}} + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Get all regions + id: setup + run: | + echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> "$GITHUB_OUTPUT" + + aws-nuke: + needs: setup + runs-on: ubuntu-latest + container: + image: rebuy/aws-nuke + options: + --user root + -t + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} + TIME_LIMIT: "72h" + timeout-minutes: 60 + steps: + - name: Configure AWS credentials + id: aws-configure + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + mask-aws-account-id: false + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - name: Configure + run: | + cp enos/ci/aws-nuke.yml . + sed -i "s/ACCOUNT_NUM/${{ steps.aws-configure.outputs.aws-account-id }}/g" aws-nuke.yml + sed -i "s/TIME_LIMIT/${TIME_LIMIT}/g" aws-nuke.yml + # We don't care if cleanup succeeds or fails, because dependencies be dependencies; + # we'll fail on actually actionable things in the quota check step afterwards.
+ - name: Clean up abandoned resources + # Filter STDERR because it's super noisy about things we don't have access to + run: | + aws-nuke -c aws-nuke.yml -q --no-dry-run --force 2>/tmp/aws-nuke-error.log || true + + check-quotas: + needs: [ setup, aws-nuke ] + runs-on: ubuntu-latest + container: + image: jantman/awslimitchecker + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID_CI }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY_CI }} + strategy: + matrix: + region: ${{ fromJSON(needs.setup.outputs.regions) }} + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + # Currently just checking VPC limits across all regions; can add more checks here in the future + - name: Check AWS Quotas + run: awslimitchecker -S "VPC" -r ${{matrix.region}} diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml new file mode 100644 index 000000000000..40009f1d84cc --- /dev/null +++ b/.github/workflows/test-enos-scenario-ui.yml @@ -0,0 +1,151 @@ +--- +name: Vault UI Tests + +# cancel existing runs of the same workflow on the same ref +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.ref }} + cancel-in-progress: true + +on: + workflow_call: + inputs: + test_filter: + type: string + description: "A filter to limit the ui tests to.
Will be appended to the ember test command as '-f='" + required: false + storage_backend: + description: "The storage backend to use, either 'raft' or 'consul'" + required: true + default: raft + type: choice + options: + - raft + - consul + +jobs: + get-metadata: + name: Get metadata + runs-on: ubuntu-latest + outputs: + runs-on: ${{ steps.get-metadata.outputs.runs-on }} + vault_edition: ${{ steps.get-metadata.outputs.vault_edition }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - id: get-metadata + env: + IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }} + run: | + if [ "$IS_ENT" == true ]; then + echo "detected vault_edition=ent" + echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> "$GITHUB_OUTPUT" + echo "vault_edition=ent" >> "$GITHUB_OUTPUT" + else + echo "detected vault_edition=oss" + echo "runs-on=\"custom-linux-xl-vault-latest\"" >> "$GITHUB_OUTPUT" + echo "vault_edition=oss" >> "$GITHUB_OUTPUT" + fi + + run-ui-tests: + name: Run UI Tests + needs: get-metadata + runs-on: ${{ fromJSON(needs.get-metadata.outputs.runs-on) }} + timeout-minutes: 90 + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Pass in enos variables + ENOS_VAR_aws_region: us-east-1 + ENOS_VAR_aws_ssh_keypair_name: ${{ github.event.repository.name }}-ci-ssh-key + ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_license_path: ./support/vault.hclic + GOPRIVATE: github.com/hashicorp + steps: + - name: Checkout + uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Set Up Git + run: git config --global url."https://${{ secrets.elevated_github_token }}:@github.com".insteadOf "https://github.com" + - name: Set Up Node + uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + with: + node-version-file: './ui/package.json' + - name: Set Up Terraform + uses: hashicorp/setup-terraform@v3 + with: + cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} + terraform_wrapper: false + terraform_version: "1.7.5" # Pin until 1.8.x crash has been resolved + - name: Prepare scenario dependencies + run: | + mkdir -p ./enos/support/terraform-plugin-cache + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem + chmod 600 ./enos/support/private_key.pem + - name: Set Up Vault Enterprise License + if: contains(github.event.repository.name, 'ent') + run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + - name: Check Chrome Installed + id: chrome-check + run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> "$GITHUB_OUTPUT" + - name: Install Chrome Dependencies + if: steps.chrome-check.outputs.chrome-version == 'not-installed' + run: | + sudo apt update + sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2 + - name: Install Chrome + if: steps.chrome-check.outputs.chrome-version == 'not-installed' + uses: browser-actions/setup-chrome@9683066f53b47e92c4104e1bd5535aff208c3530 # v1.6.2 + - name: Installed Chrome Version + run: | + echo 
"Installed Chrome Version = [$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null)]" + - name: Configure AWS credentials from Test account + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Set Up Cluster + id: setup_cluster + env: + ENOS_VAR_ui_run_tests: false + # Continue once and retry to handle occasional blips when creating infrastructure. + continue-on-error: true + run: enos scenario launch --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Retry Set Up Cluster + id: setup_cluster_retry + if: steps.setup_cluster.outcome == 'failure' + env: + ENOS_VAR_ui_run_tests: false + run: enos scenario launch --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Run UI Tests + id: run_ui_tests + env: + ENOS_VAR_ui_test_filter: "${{ inputs.test_filter }}" + run: enos scenario run --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Ensure scenario has been destroyed + if: ${{ always() }} + run: enos scenario destroy --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Clean up Enos runtime directories + if: ${{ always() }} + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/.enos diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml new file mode 100644 index 000000000000..d4d24923cb32 --- /dev/null +++ b/.github/workflows/test-go.yml @@ -0,0 +1,685 @@ +on: + workflow_call: + inputs: + go-arch: + description: The execution architecture (arm, amd64, etc.) + required: true + type: string + total-runners: + description: Number of runners to use for executing non-binary tests. + required: true + type: string + binary-tests: + description: Whether to run the binary tests. + required: false + default: false + type: boolean + env-vars: + description: A map of environment variables as JSON. + required: false + type: string + default: '{}' + extra-flags: + description: A space-separated list of additional build flags. + required: false + type: string + default: '' + runs-on: + description: An expression indicating which kind of runners to use Go testing jobs. + required: false + type: string + default: '"ubuntu-latest"' + runs-on-small: + description: An expression indicating which kind of runners to use for small computing jobs. + required: false + type: string + default: '"ubuntu-latest"' + go-tags: + description: A comma-separated list of additional build tags to consider satisfied during the build. + required: false + type: string + name: + description: | + A unique identifier to use for labeling artifacts and workflows. It is commonly used to + specify context, e.g: fips, race, testonly, standard. 
+ required: true + type: string + go-test-parallelism: + description: The parallelism parameter for Go tests + required: false + default: 20 + type: number + timeout-minutes: + description: The maximum number of minutes that this workflow should run + required: false + default: 60 + type: number + testonly: + description: Whether to run the tests tagged with testonly. + required: false + default: false + type: boolean + test-timing-cache-enabled: + description: Cache the gotestsum test timing data. + required: false + default: true + type: boolean + test-timing-cache-key: + description: The cache key to use for gotestsum test timing data. + required: false + default: go-test-reports + type: string + checkout-ref: + description: The ref to use for checkout. + required: false + default: ${{ github.ref }} + type: string + outputs: + data-race-output: + description: A textual output of any data race detector failures + value: ${{ jobs.status.outputs.data-race-output }} + data-race-result: + description: Whether or not there were any data races detected + value: ${{ jobs.status.outputs.data-race-result }} + +env: ${{ fromJSON(inputs.env-vars) }} + +jobs: + test-matrix: + permissions: + id-token: write # Note: this permission is explicitly required for Vault auth + contents: read + runs-on: ${{ fromJSON(inputs.runs-on-small) }} + outputs: + go-test-dir: ${{ steps.metadata.outputs.go-test-dir }} + matrix: ${{ steps.build.outputs.matrix }} + matrix_ids: ${{ steps.build.outputs.matrix_ids }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + name: Set up metadata + run: echo "go-test-dir=test-results/go-test" | tee -a "$GITHUB_OUTPUT" + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - uses: ./.github/actions/set-up-gotestsum + - run: mkdir -p ${{ steps.metadata.outputs.go-test-dir }} + - uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + if: inputs.test-timing-cache-enabled + with: + path: ${{ steps.metadata.outputs.go-test-dir }} + key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }} + restore-keys: | + ${{ inputs.test-timing-cache-key }}- + - name: Sanitize timing files + id: 
sanitize-timing-files + run: | + # Prune invalid timing files + find '${{ steps.metadata.outputs.go-test-dir }}' -mindepth 1 -type f -name "*.json" -exec sh -c ' + file="$1"; + jq . "$file" || rm "$file" + ' shell {} \; > /dev/null 2>&1 + - name: Build matrix excluding binary, integration, and testonly tests + id: build-non-binary + if: ${{ !inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + # testonly tests need an additional build tag, but we exclude them here anyway for clarity + ( + make all-packages | grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files '${{ steps.metadata.outputs.go-test-dir }}/*.json' > matrix.json + ) + - name: Build matrix for tests tagged with testonly + if: ${{ inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + set -exo pipefail + # enable glob expansion + shopt -s nullglob + # testonly-tagged tests need an additional tag to be included; + # we also run some extra tests for sanity checking with the testonly build tag + ( + go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./command/*testonly* ./vault/ | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files '${{ steps.metadata.outputs.go-test-dir }}/*.json' > matrix.json + ) + # disable glob expansion + shopt -u nullglob + - name: Capture list of binary tests + if: inputs.binary-tests + id: list-binary-tests + run: | + LIST="$(make all-packages | grep "_binary" | xargs)" + echo "list=$LIST" >> "$GITHUB_OUTPUT" + - name: Build complete matrix + id: build + run: | + set -exo pipefail + matrix_file="matrix.json" + if [ "${{ inputs.binary-tests }}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then + export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}" + jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \ + '.include += [{ + "id": $BINARY_INDEX, + "estimatedRuntime": "N/A", + "packages": $BINARY, + "description": "partition $BINARY_INDEX - binary test packages" + }]' matrix.json > new-matrix.json + matrix_file="new-matrix.json" + fi + # convert the json to a map keyed by id + ( + echo -n "matrix=" + jq -c \ + '.include | map( { (.id|tostring): .
} ) | add' "$matrix_file" + ) | tee -a "$GITHUB_OUTPUT" + # extract an array of ids from the json + ( + echo -n "matrix_ids=" + jq -c \ + '[ .include[].id | tostring ]' "$matrix_file" + ) | tee -a "$GITHUB_OUTPUT" + + test-go: + needs: test-matrix + permissions: + actions: read + contents: read + id-token: write # Note: this permission is explicitly required for Vault auth + runs-on: ${{ fromJSON(inputs.runs-on) }} + strategy: + fail-fast: false + matrix: + id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }} + env: + GOPRIVATE: github.com/hashicorp/* + TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }} + outputs: + go-test-results-download-pattern: ${{ steps.metadata.outputs.go-test-results-download-pattern }} + data-race-log-download-pattern: ${{ steps.metadata.outputs.data-race-log-download-pattern }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + name: Set up metadata + run: | + # Metadata variables that are used throughout the workflow + # Example comments assume: + # - needs.test-matrix.outputs.go-test-dir == test-results/go-test + # - inputs.name == testonly + # - matrix.id == 1 + name='${{ inputs.name }}-${{ matrix.id }}' # testonly-1 + go_test_dir='${{ needs.test-matrix.outputs.go-test-dir }}' # test-results/go-test + test_results_dir="$(dirname "$go_test_dir")" # test-results + go_test_dir_absolute="$(pwd)/${go_test_dir}" # /home/runner/work/vault/vault/test-results/go-test + go_test_log_dir="${go_test_dir}/logs" # test-results/go-test/logs + go_test_log_dir_absolute="${go_test_dir_absolute}/logs" # /home/runner/work/vault/vault/test-results/go-test/logs + go_test_log_archive_name="test-logs-${name}.tar" # test-logs-testonly-1.tar + go_test_results_upload_key="${test_results_dir}-${name}" # test-results/go-test-testonly-1 + go_test_results_download_pattern="${test_results_dir}-${{ inputs.name }}-*" # test-results/go-test-testonly-* + gotestsum_results_prefix="results" # results + gotestsum_junitfile=${go_test_dir}/${gotestsum_results_prefix}-${name}.xml # test-results/go-test/results-testonly-1.xml + gotestsum_jsonfile=${go_test_dir}/${gotestsum_results_prefix}-${name}.json # test-results/go-test/results-testonly-1.json + gotestsum_timing_events=failure-summary-${name}.json # failure-summary-testonly-1.json + failure_summary_file_name="failure-summary-${name}.md" # failure-summary-testonly-1.md + data_race_log_file="data-race.log" # data-race.log + data_race_log_download_pattern="data-race-${{ inputs.name }}*.log" # data-race-testonly-*.log + data_race_log_upload_key="data-race-${name}.log" # data-race-testonly-1.log + { + echo "name=${name}" + echo "failure-summary-file-name=${failure_summary_file_name}" + echo "data-race-log-file=${data_race_log_file}" + echo "data-race-log-download-pattern=${data_race_log_download_pattern}" + echo "data-race-log-upload-key=${data_race_log_upload_key}" + echo "go-test-dir=${go_test_dir}" + echo "go-test-log-archive-name=${go_test_log_archive_name}" + echo "go-test-log-dir=${go_test_log_dir}" + echo "go-test-log-dir-absolute=${go_test_log_dir_absolute}" + echo "go-test-results-download-pattern=${go_test_results_download_pattern}" + echo "go-test-results-upload-key=${go_test_results_upload_key}" + echo "gotestsum-jsonfile=${gotestsum_jsonfile}" + echo "gotestsum-junitfile=${gotestsum_junitfile}" + echo 
"gotestsum-results-prefix=${gotestsum_results_prefix}" + echo "gotestsum-timing-events=${gotestsum_timing_events}" + } | tee -a "$GITHUB_OUTPUT" + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@v3 + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - uses: ./.github/actions/install-external-tools + - name: Build Vault HSM binary for tests + if: inputs.binary-tests && matrix.id == inputs.total-runners && github.repository == 'hashicorp/vault-enterprise' + env: + GOPRIVATE: github.com/hashicorp/* + run: | + set -exo pipefail + time make prep enthsmdev + # The subsequent build of vault will blow away the bin folder + mv bin/vault vault-hsm-binary + - if: inputs.binary-tests && matrix.id == inputs.total-runners + name: Build dev binary for binary tests + # The dev mode binary has to exist for binary tests that are dispatched on the last runner. + env: + GOPRIVATE: github.com/hashicorp/* + run: time make prep dev + - name: Install gVisor + # Enterprise repo runners do not allow sudo, so can't install gVisor there yet. + if: github.repository != 'hashicorp/vault-enterprise' + run: | + ( + set -e + ARCH="$(uname -m)" + URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}" + wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \ + "${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512" + sha512sum -c runsc.sha512 \ + -c containerd-shim-runsc-v1.sha512 + rm -f -- *.sha512 + chmod a+rx runsc containerd-shim-runsc-v1 + sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin + ) + sudo tee /etc/docker/daemon.json < /dev/null 2>&1; then + exit 0 + fi + # Curl does not always exit 1 if things go wrong. To determine if this is successful we'll + # we'll silence all non-error output and check the results to determine success. + if ! 
out="$(curl -sSL --fail https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64 --output /usr/local/bin/datadog-ci 2>&1)"; then + printf "failed to download datadog-ci: %s" "$out" + fi + if [[ -n "$out" ]]; then + printf "failed to download datadog-ci: %s" "$out" + fi + chmod +x /usr/local/bin/datadog-ci + - name: Upload test results to DataDog + continue-on-error: true + env: + DD_ENV: ci + run: | + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }} + fi + datadog-ci junit upload --service "$GITHUB_REPOSITORY" '${{ steps.metadata.outputs.gotestsum-junitfile }}' + if: success() || failure() + - name: Archive test logs + if: always() + id: archive-test-logs + # actions/upload-artifact will compress the artifact for us. We create a tarball to preserve + # permissions and to support file names with special characters. + run: | + tar -cvf '${{ steps.metadata.outputs.go-test-log-archive-name }}' -C "${{ steps.metadata.outputs.go-test-log-dir }}" . + - name: Upload test logs archives + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.metadata.outputs.go-test-log-archive-name }} + path: ${{ steps.metadata.outputs.go-test-log-archive-name }} + retention-days: 7 + if: success() || failure() + - name: Upload test results + if: success() || failure() + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.metadata.outputs.go-test-results-upload-key }} + path: | + ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.gotestsum-results-prefix}}*.json + ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.gotestsum-results-prefix}}*.xml + # We cache relevant timing data with actions/cache later so we can let the file expire quickly + retention-days: 1 + - name: Check for data race failures + if: success() || failure() + id: data-race-check + working-directory: ${{ needs.test-matrix.outputs.go-test-dir }} + run: | + # Scan gotestsum output files for data race errors. + data_race_tests=() + data_race_log='${{ steps.metadata.outputs.data-race-log-file }}' + for file in *.json; do + # Check if test results contains offending phrase + if grep -q "WARNING: DATA RACE" "$file"; then + data_race_tests+=("test-go (${{ matrix.id }})") + touch "$data_race_log" + + # Write output to our log file so we can aggregate it in the final workflow + { + echo "=============== test-go (${{ matrix.id }}) ===========================" + sed -n '/WARNING: DATA RACE/,/==================/p' "$file" | jq -r -j '.Output' + } | tee -a "$data_race_log" + fi + done + + result="success" + # Fail the action if there were any failed race tests + if (("${#data_race_tests[@]}" > 0)); then + result="failure" + fi + echo "data-race-result=${result}" | tee -a "$GITHUB_OUTPUT" + - name: Upload data race detector failure log + if: | + (success() || failure()) && + steps.data-race-check.outputs.data-race-result == 'failure' + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ steps.metadata.outputs.data-race-log-upload-key }} + path: ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.data-race-log-file }} + # Set the minimum retention possible. We only upload this because it's the only way to + # aggregate results from matrix workflows. 
retention-days: 1 + if-no-files-found: error # Make sure we always upload the data race logs if it failed + # GitHub Actions doesn't expose the job ID or the URL to the job execution, + # so we have to fetch it from the API + - name: Fetch job logs URL + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + if: success() || failure() + continue-on-error: true + with: + retries: 3 + script: | + // We surround the whole script with a try-catch block, to avoid each of the matrix jobs + // displaying an error in the GHA workflow run annotations, which gets very noisy. + // If an error occurs, it will be logged so that we don't lose any information about the reason for failure. + try { + const fs = require("fs"); + const result = await github.rest.actions.listJobsForWorkflowRun({ + owner: context.repo.owner, + per_page: 100, + repo: context.repo.repo, + run_id: context.runId, + }); + + // Determine what job name to use for the query. These values are hardcoded, because GHA doesn't + // expose them in any of the contexts available within a workflow run. + let prefixToSearchFor; + switch ("${{ inputs.name }}") { + case "race": + prefixToSearchFor = 'Run Go tests with data race detection / test-go (${{ matrix.id }})' + break + case "fips": + prefixToSearchFor = 'Run Go tests with FIPS configuration / test-go (${{ matrix.id }})' + break + default: + prefixToSearchFor = 'Run Go tests / test-go (${{ matrix.id }})' + } + + const jobData = result.data.jobs.filter( + (job) => job.name.startsWith(prefixToSearchFor) + ); + const url = jobData[0].html_url; + const envVarName = "GH_JOB_URL"; + const envVar = envVarName + "=" + url; + const envFile = process.env.GITHUB_ENV; + + fs.appendFile(envFile, envVar, (err) => { + if (err) throw err; + console.log("Successfully set " + envVarName + " to: " + url); + }); + } catch (error) { + console.log("Error: " + error); + return + } + - name: Prepare failure summary + if: success() || failure() + continue-on-error: true + run: | + # This jq query filters out successful tests, leaving only the failures. + # Then, it formats the results into rows of a Markdown table. + # An example row will resemble this: + # | fips | github.com/hashicorp/vault/package | TestName | 0 | 2 | [view test results :scroll:](github.com/link-to-logs) | + jq -r -n 'inputs + | select(.Action == "fail") + | "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \ + '${{ steps.metadata.outputs.gotestsum-timing-events }}' \ + >> '${{ steps.metadata.outputs.failure-summary-file-name }}' + - name: Upload failure summary + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + if: success() || failure() + with: + name: ${{ steps.metadata.outputs.failure-summary-file-name }} + path: ${{ steps.metadata.outputs.failure-summary-file-name }} + + + status: + # Perform final data aggregation and determine overall status + if: always() + needs: + - test-matrix + - test-go + runs-on: ${{ fromJSON(inputs.runs-on-small) }} + outputs: + data-race-output: ${{ steps.status.outputs.data-race-output }} + data-race-result: ${{ steps.status.outputs.data-race-result }} + steps: + - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + with: + pattern: ${{ needs.test-go.outputs.data-race-log-download-pattern }} + path: data-race-logs + merge-multiple: true + # Determine our success/failure status by checking the result status and data race status.
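+      # For example (illustrative): needs results of ["success","failure"] make the grep + # below report test_go_result=failed, while an empty data-race-logs directory keeps + # data_race_result=success.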
- id: status + name: Determine status result + run: | + # Determine status result + result="success" + + # Aggregate all of our test workflows and determine our Go test result from them. + test_go_results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | jq -Mrc) + if ! grep -q -v -E '(failure|cancelled)' <<< "$test_go_results"; then + test_go_result="failed" + result="failed" + else + test_go_result="success" + fi + + # If we have downloaded data race detector logs then at least one Go test job detected + # a data race during execution. We'll fail on that. + if [ -z "$(ls -A data-race-logs)" ]; then + data_race_output="" + data_race_result="success" + else + data_race_output="$(cat data-race-logs/*)" + data_race_result="failed" + result="failed" + fi + + # Write Go and data race results to outputs. + { + echo "data-race-output< /dev/null 2>&1 + + ls -lhR '${{ needs.test-matrix.outputs.go-test-dir }}' + # Determine our overall pass/fail with our Go test results + - if: always() && steps.status.outputs.result != 'success' + name: Check for failed status + run: | + printf "One or more required go-test workflows failed. Required workflow statuses: ${{ steps.status.outputs.test-go-results }}\n ${{ steps.status.outputs.data-race-output }}" + exit 1 diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml new file mode 100644 index 000000000000..372647a1fe06 --- /dev/null +++ b/.github/workflows/test-run-acc-tests-for-path.yml @@ -0,0 +1,32 @@ +name: test-run-go-tests-for-path + +on: + workflow_call: + inputs: + name: + description: 'The name that will appear in the output log file artifact' + required: true + type: string + path: + description: 'The path to the test without the preceding "./" or following "/..." e.g. go test -v ./$path/...' + required: true + type: string + # We will need to add the ability to receive passed secrets once we get to the tests that require API credentials + +env: + VAULT_ACC: 1 + +jobs: + go-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - run: go test -v ./${{ inputs.path }}/... 2>&1 | tee ${{ inputs.name }}.txt + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + name: ${{ inputs.name }}-output + path: ${{ inputs.name }}.txt + retention-days: 2 diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml new file mode 100644 index 000000000000..15d80fad72a9 --- /dev/null +++ b/.github/workflows/test-run-enos-scenario-matrix.yml @@ -0,0 +1,203 @@ +--- +name: enos + +on: + # Only trigger this workflow using workflow_call. This workflow requires many + # secrets that must be inherited from the caller workflow. + workflow_call: + inputs: + # The name of the artifact that we're going to use for testing. This should + # match exactly to build artifacts uploaded to Github and Artifactory. + build-artifact-name: + required: true + type: string + # The maximum number of scenarios to include in the test sample. + sample-max: + default: 1 + type: number + # The name of the enos scenario sample that defines compatible scenarios we + # can test with. + sample-name: + required: true + type: string + runs-on: + # NOTE: The value should be JSON encoded as that's the only way we can + # pass arrays with workflow_call.
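+      # For example (illustrative): '"ubuntu-latest"' and '["self-hosted", "ondemand", "os=linux"]' + # are both valid JSON values for the fromJSON() calls below.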
+ type: string + required: false + default: '"ubuntu-latest"' + ssh-key-name: + type: string + default: ${{ github.event.repository.name }}-ci-ssh-key + vault-edition: + required: false + type: string + default: ce + # The Git commit SHA used as the revision when building vault + vault-revision: + required: true + type: string + vault-version: + required: true + type: string + +jobs: + metadata: + runs-on: ${{ fromJSON(inputs.runs-on) }} + outputs: + build-date: ${{ steps.metadata.outputs.build-date }} + sample: ${{ steps.metadata.outputs.sample }} + vault-version: ${{ steps.metadata.outputs.vault-version }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.vault-revision }} + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - id: metadata + run: | + build_date=$(make ci-get-date) + sample_seed=$(date +%s%N) + sample=$(enos scenario sample observe "${{ inputs.sample-name }}" --chdir ./enos --min 1 --max "${{ inputs.sample-max }}" --seed "${sample_seed}" --format json | jq -c ".observation.elements") + if [[ "${{ inputs.vault-edition }}" == "ce" ]]; then + vault_version="${{ inputs.vault-version }}" + else + # shellcheck disable=2001 + vault_version="$(sed 's/+ent/+${{ inputs.vault-edition }}/g' <<< '${{ inputs.vault-version }}')" + fi + { + echo "build-date=${build_date}" + echo "vault-version=${vault_version}" + echo "sample=${sample}" + echo "sample-seed=${sample_seed}" # This isn't used outside of here but is nice to know for duplicating observations + } | tee -a "$GITHUB_OUTPUT" + + # Run the Enos test scenario(s) + run: + needs: metadata + name: run ${{ matrix.scenario.id.filter }} + strategy: + fail-fast: false # don't fail as that can skip required cleanup steps for jobs + matrix: + include: ${{ fromJSON(needs.metadata.outputs.sample) }} + runs-on: ${{ fromJSON(inputs.runs-on) }} + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Pass in enos variables + ENOS_VAR_aws_region: ${{ matrix.attributes.aws_region }} + ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }} + ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }} + ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }} + ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.vault-version }} + ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} + ENOS_VAR_vault_license_path: ./support/vault.hclic + ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 + with: + ref: ${{ inputs.vault-revision }} + - uses: hashicorp/setup-terraform@v3 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. 
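+          # (As we understand it, the wrapper echoes the terraform command and wraps its + # stdout, so `terraform output -json` no longer yields parseable JSON.)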
terraform_wrapper: false + terraform_version: "1.7.5" # Pin until 1.8.x crash has been resolved + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: ${{ matrix.attributes.aws_region }} + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Prepare scenario dependencies + id: prepare_scenario + run: | + mkdir -p "./enos/support/terraform-plugin-cache" + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > "./enos/support/private_key.pem" + chmod 600 "./enos/support/private_key.pem" + echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT" + - if: contains(inputs.sample-name, 'build') + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + with: + name: ${{ inputs.build-artifact-name }} + path: ./enos/support/downloads + - if: contains(inputs.sample-name, 'ent') + name: Configure Vault license + run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + - id: launch + name: enos scenario launch ${{ matrix.scenario.id.filter }} + # Continue once and retry to handle occasional blips when creating infrastructure. + continue-on-error: true + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - if: steps.launch.outcome == 'failure' + id: launch_retry + name: Retry enos scenario launch ${{ matrix.scenario.id.filter }} + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - name: Upload Debug Data + if: failure() + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + with: + # The name of the artifact is the same as the matrix scenario name with the spaces replaced with underscores and colons replaced by equals. + name: ${{ steps.prepare_scenario.outputs.debug_data_artifact_name }} + path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }} + retention-days: 30 + continue-on-error: true + - if: ${{ always() }} + id: destroy + name: enos scenario destroy ${{ matrix.scenario.id.filter }} + continue-on-error: true + run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - if: steps.destroy.outcome == 'failure' + id: destroy_retry + name: Retry enos scenario destroy ${{ matrix.scenario.id.filter }} + continue-on-error: true + run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - name: Clean up Enos runtime directories + id: cleanup + if: ${{ always() }} + continue-on-error: true + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/.enos + # Send slack notifications to #feed-vault-enos-failures when any of our enos scenario commands fail. + # There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot: + # https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks + - if: ${{ always() && ! cancelled() }} + name: Notify launch failed + uses: hashicorp/actions-slack-status@v2 + with: + failure-message: "enos scenario launch ${{ matrix.scenario.id.filter}} failed.
\nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.launch.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify retry launch failed + uses: hashicorp/actions-slack-status@v2 + with: + failure-message: "retry enos scenario launch ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.launch_retry.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify destroy failed + uses: hashicorp/actions-slack-status@v2 + with: + failure-message: "enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.destroy.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} + - if: ${{ always() && ! cancelled() }} + name: Notify retry destroy failed + uses: hashicorp/actions-slack-status@v2 + with: + failure-message: "retry enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`" + status: ${{ steps.destroy_retry.outcome }} + slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.gitignore b/.gitignore index 83d0d703c6ac..95e675edf57b 100644 --- a/.gitignore +++ b/.gitignore @@ -60,18 +60,21 @@ Vagrantfile !enos/**/*.hcl # Enos -enos/.enos -enos/support -# Enos local Terraform files -enos/.terraform/* -enos/.terraform.lock.hcl -enos/*.tfstate -enos/*.tfstate.* +.enos +enos-local.vars.hcl +enos/**/support +enos/**/kubeconfig +.terraform +.terraform.lock.hcl +.tfstate.* .DS_Store .idea .vscode +# VSCode debugger executable +__debug_bin* + dist/* # ignore ctags @@ -126,3 +129,8 @@ website/components/node_modules .buildcache/ .releaser/ *.log + +tools/godoctests/.bin +tools/gonilnilfunctions/.bin +tools/codechecker/.bin +.ci-bootstrap \ No newline at end of file diff --git a/.go-version b/.go-version index 843f863534dc..6fee2fedb0a4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.19.4 +1.22.2 diff --git a/.hooks/pre-commit b/.hooks/pre-commit index 17309e55a9d7..40482966c985 100755 --- a/.hooks/pre-commit +++ b/.hooks/pre-commit @@ -35,9 +35,7 @@ block() { # Add all check functions to this space separated list. # They are executed in this order (see end of file). -CHECKS="ui_lint circleci_verify" - -MIN_CIRCLECI_VERSION=0.1.5575 +CHECKS="ui_lint backend_lint" # Run ui linter if changes in that dir detected. ui_lint() { @@ -62,80 +60,16 @@ ui_lint() { $LINTER || block "UI lint failed" } -# Check .circleci/config.yml is up to date and valid, and that all changes are -# included together in this commit. -circleci_verify() { - # Change to the root dir of the repo. - cd "$(git rev-parse --show-toplevel)" - - # Fail early if we accidentally used '.yaml' instead of '.yml' - if ! git diff --name-only --cached --exit-code -- '.circleci/***.yaml'; then - # This is just for consistency, as I keep making this mistake - Sam. - block "ERROR: File(s) with .yaml extension detected. Please rename them .yml instead." - fi - - # Succeed early if no changes to yml files in .circleci/ are currently staged. - # make ci-verify is slow so we really don't want to run it unnecessarily. 
- if git diff --name-only --cached --exit-code -- '.circleci/***.yml'; then - return 0 - fi - # Make sure to add no explicit output before this line, as it would just be noise - # for those making non-circleci changes. - echo "==> Verifying config changes in .circleci/" - echo "--> OK: All files are .yml not .yaml" - - # Ensure commit includes _all_ files in .circleci/ - # So not only are the files up to date, but we are also committing them in one go. - if ! git diff --name-only --exit-code -- '.circleci/***.yml'; then - echo "ERROR: Some .yml diffs in .circleci/ are staged, others not." - block "Please commit the entire .circleci/ directory together, or omit it altogether." - fi - - echo "--> OK: All .yml files in .circleci are staged." - - if ! REASON=$(check_circleci_cli_version); then - echo "*** WARNING: Unable to verify changes in .circleci/:" - echo "--> $REASON" - # We let this pass if there is no valid circleci version installed. +backend_lint() { + # Silently succeed if no changes staged for Go code files. + staged=$(git diff --name-only --cached --exit-code -- '*.go') + ret=$? + if [ $ret -eq 0 ]; then return 0 fi - if ! make -C .circleci ci-verify; then - block "ERROR: make ci-verify failed" - fi - - echo "--> OK: make ci-verify succeeded." -} - -check_circleci_cli_version() { - if ! command -v circleci > /dev/null 2>&1; then - echo "circleci cli not installed." - return 1 - fi - - CCI="circleci --skip-update-check" - - if ! THIS_VERSION=$($CCI version) > /dev/null 2>&1; then - # Guards against very old versions that do not have --skip-update-check. - echo "The installed circleci cli is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION." - return 1 - fi - - # SORTED_MIN is the lower of the THIS_VERSION and MIN_CIRCLECI_VERSION. - if ! SORTED_MIN="$(printf "%s\n%s" "$MIN_CIRCLECI_VERSION" "$THIS_VERSION" | sort -V | head -n1)"; then - echo "Failed to sort versions. Please open an issue to report this." - return 1 - fi - - if [ "$THIS_VERSION" != "${THIS_VERSION#$MIN_CIRCLECI_VERSION}" ]; then - return 0 # OK - Versions have the same prefix, so we consider them equal. - elif [ "$SORTED_MIN" = "$MIN_CIRCLECI_VERSION" ]; then - return 0 # OK - MIN_CIRCLECI_VERSION is lower than THIS_VERSION. - fi - - # Version too low. - echo "The installed circleci cli v$THIS_VERSION is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION" - return 1 + # Only run check-fmt on staged files + ./scripts/go-helper.sh check-fmt "${staged}" || block "Backend linting failed; run 'make fmt' to fix." } for CHECK in $CHECKS; do diff --git a/.release/ci.hcl b/.release/ci.hcl index 0be4e8ba9b71..7e49af7dec0c 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + schema = "1" project "vault" { @@ -29,145 +32,13 @@ event "build" { } } -event "upload-dev" { +event "prepare" { depends = ["build"] - action "upload-dev" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "upload-dev" - depends = ["build"] - } - - notification { - on = "fail" - } -} - -event "quality-tests" { - depends = ["upload-dev"] - action "quality-tests" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "quality-tests" - } - - notification { - on = "fail" - } -} - -event "security-scan-binaries" { - depends = ["quality-tests"] - action "security-scan-binaries" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "security-scan-binaries" - config = "security-scan.hcl" - } - - notification { - on = "fail" - } -} - -event "security-scan-containers" { - depends = ["security-scan-binaries"] - action "security-scan-containers" { + action "prepare" { organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "security-scan-containers" - config = "security-scan.hcl" - } - - notification { - on = "fail" - } -} - -event "notarize-darwin-amd64" { - depends = ["security-scan-containers"] - action "notarize-darwin-amd64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-darwin-amd64" - } - - notification { - on = "fail" - } -} - -event "notarize-darwin-arm64" { - depends = ["notarize-darwin-amd64"] - action "notarize-darwin-arm64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-darwin-arm64" - } - - notification { - on = "fail" - } -} - -event "notarize-windows-386" { - depends = ["notarize-darwin-arm64"] - action "notarize-windows-386" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-windows-386" - } - - notification { - on = "fail" - } -} - -event "notarize-windows-amd64" { - depends = ["notarize-windows-386"] - action "notarize-windows-amd64" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "notarize-windows-amd64" - } - - notification { - on = "fail" - } -} - -event "sign" { - depends = ["notarize-windows-amd64"] - action "sign" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "sign" - } - - notification { - on = "fail" - } -} - -event "sign-linux-rpms" { - depends = ["sign"] - action "sign-linux-rpms" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "sign-linux-rpms" - } - - notification { - on = "fail" - } -} - -event "verify" { - depends = ["sign-linux-rpms"] - action "verify" { - organization = "hashicorp" - repository = "crt-workflows-common" - workflow = "verify" + repository = "crt-workflows-common" + workflow = "prepare" + depends = ["build"] } notification { @@ -176,7 +47,7 @@ event "verify" { } event "enos-release-testing-oss" { - depends = ["verify"] + depends = ["prepare"] action "enos-release-testing-oss" { organization = "hashicorp" repository = "vault" @@ -282,8 +153,17 @@ event "post-publish-website" { } } -event "update-ironbank" { +event "bump-version" { depends = ["post-publish-website"] + action "bump-version" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "bump-version" + } +} + +event "update-ironbank" { + depends = ["bump-version"] action "update-ironbank" { organization = "hashicorp" repository = "crt-workflows-common" diff --git 
a/.release/docker/docker-entrypoint.sh b/.release/docker/docker-entrypoint.sh index 3b72da25b7f4..a3b581697c35 100755 --- a/.release/docker/docker-entrypoint.sh +++ b/.release/docker/docker-entrypoint.sh @@ -1,4 +1,7 @@ #!/usr/bin/dumb-init /bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e # Note above that we run dumb-init as PID 1 in order to reap zombie processes diff --git a/.release/docker/ubi-docker-entrypoint.sh b/.release/docker/ubi-docker-entrypoint.sh index 6f818bcd439f..dda1260bb8bc 100755 --- a/.release/docker/ubi-docker-entrypoint.sh +++ b/.release/docker/ubi-docker-entrypoint.sh @@ -1,4 +1,7 @@ #!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e # Prevent core dumps diff --git a/.release/linux/package/etc/vault.d/vault.hcl b/.release/linux/package/etc/vault.d/vault.hcl index 33c2e5f3225e..18ff8b4bbce9 100644 --- a/.release/linux/package/etc/vault.d/vault.hcl +++ b/.release/linux/package/etc/vault.d/vault.hcl @@ -1,4 +1,7 @@ -# Full configuration options can be found at https://www.vaultproject.io/docs/configuration +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# Full configuration options can be found at https://developer.hashicorp.com/vault/docs/configuration ui = true diff --git a/.release/linux/package/usr/lib/systemd/system/vault.service b/.release/linux/package/usr/lib/systemd/system/vault.service index 45c896b2c9a5..6408b49b3d69 100644 --- a/.release/linux/package/usr/lib/systemd/system/vault.service +++ b/.release/linux/package/usr/lib/systemd/system/vault.service @@ -1,6 +1,6 @@ [Unit] Description="HashiCorp Vault - A tool for managing secrets" -Documentation=https://www.vaultproject.io/docs/ +Documentation=https://developer.hashicorp.com/vault/docs Requires=network-online.target After=network-online.target ConditionFileNotEmpty=/etc/vault.d/vault.hcl @@ -29,6 +29,7 @@ RestartSec=5 TimeoutStopSec=30 LimitNOFILE=65536 LimitMEMLOCK=infinity +LimitCORE=0 [Install] WantedBy=multi-user.target diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl index 19aadfc71ae1..8d480ad4a73a 100644 --- a/.release/release-metadata.hcl +++ b/.release/release-metadata.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/vault" url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/vault" url_license = "https://github.com/hashicorp/vault/blob/main/LICENSE" diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl index 6d394feaacc1..3917c269cd49 100644 --- a/.release/security-scan.hcl +++ b/.release/security-scan.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + container { dependencies = true alpine_secdb = true diff --git a/CHANGELOG-pre-v1.10.md b/CHANGELOG-pre-v1.10.md new file mode 100644 index 000000000000..49f1b912d7b0 --- /dev/null +++ b/CHANGELOG-pre-v1.10.md @@ -0,0 +1,3510 @@ +## 1.9.10 + +### September 30, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +BUG FIXES: + +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] + +## 1.9.9 + +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +BUG FIXES: + +* core (enterprise): Fix some races in merkle index flushing code found in testing +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] + +## 1.9.8 + +### July 21, 2022 + +SECURITY: + +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +CHANGES: + +* core: Bump Go version to 1.17.12. + +IMPROVEMENTS: + +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] + +BUG FIXES: + +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend.
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] + +## 1.9.7 + +### June 10, 2022 + +CHANGES: + +* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] + +IMPROVEMENTS: + +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. +* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] + +## 1.9.6 + +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.9.5 + +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. 
[[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.9. [[GH-15045](https://github.com/hashicorp/vault/pull/15045)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* ui: Fix Generated Token's Policies helpText to clarify that comma-separated values are not accepted in this field.
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.9.4 + +### March 3, 2022 + +SECURITY: + +* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +CHANGES: + +* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] + +IMPROVEMENTS: + +* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] + +BUG FIXES: + +* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] +* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] +* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] +* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. 
[[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.9.3 + +### January 27, 2022 + +IMPROVEMENTS: + +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13698](https://github.com/hashicorp/vault/pull/13698)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. [[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] +* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] +* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] +* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] +* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation. +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time.
[[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.9.2 + +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.17.5 [[GH-13408](https://github.com/hashicorp/vault/pull/13408)] + +IMPROVEMENTS: + +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] + +BUG FIXES: + +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] + +## 1.9.1 + +### December 9, 2021 + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +IMPROVEMENTS: + +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] + +BUG FIXES: + +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] +* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] +* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] +* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] +* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction.
[[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] + +## 1.9.0 + +### November 17, 2021 + +CHANGES: + +* auth/kubernetes: `disable_iss_validation` defaults to true. [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] +* expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has +been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* go: Update go version to 1.17.2 +* secrets/ssh: Roles with empty allowed_extensions will now forbid end-users +specifying extensions when requesting ssh key signing. Update roles setting +allowed_extensions to `*` to permit any extension to be specified by an end-user. [[GH-12847](https://github.com/hashicorp/vault/pull/12847)] + +FEATURES: + +* **Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) [[GH-12485](https://github.com/hashicorp/vault/pull/12485)] +* **Deduplicate Token With Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log [[GH-12820](https://github.com/hashicorp/vault/pull/12820)] +* **Elasticsearch Database UI**: The UI now supports adding and editing Elasticsearch connections in the database secret engine. [[GH-12672](https://github.com/hashicorp/vault/pull/12672)] +* **KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the +metadata endpoint. The data will be present in responses made to the data endpoint independent of the +calling token's `read` access to the metadata endpoint. [[GH-12907](https://github.com/hashicorp/vault/pull/12907)] +* **KV patch (Tech Preview)**: Add partial update support for the `/<mount>/data/:path` kv-v2 +endpoint through HTTP `PATCH`. A new `patch` ACL capability has been added and +is required to make such requests. [[GH-12687](https://github.com/hashicorp/vault/pull/12687)] +* **Key Management Secrets Engine (Enterprise)**: Adds support for distributing and managing keys in GCP Cloud KMS. +* **Local Auth Mount Entities (enterprise)**: Logins on `local` auth mounts will +generate identity entities for the tokens issued. The aliases of the entity +resulting from local auth mounts (local-aliases) will be scoped by the cluster.
+This means that the local-aliases will never leave the geographical boundary of +the cluster where they were issued. This is something to be mindful of for +those who have implemented local auth mounts to comply with GDPR guidelines. +* **Namespaces (Enterprise)**: Adds support for locking Vault API for particular namespaces. +* **OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. [[GH-12932](https://github.com/hashicorp/vault/pull/12932)] +* **Oracle Database UI**: The UI now supports adding and editing Oracle connections in the database secret engine. [[GH-12752](https://github.com/hashicorp/vault/pull/12752)] +* **Postgres Database UI**: The UI now supports adding and editing Postgres connections in the database secret engine. [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5, 1.8.4, and 1.9.0. +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. + +IMPROVEMENTS: + +* agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored [[GH-12843](https://github.com/hashicorp/vault/pull/12843)] +* agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined [[GH-12762](https://github.com/hashicorp/vault/pull/12762)] +* agent/cache: tolerate partial restore failure from persistent cache [[GH-12718](https://github.com/hashicorp/vault/pull/12718)] +* agent/template: add support for new 'writeToFile' template function [[GH-12505](https://github.com/hashicorp/vault/pull/12505)] +* api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests. [[GH-12814](https://github.com/hashicorp/vault/pull/12814)] +* api: adds native Login method to Go client module with different auth method interfaces to support easier authentication [[GH-12796](https://github.com/hashicorp/vault/pull/12796)] +* api: Move mergeStates and other required utils from agent to api module [[GH-12731](https://github.com/hashicorp/vault/pull/12731)] +* api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy [[GH-12582](https://github.com/hashicorp/vault/pull/12582)] +* auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found [[GH-12788](https://github.com/hashicorp/vault/pull/12788)] +* auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id.
[[GH-12425](https://github.com/hashicorp/vault/pull/12425)] +* auth/aws: add profile support for AWS credentials when using the AWS auth method [[GH-12621](https://github.com/hashicorp/vault/pull/12621)] +* auth/kubernetes: validate JWT against the provided role on alias look-ahead operations [[GH-12688](https://github.com/hashicorp/vault/pull/12688)] +* auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) [[GH-12633](https://github.com/hashicorp/vault/pull/12633)] +* auth/ldap: include support for an optional user filter field when searching for users [[GH-11000](https://github.com/hashicorp/vault/pull/11000)] +* auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow. [[GH-12876](https://github.com/hashicorp/vault/pull/12876)] +* auth/okta: Send x-forwarded-for in Okta Push Factor request [[GH-12320](https://github.com/hashicorp/vault/pull/12320)] +* auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies [[GH-7277](https://github.com/hashicorp/vault/pull/7277)] +* cli: Operator diagnose now tests for missing or partial telemetry configurations. [[GH-12802](https://github.com/hashicorp/vault/pull/12802)] +* cli: Add new HTTP option `-header`, which enables sending arbitrary headers with the CLI [[GH-12508](https://github.com/hashicorp/vault/pull/12508)] +* command: operator generate-root -decode: allow passing encoded token via stdin [[GH-12881](https://github.com/hashicorp/vault/pull/12881)] +* core/token: Return the token_no_default_policy config on token role read if set [[GH-12565](https://github.com/hashicorp/vault/pull/12565)] +* core: Add support for go-sockaddr templated addresses in config. [[GH-9109](https://github.com/hashicorp/vault/pull/9109)] +* core: adds custom_metadata field for aliases [[GH-12502](https://github.com/hashicorp/vault/pull/12502)] +* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] +* core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27.
[[GH-12253](https://github.com/hashicorp/vault/pull/12253)] +* core: Upgrade github.com/gogo/protobuf [[GH-12255](https://github.com/hashicorp/vault/pull/12255)] +* core: build with Go 1.17, and mitigate a breaking change in Go 1.17 that could impact how approle and ssh interpret IPs/CIDRs [[GH-12868](https://github.com/hashicorp/vault/pull/12868)] +* core: observe client counts broken down by namespace for the partial-month client count [[GH-12393](https://github.com/hashicorp/vault/pull/12393)] +* core: Artifact builds will now only run on merges to the release branches or to `main` +* core: The [dockerfile](https://github.com/hashicorp/vault/blob/main/Dockerfile) that is used to build the vault docker image available at [hashicorp/vault](https://hub.docker.com/repository/docker/hashicorp/vault) now lives in the root of this repo, and the entrypoint is available under [.release/docker/docker-entrypoint.sh](https://github.com/hashicorp/vault/blob/main/.release/docker/docker-entrypoint.sh) +* core: The vault linux packaging service configs and pre/post install scripts are now available under [.release/linux](https://github.com/hashicorp/vault/blob/main/.release/linux) +* core: Vault linux packages are now available for all supported linux architectures including arm, arm64, 386, and amd64 +* db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations [[GH-12903](https://github.com/hashicorp/vault/pull/12903)] +* identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens. [[GH-12780](https://github.com/hashicorp/vault/pull/12780)] +* identity: fix issue where Cache-Control header causes stampede of requests for JWKS keys [[GH-12414](https://github.com/hashicorp/vault/pull/12414)] +* physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. [[GH-11980](https://github.com/hashicorp/vault/pull/11980)] +* pki: adds signature_bits field to customize signature algorithm on CAs and certs signed by Vault [[GH-11245](https://github.com/hashicorp/vault/pull/11245)] +* plugin: update the couchbase gocb version in the couchbase plugin [[GH-12483](https://github.com/hashicorp/vault/pull/12483)] +* replication (enterprise): Add merkle.flushDirty.num_pages_outstanding metric which specifies number of +outstanding dirty pages that were not flushed. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths. [[GH-12668](https://github.com/hashicorp/vault/pull/12668)] +* secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases [[GH-12185](https://github.com/hashicorp/vault/pull/12185)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) [[GH-12629](https://github.com/hashicorp/vault/pull/12629)] +* secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin [[GH-12839](https://github.com/hashicorp/vault/pull/12839)] +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm.
[[GH-12514](https://github.com/hashicorp/vault/pull/12514)] +* secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. [[GH-12559](https://github.com/hashicorp/vault/pull/12559)] +* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/pki: Support ed25519 as a key for the pki backend [[GH-11780](https://github.com/hashicorp/vault/pull/11780)] +* secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 [[GH-12877](https://github.com/hashicorp/vault/pull/12877)] +* secrets/ssh: Let allowed_users template mix templated and non-templated parts. [[GH-10886](https://github.com/hashicorp/vault/pull/10886)] +* secrets/ssh: Use entropy augmentation when available for generation of the signing key. [[GH-12560](https://github.com/hashicorp/vault/pull/12560)] +* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] +* storage/raft: Best-effort handling of cancelled contexts. [[GH-12162](https://github.com/hashicorp/vault/pull/12162)] +* transform (enterprise): Add advanced features for encoding and decoding for Transform FPE +* transform (enterprise): Add a `reference` field to batch items, and propagate it to the response +* ui: Add KV secret search box when no metadata list access. [[GH-12626](https://github.com/hashicorp/vault/pull/12626)] +* ui: Add custom metadata to KV secret engine and metadata to config [[GH-12169](https://github.com/hashicorp/vault/pull/12169)] +* ui: Creates new StatText component [[GH-12295](https://github.com/hashicorp/vault/pull/12295)] +* ui: client count monthly view [[GH-12554](https://github.com/hashicorp/vault/pull/12554)] +* ui: creates bar chart component for displaying client count data by namespace [[GH-12437](https://github.com/hashicorp/vault/pull/12437)] +* ui: Add creation time to KV 2 version history and version view [[GH-12663](https://github.com/hashicorp/vault/pull/12663)] +* ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)] +* ui: Adds warning about white space in KV secret engine.
[[GH-12921](https://github.com/hashicorp/vault/pull/12921)] +* ui: Click to copy database static role last rotation value in tooltip [[GH-12890](https://github.com/hashicorp/vault/pull/12890)] +* ui: Filter DB connection attributes so only relevant attrs POST to backend [[GH-12770](https://github.com/hashicorp/vault/pull/12770)] +* ui: Removes empty rows from DB config views [[GH-12819](https://github.com/hashicorp/vault/pull/12819)] +* ui: Standardizes toolbar presentation of destructive actions [[GH-12895](https://github.com/hashicorp/vault/pull/12895)] +* ui: Updates font for table row value fields [[GH-12908](https://github.com/hashicorp/vault/pull/12908)] +* ui: namespace search in client count views [[GH-12577](https://github.com/hashicorp/vault/pull/12577)] +* ui: parse and display pki cert metadata [[GH-12541](https://github.com/hashicorp/vault/pull/12541)] +* ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package [[GH-11208](https://github.com/hashicorp/vault/pull/11208)] +* ui: updated client tracking config view [[GH-12422](https://github.com/hashicorp/vault/pull/12422)] + +DEPRECATIONS: + +* auth/kubernetes: deprecate `disable_iss_validation` and `issuer` configuration fields [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] + +BUG FIXES: + +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* api: Fixes storage APIs returning incorrect error when parsing responses [[GH-12338](https://github.com/hashicorp/vault/pull/12338)] +* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12265](https://github.com/hashicorp/vault/pull/12265)] +* cli/api: Provide consistent handling of comma-separated parameters in auth/secret enable/tune [[GH-12126](https://github.com/hashicorp/vault/pull/12126)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12720](https://github.com/hashicorp/vault/pull/12720)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12911](https://github.com/hashicorp/vault/pull/12911)] +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* core (enterprise): Allow deletion of stored licenses on DR secondary nodes +* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin.
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Fix data race during perf standby sealing +* core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node [[GH-12317](https://github.com/hashicorp/vault/pull/12317)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* core (enterprise): namespace header included in responses, Go client uses it when displaying error messages [[GH-12196](https://github.com/hashicorp/vault/pull/12196)] +* core/api: Fix an arm64 bug converting a negative int to an unsigned int [[GH-12372](https://github.com/hashicorp/vault/pull/12372)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* core: Fix warnings logged on perf standbys regarding stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] +* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] +* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12301](https://github.com/hashicorp/vault/pull/12301)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* http: removed unpublished true from logical_system path, making openapi spec consistent with documentation [[GH-12713](https://github.com/hashicorp/vault/pull/12713)] +* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] +* identity: Fix regression preventing startup when aliases were created pre-1.9.
[[GH-13169](https://github.com/hashicorp/vault/pull/13169)] +* identity: dedup from_entity_ids when merging two entities [[GH-10101](https://github.com/hashicorp/vault/pull/10101)] +* identity: disallow creation of role without a key parameter [[GH-12208](https://github.com/hashicorp/vault/pull/12208)] +* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] +* identity: merge associated entity groups when merging entities [[GH-10085](https://github.com/hashicorp/vault/pull/10085)] +* identity: suppress duplicate policies on entities [[GH-12812](https://github.com/hashicorp/vault/pull/12812)] +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* license: ignore stored terminated license while autoloading is enabled [[GH-2104](https://github.com/hashicorp/vault/pull/2104)] +* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* replication (enterprise): Fix issue where merkle.flushDirty.num_pages metric is not emitted if number +of dirty pages is 0. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* replication (enterprise): Fix merkle.saveCheckpoint.num_dirty metric to accurately specify the number +of dirty pages in the merkle tree at time of checkpoint creation. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12934](https://github.com/hashicorp/vault/pull/12934)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12600](https://github.com/hashicorp/vault/pull/12600)] +* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. 
[[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* storage/raft (enterprise): Ensure that raft autosnapshot backoff retry duration never hits 0s +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: Support `addr_type=public_v6` in auto-join [[GH-12366](https://github.com/hashicorp/vault/pull/12366)] +* transform (enterprise): Enforce minimum cache size for Transform backend and reset cache size without a restart +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] +* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] +* ui: Fixed text overflow in flash messages [[GH-12357](https://github.com/hashicorp/vault/pull/12357)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: Remove spinner after token renew [[GH-12887](https://github.com/hashicorp/vault/pull/12887)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] +* ui: Fix issue where tabbing out of MaskedInput on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] +* ui: fix missing navbar items on login to namespace [[GH-12478](https://github.com/hashicorp/vault/pull/12478)] +* ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)] +* ui: updating database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)] + +## 1.8.12 + +### June 10, 2022 + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting.
+* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. + +## 1.8.11 + +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.8.10 + +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.16.15. [[GH-go-ver-1810](https://github.com/hashicorp/vault/pull/go-ver-1810)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: Fix excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: Fix excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: Report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: Fix potential memory leak from time.After() used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* ui: Fix Generated Token's Policies helpText to clarify that comma-separated values are not accepted in this field.
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.8.9 + +### March 3, 2022 + +SECURITY: + +* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +IMPROVEMENTS: + +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] + +BUG FIXES: + +* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] +* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14170](https://github.com/hashicorp/vault/pull/14170)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.8.8 + +### January 27, 2022 + +IMPROVEMENTS: + +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13494](https://github.com/hashicorp/vault/pull/13494)] +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation. +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13549](https://github.com/hashicorp/vault/pull/13549)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.8.7 + +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] + +## 1.8.6 + +### December 9, 2021 + +CHANGES: + +* go: Update go version to 1.16.9 [[GH-13029](https://github.com/hashicorp/vault/pull/13029)] + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +BUG FIXES: + +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] + +## 1.8.5 + +### November 4, 2021 + +SECURITY: + +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
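+
+For context on the class of policy affected: below is a minimal, hypothetical sketch (Go, using the `github.com/hashicorp/vault/api` client) of installing a templated ACL policy keyed on an entity alias name. The policy name and the mount accessor `auth_userpass_abc123` are illustrative only, not from this changelog. Under the bug above, the `{{identity.entity.aliases.<accessor>.name}}` template could resolve against the first-created alias for the entity and mount combination rather than the alias actually used to log in.
+
+```go
+package main
+
+import (
+	"log"
+
+	vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := vault.NewClient(vault.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Templated policy: which path a token may read depends on the name of
+	// the entity alias behind it. "auth_userpass_abc123" is a hypothetical
+	// userpass mount accessor.
+	const rules = `
+path "secret/data/{{identity.entity.aliases.auth_userpass_abc123.name}}/*" {
+  capabilities = ["read", "list"]
+}`
+
+	if err := client.Sys().PutPolicy("per-alias-read", rules); err != nil {
+		log.Fatal(err)
+	}
+}
+```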
+ +BUG FIXES: + +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12952](https://github.com/hashicorp/vault/pull/12952)] +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. + +## 1.8.4 + +### October 6, 2021 + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4. + +IMPROVEMENTS: + +* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] + +BUG FIXES: + +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] + +## 1.8.3 + +### September 29, 2021 + +IMPROVEMENTS: + +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] + +BUG FIXES: + +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* core (enterprise): Allow deletion of stored licenses on DR secondary nodes +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster.
[[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12599](https://github.com/hashicorp/vault/pull/12599)] +* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] +* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] + +## 1.8.2 + +### August 26, 2021 + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.16.7 [[GH-12408](https://github.com/hashicorp/vault/pull/12408)] + +BUG FIXES: + +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12300](https://github.com/hashicorp/vault/pull/12300)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] +* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] +* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: Fix issue where tabbing out of MaskedInput on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] + +## 1.8.1 + +### August 5, 2021 + +CHANGES: + +* go: Update go version to 1.16.6 [[GH-12245](https://github.com/hashicorp/vault/pull/12245)] + +IMPROVEMENTS: + +* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] + +BUG FIXES: + +* auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL.
[[GH-12026](https://github.com/hashicorp/vault/pull/12026)] +* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12258](https://github.com/hashicorp/vault/pull/12258)] +* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] +* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] + +## 1.8.0 + +### July 28, 2021 + +CHANGES: + +* agent: Errors in the template engine will no longer cause agent to exit unless +explicitly defined to do so. A new configuration parameter, +`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can +be set to `true` in order to cause agent to exit. Note that for agent to exit if +`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must +also be set to `true`. Otherwise, the template engine will log an error but then +restart its internal runner. [[GH-11775](https://github.com/hashicorp/vault/pull/11775)] +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* core (enterprise): License/EULA changes that ensure the presence of a valid HashiCorp license to +start Vault. More information is available in the [Vault License FAQ](https://www.vaultproject.io/docs/enterprise/license/faqs) + +FEATURES: + +* **GCP Secrets Engine Static Accounts**: Adds ability to use existing service accounts for generation + of service account keys and access tokens. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)] +* **Key Management Secrets Engine (Enterprise)**: Adds general availability for distributing and managing keys in AWS KMS. [[GH-11958](https://github.com/hashicorp/vault/pull/11958)] +* **License Autoloading (Enterprise)**: Licenses may now be automatically loaded from the environment or disk. +* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532](https://github.com/hashicorp/vault/pull/11532)] +* **Vault Diagnose**: A new `vault operator` command to detect common issues with vault server setups. + +SECURITY: + +* storage/raft: When initializing Vault’s Integrated Storage backend, excessively broad filesystem permissions may be set for the underlying Bolt database used by Vault’s Raft implementation. This vulnerability, CVE-2021-38553, was fixed in Vault 1.8.0. +* ui: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +IMPROVEMENTS: + +* agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets [[GH-11934](https://github.com/hashicorp/vault/pull/11934)] +* agent: Allow Agent auto auth to read symlinked JWT files [[GH-11502](https://github.com/hashicorp/vault/pull/11502)] +* api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. [[GH-11696](https://github.com/hashicorp/vault/pull/11696)] +* auth/aws: Underlying error included in validation failure message.
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)] +* core: Add `prefix_filter` to telemetry config [[GH-12025](https://github.com/hashicorp/vault/pull/12025)] +* core: Add a darwin/arm64 binary release supporting the Apple M1 CPU [[GH-12071](https://github.com/hashicorp/vault/pull/12071)] +* core: Add a small (<1s) exponential backoff to TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)] +* core (enterprise): Add controlled capabilities to control group policy stanza +* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] +* core: Add metrics to report if a node is a perf standby, if a node is a dr secondary or primary, and if a node is a perf secondary or primary. [[GH-11472](https://github.com/hashicorp/vault/pull/11472)] +* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] +* core: add irrevocable lease list and count apis [[GH-11607](https://github.com/hashicorp/vault/pull/11607)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* core: Improve renew/revoke performance using per-lease locks [[GH-11122](https://github.com/hashicorp/vault/pull/11122)] +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* go: Update to Go 1.16.5 [[GH-11802](https://github.com/hashicorp/vault/pull/11802)] +* replication: Delay evaluation of X-Vault-Index headers until merkle sync completes. +* secrets/rabbitmq: Add ability to customize dynamic usernames [[GH-11899](https://github.com/hashicorp/vault/pull/11899)] +* secrets/ad: Add `rotate-role` endpoint to allow rotations of service accounts. [[GH-11942](https://github.com/hashicorp/vault/pull/11942)] +* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/elasticsearch: Add ability to customize dynamic usernames [[GH-11957](https://github.com/hashicorp/vault/pull/11957)] +* secrets/database/influxdb: Add ability to customize dynamic usernames [[GH-11796](https://github.com/hashicorp/vault/pull/11796)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodbatlas: Adds the ability to customize username generation for dynamic users in MongoDB Atlas.
[[GH-11956](https://github.com/hashicorp/vault/pull/11956)] +* secrets/database/redshift: Add ability to customize dynamic usernames [[GH-12016](https://github.com/hashicorp/vault/pull/12016)] +* secrets/database/snowflake: Add ability to customize dynamic usernames [[GH-11997](https://github.com/hashicorp/vault/pull/11997)] +* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] +* storage/raft: Improve raft batch size selection [[GH-11907](https://github.com/hashicorp/vault/pull/11907)] +* storage/raft: change freelist type to map and set nofreelistsync to true [[GH-11895](https://github.com/hashicorp/vault/pull/11895)] +* storage/raft: Switch to shared raft-boltdb library and add boltdb metrics [[GH-11269](https://github.com/hashicorp/vault/pull/11269)] +* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] +* storage/raft (enterprise): Enable Autopilot on DR secondary clusters +* ui: Add Validation to KV secret engine [[GH-11785](https://github.com/hashicorp/vault/pull/11785)] +* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] +* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] +* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] +* ui: Add validation support for open api form fields [[GH-11963](https://github.com/hashicorp/vault/pull/11963)] +* ui: Added auth method descriptions to UI login page [[GH-11795](https://github.com/hashicorp/vault/pull/11795)] +* ui: JSON fields on database can be cleared on edit [[GH-11708](https://github.com/hashicorp/vault/pull/11708)] +* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] +* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] +* ui: Replace tool partials with components. 
[[GH-11672](https://github.com/hashicorp/vault/pull/11672)] +* ui: Show description on secret engine list [[GH-11995](https://github.com/hashicorp/vault/pull/11995)] +* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)] +* ui: Update partials to components [[GH-11680](https://github.com/hashicorp/vault/pull/11680)] +* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)] +* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] +* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)] +* ui: add transform secrets engine to features list [[GH-12003](https://github.com/hashicorp/vault/pull/12003)] +* ui: add validations for duplicate path kv engine [[GH-11878](https://github.com/hashicorp/vault/pull/11878)] +* ui: show site-wide banners for license warnings if applicable [[GH-11759](https://github.com/hashicorp/vault/pull/11759)] +* ui: update license page with relevant autoload info [[GH-11778](https://github.com/hashicorp/vault/pull/11778)] + +DEPRECATIONS: + +* secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating + secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)] + +BUG FIXES: + +* activity: Omit wrapping tokens and control groups from client counts [[GH-11826](https://github.com/hashicorp/vault/pull/11826)] +* agent/cert: Fix issue where the API client on agent was not honoring certificate + information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent/template: fix command shell quoting issue [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* agent: fix timestamp format in log messages from the templating engine [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] +* auth/approle: fixing dereference of nil pointer [[GH-11864](https://github.com/hashicorp/vault/pull/11864)] +* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to + bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] +* auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs [[GH-12073](https://github.com/hashicorp/vault/pull/12073)] +* auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. [[GH-11975](https://github.com/hashicorp/vault/pull/11975)] +* cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. [[GH-12031](https://github.com/hashicorp/vault/pull/12031)] +* cli: vault delete and vault kv delete should support the same output options (e.g. -format) as vault write. [[GH-11992](https://github.com/hashicorp/vault/pull/11992)] +* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes. 
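+
+As a companion to the 1.8.0 `template_config` change and the `static_secret_render_interval` improvement noted above, here is a minimal, illustrative Vault Agent snippet. This is a sketch only: the duration is a placeholder, and it assumes both parameters live in the top-level `template_config` stanza as the entries describe.
+
+```hcl
+# Illustrative Vault Agent configuration (sketch, not authoritative).
+# Assumes both parameters belong to the top-level template_config stanza.
+template_config {
+  exit_on_retry_failure         = true  # exit instead of restarting the template runner
+  static_secret_render_interval = "5m"  # placeholder interval for re-fetching non-leased secrets
+}
+```
+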
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. +* core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric [[GH-12020](https://github.com/hashicorp/vault/pull/12020)] +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)] +* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] +* core (enterprise): Fix panic on DR secondary when there are lease count quotas [[GH-11742](https://github.com/hashicorp/vault/pull/11742)] +* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)] +* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] +* core: Fixed double counting of http requests after operator stepdown [[GH-11970](https://github.com/hashicorp/vault/pull/11970)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* mongo-db: default username template now strips invalid '.' characters [[GH-11872](https://github.com/hashicorp/vault/pull/11872)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* replication: Fix panic trying to update walState during identity group invalidation. +* replication: Fix: mounts created within a namespace that was part of an Allow + filtering rule would not appear on performance secondary if created after the rule + was defined. +* secret/pki: use case-insensitive domain name comparison as per RFC1035 section 2.3.3 +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] +* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] +* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] +* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) [[GH-12019](https://github.com/hashicorp/vault/pull/12019)] +* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* storage/raft: Tweak creation of vault.db file [[GH-12034](https://github.com/hashicorp/vault/pull/12034)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] +* transform (enterprise): Fix an issue with malformed transform configuration + storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. +* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] +* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix Version History queryParams on LinkedBlock [[GH-12079](https://github.com/hashicorp/vault/pull/12079)] +* ui: Fix bug where database secret engines with custom names cannot delete connections [[GH-11127](https://github.com/hashicorp/vault/pull/11127)] +* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fix date display on expired token notice [[GH-11142](https://github.com/hashicorp/vault/pull/11142)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] +* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)] +* ui: Fix issue where logging in without namespace input causes error [[GH-11094](https://github.com/hashicorp/vault/pull/11094)] +* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] +* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] +* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)] +* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] +* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] +* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] + +## 1.7.10 + +### March 3, 2022 + +SECURITY: + +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +BUG FIXES: + +* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] +* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.7.9 + +### January 27, 2022 + +IMPROVEMENTS: + +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13493](https://github.com/hashicorp/vault/pull/13493)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13735](https://github.com/hashicorp/vault/pull/13735)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.7.8 + +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] + +BUG FIXES: + +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] + +## 1.7.7 + +### December 9, 2021 + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +BUG FIXES: + +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. 
[[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] + +## 1.7.6 + +### November 4, 2021 + +SECURITY: + +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. + +BUG FIXES: + +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12957](https://github.com/hashicorp/vault/pull/12957)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. + +## 1.7.5 + +### 29 September 2021 + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4. + +IMPROVEMENTS: + +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] + +BUG FIXES: + +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. 
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12598](https://github.com/hashicorp/vault/pull/12598)] +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] + +## 1.7.4 + +### 26 August 2021 + +SECURITY: + +* _UI Secret Caching_: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.15.15 [[GH-12411](https://github.com/hashicorp/vault/pull/12411)] + +IMPROVEMENTS: + +* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] + +BUG FIXES: + +* replication (enterprise): Fix a panic that could occur when checking the last wal and the log shipper buffer is empty. +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12299](https://github.com/hashicorp/vault/pull/12299)] +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] +* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] + +## 1.7.3 + +### June 16th, 2021 + +CHANGES: + +* go: Update go version to 1.15.13 [[GH-11857](https://github.com/hashicorp/vault/pull/11857)] + +IMPROVEMENTS: + +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] + +BUG FIXES: + +* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to +bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] +* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] + +## 1.7.2 + +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] + +IMPROVEMENTS: + +* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] +* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] + +BUG FIXES: + +* agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] + +## 1.7.1 + +### 21 April 2021 + +SECURITY: + +* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the + Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions + 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) +* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all + versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) + +CHANGES: + +* go: Update to Go 1.15.11 [[GH-11395](https://github.com/hashicorp/vault/pull/11395)] + +IMPROVEMENTS: + +* auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. [[GH-11388](https://github.com/hashicorp/vault/pull/11388)] +* core: Add tls_max_version listener config option. 
[[GH-11226](https://github.com/hashicorp/vault/pull/11226)] +* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] + +BUG FIXES: + +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] +* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] +* core: requests forwarded by standby weren't always timed out. [[GH-11322](https://github.com/hashicorp/vault/pull/11322)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* replication: Fix: mounts created within a namespace that was part of an Allow + filtering rule would not appear on performance secondary if created after the rule + was defined. +* replication: Perf standby nodes on newly enabled DR secondary sometimes couldn't connect to active node with TLS errors. [[GH-1823](https://github.com/hashicorp/vault/pull/1823)] +* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] +* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] +* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] +* storage/raft: using raft for ha_storage with a different storage backend was broken in 1.7.0, now fixed. [[GH-11340](https://github.com/hashicorp/vault/pull/11340)] +* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] +* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] +* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)] +* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] +* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] +* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] + +## 1.7.0 + +### 24 March 2021 + +CHANGES: + +* agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the +~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter, +which defaults to 5 minutes. 
[[GH-10964](https://github.com/hashicorp/vault/pull/10964)] +* aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms +have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been +updated to `/auth/aws/identity-accesslist`). The old and new endpoints are aliases, +sharing the same underlying data. The legacy endpoint names are considered **deprecated** +and will be removed in a future release (not before Vault 1.9). The complete list of +endpoint changes is available in the [AWS Auth API docs](/api-docs/auth/aws#deprecations-effective-in-vault-1-7). +* go: Update Go version to 1.15.10 [[GH-11114](https://github.com/hashicorp/vault/pull/11114)] [[GH-11173](https://github.com/hashicorp/vault/pull/11173)] + +FEATURES: + +* **Aerospike Storage Backend**: Add support for using Aerospike as a storage backend [[GH-10131](https://github.com/hashicorp/vault/pull/10131)] +* **Autopilot for Integrated Storage**: A set of features has been added to allow for automatic operator-friendly management of Vault servers. This is only applicable when integrated storage is in use. + * **Dead Server Cleanup**: Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections. + * **Server Health Checking**: An API has been added to track the state of servers, including their health. + * **New Server Stabilization**: When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member. +* **Tokenization Secrets Engine (Enterprise)**: The Tokenization Secrets Engine is now generally available. We have added support for MySQL, key rotation, and snapshot/restore. +* replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured. +* agent: Support for persisting the agent cache to disk [[GH-10938](https://github.com/hashicorp/vault/pull/10938)] +* auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] +* core (enterprise): X-Vault-Index and related headers can be used by clients to manage eventual consistency. +* kmip (enterprise): Use entropy augmentation to generate kmip certificates +* sdk: Private key generation in the certutil package now allows custom io.Readers to be used. 
[[GH-10653](https://github.com/hashicorp/vault/pull/10653)] +* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] +* secrets/database/cassandra: Add ability to customize dynamic usernames [[GH-10906](https://github.com/hashicorp/vault/pull/10906)] +* secrets/database/couchbase: Add ability to customize dynamic usernames [[GH-10995](https://github.com/hashicorp/vault/pull/10995)] +* secrets/database/mongodb: Add ability to customize dynamic usernames [[GH-10858](https://github.com/hashicorp/vault/pull/10858)] +* secrets/database/mssql: Add ability to customize dynamic usernames [[GH-10767](https://github.com/hashicorp/vault/pull/10767)] +* secrets/database/mysql: Add ability to customize dynamic usernames [[GH-10834](https://github.com/hashicorp/vault/pull/10834)] +* secrets/database/postgresql: Add ability to customize dynamic usernames [[GH-10766](https://github.com/hashicorp/vault/pull/10766)] +* secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine [[GH-10603](https://github.com/hashicorp/vault/pull/10603)] +* secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS. +* secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault. +* secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine [[GH-10996](https://github.com/hashicorp/vault/pull/10996)] +* secrets/terraform: New secret engine for managing Terraform Cloud API tokens [[GH-10931](https://github.com/hashicorp/vault/pull/10931)] +* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] +* ui: Adds the wizard to the Database Secret Engine [[GH-10982](https://github.com/hashicorp/vault/pull/10982)] +* ui: Database secrets engine, supporting MongoDB only [[GH-10655](https://github.com/hashicorp/vault/pull/10655)] + +IMPROVEMENTS: + +* agent: Add a `vault.retry` stanza that allows specifying the number of retries on failure; this applies both to templating and proxied requests. [[GH-11113](https://github.com/hashicorp/vault/pull/11113)] +* agent: Agent can now run as a Windows service. [[GH-10231](https://github.com/hashicorp/vault/pull/10231)] +* agent: Better concurrent request handling on identical requests proxied through Agent. [[GH-10705](https://github.com/hashicorp/vault/pull/10705)] +* agent: Route templating server through cache when persistent cache is enabled. [[GH-10927](https://github.com/hashicorp/vault/pull/10927)] +* agent: change auto-auth to preload an existing token on start [[GH-10850](https://github.com/hashicorp/vault/pull/10850)] +* auth/approle: Secret ID generation endpoint now returns `secret_id_ttl` as part of its response. [[GH-10826](https://github.com/hashicorp/vault/pull/10826)] +* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] +* auth/okta: Adds support for Okta Verify TOTP MFA. [[GH-10942](https://github.com/hashicorp/vault/pull/10942)] +* changelog: Add dependencies listed in dependencies/2-25-21 [[GH-11015](https://github.com/hashicorp/vault/pull/11015)] +* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] +* core (enterprise): "vault status" command works when a namespace is set. 
[[GH-10725](https://github.com/hashicorp/vault/pull/10725)] +* core (enterprise): Update Trial Enterprise license from 30 minutes to 6 hours +* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] +* core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace [[GH-10375](https://github.com/hashicorp/vault/pull/10375)] +* core: Added active since timestamp to the status output of active nodes. [[GH-10489](https://github.com/hashicorp/vault/pull/10489)] +* core: Check audit device with a test message before adding it. [[GH-10520](https://github.com/hashicorp/vault/pull/10520)] +* core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule [[GH-10774](https://github.com/hashicorp/vault/pull/10774)] +* core: add metrics for active entity count [[GH-10514](https://github.com/hashicorp/vault/pull/10514)] +* core: add partial month client count api [[GH-11022](https://github.com/hashicorp/vault/pull/11022)] +* core: dev mode listener allows unauthenticated sys/metrics requests [[GH-10992](https://github.com/hashicorp/vault/pull/10992)] +* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] +* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] +* storage/raft (enterprise): Listing of peers is now allowed on DR secondary +cluster nodes, as an update operation that takes in DR operation token for +authenticating the request. +* transform (enterprise): Improve FPE transformation performance +* transform (enterprise): Use transactions with batch tokenization operations for improved performance +* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] +* ui: Customize MongoDB input fields on Database Secrets Engine [[GH-10949](https://github.com/hashicorp/vault/pull/10949)] +* ui: Upgrade Ember-cli from 3.8 to 3.22. [[GH-9972](https://github.com/hashicorp/vault/pull/9972)] +* ui: Upgrade Storybook from 5.3.19 to 6.1.17. [[GH-10904](https://github.com/hashicorp/vault/pull/10904)] +* ui: Upgrade date-fns from 1.3.0 to 2.16.1. [[GH-10848](https://github.com/hashicorp/vault/pull/10848)] +* ui: Upgrade dependencies to resolve potential JS vulnerabilities [[GH-10677](https://github.com/hashicorp/vault/pull/10677)] +* ui: better errors on Database secrets engine role create [[GH-10980](https://github.com/hashicorp/vault/pull/10980)] + +BUG FIXES: + +* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] +* agent: Set TokenParent correctly in the Index to be cached. [[GH-10833](https://github.com/hashicorp/vault/pull/10833)] +* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] +* api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used [[GH-10490](https://github.com/hashicorp/vault/pull/10490)] +* api: Fixes CORS API methods that were outdated and invalid [[GH-10444](https://github.com/hashicorp/vault/pull/10444)] +* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] +* auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change. 
[[GH-10919](https://github.com/hashicorp/vault/pull/10919)] +* auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using +`jwks_url` and `jwt_validation_pubkeys`. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] +* auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)] [[GH-10952](https://github.com/hashicorp/vault/pull/10952)] +* consul-template: Update consul-template vendor version and associated dependencies to master, +pulling in [[GH-10756](https://github.com/hashicorp/vault/pull/10756)] +* core (enterprise): Limit entropy augmentation during token generation to root tokens. [[GH-10487](https://github.com/hashicorp/vault/pull/10487)] +* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. +* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] +* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] +* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] +* core: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] +* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and +`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] +* core: Make all APIs that report init status consistent, and make them report +initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)] +* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] +* core: Turn off case sensitivity for allowed entity alias check during token create operation. 
[[GH-10743](https://github.com/hashicorp/vault/pull/10743)] +* http: change max_request_size to be unlimited when the config value is less than 0 [[GH-10072](https://github.com/hashicorp/vault/pull/10072)] +* license: Fix license caching issue that prevents new licenses from being picked up by the license manager [[GH-10424](https://github.com/hashicorp/vault/pull/10424)] +* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] +* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] +* replication (enterprise): Fix bug with not starting merkle sync while requests are in progress +* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)] +* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)] +* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)] +* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] +* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] +* serviceregistration: Fix race during shutdown of Consul service registration. [[GH-10901](https://github.com/hashicorp/vault/pull/10901)] +* storage/raft (enterprise): Automated snapshots with Azure required specifying +`azure_blob_environment`, which should have had as a default `AZUREPUBLICCLOUD`. +* storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404. +* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and +didn't work. Renamed to aws_s3_kms_key, and made it work so that, when provided, +the given key will be used to encrypt the snapshot using AWS KMS. +* transform (enterprise): Fix bug tokenization handling metadata on exportable stores +* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect +* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path +* transform (enterprise): Make expiration timestamps human readable +* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error +* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] +* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)] +* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] +* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] +* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)] + +DEPRECATIONS: + +* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated. 
+Refer to the CHANGES section for additional details. + +## 1.6.7 + +### 29 September 2021 + +BUG FIXES: + +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12597](https://github.com/hashicorp/vault/pull/12597)] + +## 1.6.6 + +### 26 August 2021 + +SECURITY: + +* _UI Secret Caching_: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.15.15 [[GH-12423](https://github.com/hashicorp/vault/pull/12423)] + +IMPROVEMENTS: + +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] + +BUG FIXES: + +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] + +## 1.6.5 + +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)] + +BUG FIXES: + +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. 
[[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] + +## 1.6.4 + +### 21 April 2021 + +SECURITY: + +* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the + Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions + 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) +* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all + versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) + +CHANGES: + +* go: Update to Go 1.15.11 [[GH-11396](https://github.com/hashicorp/vault/pull/11396)] + +IMPROVEMENTS: + +* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] +* core: Add tls_max_version listener config option. (See the listener sketch below.) [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] + +BUG FIXES: + +* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] +* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] +* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] +* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] +* pki: Preserve ordering of all DN attribute values when issuing certificates [[GH-11259](https://github.com/hashicorp/vault/pull/11259)] +* replication: Fix: mounts created within a namespace that was part of an Allow + filtering rule would not appear on performance secondary if created after the rule + was defined. +* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] + +## 1.6.3 + +### February 25, 2021 + +SECURITY: + +* Limited Unauthenticated License Metadata Read: We addressed a security vulnerability that allowed for the unauthenticated +reading of Vault license metadata from DR Secondaries. This vulnerability affects Vault Enterprise and is +fixed in 1.6.3 (CVE-2021-27668). 
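+
+As an aside on the `tls_max_version` listener option added in 1.6.4 above, here is a minimal listener stanza sketch. The address and certificate paths are placeholders, not recommendations:
+
+```hcl
+# Illustrative listener stanza (sketch only). tls_max_version caps the
+# negotiated TLS version, complementing the existing tls_min_version.
+listener "tcp" {
+  address         = "127.0.0.1:8200"             # placeholder address
+  tls_cert_file   = "/etc/vault/tls/server.crt"  # placeholder path
+  tls_key_file    = "/etc/vault/tls/server.key"  # placeholder path
+  tls_max_version = "tls12"
+}
+```
+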
+
+CHANGES:
+
+* secrets/mongodbatlas: Move from whitelist to access list API [[GH-10966](https://github.com/hashicorp/vault/pull/10966)]
+
+IMPROVEMENTS:
+
+* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)]
+
+BUG FIXES:
+
+* auth/kubernetes: Cancel API calls to TokenReview endpoint when request context
+is closed [[GH-10930](https://github.com/hashicorp/vault/pull/10930)]
+* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)]
+* quotas: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)]
+* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)]
+* replication (enterprise): Don't write request count data on DR Secondaries.
+Fixes DR Secondaries becoming out of sync approximately every 30s. [[GH-10970](https://github.com/hashicorp/vault/pull/10970)]
+* secrets/azure (enterprise): Forward service principal credential creation to the
+primary cluster if called on a performance standby or performance secondary. [[GH-10902](https://github.com/hashicorp/vault/pull/10902)]
+
+## 1.6.2
+
+### January 29, 2021
+
+SECURITY:
+
+* IP Address Disclosure: We fixed a vulnerability where, under some error
+conditions, Vault would return an error message disclosing internal IP
+addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in
+1.6.2 (CVE-2021-3024).
+* Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command
+on DR secondaries did not require authentication. This issue impacts the
+stability of HA architecture, as a bad actor could remove all standby
+nodes from a DR secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1,
+and is fixed in 1.6.2 (CVE-2021-3282).
+* Mount Path Disclosure: Vault previously returned different HTTP status codes for
+existent and non-existent mount paths. This behavior would allow unauthenticated
+brute force attacks to reveal which paths had valid mounts. This issue affects
+Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594).
+
+CHANGES:
+
+* go: Update go version to 1.15.7 [[GH-10730](https://github.com/hashicorp/vault/pull/10730)]
+
+FEATURES:
+
+* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)]
+
+IMPROVEMENTS:
+
+* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)]
+* core: Reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)]
+* storage/raft (enterprise): Listing of peers is now allowed on DR secondary
+cluster nodes, as an update operation that takes in a DR operation token for
+authenticating the request.
+* core: Allow setting tls_servername for raft retry/auto-join [[GH-10698](https://github.com/hashicorp/vault/pull/10698)]
+
+BUG FIXES:
+
+* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)]
+* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
+* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)]
+* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)]
+* storage/raft (enterprise): Automated snapshots with Azure required specifying
+`azure_blob_environment`, which should have defaulted to `AZUREPUBLICCLOUD`.
+* storage/raft (enterprise): Autosnapshots config and storage weren't excluded from
+performance replication, causing conflicts and errors.
+* ui: Fix bug that double encodes the secret route when there are spaces in the path, which made it impossible to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)]
+* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)]
+
+## 1.6.1
+
+### December 16, 2020
+
+SECURITY:
+
+* LDAP Auth Method: We addressed an issue where error messages returned by the
+ LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault
+ Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177).
+* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent
+ users within namespaces from applying Sentinel EGP policies to paths above
+ their namespace. This vulnerability affects Vault Enterprise and is fixed in
+ 1.5.6 and 1.6.1 (CVE-2020-35453).
+
+IMPROVEMENTS:
+
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)]
+* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)]
+
+BUG FIXES:
+
+* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)]
+* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)]
+* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace.
+* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)]
+* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)]
+* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and
+`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)]
+* core: Make all APIs that report init status consistent, and make them report
+initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
+* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
+* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
+* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
+* secrets/transit: Allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+* storage/raft (enterprise): The parameter `aws_s3_server_kms_key` was misnamed and did not work. It has been renamed to `aws_s3_kms_key`; when provided, the given key is now used to encrypt the snapshot using AWS KMS.
+* transform (enterprise): Fix bug in tokenization handling of metadata on exportable stores
+* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
+* transform (enterprise): Make expiration timestamps human readable
+* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
+* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
+* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
+* ui: Fix radio click on secrets and auth list pages. [[GH-10586](https://github.com/hashicorp/vault/pull/10586)]
+
+## 1.6.0
+
+### November 11th, 2020
+
+NOTE:
+
+Binaries for 32-bit macOS (i.e. the `darwin_386` build) will no longer be published. This target was dropped in the latest version of the Go compiler.
+
+CHANGES:
+
+* agent: Agent now properly returns a non-zero exit code on error, such as one due to template rendering failure. Using `error_on_missing_key` in the template config will cause Agent to immediately exit on failure. To make Agent exit properly after continuous template rendering failures, the old behavior of indefinitely restarting the template server has been changed to exit once the default 12 retry attempts (with exponential backoff) are exhausted. [[GH-9670](https://github.com/hashicorp/vault/pull/9670)]
+* token: Periodic tokens generated by auth methods will have the period value stored in their token entries. [[GH-7885](https://github.com/hashicorp/vault/pull/7885)]
+* core: New telemetry metrics reporting mount table size and number of entries [[GH-10201](https://github.com/hashicorp/vault/pull/10201)]
+* go: Updated Go version to 1.15.4 [[GH-10366](https://github.com/hashicorp/vault/pull/10366)]
+
+FEATURES:
+
+* **Couchbase Secrets**: Vault can now manage static and dynamic credentials for Couchbase. [[GH-9664](https://github.com/hashicorp/vault/pull/9664)]
+* **Expanded Password Policy Support**: Custom password policies are now supported for all database engines.
+* **Integrated Storage Auto Snapshots (Enterprise)**: This feature enables an operator to schedule snapshots of the integrated storage backend and ensure those snapshots are persisted elsewhere (see the sketch below).
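+
+A minimal sketch of driving the automated snapshot feature above through the HTTP API with the official Go client. The `sys/storage/raft/snapshot-auto/config/:name` path and all field names other than `aws_s3_kms_key` and `azure_blob_environment` (both referenced in the fixes above) are assumptions for illustration, not a confirmed API:
+
+```go
+package main
+
+import (
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    // Reads VAULT_ADDR and VAULT_TOKEN from the environment.
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Hypothetical: schedule a daily snapshot persisted to S3.
+    _, err = client.Logical().Write("sys/storage/raft/snapshot-auto/config/daily", map[string]interface{}{
+        "interval":       "24h",
+        "retain":         7,
+        "storage_type":   "aws-s3",
+        "aws_s3_bucket":  "vault-snapshots",
+        "aws_s3_region":  "us-east-1",
+        "aws_s3_kms_key": "alias/vault-snapshots", // parameter renamed in the 1.6.1 fix above
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```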
+* **Integrated Storage Cloud Auto Join**: This feature for integrated storage enables Vault nodes running in the cloud to automatically discover and join a Vault cluster via operator-supplied metadata.
+* **Key Management Secrets Engine (Enterprise; Tech Preview)**: This new secrets engine allows securely distributing and managing keys in Azure cloud KMS services.
+* **Seal Migration**: With Vault 1.6, we will support migrating from an auto unseal mechanism to a different mechanism of the same type. For example, if you were using an AWS KMS key to automatically unseal, you can now migrate to a different AWS KMS key.
+* **Tokenization (Enterprise; Tech Preview)**: Tokenization supports creating irreversible “tokens” from sensitive data. Tokens can be used in less secure environments, protecting the original data.
+* **Vault Client Count**: Vault now counts the number of active entities (and non-entity tokens) per month and makes this information available via the "Metrics" section of the UI.
+
+IMPROVEMENTS:
+
+* auth/approle: Role names can now be referenced in templated policies through the `approle.metadata.role_name` property [[GH-9529](https://github.com/hashicorp/vault/pull/9529)]
+* auth/aws: Improve logic check on wildcard `BoundIamPrincipalARNs` and include role name on error messages on check failure [[GH-10036](https://github.com/hashicorp/vault/pull/10036)]
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-123](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/123)]
+* auth/jwt: Add EdDSA (ed25519) to supported algorithms [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* auth/jwt: Improve CLI authorization error [[GH-137](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/137)]
+* auth/jwt: Add OIDC namespace_in_state option [[GH-140](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/140)]
+* secrets/transit: Fix missing plaintext in bulk decrypt response [[GH-9991](https://github.com/hashicorp/vault/pull/9991)]
+* command/server: Delay informational messages in -dev mode until logs have settled. [[GH-9702](https://github.com/hashicorp/vault/pull/9702)]
+* command/server: Add environment variable support for `disable_mlock`. [[GH-9931](https://github.com/hashicorp/vault/pull/9931)]
+* core/metrics: Add metrics for storage cache [[GH-10079](https://github.com/hashicorp/vault/pull/10079)]
+* core/metrics: Add metrics for leader status [[GH-10147](https://github.com/hashicorp/vault/pull/10147)]
+* physical/azure: Add the ability to use Azure Instance Metadata Service to set the credentials for Azure Blob storage on the backend. [[GH-10189](https://github.com/hashicorp/vault/pull/10189)]
+* sdk/framework: Add a time type for API fields. [[GH-9911](https://github.com/hashicorp/vault/pull/9911)]
+* secrets/database: Added support for password policies to all databases [[GH-9641](https://github.com/hashicorp/vault/pull/9641),
+ [and more](https://github.com/hashicorp/vault/pulls?q=is%3Apr+is%3Amerged+dbpw)]
+* secrets/database/cassandra: Added support for static credential rotation [[GH-10051](https://github.com/hashicorp/vault/pull/10051)]
+* secrets/database/elasticsearch: Added support for static credential rotation [[GH-19](https://github.com/hashicorp/vault-plugin-database-elasticsearch/pull/19)]
+* secrets/database/hanadb: Added support for root credential & static credential rotation [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/hanadb: Default password generation now includes dashes. Custom statements may need to be updated
+ to include quotes around the password field [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/influxdb: Added support for static credential rotation [[GH-10118](https://github.com/hashicorp/vault/pull/10118)]
+* secrets/database/mongodbatlas: Added support for root credential rotation [[GH-14](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/14)]
+* secrets/database/mongodbatlas: Support scopes field in creation statements for MongoDB Atlas database plugin [[GH-15](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/15)]
+* seal/awskms: Add logging during awskms auto-unseal [[GH-9794](https://github.com/hashicorp/vault/pull/9794)]
+* storage/azure: Update SDK library to use [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) since the previous library has been deprecated. [[GH-9577](https://github.com/hashicorp/vault/pull/9577/)]
+* secrets/ad: `rotate-root` now supports POST requests like other secrets engines [[GH-70](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/70)]
+* ui: Add UI functionality for the Transform Secret Engine [[GH-9665](https://github.com/hashicorp/vault/pull/9665)]
+* ui: Pricing metrics dashboard [[GH-10049](https://github.com/hashicorp/vault/pull/10049)]
+
+BUG FIXES:
+
+* auth/jwt: Fix bug preventing config edit UI from rendering [[GH-141](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/141)]
+* cli: Don't open or overwrite a raft snapshot file on an unsuccessful `vault operator raft snapshot` [[GH-9894](https://github.com/hashicorp/vault/pull/9894)]
+* core: Implement constant-time version of shamir GF(2^8) math [[GH-9932](https://github.com/hashicorp/vault/pull/9932)]
+* core: Fix resource leak in plugin API (plugin-dependent, not all plugins impacted) [[GH-9557](https://github.com/hashicorp/vault/pull/9557)]
+* core: Fix race involved in enabling certain features via a license change
+* core: Fix error handling in HCL parsing of objects with invalid syntax [[GH-410](https://github.com/hashicorp/hcl/pull/410)]
+* identity: Check for timeouts in entity API [[GH-9925](https://github.com/hashicorp/vault/pull/9925)]
+* secrets/database: Fix handling of TLS options in mongodb connection strings [[GH-9519](https://github.com/hashicorp/vault/pull/9519)]
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-93](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/93)]
+* ui: Mask LDAP bindpass while typing [[GH-10087](https://github.com/hashicorp/vault/pull/10087)]
+* ui: Update language in promote DR modal flow [[GH-10155](https://github.com/hashicorp/vault/pull/10155)]
+* ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)]
+* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)]
+
+## 1.5.9
+
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)]
+
+BUG FIXES:
+
+* core: Correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+
+## 1.5.8
+
+### April 21st, 2021
+
+SECURITY:
+
+* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the
+ Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions
+ 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668)
+
+CHANGES:
+
+* go: Update to Go 1.14.15 [[GH-11397](https://github.com/hashicorp/vault/pull/11397)]
+
+IMPROVEMENTS:
+
+* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+
+BUG FIXES:
+
+* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)]
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)]
+
+## 1.5.7
+
+### January 29, 2021
+
+SECURITY:
+
+* IP Address Disclosure: We fixed a vulnerability where, under some error
+conditions, Vault would return an error message disclosing internal IP
+addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in
+1.6.2 and 1.5.7 (CVE-2021-3024).
+* Mount Path Disclosure: Vault previously returned different HTTP status codes for
+existent and non-existent mount paths. This behavior would allow unauthenticated
+brute force attacks to reveal which paths had valid mounts. This issue affects
+Vault and Vault Enterprise and is fixed in 1.6.2 and 1.5.7 (CVE-2020-25594).
+
+IMPROVEMENTS:
+
+* storage/raft (enterprise): Listing of peers is now allowed on DR secondary
+cluster nodes, as an update operation that takes in a DR operation token for
+authenticating the request (see the sketch below).
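+
+A minimal sketch of that peer-listing call with the official Go client. The `sys/storage/raft/configuration` path matches the endpoint behind `vault operator raft list-peers`, but the `dr_operation_token` field name is an assumption, not a confirmed parameter:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig()) // VAULT_ADDR of the DR secondary
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // On a DR secondary this is an update (write) operation carrying the
+    // DR operation token; on other nodes a plain read suffices.
+    secret, err := client.Logical().Write("sys/storage/raft/configuration", map[string]interface{}{
+        "dr_operation_token": "<token from the DR operation token workflow>", // assumed field name
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Printf("raft configuration: %#v\n", secret.Data)
+}
+```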
+
+BUG FIXES:
+
+* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)]
+* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
+
+## 1.5.6
+
+### December 16, 2020
+
+SECURITY:
+
+* LDAP Auth Method: We addressed an issue where error messages returned by the
+ LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault
+ Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177).
+* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent
+ users within namespaces from applying Sentinel EGP policies to paths above
+ their namespace. This vulnerability affects Vault Enterprise and is fixed in
+ 1.5.6 and 1.6.1 (CVE-2020-35453).
+
+IMPROVEMENTS:
+
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+
+BUG FIXES:
+
+* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace.
+* core: Fix bug where updating an existing path quota could introduce a conflict [[GH-10285](https://github.com/hashicorp/vault/pull/10285)]
+* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)]
+* quotas (enterprise): Reset cache before loading quotas in the db during startup
+* secrets/transit: Allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+
+## 1.5.5
+
+### October 21, 2020
+
+IMPROVEMENTS:
+
+* auth/aws, core/seal, secret/aws: Set default IMDS timeouts to match AWS SDK [[GH-10133](https://github.com/hashicorp/vault/pull/10133)]
+
+BUG FIXES:
+
+* auth/aws: Restrict region selection when in the aws-us-gov partition to avoid IAM errors [[GH-9947](https://github.com/hashicorp/vault/pull/9947)]
+* core (enterprise): Allow operators to add and remove (Raft) peers in a DR secondary cluster using Integrated Storage.
+* core (enterprise): Add DR operation token to the remove peer API and CLI command (when DR secondary).
+* core (enterprise): Fix deadlock in handling EGP policies
+* core (enterprise): Fix extraneous error messages in DR Cluster
+* secrets/mysql: Conditionally overwrite TLS parameters for MySQL secrets engine [[GH-9729](https://github.com/hashicorp/vault/pull/9729)]
+* secrets/ad: Fix bug where `password_policy` setting was not using correct key when `ad/config` was read [[GH-71](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/71)]
+* ui: Fix issue with listing roles and methods on the same auth methods with different names [[GH-10122](https://github.com/hashicorp/vault/pull/10122)]
+
+## 1.5.4
+
+### September 24th, 2020
+
+SECURITY:
+
+* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
+
+IMPROVEMENTS:
+
+* secrets/pki: Handle expiration of a cert not in storage as a success [[GH-9880](https://github.com/hashicorp/vault/pull/9880)]
+* auth/kubernetes: Add an option to disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod [[GH-97](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/97)]
+* secrets/gcp: Add check for 403 during rollback to prevent repeated deletion calls [[GH-97](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/97)]
+* core: Disable usage metrics collection on performance standby nodes. [[GH-9966](https://github.com/hashicorp/vault/pull/9966)]
+* credential/aws: Added X-Amz-Content-Sha256 as a default STS request header [[GH-10009](https://github.com/hashicorp/vault/pull/10009)]
+
+BUG FIXES:
+
+* agent: Fix `disable_fast_negotiation` not being set on the auth method when configured by user. [[GH-9892](https://github.com/hashicorp/vault/pull/9892)]
+* core (enterprise): Fix hang when cluster-wide plugin reload cleanup is slow on unseal
+* core (enterprise): Fix an error in cluster-wide plugin reload cleanup following such a reload
+* core: Fix crash when metrics collection encounters zero-length keys in KV store [[GH-9811](https://github.com/hashicorp/vault/pull/9881)]
+* mfa (enterprise): Fix incorrect handling of PingID responses that could result in auth requests failing
+* replication (enterprise): Improve race condition when using a newly created token on a performance standby node
+* replication (enterprise): Only write failover cluster addresses if they've changed
+* ui: Fix bug where dropdown for identity/entity management is not reflective of actual policy [[GH-9958](https://github.com/hashicorp/vault/pull/9958)]
+
+## 1.5.3
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing
+
+## 1.5.2.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.5.2
+
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+ For more details and a workaround, see the [1.5.2 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.2)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
+ customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.5.1
+
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using Vault Agent with cert auto-auth and caching enabled, under certain circumstances, clients without permission to access the agent's token may retrieve the token without login credentials. This vulnerability affects Vault Agent 1.1.0 and newer and is fixed in 1.5.1 (CVE-2020-17455)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+ For more details and a workaround, see the [1.5.1 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.1)
+
+CHANGES:
+
+* pki: The tidy operation will now remove revoked certificates if the parameter `tidy_revoked_certs` is set to `true`. This will result in certificate entries being immediately removed, as opposed to waiting until their NotAfter time. Note that this only affects certificates that have already been revoked. [[GH-9609](https://github.com/hashicorp/vault/pull/9609)]
+* go: Updated Go version to 1.14.7
+
+IMPROVEMENTS:
+
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-9574](https://github.com/hashicorp/vault/pull/9574)]
+* auth/jwt: Add EdDSA to supported algorithms. [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* secrets/openldap: Add "ad" schema that allows the engine to correctly rotate AD passwords. [[GH-9740](https://github.com/hashicorp/vault/pull/9740)]
+* pki: Add an `allowed_domains_template` parameter that enables the use of identity templating within the `allowed_domains` parameter. [[GH-8509](https://github.com/hashicorp/vault/pull/8509)]
+* secret/azure: Use write-ahead-logs to clean up any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+* ui: The wrap TTL option on the transit engine export action has been updated to a new component. [[GH-9632](https://github.com/hashicorp/vault/pull/9632)]
+* ui: The Wrap Tool now uses the newest version of the TTL Picker component. [[GH-9691](https://github.com/hashicorp/vault/pull/9691)]
+
+BUG FIXES:
+
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-9603](https://github.com/hashicorp/vault/pull/9603)]
+* replication (enterprise): Fix status API output incorrectly stating replication is in `idle` state.
+* replication (enterprise): Use PrimaryClusterAddr if it's been set
+* core: Fix panic when printing over-long info fields at startup [[GH-9681](https://github.com/hashicorp/vault/pull/9681)]
+* core: Seal migration using the new minimal-downtime strategy didn't work properly with performance standbys. [[GH-9690](https://github.com/hashicorp/vault/pull/9690)]
+* core: Vault failed to start when there were non-string values in seal configuration [[GH-9555](https://github.com/hashicorp/vault/pull/9555)]
+* core: Handle a trailing slash in the API address used for enabling replication
+
+## 1.5.0
+
+### July 21st, 2020
+
+CHANGES:
+
+* audit: Token TTL and issue time are now provided in the auth portion of audit logs. [[GH-9091](https://github.com/hashicorp/vault/pull/9091)]
+* auth/gcp: Changes the default name of the entity alias that gets created to be the role ID for both IAM and GCE authentication. [[GH-99](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/99)]
+* core: Remove the addition of newlines to parsed configuration when using integer/boolean values [[GH-8928](https://github.com/hashicorp/vault/pull/8928)]
+* cubbyhole: Reject reads and writes to an empty ("") path. [[GH-8971](https://github.com/hashicorp/vault/pull/8971)]
+* secrets/azure: Default password generation changed from uuid to cryptographically secure randomized string [[GH-40](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/40)]
+* storage/gcs: The `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` environment variable
+ or default credentials may be used instead [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
+* storage/raft: The storage configuration now accepts a new `max_entry_size` config that will limit
+ the total size in bytes of any entry committed via raft. It defaults to `"1048576"` (1MiB). [[GH-9027](https://github.com/hashicorp/vault/pull/9027)]
+* token: Token creation with custom token ID via `id` will no longer allow periods (`.`) as part of the input string.
+ The final generated token value may contain periods, such as the `s.` prefix for service token
+ indication. [[GH-8646](https://github.com/hashicorp/vault/pull/8646/files)]
+* token: Token renewals will now return token policies within the `token_policies`, identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
+* go: Updated Go version to 1.14.4
+
+FEATURES:
+
+* **Monitoring**: We have released a Splunk App for Enterprise customers. The app is accompanied by an updated monitoring guide and a few new metrics to enable OSS users to effectively monitor Vault.
+* **Password Policies**: Allows operators to customize how passwords are generated for select secret engines (OpenLDAP, Active Directory, Azure, and RabbitMQ).
+* **Replication UI Improvements**: We have redesigned the replication UI to highlight the state and relationship between primaries and secondaries and improved management workflows, enabling a more holistic understanding of multiple Vault clusters.
+* **Resource Quotas**: As of 1.5, Vault supports specifying a quota to rate limit requests on OSS and Enterprise. Enterprise customers also have access to set quotas on the number of leases that can be generated on a path.
+* **OpenShift Support**: We have updated the Helm charts to allow users to install Vault onto their OpenShift clusters.
+* **Seal Migration**: We have made updates to allow migrations from auto unseal to Shamir unseal on Enterprise.
+* **AWS Auth Web Identity Support**: We've added support for AWS Web Identities, which will be used in the credentials chain if present.
+* **Vault Monitor**: Similar to the monitor command for Consul and Nomad, we have added the ability for Vault to stream logs from other Vault servers at varying log levels.
+* **AWS Secrets Groups Support**: IAM users generated by Vault may now be added to IAM Groups.
+* **Integrated Storage as HA Storage**: In Vault 1.5, it is possible to use Integrated Storage as HA Storage with a different storage backend as the regular storage.
+* **OIDC Auth Provider Extensions**: We've added support to OIDC Auth to incorporate IdP-specific extensions. Currently this includes expanded Azure AD groups support.
+* **GCP Secrets**: Support BigQuery dataset ACLs in absence of IAM endpoints (see the sketch below).
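+
+As a rough illustration of the BigQuery dataset ACL support above, a GCP roleset binding could target a dataset resource. The resource path, the `bindings` HCL shape, and all field values here are illustrative assumptions, not confirmed plugin syntax:
+
+```go
+package main
+
+import (
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Hypothetical roleset binding against a BigQuery dataset resource.
+    bindings := `
+resource "//bigquery.googleapis.com/projects/my-project/datasets/my_dataset" {
+  roles = ["roles/bigquery.dataViewer"]
+}`
+    _, err = client.Logical().Write("gcp/roleset/bq-viewer", map[string]interface{}{
+        "project":      "my-project",
+        "secret_type":  "access_token",
+        "token_scopes": []string{"https://www.googleapis.com/auth/cloud-platform"},
+        "bindings":     bindings,
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```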
+* **KMIP**: Add support for signing client certificate requests (CSRs) rather than having them be generated entirely within Vault.
+
+IMPROVEMENTS:
+
+* audit: Replication status requests are no longer audited. [[GH-8877](https://github.com/hashicorp/vault/pull/8877)]
+* audit: Added mount_type field to requests and responses. [[GH-9167](https://github.com/hashicorp/vault/pull/9167)]
+* auth/aws: Add support for Web Identity credentials [[GH-7738](https://github.com/hashicorp/vault/pull/7738)]
+* auth/jwt: Support users that are members of more than 200 groups on Azure [[GH-120](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/120)]
+* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
+* auth/kubernetes: Allow disabling `iss` validation [[GH-91](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/91)]
+* auth/kubernetes: Try reading the ca.crt and TokenReviewer JWT from the default service account [[GH-83](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/83)]
+* cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)]
+* cli: Add a new subcommand, `vault monitor`, for tailing server logs in the console. [[GH-8477](https://github.com/hashicorp/vault/pull/8477)]
+* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
+* core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)]
+* core: New telemetry metrics covering token counts, token creation, KV secret counts, lease creation. [[GH-9239](https://github.com/hashicorp/vault/pull/9239)] [[GH-9250](https://github.com/hashicorp/vault/pull/9250)] [[GH-9244](https://github.com/hashicorp/vault/pull/9244)] [[GH-9052](https://github.com/hashicorp/vault/pull/9052)]
+* physical/gcs: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
+* physical/spanner: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9423](https://github.com/hashicorp/vault/pull/9423)]
+* plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)]
+* plugin (enterprise): Add a scope field to plugin reload, which, when global, reloads the plugin anywhere in a cluster. [[GH-9347](https://github.com/hashicorp/vault/pull/9347)]
+* sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)]
+* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)]
+* secrets/database: Add static role rotation for MSSQL database plugin [[GH-9062](https://github.com/hashicorp/vault/pull/9062)]
+* secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
+* secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
+* secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
+* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)]
+* secrets/ssh: The [Vault SSH Helper](https://github.com/hashicorp/vault-ssh-helper) can now be configured to reference a mount in a namespace [[GH-44](https://github.com/hashicorp/vault-ssh-helper/pull/44)]
+* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
+* secrets/transit: Improve transit batch encrypt and decrypt latencies [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
+* sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.
+* ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)]
+* ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)]
+* ui: Add replication dashboards. Improve replication management workflows. [[GH-8705](https://github.com/hashicorp/vault/pull/8705)]
+* ui: Update alert banners to match design system's black text. [[GH-9463](https://github.com/hashicorp/vault/pull/9463)]
+
+BUG FIXES:
+
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
+* core: Extend replicated cubbyhole fix in 1.4.0 to cover case where a performance primary is also a DR primary [[GH-9148](https://github.com/hashicorp/vault/pull/9148)]
+* replication (enterprise): Use the PrimaryClusterAddr if it's been set
+* seal/awskms: Fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
+* sentinel: Fix panic due to concurrent map access when rules iterate over metadata maps
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
+* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
+* secrets/database: Fix parsing of multi-line PostgreSQL statements [[GH-8512](https://github.com/hashicorp/vault/pull/8512)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-90](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/90)]
+* secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
+* ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines. [[GH-8913](https://github.com/hashicorp/vault/pull/8913)]
+* ui: Disallow max versions value larger than 9999999999999999 on kv2 secrets engine. [[GH-9242](https://github.com/hashicorp/vault/pull/9242)]
+* ui: Add and upgrade missing dependencies to resolve a failure with `make static-dist`. [[GH-9277](https://github.com/hashicorp/vault/pull/9371)]
+
+## 1.4.7.1
+
+### October 15th, 2020
+
+### Enterprise Only
+
+BUG FIXES:
+
+* replication (enterprise): Fix panic when old filter path evaluation fails
+
+## 1.4.7
+
+### September 24th, 2020
+
+SECURITY:
+
+* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
+
+IMPROVEMENTS:
+
+* secret/azure: Use write-ahead-logs to clean up any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+
+BUG FIXES:
+
+* replication (enterprise): Don't stop replication if old filter path evaluation fails
+
+## 1.4.6
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing [[GH-9824](https://github.com/hashicorp/vault/pull/9824)]
+
+## 1.4.5.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.4.5
+
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+ For more details and a workaround, see the [1.4.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.5)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
+ customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.4.4
+
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+ For more details and a workaround, see the [1.4.4 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.4)
+
+BUG FIXES:
+
+* auth/okta: Fix bug introduced in 1.4.0: only 200 external groups were fetched even if the user belonged to more [[GH-9580](https://github.com/hashicorp/vault/pull/9580)]
+* seal/awskms: Fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
+* secrets/aws: Fix possible issue creating access keys when using Performance Standbys [[GH-9606](https://github.com/hashicorp/vault/pull/9606)]
+
+IMPROVEMENTS:
+
+* auth/aws: Retry on transient failures during AWS IAM auth login attempts [[GH-8727](https://github.com/hashicorp/vault/pull/8727)]
+* ui: Add transit key algorithms aes128-gcm96, ecdsa-p384, ecdsa-p521 to the UI. [[GH-9070](https://github.com/hashicorp/vault/pull/9070)] & [[GH-9520](https://github.com/hashicorp/vault/pull/9520)]
+
+## 1.4.3
+
+### July 2nd, 2020
+
+IMPROVEMENTS:
+
+* auth/aws: Add support for Web Identity credentials [[GH-9251](https://github.com/hashicorp/vault/pull/9251)]
+* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
+* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-9311](https://github.com/hashicorp/vault/pull/9311)]
+* physical/mysql: Require TLS or plaintext flagging in MySQL configuration [[GH-9012](https://github.com/hashicorp/vault/pull/9012)]
+* ui: Link to the Vault Changelog in the UI footer [[GH-9216](https://github.com/hashicorp/vault/pull/9216)]
+
+BUG FIXES:
+
+* agent: Restart template server when it shuts down [[GH-9200](https://github.com/hashicorp/vault/pull/9200)]
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-9278](https://github.com/hashicorp/vault/pull/9278)]
+* replication: The issue causing cubbyholes in namespaces on performance secondaries to not work, which was fixed in 1.4.0, was still an issue when the primary was both a performance primary and DR primary.
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9207](https://github.com/hashicorp/vault/pull/9207)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9208](https://github.com/hashicorp/vault/pull/9208)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-9277](https://github.com/hashicorp/vault/pull/9277)]
+
+## 1.4.2 (May 21st, 2020)
+
+SECURITY:
+
+* core: Proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4.0 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)]
+* secrets/gcp: Fix a regression in 1.4.0 where the system TTLs were being used instead of the configured backend TTLs for dynamic service accounts. This vulnerability is CVE-2020-12757. [[GH-85](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/85)]
+
+IMPROVEMENTS:
+
+* storage/raft: The storage stanza now accepts `leader_ca_cert_file`, `leader_client_cert_file`, and
+ `leader_client_key_file` parameters to read and parse TLS certificate information from paths on disk.
+ Existing non-path based parameters will continue to work, but their values will need to be provided as a
+ single-line string with newlines delimited by `\n`. [[GH-8894](https://github.com/hashicorp/vault/pull/8894)]
+* storage/raft: The `vault status` CLI command and the `sys/leader` API now contain the committed and applied
+ raft indexes. [[GH-9011](https://github.com/hashicorp/vault/pull/9011)]
+
+BUG FIXES:
+
+* auth/aws: Fix token renewal issues caused by the metadata changes in 1.4.1 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)]
+* auth/ldap: Fix 1.4.0 regression that could result in auth failures when LDAP auth config includes upndomain. [[GH-9041](https://github.com/hashicorp/vault/pull/9041)]
+* secrets/ad: Forward rotation requests from standbys to active clusters [[GH-66](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/66)]
+* secrets/database: Prevent generation of usernames that are not allowed by the MongoDB Atlas API [[GH-9](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/9)]
+* secrets/database: Return an error if a manual rotation of static account credentials fails [[GH-9035](https://github.com/hashicorp/vault/pull/9035)]
+* secrets/openldap: Forward all rotation requests from standbys to active clusters [[GH-9028](https://github.com/hashicorp/vault/pull/9028)]
+* secrets/transform (enterprise): Fix panic that could occur when accessing cached template entries, such as requests
+ that accessed templates directly or indirectly from a performance standby node.
+* serviceregistration: Fix a regression for Consul service registration that ignored using the listener address as
+ the redirect address unless api_addr was provided. It now properly uses the same redirect address as the one
+ used by Vault's Core object. [[GH-8976](https://github.com/hashicorp/vault/pull/8976)]
+* storage/raft: Advertise the configured cluster address to the rest of the nodes in the raft cluster. This fixes
+ an issue where a node advertising 0.0.0.0 is not using a unique hostname. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* storage/raft: Fix panic when multiple nodes attempt to join the cluster at once. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* sys: The path provided in `sys/internal/ui/mounts/:path` is now namespace-aware. This fixes an issue
+ where `vault kv` subcommands returned permission denied whenever namespaces were provided in the path.
+ [[GH-8962](https://github.com/hashicorp/vault/pull/8962)]
+* ui: Fix snowman that appears when namespaces have more than one period [[GH-8910](https://github.com/hashicorp/vault/pull/8910)]
+
+## 1.4.1 (April 30th, 2020)
+
+CHANGES:
+
+* auth/aws: The default set of metadata fields added in 1.4.1 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* storage/raft: Disallow `ha_storage` to be specified if `raft` is set as the `storage` type. [[GH-8707](https://github.com/hashicorp/vault/pull/8707)]
+
+IMPROVEMENTS:
+
+* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* auth/aws: Improve region selection to avoid errors seen if the account hasn't enabled some newer AWS regions [[GH-8679](https://github.com/hashicorp/vault/pull/8679)]
+* auth/azure: Enable login from Azure VMs with user-assigned identities [[GH-33](https://github.com/hashicorp/vault-plugin-auth-azure/pull/33)]
+* auth/gcp: The set of metadata stored during login is now configurable [[GH-92](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/92)]
+* auth/gcp: The type of alias name used during login is now configurable [[GH-95](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/95)]
+* auth/ldap: Improve error messages during LDAP operation failures [[GH-8740](https://github.com/hashicorp/vault/pull/8740)]
+* identity: Add a batch delete API for identity entities [[GH-8785](https://github.com/hashicorp/vault/pull/8785)]
+* identity: Improve performance of logins when no group updates are needed [[GH-8795](https://github.com/hashicorp/vault/pull/8795)]
+* metrics: Add `vault.identity.num_entities` metric [[GH-8816](https://github.com/hashicorp/vault/pull/8816)]
+* secrets/kv: Allow `delete-version-after` to be reset to 0 via the CLI [[GH-8635](https://github.com/hashicorp/vault/pull/8635)]
+* secrets/rabbitmq: Improve error handling and reporting [[GH-8619](https://github.com/hashicorp/vault/pull/8619)]
+* ui: Provide One Time Password during Operation Token generation process [[GH-8630](https://github.com/hashicorp/vault/pull/8630)]
+
+BUG FIXES:
+
+* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
+* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
+* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
+* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
+* core: Fix blocked requests when a SIGHUP is issued while a long-running request holds the state lock.
+ Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time.
+ [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
+* core: Always rewrite the .vault-token file as part of a `vault login` to ensure permissions and ownership are set correctly [[GH-8867](https://github.com/hashicorp/vault/pull/8867)]
+* database/mongodb: Fix context deadline error that may result due to retry attempts on failed commands
+ [[GH-8863](https://github.com/hashicorp/vault/pull/8863)]
+* http: Fix superfluous call messages from the http package on logs caused by missing returns after
+ `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
+* namespace (enterprise): Fix namespace listing to return `key_info` when a scoping namespace is also provided (see the sketch below).
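+
+A minimal sketch of the namespace listing above with the official Go client, assuming the Enterprise `sys/namespaces` list endpoint returns `key_info` alongside `keys`:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+    // Scope the request to a parent namespace ("parent" is illustrative).
+    client.SetNamespace("parent")
+
+    secret, err := client.Logical().List("sys/namespaces")
+    if err != nil {
+        log.Fatal(err)
+    }
+    // With the fix, key_info is populated even when a scoping namespace is set.
+    fmt.Printf("keys: %v\nkey_info: %v\n", secret.Data["keys"], secret.Data["key_info"])
+}
+```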
+
+* seal/gcpkms: Fix panic that could occur if all seal parameters were provided via environment
+ variables [[GH-8840](https://github.com/hashicorp/vault/pull/8840)]
+* storage/raft: Fix memory allocation and incorrect metadata tracking issues with snapshots [[GH-8793](https://github.com/hashicorp/vault/pull/8793)]
+* storage/raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)]
+* storage/raft: Handle errors returned from the API during snapshot operations [[GH-8861](https://github.com/hashicorp/vault/pull/8861)]
+* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]
+
+## 1.4.0 (April 7th, 2020)
+
+CHANGES:
+
+* cli: The raft configuration command has been renamed to list-peers to avoid
+ confusion.
+
+FEATURES:
+
+* **Kerberos Authentication**: Vault now supports Kerberos authentication using a SPNEGO token.
+ Login can be performed using the Vault CLI, API, or agent.
+* **Kubernetes Service Discovery**: A new Kubernetes service discovery feature where, if
+ configured, Vault will tag Vault pods with their current health status. For more, see [#8249](https://github.com/hashicorp/vault/pull/8249).
+* **MongoDB Atlas Secrets**: Vault can now generate dynamic credentials for both MongoDB Atlas databases
+ as well as the [Atlas programmatic interface](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/).
+* **OpenLDAP Secrets Engine**: We now support password management of existing OpenLDAP user entries. For more, see [#8360](https://github.com/hashicorp/vault/pull/8360/).
+* **Redshift Database Secrets Engine**: The database secrets engine now supports static and dynamic secrets for the Amazon Web Services (AWS) Redshift service.
+* **Service Registration Config**: A newly introduced `service_registration` configuration stanza that allows for service registration to be configured separately from the storage backend. For more, see [#7887](https://github.com/hashicorp/vault/pull/7887/).
+* **Transform Secrets Engine (Enterprise)**: A new secrets engine that handles secure data transformations against provided input values.
+* **Integrated Storage**: Promoted out of beta and into general availability for both open-source and enterprise workloads.
+
+IMPROVEMENTS:
+
+* agent: Add option to force the use of the auto-auth token, and ignore the Vault token in the request [[GH-8101](https://github.com/hashicorp/vault/pull/8101)]
+* api: Restore and fix DNS SRV Lookup [[GH-8520](https://github.com/hashicorp/vault/pull/8520)]
+* audit: HMAC http_raw_body in audit log; this ensures that large authenticated Prometheus metrics responses get
+ replaced with short HMAC values [[GH-8130](https://github.com/hashicorp/vault/pull/8130)]
+* audit: Generate-root, generate-recovery-token, and generate-dr-operation-token requests and responses are now audited. [[GH-8301](https://github.com/hashicorp/vault/pull/8301)]
+* auth/aws: Reduce the number of simultaneous STS client credentials needed [[GH-8161](https://github.com/hashicorp/vault/pull/8161)]
+* auth/azure: Subscription ID, resource group, VM and VMSS names are now stored in alias metadata [[GH-30](https://github.com/hashicorp/vault-plugin-auth-azure/pull/30)]
+* auth/jwt: Additional OIDC callback parameters available for CLI logins [[GH-80](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/80) & [GH-86](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/86)]
+* auth/jwt: Bound claims may be optionally configured using globs [[GH-89](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/89)]
+* auth/jwt: Timeout during OIDC CLI login if process doesn't complete within 2 minutes [[GH-97](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/97)]
+* auth/jwt: Add support for the `form_post` response mode [[GH-98](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/98)]
+* auth/jwt: Add optional client_nonce to authorization flow [[GH-104](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/104)]
+* auth/okta: Upgrade okta sdk lib, which should improve handling of groups [[GH-8143](https://github.com/hashicorp/vault/pull/8143)]
+* aws: Add support for v2 of the instance metadata service (see [issue 7924](https://github.com/hashicorp/vault/issues/7924) for all linked PRs)
+* core: Separate out service discovery interface from storage interface to allow
+ new types of service discovery not coupled to storage [[GH-7887](https://github.com/hashicorp/vault/pull/7887)]
+* core: Add support for telemetry option `metrics_prefix` [[GH-8340](https://github.com/hashicorp/vault/pull/8340)]
+* core: Entropy Augmentation can now be used with AWS KMS and Vault Transit seals
+* core: Allow tls_min_version to be set to TLS 1.3 [[GH-8305](https://github.com/hashicorp/vault/pull/8305)]
+* cli: Incorrect TLS configuration will now correctly fail [[GH-8025](https://github.com/hashicorp/vault/pull/8025)]
+* identity: Allow specifying a custom `client_id` for identity tokens [[GH-8165](https://github.com/hashicorp/vault/pull/8165)]
+* metrics/prometheus: Improve performance with high volume of metrics updates [[GH-8507](https://github.com/hashicorp/vault/pull/8507)]
+* replication (enterprise): Fix race condition causing clusters with high throughput writes to sometimes
+ fail to enter streaming-wal mode
+* replication (enterprise): Secondary clusters can now perform an extra gRPC call to all nodes in a primary
+ cluster in an attempt to resolve the active node's address
+* replication (enterprise): The replication status API now outputs `last_performance_wal`, `last_dr_wal`,
+ and `connection_state` values
+* replication (enterprise): DR secondary clusters can now be recovered by the `replication/dr/secondary/recover`
+ API
+* replication (enterprise): We now allow for an alternate means to create a Disaster Recovery token, by using a batch
+ token that is created with an ACL that allows for access to one or more of the DR endpoints (see the sketch below).
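+
+A sketch of that batch-token alternative with the official Go client; the policy path shown is one illustrative DR endpoint, not an exhaustive or confirmed list:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // ACL granting access to a DR endpoint (illustrative path).
+    policy := `
+path "sys/replication/dr/secondary/promote" {
+  capabilities = ["update"]
+}`
+    if err := client.Sys().PutPolicy("dr-ops", policy); err != nil {
+        log.Fatal(err)
+    }
+
+    // Batch token carrying that ACL, usable in place of a DR operation token
+    // per the item above.
+    secret, err := client.Auth().Token().Create(&vault.TokenCreateRequest{
+        Policies: []string{"dr-ops"},
+        Type:     "batch",
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println("batch token:", secret.Auth.ClientToken)
+}
+```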
+* secrets/database/mongodb: Switched internal MongoDB driver to mongo-driver [[GH-8140](https://github.com/hashicorp/vault/pull/8140)] +* secrets/database/mongodb: Add support for x509 client authorization to MongoDB [[GH-8329](https://github.com/hashicorp/vault/pull/8329)] +* secrets/database/oracle: Add support for static credential rotation [[GH-26](https://github.com/hashicorp/vault-plugin-database-oracle/pull/26)] +* secrets/consul: Add support to specify TLS options per Consul backend [[GH-4800](https://github.com/hashicorp/vault/pull/4800)] +* secrets/gcp: Allow specifying the TTL for a service key [[GH-54](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/54)] +* secrets/gcp: Add support for rotating root keys [[GH-53](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/53)] +* secrets/gcp: Handle version 3 policies for Resource Manager IAM requests [[GH-77](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/77)] +* secrets/nomad: Add support to specify TLS options per Nomad backend [[GH-8083](https://github.com/hashicorp/vault/pull/8083)] +* secrets/ssh: Allowed users can now be templated with identity information [[GH-7548](https://github.com/hashicorp/vault/pull/7548)] +* secrets/transit: Adding RSA3072 key support [[GH-8151](https://github.com/hashicorp/vault/pull/8151)] +* storage/consul: Vault returns now a more descriptive error message when only a client cert or + a client key has been provided [[GH-4930]](https://github.com/hashicorp/vault/pull/8084) +* storage/raft: Nodes in the raft cluster can all be given possible leader + addresses for them to continuously try and join one of them, thus automating + the process of join to a greater extent [[GH-7856](https://github.com/hashicorp/vault/pull/7856)] +* storage/raft: Fix a potential deadlock that could occur on leadership transition [[GH-8547](https://github.com/hashicorp/vault/pull/8547)] +* storage/raft: Refresh TLS keyring on snapshot restore [[GH-8546](https://github.com/hashicorp/vault/pull/8546)] +* storage/etcd: Bumped etcd client API SDK [[GH-7931](https://github.com/hashicorp/vault/pull/7931) & [GH-4961](https://github.com/hashicorp/vault/pull/4961) & [GH-4349](https://github.com/hashicorp/vault/pull/4349) & [GH-7582](https://github.com/hashicorp/vault/pull/7582)] +* ui: Make Transit Key actions more prominent [[GH-8304](https://github.com/hashicorp/vault/pull/8304)] +* ui: Add Core Usage Metrics [[GH-8347](https://github.com/hashicorp/vault/pull/8347)] +* ui: Add refresh Namespace list on the Namespace dropdown, and redesign of Namespace dropdown menu [[GH-8442](https://github.com/hashicorp/vault/pull/8442)] +* ui: Update transit actions to codeblocks & automatically encode plaintext unless indicated [[GH-8462](https://github.com/hashicorp/vault/pull/8462)] +* ui: Display the results of transit key actions in a modal window [[GH-8462](https://github.com/hashicorp/vault/pull/8575)] +* ui: Transit key version styling updates & ability to copy key from dropdown [[GH-8480](https://github.com/hashicorp/vault/pull/8480)] + +BUG FIXES: + +* agent: Fix issue where TLS options are ignored for agent template feature [[GH-7889](https://github.com/hashicorp/vault/pull/7889)] +* auth/jwt: Use lower case role names for `default_role` to match the `role` case convention [[GH-100](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/100)] +* auth/ldap: Fix a bug where the UPNDOMAIN parameter was wrongly used to lookup the group + membership of the given user 
[[GH-6325]](https://github.com/hashicorp/vault/pull/8333) +* cli: Support autocompletion for nested mounts [[GH-8303](https://github.com/hashicorp/vault/pull/8303)] +* cli: Fix CLI namespace autocompletion [[GH-8315](https://github.com/hashicorp/vault/pull/8315)] +* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)] +* metrics/stackdriver: Fix issue that prevents the stackdriver metrics library to create unnecessary stackdriver descriptors [[GH-8073](https://github.com/hashicorp/vault/pull/8073)] +* replication (enterprise): Fix issue causing cubbyholes in namespaces on performance secondaries to not work. +* replication (enterprise): Unmounting a dynamic secrets backend could sometimes lead to replication errors. Change the order of operations to prevent that. +* seal (enterprise): Fix seal migration when transactional seal wrap backend is in use. +* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)] +* secrets/database/mysql: Ensures default static credential rotation statements are used [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] +* secrets/database/mysql: Fix inconsistent query parameter names: {{name}} or {{username}} for + different queries. Now it allows for either for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] +* secrets/database/postgres: Fix inconsistent query parameter names: {{name}} or {{username}} for + different queries. Now it allows for either for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] +* secrets/pki: Support FQDNs in DNS Name [[GH-8288](https://github.com/hashicorp/vault/pull/8288)] +* storage/raft: Allow seal migration to be performed on Vault clusters using raft storage [[GH-8103](https://github.com/hashicorp/vault/pull/8103)] +* telemetry: Prometheus requests on standby nodes will now return an error instead of forwarding + the request to the active node [[GH-8280](https://github.com/hashicorp/vault/pull/8280)] +* ui: Fix broken popup menu on the transit secrets list page [[GH-8348](https://github.com/hashicorp/vault/pull/8348)] +* ui: Update headless Chrome flag to fix `yarn run test:oss` [[GH-8035](https://github.com/hashicorp/vault/pull/8035)] +* ui: Update CLI to accept empty strings as param value to reset previously-set values +* ui: Fix bug where error states don't clear when moving between action tabs on Transit [[GH-8354](https://github.com/hashicorp/vault/pull/8354)] + +## 1.3.10 + +### August 27th, 2020 + +NOTE: + +All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users. + +BUG FIXES: + +* auth/aws: Made header handling for IAM authentication more robust + +## 1.3.9.1 + +### August 21st, 2020 + +### Enterprise Only + +NOTE: + +Includes correct license in the HSM binary. + +## 1.3.9 + +### August 20th, 2020 + +NOTE: + +OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. + +KNOWN ISSUES: + +* AWS IAM logins may return an error depending on the headers sent with the request. 
+  For more details and a workaround, see the [1.3.9 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.9)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly; enterprise customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.3.8
+
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.3.8 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.8)
+
+## 1.3.7
+
+### July 2nd, 2020
+
+BUG FIXES:
+
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken for seal-wrapped values
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9363](https://github.com/hashicorp/vault/pull/9363)]
+
+## 1.3.6 (May 21st, 2020)
+
+SECURITY:
+
+* core: proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)]
+
+BUG FIXES:
+
+* auth/aws: Fix token renewal issues caused by the metadata changes in 1.3.5 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)]
+* replication: Fix mount filter bug that allowed replication filters to hide local mounts on a performance secondary
+
+## 1.3.5 (April 28th, 2020)
+
+CHANGES:
+
+* auth/aws: The default set of metadata fields added in 1.3.2 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+
+IMPROVEMENTS:
+
+* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+
+## 1.3.4 (March 19th, 2020)
+
+SECURITY:
+
+* A vulnerability was identified in Vault and Vault Enterprise such that, under certain circumstances, an Entity's Group membership may inadvertently include Groups the Entity no longer has permissions to. This vulnerability, CVE-2020-10660, affects Vault and Vault Enterprise versions 0.9.0 and newer, and is fixed in 1.3.4. [[GH-8606](https://github.com/hashicorp/vault/pull/8606)]
+* A vulnerability was identified in Vault Enterprise such that, under certain circumstances, existing nested-path policies may give access to Namespaces created after-the-fact. This vulnerability, CVE-2020-10661, affects Vault Enterprise versions 0.11 and newer, and is fixed in 1.3.4.
+
+## 1.3.3 (March 5th, 2020)
+
+BUG FIXES:
+
+* approle: Fix excessive locking during tidy, which could potentially block new approle logins for long enough to cause an outage [[GH-8418](https://github.com/hashicorp/vault/pull/8418)]
+* cli: Fix issue where Raft snapshots from standby nodes created an empty backup file [[GH-8097](https://github.com/hashicorp/vault/pull/8097)]
+* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
+* kmip: role read now returns tls_client_ttl
+* kmip: fix panic when templateattr not provided in rekey request
+* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
+* storage/mysql: Fix potential crash when using MySQL for high availability coordination [[GH-8300](https://github.com/hashicorp/vault/pull/8300)]
+* storage/raft: Fix potential crash when using Raft for high availability coordination [[GH-8356](https://github.com/hashicorp/vault/pull/8356)]
+* ui: Fix missing License menu item [[GH-8230](https://github.com/hashicorp/vault/pull/8230)]
+* ui: Fix bug where the default auth method on login defaulted to an auth method with listing-visibility=unauth instead of “other” [[GH-8218](https://github.com/hashicorp/vault/pull/8218)]
+* ui: Fix bug where KMIP details were not shown in the UI Wizard [[GH-8255](https://github.com/hashicorp/vault/pull/8255)]
+* ui: Show Error messages on Auth Configuration page when you hit permission errors [[GH-8500](https://github.com/hashicorp/vault/pull/8500)]
+* ui: Remove duplicate form inputs for the GitHub config [[GH-8519](https://github.com/hashicorp/vault/pull/8519)]
+* ui: Correct HMAC capitalization [[GH-8528](https://github.com/hashicorp/vault/pull/8528)]
+* ui: Fix danger message in DR [[GH-8555](https://github.com/hashicorp/vault/pull/8555)]
+* ui: Fix certificate field for LDAP config [[GH-8573](https://github.com/hashicorp/vault/pull/8573)]
+
+## 1.3.2 (January 22nd, 2020)
+
+SECURITY:
+
+* When deleting a namespace on Vault Enterprise, in certain circumstances, the deletion process will fail to revoke dynamic secrets for a mount in that namespace. This will leave any dynamic secrets in remote systems alive and will fail to clean them up. This vulnerability, CVE-2020-7220, affects Vault Enterprise 0.11.0 and newer.
+ +IMPROVEMENTS: + +* auth/aws: Add aws metadata to identity alias [[GH-7985](https://github.com/hashicorp/vault/pull/7985)] +* auth/kubernetes: Allow both names and namespaces to be set to "*" [[GH-78](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/78)] + +BUG FIXES: + +* auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/ldap: Fix renewal of tokens without configured policies that are + generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/okta: Fix renewal of tokens without configured policies that are + generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* core: Fix seal migration error when attempting to migrate from auto unseal to shamir [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* core: Fix seal migration config issue when migrating from auto unseal to auto unseal [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* plugin: Fix issue where a plugin unwrap request potentially used an expired token [[GH-8058](https://github.com/hashicorp/vault/pull/8058)] +* replication: Fix issue where a forwarded request from a performance/standby node could run into + a timeout +* secrets/database: Fix issue where a manual static role rotation could potentially panic [[GH-8098](https://github.com/hashicorp/vault/pull/8098)] +* secrets/database: Fix issue where a manual root credential rotation request is not forwarded + to the primary node [[GH-8125](https://github.com/hashicorp/vault/pull/8125)] +* secrets/database: Fix issue where a manual static role rotation request is not forwarded + to the primary node [[GH-8126](https://github.com/hashicorp/vault/pull/8126)] +* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [[GH-8040](https://github.com/hashicorp/vault/pull/8040)] +* ui: Fix deleting namespaces [[GH-8132](https://github.com/hashicorp/vault/pull/8132)] +* ui: Fix Error handler on kv-secret edit and kv-secret view pages [[GH-8133](https://github.com/hashicorp/vault/pull/8133)] +* ui: Fix OIDC callback to check storage [[GH-7929](https://github.com/hashicorp/vault/pull/7929)]. +* ui: Change `.box-radio` height to min-height to prevent overflow issues [[GH-8065](https://github.com/hashicorp/vault/pull/8065)] + +## 1.3.1 (December 18th, 2019) + +IMPROVEMENTS: + +* agent: Add ability to set `exit-after-auth` via the CLI [[GH-7920](https://github.com/hashicorp/vault/pull/7920)] +* auth/ldap: Add a `request_timeout` configuration option to prevent connection + requests from hanging [[GH-7909](https://github.com/hashicorp/vault/pull/7909)] +* auth/kubernetes: Add audience to tokenreview API request for Kube deployments where issuer + is not Kube. 
[[GH-74](https://github.com/hashicorp/vault/pull/74)]
+* secrets/ad: Add a `request_timeout` configuration option to prevent connection requests from hanging [[GH-59](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/59)]
+* storage/postgresql: Add support for setting `connection_url` from environment variable `VAULT_PG_CONNECTION_URL` [[GH-7937](https://github.com/hashicorp/vault/pull/7937)]
+* telemetry: Add `enable_hostname_label` option to telemetry stanza [[GH-7902](https://github.com/hashicorp/vault/pull/7902)]
+* telemetry: Add accept header check for prometheus mime type [[GH-7958](https://github.com/hashicorp/vault/pull/7958)]
+
+BUG FIXES:
+
+* agent: Fix issue where Agent exits before all templates are rendered when using `exit_after_auth` [[GH-7899](https://github.com/hashicorp/vault/pull/7899)]
+* auth/aws: Fix region-related issues when using a custom `sts_endpoint` by adding a `sts_region` parameter [[GH-7922](https://github.com/hashicorp/vault/pull/7922)]
+* auth/token: Fix panic when getting batch tokens on a performance standby from a role that does not exist [[GH-8027](https://github.com/hashicorp/vault/pull/8027)]
+* core: Improve warning message for lease TTLs [[GH-7901](https://github.com/hashicorp/vault/pull/7901)]
+* identity: Fix identity token panic during invalidation [[GH-8043](https://github.com/hashicorp/vault/pull/8043)]
+* plugin: Fix a panic that could occur if a mount/auth entry was unable to mount the plugin backend and a request that required the system view to be retrieved was made [[GH-7991](https://github.com/hashicorp/vault/pull/7991)]
+* replication: Add `generate-public-key` endpoint to list of allowed endpoints for existing DR secondaries
+* secrets/gcp: Fix panic if bindings aren't provided in roleset create/update. [[GH-56](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/56)]
+* secrets/pki: Prevent generating certificate on performance standby when storing [[GH-7904](https://github.com/hashicorp/vault/pull/7904)]
+* secrets/transit: Prevent restoring keys to new names that are sub paths [[GH-7998](https://github.com/hashicorp/vault/pull/7998)]
+* storage/s3: Fix a bug in configurable S3 paths that was preventing use of S3 as a source during `operator migrate` operations [[GH-7966](https://github.com/hashicorp/vault/pull/7966)]
+* ui: Ensure secrets with a period in their key can be viewed and copied [[GH-7926](https://github.com/hashicorp/vault/pull/7926)]
+* ui: Fix status menu after demotion [[GH-7997](https://github.com/hashicorp/vault/pull/7997)]
+* ui: Fix select dropdowns in Safari when running Mojave [[GH-8023](https://github.com/hashicorp/vault/pull/8023)]
+
+## 1.3 (November 14th, 2019)
+
+CHANGES:
+
+* Secondary cluster activation: There has been a change to the way that activating performance and DR secondary clusters works when using public keys for encryption of the parameters rather than a wrapping token. This flow was experimental and never documented. It is now officially supported and documented but is not backwards compatible with older Vault releases.
+* Cluster cipher suites: On its cluster port, Vault will no longer advertise the full TLS 1.2 cipher suite list by default. Although this port is only used for Vault-to-Vault communication and would always pick a strong cipher, it could cause false flags on port scanners and other security utilities that assumed insecure ciphers were being used. The previous behavior can be achieved by setting the value of the (undocumented) `cluster_cipher_suites` config flag to `tls12`.
+* API/Agent Renewal behavior: The API now allows multiple options for how it deals with renewals. The legacy behavior in the Agent/API is for the renewer (now called the lifetime watcher) to exit on a renew error, leading to a reauthentication. The new default behavior is for the lifetime watcher to ignore 5XX errors and simply retry as scheduled, using the existing lease duration. It is also possible, within custom code, to disable renewals entirely, which allows the lifetime watcher to simply return when it believes it is time for your code to renew or reauthenticate.
+
+FEATURES:
+
+* **Vault Debug**: A new top-level subcommand, `debug`, is added that allows operators to retrieve debugging information related to a particular Vault node. Operators can use this simple workflow to capture triaging information, which can then be consumed programmatically or by support and engineering teams. It has the ability to probe for config, host, metrics, pprof, server status, and replication status.
+* **Recovery Mode**: Vault server can be brought up in recovery mode to resolve outages caused by the data store being in a bad state. This is a privileged mode that allows `sys/raw` API calls to perform surgical corrections to the data store. Bad storage state can be caused by bugs. However, this is usually observed when known (and fixed) bugs are hit by older versions of Vault.
+* **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from an external source for critical security parameters. Currently an HSM that supports PKCS#11 is the only supported source.
+* **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets engine, users or applications can check out a service account for use, and its password will be rotated when it's checked back in.
+* **Vault Agent Template**: Vault Agent now supports rendering templates containing Vault secrets to disk, similar to Consul Template [[GH-7652](https://github.com/hashicorp/vault/pull/7652)]
+* **Transit Key Type Support**: Signing and verification are now supported with the P-384 (secp384r1) and P-521 (secp521r1) ECDSA curves [[GH-7551](https://github.com/hashicorp/vault/pull/7551)] and encryption and decryption are now supported via AES128-GCM96 [[GH-7555](https://github.com/hashicorp/vault/pull/7555)]
+* **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to require a specific header before allowing requests [[GH-7627](https://github.com/hashicorp/vault/pull/7627)]
+* **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can now be rotated, to ensure that only Vault knows the credentials it is using [[GH-7131](https://github.com/hashicorp/vault/pull/7131)]
+* **New UI Features**: The UI now supports managing users and groups for the Userpass, Cert, Okta, and Radius auth methods.
+* **Shamir with Stored Master Key**: The on disk format for Shamir seals has changed, allowing for a secondary cluster using Shamir downstream from a primary cluster using Auto Unseal. [[GH-7694](https://github.com/hashicorp/vault/pull/7694)]
+* **Stackdriver Metrics Sink**: Vault can now send metrics to [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration documentation](https://www.vaultproject.io/docs/config/index.html) for details. [[GH-6957](https://github.com/hashicorp/vault/pull/6957)]
+* **Filtered Paths Replication (Enterprise)**: Based on the predecessor Filtered Mount Replication, Filtered Paths Replication now allows filtering of namespaces in addition to mounts. With this feature, Filtered Mount Replication should be considered deprecated.
+* **Token Renewal via Accessor**: Tokens can now be renewed via the accessor value through the new `auth/token/renew-accessor` endpoint if the caller's token has permission to access that endpoint.
+* **Improved Integrated Storage (Beta)**: Improved raft write performance, added support for non-voter nodes, along with UI support for using raft storage, joining a raft cluster, and downloading and restoring a snapshot.
+
+IMPROVEMENTS:
+
+* agent: Add ability to set the TLS SNI name used by Agent [[GH-7519](https://github.com/hashicorp/vault/pull/7519)]
+* agent & api: Change default renewer behavior to ignore 5XX errors [[GH-7733](https://github.com/hashicorp/vault/pull/7733)]
+* auth/jwt: The redirect callback host may now be specified for CLI logins [[GH-71](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/71)]
+* auth/jwt: Bound claims may now contain boolean values [[GH-73](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/73)]
+* auth/jwt: CLI logins can now open the browser when running in WSL [[GH-77](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/77)]
+* core: Exit ScanView if context has been cancelled [[GH-7419](https://github.com/hashicorp/vault/pull/7419)]
+* core: re-encrypt barrier and recovery keys if the unseal key is updated [[GH-7493](https://github.com/hashicorp/vault/pull/7493)]
+* core: Don't advertise the full set of TLS 1.2 cipher suites on the cluster port, even though only strong ciphers were used [[GH-7487](https://github.com/hashicorp/vault/pull/7487)]
+* core (enterprise): Add background seal re-wrap
+* core/metrics: Add config parameter to allow unauthenticated sys/metrics access. [[GH-7550](https://github.com/hashicorp/vault/pull/7550)]
+* metrics: Upgrade DataDog library to improve performance [[GH-7794](https://github.com/hashicorp/vault/pull/7794)]
+* replication (enterprise): Write-Ahead-Log entries will not duplicate the data belonging to the encompassing physical entries of the transaction, thereby improving performance and storage capacity.
+* replication (enterprise): Added more replication metrics
+* replication (enterprise): Reindex process now compares subpages for a more accurate indexing process.
+* replication (enterprise): Reindex API now accepts a new `skip_flush` parameter indicating all the changes should not be flushed while the tree is locked.
+* secrets/aws: The root config can now be read [[GH-7245](https://github.com/hashicorp/vault/pull/7245)]
+* secrets/aws: Role paths may now contain the '@' character [[GH-7553](https://github.com/hashicorp/vault/pull/7553)]
+* secrets/database/cassandra: Add ability to skip verification of the connection [[GH-7614](https://github.com/hashicorp/vault/pull/7614)]
+* secrets/gcp: Fix panic during rollback if the roleset has been deleted [[GH-52](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/52)]
+* storage/azure: Add config parameter to Azure storage backend to allow specifying the ARM endpoint [[GH-7567](https://github.com/hashicorp/vault/pull/7567)]
+* storage/cassandra: Improve storage efficiency by eliminating unnecessary copies of value data [[GH-7199](https://github.com/hashicorp/vault/pull/7199)]
+* storage/raft: Improve raft write performance by utilizing FSM Batching [[GH-7527](https://github.com/hashicorp/vault/pull/7527)]
+* storage/raft: Add support for non-voter nodes [[GH-7634](https://github.com/hashicorp/vault/pull/7634)]
+* sys: Add a new `sys/host-info` endpoint for querying information about the host [[GH-7330](https://github.com/hashicorp/vault/pull/7330)]
+* sys: Add a new set of endpoints under `sys/pprof/` that allows profiling information to be extracted [[GH-7473](https://github.com/hashicorp/vault/pull/7473)]
+* sys: Add endpoint that counts the total number of active identity entities [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+* sys: `sys/seal-status` now has a `storage_type` field denoting what type of storage the cluster is configured to use
+* sys: Add a new `sys/internal/counters/tokens` endpoint that counts the total number of active service token accessors in the shared token storage. [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+* sys/config: Add a new endpoint under `sys/config/state/sanitized` that returns the configuration state of the server. It excludes config values from `storage`, `ha_storage`, and `seal` stanzas and some values from `telemetry` due to potential sensitive entries in those fields.
+* ui: when using raft storage, you can now join a raft cluster, download a snapshot, and restore a snapshot from the UI [[GH-7410](https://github.com/hashicorp/vault/pull/7410)]
+* ui: clarify when secret version is deleted in the secret version history dropdown [[GH-7714](https://github.com/hashicorp/vault/pull/7714)]
+
+BUG FIXES:
+
+* agent: Fix a data race on the token value for inmemsink [[GH-7707](https://github.com/hashicorp/vault/pull/7707)]
+* api: Fix Go API using lease revocation via URL instead of body [[GH-7777](https://github.com/hashicorp/vault/pull/7777)]
+* api: Allow setting a function to control retry behavior [[GH-7331](https://github.com/hashicorp/vault/pull/7331)]
+* auth/gcp: Fix a bug where region information in instance groups names could cause an authorization attempt to fail [[GH-74](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/74)]
+* cli: Fix a bug where a token of an unknown format (e.g. in ~/.vault-token) could cause confusing error messages during `vault login` [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+* cli: Fix a bug where the `namespace list` command with JSON formatting always returned an empty object [[GH-7705](https://github.com/hashicorp/vault/pull/7705)]
+* cli: Command timeouts are now always specified solely by the `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+* core: Don't allow registering a non-root zero TTL token lease. This is purely defense in depth as the lease would be revoked immediately anyway, but there's no real reason to allow registration. [[GH-7524](https://github.com/hashicorp/vault/pull/7524)]
+* core: Correctly revoke the token that's present in the response auth from an auth/token/ request if there's partial failure during the process. [[GH-7835](https://github.com/hashicorp/vault/pull/7835)]
+* identity (enterprise): Fixed case-sensitive identity loading in secondary clusters [[GH-7327](https://github.com/hashicorp/vault/pull/7327)]
+* identity: Ensure only replication primary stores the identity case sensitivity state [[GH-7820](https://github.com/hashicorp/vault/pull/7820)]
+* raft: Fixed VAULT_CLUSTER_ADDR env being ignored at startup [[GH-7619](https://github.com/hashicorp/vault/pull/7619)]
+* secrets/pki: Don't allow duplicate SAN names in issued certs [[GH-7605](https://github.com/hashicorp/vault/pull/7605)]
+* sys/health: Pay attention to the values provided for `standbyok` and `perfstandbyok` rather than simply using their presence as a key to flip on that behavior [[GH-7323](https://github.com/hashicorp/vault/pull/7323)]
+* ui: using the `wrapped_token` query param will work with `redirect_to` and will automatically log in as intended [[GH-7398](https://github.com/hashicorp/vault/pull/7398)]
+* ui: fix an error when initializing from the UI using PGP keys [[GH-7542](https://github.com/hashicorp/vault/pull/7542)]
+* ui: show all active kv v2 secret versions even when `delete_version_after` is configured [[GH-7685](https://github.com/hashicorp/vault/pull/7685)]
+* ui: Ensure that items in the top navigation link to pages that users have access to [[GH-7590](https://github.com/hashicorp/vault/pull/7590)]
+
+## 1.2.7
+
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+
+## 1.2.6.1
+
+### August 21st, 2020
+
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.2.6
+
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.6 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.6)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly; enterprise customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.2.5
+
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.5)
+
+BUG FIXES:
+
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken for seal-wrapped values
+
+## 1.2.4 (November 7th, 2019)
+
+SECURITY:
+
+* In a non-root namespace, revocation of a token scoped to a non-root namespace did not trigger the expected revocation of dynamic secret leases associated with that token. As a result, dynamic secret leases in non-root namespaces may outlive the token that created them. This vulnerability, CVE-2019-18616, affects Vault Enterprise 0.11.0 and newer.
+* Disaster Recovery secondary clusters did not delete already-replicated data after a mount filter has been created on an upstream Performance secondary cluster. As a result, encrypted secrets may remain replicated on a Disaster Recovery secondary cluster after application of a mount filter excluding those secrets from replication. This vulnerability, CVE-2019-18617, affects Vault Enterprise 0.8 and newer.
+* Update version of Go to 1.12.12 to fix Go bug golang.org/issue/34960 which corresponds to CVE-2019-17596.
+
+CHANGES:
+
+* auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI should provide the corresponding region via the `region` parameter (which already existed as a CLI parameter, and has now been added to Agent). The automatic region detection added to the CLI and Agent in 1.2 has been removed.
+
+IMPROVEMENTS:
+
+* cli: Ignore existing token during CLI login [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+* core: Log proxy settings from environment on startup [[GH-7528](https://github.com/hashicorp/vault/pull/7528)]
+* core: Cache whether we've been initialized to reduce load on storage [[GH-7549](https://github.com/hashicorp/vault/pull/7549)]
+
+BUG FIXES:
+
+* agent: Fix handling of gzipped responses [[GH-7470](https://github.com/hashicorp/vault/pull/7470)]
+* cli: Fix panic when pgp keys list is empty [[GH-7546](https://github.com/hashicorp/vault/pull/7546)]
+* cli: Command timeouts are now always specified solely by the `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+* core: add hook for initializing seals for migration [[GH-7666](https://github.com/hashicorp/vault/pull/7666)]
+* core (enterprise): Migrating from one auto unseal method to another never worked on enterprise, now it does.
+* identity: Add required field `response_types_supported` to identity token `.well-known/openid-configuration` response [[GH-7533](https://github.com/hashicorp/vault/pull/7533)]
+* identity: Fixed nil pointer panic when merging entities [[GH-7712](https://github.com/hashicorp/vault/pull/7712)]
+* replication (Enterprise): Fix issue causing performance standby nodes to disconnect when under high load.
+* secrets/azure: Fix panic that could occur if client retries timeout [[GH-7793](https://github.com/hashicorp/vault/pull/7793)]
+* secrets/database: Fix bug in combined DB secrets engine that can result in writes to static-roles endpoints timing out [[GH-7518](https://github.com/hashicorp/vault/pull/7518)]
+* secrets/pki: Improve tidy to continue when value is nil [[GH-7589](https://github.com/hashicorp/vault/pull/7589)]
+* ui (Enterprise): Allow kv v2 secrets that are gated by Control Groups to be viewed in the UI [[GH-7504](https://github.com/hashicorp/vault/pull/7504)]
+
+## 1.2.3 (September 12, 2019)
+
+FEATURES:
+
+* **Oracle Cloud (OCI) Integration**: Vault now supports using Oracle Cloud for storage, auto unseal, and authentication.
+
+IMPROVEMENTS:
+
+* auth/jwt: Groups claim matching now treats a string response as a single element list [[GH-63](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/63)]
+* auth/kubernetes: enable better support for projected tokens API by allowing the user to specify the issuer [[GH-65](https://github.com/hashicorp/vault/pull/65)]
+* auth/pcf: The PCF auth plugin was renamed to the CF auth plugin, maintaining full backwards compatibility [[GH-7346](https://github.com/hashicorp/vault/pull/7346)]
+* replication: Premium packages now come with unlimited performance standby nodes
+
+BUG FIXES:
+
+* agent: Allow batch tokens and other non-renewable tokens to be used for agent operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+* auth/jwt: Fix an error where newer (v1.2) token_* configuration parameters were not being applied to tokens generated using the OIDC login flow [[GH-67](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/67)]
+* raft: Fix an incorrect JSON tag on `leader_ca_cert` in the join request [[GH-7393](https://github.com/hashicorp/vault/pull/7393)]
+* seal/transit: Allow using Vault Agent for transit seal operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+* storage/couchdb: Fix a file descriptor leak [[GH-7345](https://github.com/hashicorp/vault/pull/7345)]
+* ui: Fix a bug where the status menu would disappear when trying to revoke a token [[GH-7337](https://github.com/hashicorp/vault/pull/7337)]
+* ui: Fix a regression that prevented input of custom items in search-select [[GH-7338](https://github.com/hashicorp/vault/pull/7338)]
+* ui: Fix an issue with the namespace picker being unable to render nested namespaces named with numbers and sorting of namespaces in the picker [[GH-7333](https://github.com/hashicorp/vault/pull/7333)]
+
+## 1.2.2 (August 15, 2019)
+
+CHANGES:
+
+* auth/pcf: The signature format has been updated to use the standard Base64 encoding instead of the URL-safe variant. Signatures created using the previous format will continue to be accepted [PCF-27]
+* core: The HTTP response code returned when an identity token key is not found has been changed from 400 to 404
+
+IMPROVEMENTS:
+
+* identity: Remove 512 entity limit for groups [[GH-7317](https://github.com/hashicorp/vault/pull/7317)]
+
+BUG FIXES:
+
+* auth/approle: Fix an error where an empty `token_type` string was not being correctly handled as `TokenTypeDefault` [[GH-7273](https://github.com/hashicorp/vault/pull/7273)]
+* auth/radius: Fix panic when logging in [[GH-7286](https://github.com/hashicorp/vault/pull/7286)]
+* ui: the string-list widget will now honor multiline input [[GH-7254](https://github.com/hashicorp/vault/pull/7254)]
+* ui: various visual bugs in the KV interface were addressed [[GH-7307](https://github.com/hashicorp/vault/pull/7307)]
+* ui: fixed incorrect URL to access help in LDAP auth [[GH-7299](https://github.com/hashicorp/vault/pull/7299)]
+
+## 1.2.1 (August 6th, 2019)
+
+BUG FIXES:
+
+* agent: Fix a panic on creds pulling in some error conditions in `aws` and `alicloud` auth methods [[GH-7238](https://github.com/hashicorp/vault/pull/7238)]
+* auth/approle: Fix error reading role-id on a role created pre-1.2 [[GH-7231](https://github.com/hashicorp/vault/pull/7231)]
+* auth/token: Fix sudo check in non-root namespaces on create [[GH-7224](https://github.com/hashicorp/vault/pull/7224)]
+* core: Fix health checks with perfstandbyok=true returning the wrong status code [[GH-7240](https://github.com/hashicorp/vault/pull/7240)]
+* ui: The web CLI will now parse input as a shell string, with special characters escaped [[GH-7206](https://github.com/hashicorp/vault/pull/7206)]
+* ui: The UI will now redirect to a page after authentication [[GH-7088](https://github.com/hashicorp/vault/pull/7088)]
+* ui (Enterprise): The list of namespaces is now cleared when logging out [[GH-7186](https://github.com/hashicorp/vault/pull/7186)]
+
+## 1.2.0 (July 30th, 2019)
+
+CHANGES:
+
+* Token store roles use new, common token fields for the values that overlap with other auth backends. `period`, `explicit_max_ttl`, and `bound_cidrs` will continue to work, with priority being given to the `token_` prefixed versions of those parameters. They will also be returned when doing a read on the role if they were used to provide values initially; however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no longer be returned. (`explicit_max_ttl` was already not returned if empty.)
+* Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now stricter about what characters it will accept in path names. Whereas before it would filter out unprintable characters (and this could be turned off), control characters and other invalid characters are now rejected within Go's HTTP library before the request is passed to Vault, and this cannot be disabled. To continue using these (e.g. for already-written paths), they must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes `%00`, and so on).
+* The user-configured regions on the AWSKMS seal stanza will now be preferred over regions set in the enclosing environment. This is a _breaking_ change.
+* All values in audit logs are now omitted if they are empty. This helps reduce the size of audit log entries by not reproducing keys in each entry that commonly don't contain any value, which can help in cases where audit log entries exceed the maximum UDP packet size, among others.
+* Both PeriodicFunc and WALRollback functions will be called if both are provided. Previously WALRollback would only be called if PeriodicFunc was not set. See [[GH-6717](https://github.com/hashicorp/vault/pull/6717)] for details.
+* Vault now uses Go's official dependency management system, Go Modules, to manage dependencies. As a result, to both reduce transitive dependencies for API library users and plugin authors, and to work around various conflicts, we have moved various helpers around, mostly under an `sdk/` submodule. A couple of functions have also moved from plugin helper code to the `api/` submodule. If you are a plugin author, take a look at some of our official plugins and the paths they are importing for guidance.
+* AppRole uses new, common token fields for values that overlap with other auth backends. `period` and `policies` will continue to work, with priority being given to the `token_` prefixed versions of those parameters. They will also be returned when doing a read on the role if they were used to provide values initially.
+* In AppRole, `"default"` is no longer automatically added to the `policies` parameter. This was a no-op since it would always be added anyway by Vault's core; however, this can now be explicitly disabled with the new `token_no_default_policy` field.
+* In AppRole, `bound_cidr_list` is no longer returned when reading a role
+* rollback: Rollback will no longer display log messages when it runs; it will only display messages on error.
+* Database plugins will now default to 4 `max_open_connections` rather than 2.
+
+FEATURES:
+
+* **Integrated Storage**: Vault 1.2 includes a _tech preview_ of a new way to manage storage directly within a Vault cluster. This new integrated storage solution is based on the Raft protocol which is also used to back HashiCorp Consul and HashiCorp Nomad.
+* **Combined DB credential rotation**: Alternative mode for the Combined DB Secret Engine to automatically rotate existing database account credentials and set Vault as the source of truth for credentials.
+* **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant ID tokens. These customizable tokens allow encapsulating a signed, verifiable snapshot of identity information and metadata. They can be used by other applications—even those without Vault authorization—as a way of establishing identity based on a Vault entity. A short usage sketch follows this feature list.
+* **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud Foundry certificates for Vault authentication.
+* **ElasticSearch database plugin**: New ElasticSearch database plugin issues unique, short-lived ElasticSearch credentials.
+* **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP Users and Groups have been added.
+* **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as an HA storage backend.
+* **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP Server, seamlessly brokering cryptographic operations for traditional infrastructure.
+* Common Token Fields: Auth methods now use common fields for controlling token behavior, making it easier to understand configuration across methods.
+* **Vault API explorer**: The Vault UI now includes an embedded API explorer where you can browse the endpoints available to you and make requests. To try it out, open the Web CLI and type `api`.
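+
+A minimal Go sketch of the Identity Tokens flow above, using the `identity/oidc/` endpoints via the Go `api` client. The key and role names are illustrative assumptions, not part of this changelog, and the calling token must belong to an entity:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Client configuration comes from VAULT_ADDR / VAULT_TOKEN.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a named signing key; "*" lets any role's client_id use it.
+	if _, err := client.Logical().Write("identity/oidc/key/example-key", map[string]interface{}{
+		"allowed_client_ids": "*",
+		"rotation_period":    "24h",
+	}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a role that issues ID tokens signed by that key.
+	if _, err := client.Logical().Write("identity/oidc/role/example-role", map[string]interface{}{
+		"key": "example-key",
+		"ttl": "1h",
+	}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Generate a signed, verifiable ID token for the calling entity.
+	secret, err := client.Logical().Read("identity/oidc/token/example-role")
+	if err != nil {
+		log.Fatal(err)
+	}
+	if secret == nil || secret.Data == nil {
+		log.Fatal("no token returned")
+	}
+	fmt.Println(secret.Data["token"])
+}
+```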
+
+IMPROVEMENTS:
+
+* agent: Allow EC2 nonce to be passed in [[GH-6953](https://github.com/hashicorp/vault/pull/6953)]
+* agent: Add optional `namespace` parameter, which sets the default namespace for the auto-auth functionality [[GH-6988](https://github.com/hashicorp/vault/pull/6988)]
+* agent: Add cert auto-auth method [[GH-6652](https://github.com/hashicorp/vault/pull/6652)]
+* api: Add support for passing data to delete operations via `DeleteWithData` [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+* audit/file: Dramatically speed up file operations by changing locking/marshaling order [[GH-7024](https://github.com/hashicorp/vault/pull/7024)]
+* auth/jwt: A JWKS endpoint may now be configured for signature verification [[GH-43](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/43)]
+* auth/jwt: A new `verbose_oidc_logging` role parameter has been added to help troubleshoot OIDC configuration [[GH-57](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/57)]
+* auth/jwt: `bound_claims` will now match received claims that are lists if any element of the list is one of the expected values [[GH-50](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/50)]
+* auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew leeway [[GH-53](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/53)]
+* auth/kubernetes: Allow service names/namespaces to be configured as globs [[GH-58](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/58)]
+* auth/token: Add support for the identity system in the token backend via token roles [[GH-6267](https://github.com/hashicorp/vault/pull/6267)]
+* auth/token: Add a large set of token configuration options to token store roles [[GH-6662](https://github.com/hashicorp/vault/pull/6662)]
+* cli: `path-help` now allows `-format=json` to be specified, which will output OpenAPI [[GH-7006](https://github.com/hashicorp/vault/pull/7006)]
+* cli: Add support for passing parameters to `vault delete` operations [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+* cli: Add a log-format CLI flag that can specify either "standard" or "json" for the log format for the `vault server` command. [[GH-6840](https://github.com/hashicorp/vault/pull/6840)]
+* cli: Add `-dev-no-store-token` to allow dev servers to not store the generated token at the tokenhelper location [[GH-7104](https://github.com/hashicorp/vault/pull/7104)]
+* identity: Allow a group alias' canonical ID to be modified
+* namespaces: Namespaces can now be created and deleted from performance replication secondaries
+* plugins: Change the default for `max_open_connections` for DB plugins to 4 [[GH-7093](https://github.com/hashicorp/vault/pull/7093)]
+* replication: Client TLS authentication is now supported when enabling or updating a replication secondary
+* secrets/database: Cassandra operations will now cancel on client timeout [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+* secrets/kv: Add optional `delete_version_after` parameter, which takes a duration and can be set on the mount and/or the metadata for a specific key [[GH-7005](https://github.com/hashicorp/vault/pull/7005)]
+* storage/postgres: LIST now performs better on large datasets [[GH-6546](https://github.com/hashicorp/vault/pull/6546)]
+* storage/s3: A new `path` parameter allows selecting the path within a bucket for Vault data [[GH-7157](https://github.com/hashicorp/vault/pull/7157)]
+* ui: KV v1 and v2 will now gracefully degrade allowing a write without read workflow in the UI [[GH-6570](https://github.com/hashicorp/vault/pull/6570)]
+* ui: Many visual improvements with the addition of Toolbars [[GH-6626](https://github.com/hashicorp/vault/pull/6626)], the restyling of the Confirm Action component [[GH-6741](https://github.com/hashicorp/vault/pull/6741)], and using a new set of glyphs for our Icon component [[GH-6736](https://github.com/hashicorp/vault/pull/6736)]
+* ui: Lazy loading parts of the application so that the total initial payload is smaller [[GH-6718](https://github.com/hashicorp/vault/pull/6718)]
+* ui: Tabbing to auto-complete in filters will first complete a common prefix if there is one [[GH-6759](https://github.com/hashicorp/vault/pull/6759)]
+* ui: Removing jQuery from the application makes the initial JS payload smaller [[GH-6768](https://github.com/hashicorp/vault/pull/6768)]
+
+BUG FIXES:
+
+* audit: Log requests and responses due to invalid wrapping token provided [[GH-6541](https://github.com/hashicorp/vault/pull/6541)]
+* audit: Fix bug preventing request counter queries from working with auditing enabled [[GH-6767](https://github.com/hashicorp/vault/pull/6767)]
+* auth/aws: AWS Roles are now upgraded and saved to the latest version just after the AWS credential plugin is mounted.
[[GH-7025](https://github.com/hashicorp/vault/pull/7025)] +* auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN + when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] +* auth/aws: Fix an error complaining about a read-only view that could occur + during updating of a role when on a performance replication secondary + [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] +* auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id + for OIDC logins [[GH-54](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/54)] +* auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server + response is empty [[GH-55](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/55)] +* auth/jwt: Fix issue where OIDC logins might intermittently fail when using + performance standbys [[GH-61](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/61)] +* identity: Fix a case where modifying aliases of an entity could end up + moving the entity into the wrong namespace +* namespaces: Fix a behavior (currently only known to be benign) where we + wouldn't delete policies through the official functions before wiping the + namespaces on deletion +* secrets/database: Escape username/password before using in connection URL + [[GH-7089](https://github.com/hashicorp/vault/pull/7089)] +* secrets/pki: Forward revocation requests to active node when on a + performance standby [[GH-7173](https://github.com/hashicorp/vault/pull/7173)] +* ui: Fix timestamp on some transit keys [[GH-6827](https://github.com/hashicorp/vault/pull/6827)] +* ui: Show Entities and Groups in Side Navigation [[GH-7138](https://github.com/hashicorp/vault/pull/7138)] +* ui: Ensure dropdown updates selected item on HTTP Request Metrics page + +## 1.1.4/1.1.5 (July 25th/30th, 2019) + +NOTE: + +Although 1.1.4 was tagged, we realized very soon after the tag was publicly +pushed that an intended fix was accidentally left out. As a result, 1.1.4 was +not officially announced and 1.1.5 should be used as the release after 1.1.3. 
+
+IMPROVEMENTS:
+
+* identity: Allow a group alias' canonical ID to be modified
+* namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)]
+* namespaces: Namespaces can now be created and deleted from performance replication secondaries
+
+BUG FIXES:
+
+* api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)]
+* auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)]
+* auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)]
+* auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)]
+* core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)]
+* core: Fix an issue that may cause key upgrades to not be cleaned up properly [[GH-6949](https://github.com/hashicorp/vault/pull/6949)]
+* core: Don't shutdown if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)]
+* core: Fix panic caused by handling requests while vault is inactive
+* identity: Fix reading entity and groups that have spaces in their names [[GH-7055](https://github.com/hashicorp/vault/pull/7055)]
+* identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)]
+* mfa: Fix a nil pointer panic that could occur if invalid Duo credentials were supplied
+* replication: Forward step-down on perf standbys to match HA behavior
+* replication: Fix various read only storage errors on performance standbys
+* replication: Stop forwarding before stopping replication to eliminate some possible bad states
+* secrets/database: Allow cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+* storage/consul: Fix a regression causing vault to not connect to consul over unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)]
+* ui: Fix saving of TTL and string array fields generated by Open API [[GH-7094](https://github.com/hashicorp/vault/pull/7094)]
+
+## 1.1.3 (June 5th, 2019)
+
+IMPROVEMENTS:
+
+* agent: Now supports proxying request query parameters [[GH-6772](https://github.com/hashicorp/vault/pull/6772)]
+* core: Mount table output now includes a UUID indicating the storage path [[GH-6633](https://github.com/hashicorp/vault/pull/6633)]
+* core: HTTP server timeout values are now configurable [[GH-6666](https://github.com/hashicorp/vault/pull/6666)]
+* replication: Improve performance of the reindex operation on secondary clusters when mount filters are in use
+* replication: Replication status API now returns the state and progress of a reindex
+
+BUG FIXES:
+
+* api: Return the Entity ID in the secret output [[GH-6819](https://github.com/hashicorp/vault/pull/6819)]
+* auth/jwt: Consider bound claims when considering if there is at least one bound constraint [[GH-49](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/49)]
+* auth/okta: Fix handling of group names containing slashes [[GH-6665](https://github.com/hashicorp/vault/pull/6665)]
+* cli: Add deprecated stored-shares flag back to the init command [[GH-6677](https://github.com/hashicorp/vault/pull/6677)]
+* cli: Fix a panic when the KV command would return no data [[GH-6675](https://github.com/hashicorp/vault/pull/6675)]
+* cli: Fix issue causing CLI list operations to not return proper format when there is an empty response [[GH-6776](https://github.com/hashicorp/vault/pull/6776)]
+* core: Correctly honor non-HMAC request keys when auditing requests [[GH-6653](https://github.com/hashicorp/vault/pull/6653)]
+* core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of endpoints [[GH-6654](https://github.com/hashicorp/vault/pull/6654)]
+* core: Fix issue where some OpenAPI parameters were incorrectly listed as being sent as a header [[GH-6679](https://github.com/hashicorp/vault/pull/6679)]
+* core: Fix issue that would allow duplicate mount names to be used [[GH-6771](https://github.com/hashicorp/vault/pull/6771)]
+* namespaces: Fix behavior when using `root` instead of `root/` as the namespace header value
+* pki: fix a panic when a client submits a null value [[GH-5679](https://github.com/hashicorp/vault/pull/5679)]
+* replication: Properly update mount entry cache on a secondary to apply all new values after a tune
+* replication: Properly close connection on bootstrap error
+* replication: Fix an issue causing startup problems if a namespace policy wasn't replicated properly
+* replication: Fix longer than necessary WAL replay during an initial reindex
+* replication: Fix error during mount filter invalidation on DR secondary clusters
+* secrets/ad: Make time buffer configurable [AD-35]
+* secrets/gcp: Check for nil config when getting credentials [[GH-35](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/35)]
+* secrets/gcp: Fix error checking in some cases where the returned value could be 403 instead of 404 [[GH-37](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/37)]
+* secrets/gcpkms: Disable key rotation when deleting a key [[GH-10](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/10)]
+* storage/consul: recognize `https://` address even if scheme not specified [[GH-6602](https://github.com/hashicorp/vault/pull/6602)]
+* storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) could cause constant switching of the active node [[GH-6637](https://github.com/hashicorp/vault/pull/6637)]
+* storage/dynamodb: Eliminate a high-CPU condition that could occur if an error was received from the DynamoDB API [[GH-6640](https://github.com/hashicorp/vault/pull/6640)]
+* storage/gcs: Correctly use configured chunk size values [[GH-6655](https://github.com/hashicorp/vault/pull/6655)]
+* storage/mssql: Use the correct database when pre-created schemas exist [[GH-6356](https://github.com/hashicorp/vault/pull/6356)]
+* ui: Fix issue with select arrows on drop down menus [[GH-6627](https://github.com/hashicorp/vault/pull/6627)]
+* ui: Fix an issue where sensitive input values weren't being saved to the server [[GH-6586](https://github.com/hashicorp/vault/pull/6586)]
+* ui: Fix web cli parsing when using quoted values [[GH-6755](https://github.com/hashicorp/vault/pull/6755)]
+* ui: Fix a namespace workflow mapping identities from external namespaces by allowing arbitrary input in search-select component [[GH-6728](https://github.com/hashicorp/vault/pull/6728)]
+
+## 1.1.2 (April 18th, 2019)
+
+This is a bug fix release containing the two items below. It is otherwise unchanged from 1.1.1.
+
+BUG FIXES:
+
+* auth/okta: Fix a potential dropped error [[GH-6592](https://github.com/hashicorp/vault/pull/6592)]
+* secrets/kv: Fix a regression on upgrade where a KVv2 mount could fail to be mounted on unseal if it had previously been mounted but not written to [[GH-31](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/31)]
+
+## 1.1.1 (April 11th, 2019)
+
+SECURITY:
+
+* Given: (a) performance replication is enabled; (b) performance standbys are in use on the performance replication secondary cluster; and (c) mount filters are in use, if a mount that was previously available to a secondary is updated to be filtered out, although the data would be removed from the secondary cluster, the in-memory cache of the data would not be purged on the performance standby nodes. As a result, the previously-available data could still be read from memory if it was ever read from disk, and if this included mount configuration data this could result in token or lease issuance. The issue is fixed in this release; in prior releases either an active node changeover (such as a step-down) or a restart of the standby nodes is sufficient to cause the performance standby nodes to clear their cache. A CVE is in the process of being issued; the number is CVE-2019-11075.
+* Roles in the JWT Auth backend using the OIDC login flow (i.e. role_type of “oidc”) were not enforcing bound_cidrs restrictions, if any were configured for the role. This issue did not affect roles of type “jwt”.
+
+CHANGES:
+
+* auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [[GH-38](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/38)]
+* core/acl: New ordering defines which policy wins when there are multiple inexact matches and at least one path contains `+`. `+*` is now illegal in policy paths. The previous behavior simply selected any segment-wildcard path that matched. A short policy sketch follows this list. [[GH-6532](https://github.com/hashicorp/vault/pull/6532)]
+* replication: Due to technical limitations, mounting and unmounting was not previously possible from a performance secondary. These have been resolved, and these operations may now be run from a performance secondary.
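+
+A short, hedged Go sketch of the segment-wildcard behavior noted in the core/acl change above; the policy name, paths, and mount layout are illustrative assumptions:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+// "+" matches exactly one path segment, so this grants read on
+// secret/data/<one-segment>/config only. Note that a "+*" segment
+// is rejected as of this release.
+const teamRead = `
+path "secret/data/+/config" {
+  capabilities = ["read"]
+}
+`
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Upload the policy through the sys policy API.
+	if err := client.Sys().PutPolicy("team-read", teamRead); err != nil {
+		log.Fatal(err)
+	}
+}
+```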
+
+IMPROVEMENTS:
+
+* agent: Allow AppRole auto-auth without a secret-id [[GH-6324](https://github.com/hashicorp/vault/pull/6324)]
+* auth/gcp: Cache clients to improve performance and reduce open file usage
+* auth/jwt: Bound claims validation will now allow matching the received claims against a list of expected values [[GH-41](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/41)]
+* secret/gcp: Cache clients to improve performance and reduce open file usage
+* replication: Mounting/unmounting/remounting/mount-tuning is now supported from a performance secondary cluster
+* ui: Support for authentication via the RADIUS auth method [[GH-6488](https://github.com/hashicorp/vault/pull/6488)]
+* ui: Navigating away from secret list view will clear any page-specific filter that was applied [[GH-6511](https://github.com/hashicorp/vault/pull/6511)]
+* ui: Improved the display when OIDC auth errors [[GH-6553](https://github.com/hashicorp/vault/pull/6553)]
+
+BUG FIXES:
+
+* agent: Allow auto-auth to be used with caching without having to define any sinks [[GH-6468](https://github.com/hashicorp/vault/pull/6468)]
+* agent: Disallow some nonsensical config file combinations [[GH-6471](https://github.com/hashicorp/vault/pull/6471)]
+* auth/ldap: Fix CN check not working if CN was not all in uppercase [[GH-6518](https://github.com/hashicorp/vault/pull/6518)]
+* auth/jwt: The CLI helper for OIDC logins will now open the browser to the correct URL when running on Windows [[GH-37](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/37)]
+* auth/jwt: Fix OIDC login issue where configured TLS certs weren't being used [[GH-40](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/40)]
+* auth/jwt: Fix an issue where the `oidc_scopes` parameter was not being included in the response to a role read request [[GH-35](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/35)]
+* core: Fix seal migration case when migrating to Shamir and a seal block wasn't explicitly specified [[GH-6455](https://github.com/hashicorp/vault/pull/6455)]
+* core: Fix unwrapping when using namespaced wrapping tokens [[GH-6536](https://github.com/hashicorp/vault/pull/6536)]
+* core: Fix incorrect representation of required properties in OpenAPI output [[GH-6490](https://github.com/hashicorp/vault/pull/6490)]
+* core: Fix deadlock that could happen when using the UI [[GH-6560](https://github.com/hashicorp/vault/pull/6560)]
+* identity: Fix updating groups removing existing members [[GH-6527](https://github.com/hashicorp/vault/pull/6527)]
+* identity: Properly invalidate group alias in performance secondary [[GH-6564](https://github.com/hashicorp/vault/pull/6564)]
+* identity: Use namespace context when loading entities and groups to ensure merging of duplicate entries works properly [[GH-6563](https://github.com/hashicorp/vault/pull/6563)]
+* replication: Fix performance standby election failure [[GH-6561](https://github.com/hashicorp/vault/pull/6561)]
+* replication: Fix mount filter invalidation on performance standby nodes
+* replication: Fix license reloading on performance standby nodes
+* replication: Fix handling of control groups on performance standby nodes
+* replication: Fix some forwarding scenarios with request bodies using performance standby nodes [[GH-6538](https://github.com/hashicorp/vault/pull/6538)]
+* secret/gcp: Fix roleset binding when using JSON [[GH-27](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/27)]
+* secret/pki: Use `uri_sans` param when not using CSR parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)]
+  parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)]
+* storage/dynamodb: Fix a race condition possible in HA configurations that could
+  leave the cluster without a leader [[GH-6512](https://github.com/hashicorp/vault/pull/6512)]
+* ui: Fix an issue where in production builds OpenAPI model generation was
+  failing, causing any form using it to render labels with missing fields [[GH-6474](https://github.com/hashicorp/vault/pull/6474)]
+* ui: Fix issue where the nav would be hidden when moving between namespaces [[GH-6473](https://github.com/hashicorp/vault/pull/6473)]
+* ui: Secrets will always show in the nav regardless of access to cubbyhole [[GH-6477](https://github.com/hashicorp/vault/pull/6477)]
+* ui: fix SSH OTP generation [[GH-6540](https://github.com/hashicorp/vault/pull/6540)]
+* ui: add polyfill to load UI in IE11 [[GH-6567](https://github.com/hashicorp/vault/pull/6567)]
+* ui: Fix issue where some elements would fail to work properly if using ACLs
+  with segment-wildcard paths (`/+/` segments) [[GH-6525](https://github.com/hashicorp/vault/pull/6525)]
+
+## 1.1.0 (March 18th, 2019)
+
+CHANGES:
+
+* auth/jwt: The `groups_claim_delimiter_pattern` field has been removed. If the
+  groups claim is not at the top level, it can now be specified as a
+  [JSONPointer](https://tools.ietf.org/html/rfc6901).
+* auth/jwt: Roles now have a "role type" parameter with a default type of
+  "oidc". To configure new JWT roles, a role type of "jwt" must be explicitly
+  specified.
+* cli: CLI commands deprecated in 0.9.2 are now removed. Please see the CLI
+  help/warning output in previous versions of Vault for updated commands.
+* core: Vault no longer automatically mounts a K/V backend at the "secret/"
+  path when initializing Vault
+* core: Vault's cluster port will now be open at all times on HA standby nodes
+* plugins: Vault no longer supports running netRPC plugins. These were
+  deprecated in favor of gRPC based plugins and any plugin built since 0.9.4
+  defaults to gRPC. Older plugins may need to be recompiled against the latest
+  Vault dependencies.
+
+FEATURES:
+
+* **Vault Agent Caching**: Vault Agent can now be configured to act as a
+  caching proxy to Vault. Clients can send requests to Vault Agent and the
+  request will be proxied to the Vault server and cached locally in Agent.
+  Currently Agent will cache generated leases and tokens and keep them
+  renewed. The proxy can also use the Auto Auth feature so clients do not need
+  to authenticate to Vault, but rather can make requests to Agent and have
+  Agent fully manage token lifecycle.
+* **OIDC Redirect Flow Support**: The JWT auth backend now supports OIDC
+  roles. These allow authentication via an OIDC-compliant provider via the
+  user's browser. The login may be initiated from the Vault UI or through
+  the `vault login` command.
+* **ACL Path Wildcard**: ACL paths can now use the `+` character to enable
+  wildcard matching for a single directory in the path definition.
+* **Transit Auto Unseal**: Vault can now be configured to use the Transit
+  Secret Engine in another Vault cluster as an auto unseal provider.
+
+IMPROVEMENTS:
+
+* auth/jwt: A default role can be set. It will be used during JWT/OIDC logins if
+  a role is not specified.
+* auth/jwt: Arbitrary claims data can now be copied into token & alias metadata.
+* auth/jwt: An arbitrary set of bound claims can now be configured for a role.
+* auth/jwt: The name "oidc" has been added as an alias for the jwt backend. Either
+  name may be specified in the `auth enable` command.
+* command/server: A warning will be printed when 'tls_cipher_suites' includes a
+  blacklisted cipher suite or all cipher suites are blacklisted by the HTTP/2
+  specification [[GH-6300](https://github.com/hashicorp/vault/pull/6300)]
+* core/metrics: Prometheus pull support using a new sys/metrics endpoint. [[GH-5308](https://github.com/hashicorp/vault/pull/5308)]
+* core: On non-windows platforms a SIGUSR2 will make the server log a dump of
+  all running goroutines' stack traces for debugging purposes [[GH-6240](https://github.com/hashicorp/vault/pull/6240)]
+* replication: The initial replication indexing process on newly initialized or upgraded
+  clusters now runs asynchronously
+* sentinel: Add token namespace id and path, available in rules as
+  token.namespace.id and token.namespace.path
+* ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms.
+  This means it will not be necessary to add fields on the Go and JS sides in the future.
+  [[GH-6209](https://github.com/hashicorp/vault/pull/6209)]
+
+BUG FIXES:
+
+* auth/jwt: Apply `bound_claims` validation across all login paths
+* auth/jwt: Update `bound_audiences` validation during non-OIDC logins to accept
+  any matched audience, as documented and handled in OIDC logins [[GH-30](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/30)]
+* auth/token: Fix issue where empty values for token role update call were
+  ignored [[GH-6314](https://github.com/hashicorp/vault/pull/6314)]
+* core: The `operator migrate` command will no longer hang on empty key names
+  [[GH-6371](https://github.com/hashicorp/vault/pull/6371)]
+* identity: Fix a panic at login when external group has a nil alias [[GH-6230](https://github.com/hashicorp/vault/pull/6230)]
+* namespaces: Clear out identity store items upon namespace deletion
+* replication/perfstandby: Fixed a bug causing performance standbys to wait
+  longer than necessary after forwarding a write to the active node
+* replication/mountfilter: Fix a deadlock that could occur when mount filters
+  were updated [[GH-6426](https://github.com/hashicorp/vault/pull/6426)]
+* secret/kv: Fix issue where a v1→v2 upgrade could run on a performance
+  standby when using a local mount
+* secret/ssh: Fix for a bug where attempting to delete the last ssh role
+  in the zeroaddress configuration could fail [[GH-6390](https://github.com/hashicorp/vault/pull/6390)]
+* secret/totp: Uppercase provided keys so they don't fail base32 validation
+  [[GH-6400](https://github.com/hashicorp/vault/pull/6400)]
+* secret/transit: Multiple HMAC, Sign or Verify operations can now be
+  performed with one API call using the new `batch_input` parameter [[GH-5875](https://github.com/hashicorp/vault/pull/5875)]
+* sys: `sys/internal/ui/mounts` will no longer return secret or auth mounts
+  that have been filtered. Similarly, `sys/internal/ui/mount/:path` will
+  return an error response if a filtered mount path is requested.
+  [[GH-6412](https://github.com/hashicorp/vault/pull/6412)]
+* ui: Fix for a bug where you couldn't access the data tab after clicking on
+  wrap details on the unwrap page [[GH-6404](https://github.com/hashicorp/vault/pull/6404)]
+* ui: Fix an issue where the policies tab was erroneously hidden [[GH-6301](https://github.com/hashicorp/vault/pull/6301)]
+* ui: Fix encoding issues with kv interfaces [[GH-6294](https://github.com/hashicorp/vault/pull/6294)]
+
+## 1.0.3.1 (March 14th, 2019) (Enterprise Only)
+
+SECURITY:
+
+* A regression was fixed in replication mount filter code introduced in Vault
+  1.0 that caused the underlying filtered data to be replicated to
+  secondaries. This data was not accessible to users via Vault's API, but it
+  could be read via a combination of privileged configuration file changes and
+  Vault commands. Upgrading to this version or 1.1 will fix this issue and
+  cause the replicated data to be deleted from filtered secondaries. More
+  information was sent to customer contacts on file.
+
+## 1.0.3 (February 12th, 2019)
+
+CHANGES:
+
+* New AWS authentication plugin mounts will default to using the generated
+  role ID as the Identity alias name. This applies to both EC2 and IAM auth.
+  Existing mounts that explicitly set this value will not be affected but
+  mounts that specified no preference will switch over on upgrade.
+* The default policy now allows a token to look up its associated identity
+  entity either by name or by id [[GH-6105](https://github.com/hashicorp/vault/pull/6105)]
+* The Vault UI's navigation and onboarding wizard now only display items that
+  are permitted by a user's policy [[GH-5980](https://github.com/hashicorp/vault/pull/5980), [GH-6094](https://github.com/hashicorp/vault/pull/6094)]
+* An issue was fixed that caused recovery keys to not work on secondary
+  clusters when using a different unseal mechanism/key than the primary. This
+  would be hit if the cluster was rekeyed or initialized after 1.0. We recommend
+  rekeying the recovery keys on the primary cluster if you meet the above
+  requirements.
+
+FEATURES:
+
+* **cURL Command Output**: CLI commands can now use the `-output-curl-string`
+  flag to print out an equivalent cURL command (see the sketch after this
+  list).
+* **Response Headers From Plugins**: Plugins can now send back headers that
+  will be included in the response to a client. The set of allowed headers can
+  be managed by the operator.
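+
+A minimal sketch (the secret path is hypothetical); the flag prints the
+equivalent cURL invocation, including address and headers, instead of
+performing the request:
+
+```
+# Print an equivalent cURL command rather than executing the read:
+$ vault read -output-curl-string secret/my-app
+```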
+
+IMPROVEMENTS:
+
+* auth/aws: AWS EC2 authentication can optionally create entity aliases by
+  role ID [[GH-6133](https://github.com/hashicorp/vault/pull/6133)]
+* auth/jwt: The supported set of signing algorithms is now configurable [JWT
+  plugin [GH-16](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/16)]
+* core: When starting from an uninitialized state, HA nodes will now attempt
+  to auto-unseal using a configured auto-unseal mechanism after the active
+  node initializes Vault [[GH-6039](https://github.com/hashicorp/vault/pull/6039)]
+* secret/database: Add socket keepalive option for Cassandra [[GH-6201](https://github.com/hashicorp/vault/pull/6201)]
+* secret/ssh: Add signed key constraints, allowing enforcement of key types
+  and minimum key sizes [[GH-6030](https://github.com/hashicorp/vault/pull/6030)]
+* secret/transit: ECDSA signatures can now be marshaled in JWS-compatible
+  fashion [[GH-6077](https://github.com/hashicorp/vault/pull/6077)]
+* storage/etcd: Support SRV service names [[GH-6087](https://github.com/hashicorp/vault/pull/6087)]
+* storage/aws: Support specifying a KMS key ID for server-side encryption
+  [[GH-5996](https://github.com/hashicorp/vault/pull/5996)]
+
+BUG FIXES:
+
+* core: Fix a rare case where a standby whose connection is entirely torn down
+  to the active node, then reconnects to the same active node, may not
+  successfully resume operation [[GH-6167](https://github.com/hashicorp/vault/pull/6167)]
+* cors: Don't duplicate headers when they're written [[GH-6207](https://github.com/hashicorp/vault/pull/6207)]
+* identity: Persist merged entities only on the primary [[GH-6075](https://github.com/hashicorp/vault/pull/6075)]
+* replication: Fix a potential race when a token is created and then used with
+  a performance standby very quickly, before an associated entity has been
+  replicated. If the entity is not found in this scenario, the request will
+  forward to the active node.
+* replication: Fix issue where recovery keys would not work on secondary
+  clusters if using a different unseal mechanism than the primary.
+* replication: Fix a "failed to register lease" error when using performance
+  standbys
+* storage/postgresql: The `Get` method will now return an Entry object with
+  the `Key` member correctly populated with the full path that was requested
+  instead of just the last path element [[GH-6044](https://github.com/hashicorp/vault/pull/6044)]
+
+## 1.0.2 (January 15th, 2019)
+
+SECURITY:
+
+* When creating a child token from a parent with `bound_cidrs`, the list of
+  CIDRs would not be propagated to the child token, allowing the child token
+  to be used from any address.
+
+CHANGES:
+
+* secret/aws: Role now returns `credential_type` instead of `credential_types`
+  to match role input. If a legacy role can supply more than one credential
+  type, the types will be concatenated with a `,`.
+* physical/dynamodb, autoseal/aws: Instead of Vault performing environment
+  variable handling, and overriding static (config file) values if found, we
+  use the default AWS SDK env handling behavior, which also looks for
+  deprecated values. If you were previously providing both config values and
+  environment values, please ensure the config values are unset if you want to
+  use environment values.
+* Namespaces (Enterprise): Providing "root" as the header value for
+  `X-Vault-Namespace` will perform the request on the root namespace. This is
+  equivalent to providing an empty value (see the example after this list).
+  Creating a namespace called "root" in the root namespace is disallowed.
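+
+As an illustration (assuming `VAULT_ADDR` and `VAULT_TOKEN` are set), these
+two requests now behave identically, both targeting the root namespace:
+
+```
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" -H "X-Vault-Namespace: root" \
+    "$VAULT_ADDR/v1/sys/mounts"
+$ curl -H "X-Vault-Token: $VAULT_TOKEN" \
+    "$VAULT_ADDR/v1/sys/mounts"
+```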
+
+FEATURES:
+
+* **InfluxDB Database Plugin**: Use Vault to dynamically create and manage InfluxDB
+  users
+
+IMPROVEMENTS:
+
+* auth/aws: AWS EC2 authentication can optionally create entity aliases by
+  image ID [[GH-5846](https://github.com/hashicorp/vault/pull/5846)]
+* autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal
+  [[GH-5999](https://github.com/hashicorp/vault/pull/5999)]
+* physical/foundationdb: TLS support added. [[GH-5800](https://github.com/hashicorp/vault/pull/5800)]
+
+BUG FIXES:
+
+* api: Fix a couple of places where we were using the `LIST` HTTP verb
+  (necessary to get the right method into the wrapping lookup function) and
+  not then modifying it to a `GET`; although this is officially the verb Vault
+  uses for listing and it's fully legal to use custom verbs, since many WAFs
+  and API gateways choke on anything outside of RFC-standardized verbs we fall
+  back to `GET` [[GH-6026](https://github.com/hashicorp/vault/pull/6026)]
+* autoseal/aws: Fix reading session tokens when AWS access key/secret key are
+  also provided [[GH-5965](https://github.com/hashicorp/vault/pull/5965)]
+* command/operator/rekey: Fix help output showing `-delete-backup` when it
+  should show `-backup-delete` [[GH-5981](https://github.com/hashicorp/vault/pull/5981)]
+* core: Fix bound_cidrs not being propagated to child tokens
+* replication: Correctly forward identity entity creation that originates from
+  performance standby nodes (Enterprise)
+* secret/aws: Make input `credential_type` match the output type (string, not
+  array) [[GH-5972](https://github.com/hashicorp/vault/pull/5972)]
+* secret/cubbyhole: Properly cleanup cubbyhole after token revocation [[GH-6006](https://github.com/hashicorp/vault/pull/6006)]
+* secret/pki: Fix reading certificates on windows with the file storage backend [[GH-6013](https://github.com/hashicorp/vault/pull/6013)]
+* ui (enterprise): properly display perf-standby count on the license page [[GH-5971](https://github.com/hashicorp/vault/pull/5971)]
+* ui: fix disappearing nested secrets and go to the nearest parent when deleting
+  a secret [[GH-5976](https://github.com/hashicorp/vault/pull/5976)]
+* ui: fix error where deleting an item via the context menu would fail if the
+  item name contained dots [[GH-6018](https://github.com/hashicorp/vault/pull/6018)]
+* ui: allow saving of kv secret after an errored save attempt [[GH-6022](https://github.com/hashicorp/vault/pull/6022)]
+* ui: fix display of kv-v1 secret containing a key named "keys" [[GH-6023](https://github.com/hashicorp/vault/pull/6023)]
+
+## 1.0.1 (December 14th, 2018)
+
+SECURITY:
+
+* Update version of Go to 1.11.3 to fix a Go bug that corresponds to
+  CVE-2018-16875
+* Database user revocation: If a client has configured custom revocation
+  statements for a role with a value of `""`, that statement would be executed
+  verbatim, resulting in a lack of actual revocation but success for the
+  operation. Vault will now strip empty statements from any provided list; as
+  a result if an empty statement is provided, it will behave as if no
+  statement is provided, falling back to the default revocation statement.
+
+CHANGES:
+
+* secret/database: On role read, empty statements will be returned as empty
+  slices instead of potentially being returned as JSON null values. This makes
+  it more in line with other parts of Vault and makes it easier for statically
+  typed languages to interpret the values (see the example below).
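+
+For example (hypothetical role name), a role whose revocation statements were
+never set now reads back with an empty array rather than `null`:
+
+```
+# Fields such as revocation_statements now come back as [] instead of
+# JSON null when unset:
+$ vault read -format=json database/roles/my-role
+```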
+
+IMPROVEMENTS:
+
+* cli: Strip iTerm extra characters from password manager input [[GH-5837](https://github.com/hashicorp/vault/pull/5837)]
+* command/server: Setting default kv engine to v1 in -dev mode can now be
+  specified via -dev-kv-v1 [[GH-5919](https://github.com/hashicorp/vault/pull/5919)]
+* core: Add operationId field to OpenAPI output [[GH-5876](https://github.com/hashicorp/vault/pull/5876)]
+* ui: Added ability to search for Group and Policy IDs when creating Groups
+  and Entities instead of typing them in manually
+
+BUG FIXES:
+
+* auth/azure: Cache azure authorizer [15]
+* auth/gcp: Remove explicit project for service account in GCE authorizer [[GH-58](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/58)]
+* cli: Show correct stored keys/threshold for autoseals [[GH-5910](https://github.com/hashicorp/vault/pull/5910)]
+* cli: Fix backwards compatibility fallback when listing plugins [[GH-5913](https://github.com/hashicorp/vault/pull/5913)]
+* core: Fix upgrades when the seal config had been created on early versions
+  of Vault [[GH-5956](https://github.com/hashicorp/vault/pull/5956)]
+* namespaces: Correctly reload the proper mount when tuning or reloading the
+  mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)]
+* secret/azure: Cache azure authorizer [19]
+* secret/database: Strip empty statements on user input [[GH-5955](https://github.com/hashicorp/vault/pull/5955)]
+* secret/gcpkms: Add path for retrieving the public key [[GH-5](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/5)]
+* secret/pki: Fix panic that could occur during tidy operation when malformed
+  data was found [[GH-5931](https://github.com/hashicorp/vault/pull/5931)]
+* secret/pki: Strip empty line in ca_chain output [[GH-5779](https://github.com/hashicorp/vault/pull/5779)]
+* ui: Fixed a bug where the web CLI was not usable via the `fullscreen`
+  command [[GH-5909](https://github.com/hashicorp/vault/pull/5909)]
+* ui: Fix a bug where you couldn't write a jwt auth method config [[GH-5936](https://github.com/hashicorp/vault/pull/5936)]
+
+## 1.0.0 (December 3rd, 2018)
+
+SECURITY:
+
+* When debugging a customer incident we discovered that in the case of
+  malformed data from an autoseal mechanism, Vault's master key could be
+  logged in Vault's server log. For this to happen, the data would need to be
+  modified by the autoseal mechanism after being submitted to it by Vault but
+  prior to encryption, or after decryption, prior to it being returned to
+  Vault. To put it another way, it requires the data that Vault submits for
+  encryption to not match the data returned after decryption. It is not
+  sufficient for the autoseal mechanism to return an error, and it cannot be
+  triggered by an outside attacker changing the on-disk ciphertext as all
+  autoseal mechanisms use authenticated encryption. We do not believe that
+  this is generally a cause for concern; since it involves the autoseal
+  mechanism returning bad data to Vault but with no error, in a working Vault
+  configuration this code path should never be hit, and if hitting this issue
+  Vault will not be unsealing properly anyway so it will be obvious what is
+  happening and an immediate rekey of the master key can be performed after
+  service is restored. We have filed for a CVE (CVE-2018-19786) and a CVSS V3
+  score of 5.2 has been assigned.
+
+CHANGES:
+
+* Tokens are now prefixed by a designation to indicate what type of token they
+  are. Service tokens start with `s.` and batch tokens start with `b.`.
+  Existing tokens will still work (they are all of service type and will be
+  considered as such). Prefixing allows us to be more efficient when consuming
+  a token, which keeps the critical path of requests fast.
+* Paths within `auth/token` that allow specifying a token or accessor in the
+  URL have been removed. These have been deprecated since March 2016 and
+  undocumented, but were retained for backwards compatibility. They shouldn't
+  be used due to the possibility of those paths being logged, so at this point
+  they are simply being removed.
+* Vault will no longer accept updates when the storage key has invalid UTF-8
+  character encoding [[GH-5819](https://github.com/hashicorp/vault/pull/5819)]
+* Mount/Auth tuning the `options` map on backends will now upsert any provided
+  values, and keep any of the existing values in place if not provided. The
+  options map itself cannot be unset once it's set, but the keypairs within the
+  map can be unset if an empty value is provided, with the exception of the
+  `version` keypair which is handled differently for KVv2 purposes.
+* Agent no longer automatically reauthenticates when new credentials are
+  detected. It's not strictly necessary and in some cases was causing
+  reauthentication much more often than intended.
+* HSM Regenerate Key Support Removed: Vault no longer supports destroying and
+  regenerating encryption keys on an HSM; it only supports creating them.
+  Although this has never been a source of a customer incident, it is simply a
+  code path that is too trivial to activate, especially by mistyping
+  `regenerate_key` instead of `generate_key`.
+* Barrier Config Upgrade (Enterprise): When upgrading from Vault 0.8.x, the
+  seal type in the barrier config storage entry will be upgraded from
+  "hsm-auto" to "awskms" or "pkcs11" upon unseal if using AWSKMS or HSM seals.
+  If performing seal migration, the barrier config should first be upgraded
+  prior to starting migration.
+* Go API client uses pooled HTTP client: The Go API client now uses a
+  connection-pooling HTTP client by default. For CLI operations this makes no
+  difference but it should provide significant performance benefits for those
+  writing custom clients using the Go API library. As before, this can be
+  changed to any custom HTTP client by the caller.
+* Builtin Secret Engines and Auth Methods are integrated deeper into the
+  plugin system. The plugin catalog can now override builtin plugins with
+  custom versions of the same name. Additionally the plugin system now
+  requires a plugin `type` field when configuring plugins; this can be "auth",
+  "database", or "secret".
+
+FEATURES:
+
+* **Auto-Unseal in Open Source**: Cloud-based auto-unseal has been migrated
+  from Enterprise to Open Source. We've created a migrator to allow migrating
+  between Shamir seals and auto unseal methods.
+* **Batch Tokens**: Batch tokens trade off some features of service tokens for no
+  storage overhead, and in most cases can be used across performance
+  replication clusters.
+* **Replication Speed Improvements**: We've worked hard to speed up a lot of
+  operations when using Vault Enterprise Replication.
+* **GCP KMS Secrets Engine**: This new secrets engine provides a Transit-like
+  pattern to keys stored within GCP Cloud KMS.
+* **AppRole support in Vault Agent Auto-Auth**: You can now use AppRole
+  credentials when having Agent automatically authenticate to Vault
+* **OpenAPI Support**: Descriptions of mounted backends can be served directly
+  from Vault
+* **Kubernetes Projected Service Account Tokens**: Projected Service Account
+  Tokens are now supported in Kubernetes auth
+* **Response Wrapping in UI**: Added ability to wrap secrets and easily copy
+  the wrap token or secret JSON in the UI
+
+IMPROVEMENTS:
+
+* agent: Support for configuring the location of the kubernetes service account
+  [[GH-5725](https://github.com/hashicorp/vault/pull/5725)]
+* auth/token: New tokens are indexed in storage using HMAC-SHA256 instead of SHA1
+* secret/totp: Allow @ character to be part of key name [[GH-5652](https://github.com/hashicorp/vault/pull/5652)]
+* secret/consul: Add support for new policy based tokens added in Consul 1.4
+  [[GH-5586](https://github.com/hashicorp/vault/pull/5586)]
+* ui: Improve the token auto-renew warning, and automatically begin renewal
+  when a user becomes active again [[GH-5662](https://github.com/hashicorp/vault/pull/5662)]
+* ui: The unbundled UI page now has some styling [[GH-5665](https://github.com/hashicorp/vault/pull/5665)]
+* ui: Improved banner and popup design [[GH-5672](https://github.com/hashicorp/vault/pull/5672)]
+* ui: Added token type to auth method mount config [[GH-5723](https://github.com/hashicorp/vault/pull/5723)]
+* ui: Display additional wrap info when unwrapping. [[GH-5664](https://github.com/hashicorp/vault/pull/5664)]
+* ui: Empty states have updated styling and link to relevant actions and
+  documentation [[GH-5758](https://github.com/hashicorp/vault/pull/5758)]
+* ui: Allow editing of KV V2 data when a token doesn't have capabilities to
+  read secret metadata [[GH-5879](https://github.com/hashicorp/vault/pull/5879)]
+
+BUG FIXES:
+
+* agent: Fix auth when multiple redirects occur [[GH-5814](https://github.com/hashicorp/vault/pull/5814)]
+* cli: Restore the `-policy-override` flag [[GH-5826](https://github.com/hashicorp/vault/pull/5826)]
+* core: Fix rekey progress reset, which did not happen under certain
+  circumstances.
[[GH-5743](https://github.com/hashicorp/vault/pull/5743)] +* core: Migration from autounseal to shamir will clean up old keys [[GH-5671](https://github.com/hashicorp/vault/pull/5671)] +* identity: Update group memberships when entity is deleted [[GH-5786](https://github.com/hashicorp/vault/pull/5786)] +* replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] +* replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] +* secrets/azure: Fix valid roles being rejected for duplicate ids despite + having distinct scopes + [[GH-16](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16)] +* storage/gcs: Send md5 of values to GCS to avoid potential corruption + [[GH-5804](https://github.com/hashicorp/vault/pull/5804)] +* secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] +* secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths + for all other operations for backwards compatibility + [[GH-19](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/19)] +* ui: Allow for secret creation in kv v2 when cas_required=true [[GH-5823](https://github.com/hashicorp/vault/pull/5823)] +* ui: Fix dr secondary operation token generation via the ui [[GH-5818](https://github.com/hashicorp/vault/pull/5818)] +* ui: Fix the PKI context menu so that items load [[GH-5824](https://github.com/hashicorp/vault/pull/5824)] +* ui: Update DR Secondary Token generation command [[GH-5857](https://github.com/hashicorp/vault/pull/5857)] +* ui: Fix pagination bug where controls would be rendered once for each + item when viewing policies [[GH-5866](https://github.com/hashicorp/vault/pull/5866)] +* ui: Fix bug where `sys/leases/revoke` required 'sudo' capability to show + the revoke button in the UI [[GH-5647](https://github.com/hashicorp/vault/pull/5647)] +* ui: Fix issue where certain pages wouldn't render in a namespace [[GH-5692](https://github.com/hashicorp/vault/pull/5692)] diff --git a/CHANGELOG-v0.md b/CHANGELOG-v0.md new file mode 100644 index 000000000000..4a3f1931395b --- /dev/null +++ b/CHANGELOG-v0.md @@ -0,0 +1,3494 @@ +## 0.11.6 (December 14th, 2018) + +This release contains the three security fixes from 1.0.0 and 1.0.1 and the +following bug fixes from 1.0.0/1.0.1: + + * namespaces: Correctly reload the proper mount when tuning or reloading the + mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] + * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] + * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] + * secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] + +It is otherwise identical to 0.11.5. 
+
+## 0.11.5 (November 13th, 2018)
+
+BUG FIXES:
+
+ * agent: Fix issue when specifying two file sinks [[GH-5610](https://github.com/hashicorp/vault/pull/5610)]
+ * auth/userpass: Fix minor timing issue that could leak the presence of a
+   username [[GH-5614](https://github.com/hashicorp/vault/pull/5614)]
+ * autounseal/alicloud: Fix issue interacting with the API (Enterprise)
+ * autounseal/azure: Fix key version tracking (Enterprise)
+ * cli: Fix panic that could occur if parameters were not provided [[GH-5603](https://github.com/hashicorp/vault/pull/5603)]
+ * core: Fix buggy behavior if trying to remount into a namespace
+ * identity: Fix duplication of an entity alias during alias transfer
+   between entities [[GH-5733](https://github.com/hashicorp/vault/pull/5733)]
+ * namespaces: Fix tuning of auth mounts in a namespace
+ * ui: Fix bug where editing secrets as JSON doesn't save properly [[GH-5660](https://github.com/hashicorp/vault/pull/5660)]
+ * ui: Fix issue where IE 11 didn't render the UI and also had a broken form
+   when trying to use tool/hash [[GH-5714](https://github.com/hashicorp/vault/pull/5714)]
+
+## 0.11.4 (October 23rd, 2018)
+
+CHANGES:
+
+ * core: HA lock file is no longer copied during `operator migrate` [[GH-5503](https://github.com/hashicorp/vault/pull/5503)].
+   We've categorized this as a change, but generally this can be considered
+   just a bug fix, and no action is needed.
+
+FEATURES:
+
+ * **Transit Key Trimming**: Keys in the transit secret engine can now be
+   trimmed to remove older unused key versions
+ * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy
+   individual secret versions in the UI
+ * **Azure Existing Service Principal Support**: Credentials can now be generated
+   against an existing service principal
+
+IMPROVEMENTS:
+
+ * core: Add last WAL in leader/health output for easier debugging [[GH-5523](https://github.com/hashicorp/vault/pull/5523)]
+ * identity: Identity names will now be handled case insensitively by default.
+ This includes names of entities, aliases and groups [[GH-5404](https://github.com/hashicorp/vault/pull/5404)] + * secrets/aws: Added role-option max_sts_ttl to cap TTL for AWS STS + credentials [[GH-5500](https://github.com/hashicorp/vault/pull/5500)] + * secret/database: Allow Cassandra user to be non-superuser so long as it has + role creation permissions [[GH-5402](https://github.com/hashicorp/vault/pull/5402)] + * secret/radius: Allow setting the NAS Identifier value in the generated + packet [[GH-5465](https://github.com/hashicorp/vault/pull/5465)] + * secret/ssh: Allow usage of JSON arrays when setting zero addresses [[GH-5528](https://github.com/hashicorp/vault/pull/5528)] + * secret/transit: Allow trimming unused keys [[GH-5388](https://github.com/hashicorp/vault/pull/5388)] + * ui: Support KVv2 [[GH-5547](https://github.com/hashicorp/vault/pull/5547)], [[GH-5563](https://github.com/hashicorp/vault/pull/5563)] + * ui: Allow viewing and updating Vault license via the UI + * ui: Onboarding will now display your progress through the chosen tutorials + * ui: Dynamic secret backends obfuscate sensitive data by default and + visibility is toggleable + +BUG FIXES: + + * agent: Fix potential hang during agent shutdown [[GH-5026](https://github.com/hashicorp/vault/pull/5026)] + * auth/ldap: Fix listing of users/groups that contain slashes [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] + * core: Fix memory leak during some expiration calls [[GH-5505](https://github.com/hashicorp/vault/pull/5505)] + * core: Fix generate-root operations requiring empty `otp` to be provided + instead of an empty body [[GH-5495](https://github.com/hashicorp/vault/pull/5495)] + * identity: Remove lookup check during alias removal from entity [[GH-5524](https://github.com/hashicorp/vault/pull/5524)] + * secret/pki: Fix TTL/MaxTTL check when using `sign-verbatim` [[GH-5549](https://github.com/hashicorp/vault/pull/5549)] + * secret/pki: Fix regression in 0.11.2+ causing the NotBefore value of + generated certificates to be set to the Unix epoch if the role value was not + set, instead of using the default of 30 seconds [[GH-5481](https://github.com/hashicorp/vault/pull/5481)] + * storage/mysql: Use `varbinary` instead of `varchar` when creating HA tables + [[GH-5529](https://github.com/hashicorp/vault/pull/5529)] + +## 0.11.3 (October 8th, 2018) + +SECURITY: + + * Revocation: A regression in 0.11.2 (OSS) and 0.11.0 (Enterprise) caused + lease IDs containing periods (`.`) to not be revoked properly. Upon startup + when revocation is tried again these should now revoke successfully. + +IMPROVEMENTS: + + * auth/ldap: Listing of users and groups return absolute paths [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] + * secret/pki: OID SANs can now specify `*` to allow any value [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] + +BUG FIXES: + + * auth/ldap: Fix panic if specific values were given to be escaped [[GH-5471](https://github.com/hashicorp/vault/pull/5471)] + * cli/auth: Fix panic if `vault auth` was given no parameters [[GH-5473](https://github.com/hashicorp/vault/pull/5473)] + * secret/database/mongodb: Fix panic that could occur at high load [[GH-5463](https://github.com/hashicorp/vault/pull/5463)] + * secret/pki: Fix CA generation not allowing OID SANs [[GH-5459](https://github.com/hashicorp/vault/pull/5459)] + +## 0.11.2 (October 2nd, 2018) + +CHANGES: + + * `sys/seal-status` now includes an `initialized` boolean in the output. 
+   If Vault is not initialized, it will return a `200` with this value set
+   `false` instead of a `400`.
+ * `passthrough_request_headers` will now deny certain headers from being
+   provided to backends based on a global denylist.
+ * Token Format: Tokens are now represented as a base62 value; tokens in
+   namespaces will have the namespace identifier appended. (This appeared in
+   Enterprise in 0.11.0, but is only in OSS in 0.11.2.)
+
+FEATURES:
+
+ * **AWS Secret Engine Root Credential Rotation**: The credential used by the AWS
+   secret engine can now be rotated, to ensure that only Vault knows the
+   credentials it is using [[GH-5140](https://github.com/hashicorp/vault/pull/5140)]
+ * **Storage Backend Migrator**: A new `operator migrate` command allows offline
+   migration of data between two storage backends
+ * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS
+   can now be used as a seal for Auto Unseal and Seal Wrapping
+
+BUG FIXES:
+
+ * auth/okta: Fix reading deprecated `token` parameter if a token was
+   previously set in the configuration [[GH-5409](https://github.com/hashicorp/vault/pull/5409)]
+ * core: Re-add deprecated capabilities information for now [[GH-5360](https://github.com/hashicorp/vault/pull/5360)]
+ * core: Fix handling of cyclic token relationships [[GH-4803](https://github.com/hashicorp/vault/pull/4803)]
+ * storage/mysql: Fix locking on MariaDB [[GH-5343](https://github.com/hashicorp/vault/pull/5343)]
+ * replication: Fix DR API when using a token [[GH-5398](https://github.com/hashicorp/vault/pull/5398)]
+ * identity: Ensure old group alias is removed when a new one is written [[GH-5350](https://github.com/hashicorp/vault/pull/5350)]
+ * storage/alicloud: Don't call uname on package init [[GH-5358](https://github.com/hashicorp/vault/pull/5358)]
+ * secrets/jwt: Fix issue where request context would be canceled too early
+ * ui: fix requirement of `update` capability for AWS IAM creds generation
+   [[GH-5294](https://github.com/hashicorp/vault/pull/5294)]
+ * ui: fix calculation of token expiry [[GH-5435](https://github.com/hashicorp/vault/pull/5435)]
+
+IMPROVEMENTS:
+
+ * auth/aws: The identity alias name can now be configured to be either the IAM
+   unique ID of the IAM principal or the ARN of the caller identity [[GH-5247](https://github.com/hashicorp/vault/pull/5247)]
+ * auth/cert: Add allowed_organizational_units support [[GH-5252](https://github.com/hashicorp/vault/pull/5252)]
+ * cli: Format TTLs for non-secret responses [[GH-5367](https://github.com/hashicorp/vault/pull/5367)]
+ * identity: Support operating on entities and groups by their names [[GH-5355](https://github.com/hashicorp/vault/pull/5355)]
+ * plugins: Add `env` parameter when registering plugins to the catalog to allow
+   operators to include environment variables during plugin execution.
[[GH-5359](https://github.com/hashicorp/vault/pull/5359)] + * secrets/aws: WAL Rollback improvements [[GH-5202](https://github.com/hashicorp/vault/pull/5202)] + * secrets/aws: Allow specifying STS role-default TTLs [[GH-5138](https://github.com/hashicorp/vault/pull/5138)] + * secrets/pki: Add configuration support for setting NotBefore [[GH-5325](https://github.com/hashicorp/vault/pull/5325)] + * core: Support for passing the Vault token via an Authorization Bearer header [[GH-5397](https://github.com/hashicorp/vault/pull/5397)] + * replication: Reindex process now runs in the background and does not block other + vault operations + * storage/zookeeper: Enable TLS based communication with Zookeeper [[GH-4856](https://github.com/hashicorp/vault/pull/4856)] + * ui: you can now init a cluster with a seal config [[GH-5428](https://github.com/hashicorp/vault/pull/5428)] + * ui: added the option to force promote replication clusters [[GH-5438](https://github.com/hashicorp/vault/pull/5438)] + * replication: Allow promotion of a secondary when data is syncing with a "force" flag + +## 0.11.1.1 (September 17th, 2018) (Enterprise Only) + +BUG FIXES: + + * agent: Fix auth handler-based wrapping of output tokens [[GH-5316](https://github.com/hashicorp/vault/pull/5316)] + * core: Properly store the replication checkpoint file if it's larger than the + storage engine's per-item limit + * core: Improve WAL deletion rate + * core: Fix token creation on performance standby nodes + * core: Fix unwrapping inside a namespace + * core: Always forward tidy operations from performance standby nodes + +IMPROVEMENTS: + + * auth/aws: add support for key/value pairs or JSON values for + `iam_request_headers` with IAM auth method [[GH-5320](https://github.com/hashicorp/vault/pull/5320)] + * auth/aws, secret/aws: Throttling errors from the AWS API will now be + reported as 502 errors by Vault, along with the original error [[GH-5270](https://github.com/hashicorp/vault/pull/5270)] + * replication: Start fetching during a sync from where it previously errored + +## 0.11.1 (September 6th, 2018) + +SECURITY: + + * Random Byte Reading in Barrier: Prior to this release, Vault was not + properly checking the error code when reading random bytes for the IV for + AES operations in its cryptographic barrier. Specifically, this means that + such an IV could potentially be zero multiple times, causing nonce re-use + and weakening the security of the key. On most platforms this should never + happen because reading from kernel random sources is non-blocking and always + successful, but there may be platform-specific behavior that has not been + accounted for. (Vault has tests to check exactly this, and the tests have + never seen nonce re-use.) + +FEATURES: + + * AliCloud Agent Support: Vault Agent can now authenticate against the + AliCloud auth method. + * UI: Enable AliCloud auth method and Azure secrets engine via the UI. 
+
+IMPROVEMENTS:
+
+ * core: Logging level for most logs (not including secrets/auth plugins) can
+   now be changed on-the-fly via `SIGHUP`, reading the desired value from
+   Vault's config file [[GH-5280](https://github.com/hashicorp/vault/pull/5280)]
+
+BUG FIXES:
+
+ * core: Ensure we use a background context when stepping down [[GH-5290](https://github.com/hashicorp/vault/pull/5290)]
+ * core: Properly check error return from random byte reading [[GH-5277](https://github.com/hashicorp/vault/pull/5277)]
+ * core: Re-add `sys/` top-route injection for now [[GH-5241](https://github.com/hashicorp/vault/pull/5241)]
+ * core: Policies stored in minified JSON would return an error [[GH-5229](https://github.com/hashicorp/vault/pull/5229)]
+ * core: Evaluate templated policies in capabilities check [[GH-5250](https://github.com/hashicorp/vault/pull/5250)]
+ * identity: Update MemDB with identity group alias while loading groups [[GH-5289](https://github.com/hashicorp/vault/pull/5289)]
+ * secrets/database: Fix nil pointer when revoking some leases [[GH-5262](https://github.com/hashicorp/vault/pull/5262)]
+ * secrets/pki: Fix sign-verbatim losing extra Subject attributes [[GH-5245](https://github.com/hashicorp/vault/pull/5245)]
+ * secrets/pki: Remove certificates from store when tidying revoked
+   certificates and simplify API [[GH-5231](https://github.com/hashicorp/vault/pull/5231)]
+ * ui: JSON editor will not coerce input to an object, and will now show an
+   error about Vault expecting an object [[GH-5271](https://github.com/hashicorp/vault/pull/5271)]
+ * ui: authentication form will now default to any methods that have been tuned
+   to show up for unauthenticated users [[GH-5281](https://github.com/hashicorp/vault/pull/5281)]
+
+
+## 0.11.0 (August 28th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * Request Timeouts: A default request timeout of 90s is now enforced. This
+   setting can be overwritten in the config file. If you anticipate requests
+   taking longer than 90s this setting should be updated before upgrading.
+ * (NOTE: will be re-added into 0.11.1 as it broke more than anticipated. There
+   will be some further guidelines around when this will be removed again.)
+ * `sys/` Top Level Injection: For the last two years for backwards
+   compatibility data for various `sys/` routes has been injected into both the
+   Secret's Data map and into the top level of the JSON response object.
+   However, this has some subtle issues that pop up from time to time and is
+   becoming increasingly complicated to maintain, so it's finally being
+   removed.
+ * Path Fallback for List Operations: For a very long time Vault has
+   automatically adjusted `list` operations to always end in a `/`, as list
+   operations operate on prefixes, so all list operations by definition end
+   with `/`. This was done server-side so it affects all clients. However, this
+   has also led to a lot of confusion for users writing policies that assume
+   that the path that they use in the CLI is the path used internally. Starting
+   in 0.11, ACL policies gain a new fallback rule for listing: they will use a
+   matching path ending in `/` if available, but if not found, they will look
+   for the same path without a trailing `/`. This allows putting `list`
+   capabilities in the same path block as most other capabilities for that
+   path, while not providing any extra access if `list` wasn't actually
+   provided there.
+ * Performance Standbys On By Default: If your flavor/license of Vault
+   Enterprise supports Performance Standbys, they are on by default.
+   You can disable this behavior per-node with the `disable_performance_standby`
+   configuration flag.
+ * AWS Secret Engine Roles: The AWS Secret Engine roles are now explicit about
+   the type of AWS credential they are generating; this reduces the ambiguity
+   that existed previously as well as enables new features for specific
+   credential types. Writing role data and generating credentials remain
+   backwards compatible; however, the data returned when reading a role's
+   configuration has changed in backwards-incompatible ways. Anything that
+   depended on reading role data from the AWS secret engine will break until
+   it is updated to work with the new format.
+ * Token Format (Enterprise): Tokens are now represented as a base62 value;
+   tokens in namespaces will have the namespace identifier appended.
+
+FEATURES:
+
+ * **Namespaces (Enterprise)**: A set of features within Vault Enterprise
+   that allows Vault environments to support *Secure Multi-tenancy* within a
+   single Vault Enterprise infrastructure. Through namespaces, Vault
+   administrators can support tenant isolation for teams and individuals as
+   well as empower those individuals to self-manage their own tenant
+   environment.
+ * **Performance Standbys (Enterprise)**: Standby nodes can now service
+   requests that do not modify storage. This provides near-horizontal scaling
+   of a cluster in some workloads, and is the intra-cluster analogue of
+   the existing Performance Replication feature, which replicates to distinct
+   clusters in other datacenters, geos, etc.
+ * **AliCloud OSS Storage**: AliCloud OSS can now be used for Vault storage.
+ * **AliCloud Auth Plugin**: AliCloud's identity services can now be used to
+   grant access to Vault. See the [plugin
+   repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for
+   more information.
+ * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that
+   allows generating credentials to allow access to Azure. See the [plugin
+   repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for
+   more information.
+ * **HA Support for MySQL Storage**: MySQL storage now supports HA.
+ * **ACL Templating**: ACL policies can now be templated using identity Entity,
+   Groups, and Metadata (a short sketch appears after the improvements list
+   below).
+ * **UI Onboarding wizards**: The Vault UI can provide contextual help and
+   guidance, linking out to relevant links or guides on vaultproject.io for
+   various workflows in Vault.
+
+IMPROVEMENTS:
+
+ * agent: Add `exit_after_auth` to be able to use the Agent for a single
+   authentication [[GH-5013](https://github.com/hashicorp/vault/pull/5013)]
+ * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs
+   [[GH-5034](https://github.com/hashicorp/vault/pull/5034)]
+ * cli: Add support for passing parameters to `vault read` operations [[GH-5093](https://github.com/hashicorp/vault/pull/5093)]
+ * secrets/aws: Make credential types more explicit [[GH-4360](https://github.com/hashicorp/vault/pull/4360)]
+ * secrets/nomad: Support for longer token names [[GH-5117](https://github.com/hashicorp/vault/pull/5117)]
+ * secrets/pki: Allow disabling CRL generation [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/azure: Add support for different Azure environments [[GH-4997](https://github.com/hashicorp/vault/pull/4997)]
+ * storage/file: Sort keys in list responses [[GH-5141](https://github.com/hashicorp/vault/pull/5141)]
+ * storage/mysql: Support special characters in database and table names.
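+
+A minimal templating sketch (the mount and policy name are hypothetical, and a
+KV v2 mount at `secret/` is assumed); identity values are interpolated into
+the path at evaluation time:
+
+```
+$ vault policy write per-entity - <<'EOF'
+# {{identity.entity.id}} resolves to the calling token's entity ID,
+# giving each entity its own private subtree.
+path "secret/data/{{identity.entity.id}}/*" {
+  capabilities = ["create", "update", "read", "delete", "list"]
+}
+EOF
+```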
+
+BUG FIXES:
+
+ * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set
+   (IOW, error in this case)
+ * core: Prevent Go's HTTP library from interspersing logs in a different
+   format and/or interleaved [[GH-5135](https://github.com/hashicorp/vault/pull/5135)]
+ * identity: Properly populate `mount_path` and `mount_type` on group lookup
+   [[GH-5074](https://github.com/hashicorp/vault/pull/5074)]
+ * identity: Fix persisting alias metadata [[GH-5188](https://github.com/hashicorp/vault/pull/5188)]
+ * identity: Fix carryover issue from previously fixed race condition that
+   could cause Vault not to start up due to two entities referencing the same
+   alias. These entities are now merged. [[GH-5000](https://github.com/hashicorp/vault/pull/5000)]
+ * replication: Fix issue causing some pages not to flush to storage
+ * secrets/database: Fix inability to update custom SQL statements on
+   database roles. [[GH-5080](https://github.com/hashicorp/vault/pull/5080)]
+ * secrets/pki: Disallow putting the CA's serial on its CRL. While technically
+   legal, doing so inherently means the CRL can't be trusted anyway, so it's
+   not useful and is easy to footgun. [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/gcp,spanner: Fix data races [[GH-5081](https://github.com/hashicorp/vault/pull/5081)]
+
+## 0.10.4 (July 25th, 2018)
+
+SECURITY:
+
+ * Control Groups: The Identity entity associated with a request was not being
+   properly persisted. As a result, the same authorizer could provide more than
+   one authorization.
+
+DEPRECATIONS/CHANGES:
+
+ * Revocations of dynamic secrets leases are now queued/asynchronous rather
+   than synchronous. This allows Vault to take responsibility for revocation
+   even if the initial attempt fails. The previous synchronous behavior can be
+   attained via the `-sync` CLI flag or `sync` API parameter. When in
+   synchronous mode, if the operation results in failure it is up to the user
+   to retry.
+ * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a
+   source of confusion to users as to why Vault would "hang" before returning a
+   5xx error. The Go API client still defaults to two retries.
+ * Identity Entity Alias metadata: You can no longer manually set metadata on
+   entity aliases. All alias data (except the canonical entity ID it refers to)
+   is intended to be managed by the plugin providing the alias information, so
+   allowing it to be set manually didn't make sense.
+
+FEATURES:
+
+ * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either
+   validates signatures locally or uses OIDC Discovery to fetch the current set
+   of keys for signature validation. Various claims can be specified for
+   validation (in addition to the cryptographic signature) and a user and
+   optional groups claim can be used to provide Identity information (a
+   configuration sketch follows this list).
+ * **FoundationDB Storage**: You can now use FoundationDB for storing Vault
+   data.
+ * **UI Control Group Workflow (enterprise)**: The UI will now detect control
+   group responses and provides a workflow to view the status of the request
+   and to authorize requests.
+ * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically
+   authenticate for you across a variety of authentication methods, provide
+   tokens to clients, and keep the tokens renewed, reauthenticating as
+   necessary.
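+
+A minimal configuration sketch for the new method (the discovery URL,
+audience, and role values are hypothetical):
+
+```
+$ vault auth enable jwt
+# Point the method at an OIDC provider for key discovery:
+$ vault write auth/jwt/config \
+    oidc_discovery_url="https://accounts.example.com" \
+    bound_issuer="https://accounts.example.com"
+# Map validated tokens to policies via a role:
+$ vault write auth/jwt/role/dev \
+    bound_audiences="example-client-id" \
+    user_claim="sub" \
+    groups_claim="groups" \
+    policies="dev"
+```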
+
+IMPROVEMENTS:
+
+ * auth/azure: Add support for virtual machine scale sets
+ * auth/gcp: Support multiple bindings for region, zone, and instance group
+ * cli: Add subcommands for interacting with the plugin catalog [[GH-4911](https://github.com/hashicorp/vault/pull/4911)]
+ * cli: Add a `-description` flag to secrets and auth tune subcommands to allow
+   updating an existing secret engine's or auth method's description. This
+   change also allows the description to be unset by providing an empty string.
+ * core: Add config flag to disable non-printable character check [[GH-4917](https://github.com/hashicorp/vault/pull/4917)]
+ * core: A `max_request_size` parameter can now be set per-listener to adjust
+   the maximum allowed size per request [[GH-4824](https://github.com/hashicorp/vault/pull/4824)]
+ * core: Add control group request endpoint to default policy [[GH-4904](https://github.com/hashicorp/vault/pull/4904)]
+ * identity: Identity metadata is now passed through to plugins [[GH-4967](https://github.com/hashicorp/vault/pull/4967)]
+ * replication: Add additional safety checks and logging when replication is
+   in a bad state
+ * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault
+   kv` [[GH-4895](https://github.com/hashicorp/vault/pull/4895)]
+ * secrets/pki: Add the ability to tidy revoked but unexpired certificates
+   [[GH-4916](https://github.com/hashicorp/vault/pull/4916)]
+ * secrets/ssh: Allow Vault to work with single-argument SSH flags [[GH-4825](https://github.com/hashicorp/vault/pull/4825)]
+ * secrets/ssh: SSH executable path can now be configured in the CLI [[GH-4937](https://github.com/hashicorp/vault/pull/4937)]
+ * storage/swift: Add additional configuration options [[GH-4901](https://github.com/hashicorp/vault/pull/4901)]
+ * ui: Choose which auth methods to show to unauthenticated users via
+   `listing_visibility` in the auth method edit forms [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
+ * ui: Authenticate users automatically by passing a wrapped token to the UI via
+   the new `wrapped_token` query parameter [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
+
+BUG FIXES:
+
+ * api: Fix response body being cleared too early [[GH-4987](https://github.com/hashicorp/vault/pull/4987)]
+ * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove
+   secret accessors [[GH-4981](https://github.com/hashicorp/vault/pull/4981)]
+ * auth/aws: Fix updating `max_retries` [[GH-4980](https://github.com/hashicorp/vault/pull/4980)]
+ * auth/kubernetes: Trim trailing whitespace when sending JWT
+ * cli: Fix parsing of environment variables for integer flags [[GH-4925](https://github.com/hashicorp/vault/pull/4925)]
+ * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is
+   sealed [[GH-4874](https://github.com/hashicorp/vault/pull/4874)]
+ * core: Fix issue releasing the leader lock in some circumstances [[GH-4915](https://github.com/hashicorp/vault/pull/4915)]
+ * core: Fix a panic that could happen if the server was shut down while still
+   starting up
+ * core: Fix deadlock that would occur if a leadership loss occurs at the same
+   time as a seal operation [[GH-4932](https://github.com/hashicorp/vault/pull/4932)]
+ * core: Fix issue with auth mounts failing to renew tokens due to policies
+   changing [[GH-4960](https://github.com/hashicorp/vault/pull/4960)]
+ * auth/radius: Fix issue where some radius logins were being canceled too early
+   [[GH-4941](https://github.com/hashicorp/vault/pull/4941)]
+ * core: Fix accidental seal of Vault if we lose leadership during startup
+   [[GH-4924](https://github.com/hashicorp/vault/pull/4924)]
+ * core: Fix standby not being able to forward requests larger than 4MB
+   [[GH-4844](https://github.com/hashicorp/vault/pull/4844)]
+ * core: Avoid panic while processing group memberships [[GH-4841](https://github.com/hashicorp/vault/pull/4841)]
+ * identity: Fix a race condition creating aliases [[GH-4965](https://github.com/hashicorp/vault/pull/4965)]
+ * plugins: Fix being unable to send very large payloads to or from plugins
+   [[GH-4958](https://github.com/hashicorp/vault/pull/4958)]
+ * physical/azure: Long list responses would sometimes be truncated [[GH-4983](https://github.com/hashicorp/vault/pull/4983)]
+ * replication: Allow replication status requests to be processed while in
+   merkle sync
+ * replication: Ensure merkle reindex flushes all changes to storage immediately
+ * replication: Fix a case where a network interruption could cause a secondary
+   to be unable to reconnect to a primary
+ * secrets/pki: Fix permitted DNS domains performing improper validation
+   [[GH-4863](https://github.com/hashicorp/vault/pull/4863)]
+ * secrets/database: Fix panic during DB creds revocation [[GH-4846](https://github.com/hashicorp/vault/pull/4846)]
+ * ui: Fix usage of cubbyhole backend in the UI [[GH-4851](https://github.com/hashicorp/vault/pull/4851)]
+ * ui: Fix toggle state when a secret is JSON-formatted [[GH-4913](https://github.com/hashicorp/vault/pull/4913)]
+ * ui: Fix coercion of falsey values to empty string when editing secrets as
+   JSON [[GH-4977](https://github.com/hashicorp/vault/pull/4977)]
+
+## 0.10.3 (June 20th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * In the audit log and in client responses, policies are now split into three
+   parameters: policies that came only from tokens, policies that came only
+   from Identity, and the combined set. Any previous location of policies via
+   the API now contains the full, combined set.
+ * When a token is tied to an Identity entity and the entity is deleted, the
+   token will no longer be usable, regardless of the validity of the token
+   itself.
+ * When authentication succeeds but no policies were defined for that specific
+   user, most auth methods would allow a token to be generated but a few would
+   reject the authentication, namely `ldap`, `okta`, and `radius`. Since the
+   `default` policy is added by Vault's core, this would incorrectly reject
+   valid authentications before they would in fact be granted policies. This
+   inconsistency has been addressed; valid authentications for these methods
+   now succeed even if no policy was specifically defined in that method for
+   that user.
+
+FEATURES:
+
+ * Root Rotation for Active Directory: You can now command Vault to rotate the
+   configured root credentials used in the AD secrets engine, to ensure that
+   only Vault knows the credentials it's using.
+ * URI SANs in PKI: You can now configure URI Subject Alternate Names in the
+   `pki` backend. Roles can limit which SANs are allowed via globbing.
+ * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2
+   path back to a previous non-deleted/non-destroyed version. The previous
+   version becomes the next/newest version for the path (see the example after
+   this list).
+ * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token
+   generated from AppRole will be bound.
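+
+For example (hypothetical KV v2 path), rolling back promotes the selected
+older version to be the new current version:
+
+```
+# Restore version 2 of the secret; the restored data becomes the newest
+# version of the path rather than rewriting history.
+$ vault kv rollback -version=2 secret/my-app
+```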
+ +IMPROVEMENTS: + + * approle: Return 404 instead of 202 on invalid role names during POST + operations [[GH-4778](https://github.com/hashicorp/vault/pull/4778)] + * core: Add idle and initial header read/TLS handshake timeouts to connections + to ensure server resources are cleaned up [[GH-4760](https://github.com/hashicorp/vault/pull/4760)] + * core: Report policies in token, identity, and full sets [[GH-4747](https://github.com/hashicorp/vault/pull/4747)] + * secrets/databases: Add `create`/`update` distinction for connection + configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] + * secrets/databases: Add `create`/`update` distinction for role configurations + [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] + * secrets/databases: Add best-effort revocation logic for use when a role has + been deleted [[GH-4782](https://github.com/hashicorp/vault/pull/4782)] + * secrets/kv: Add `kv rollback` [[GH-4774](https://github.com/hashicorp/vault/pull/4774)] + * secrets/pki: Add URI SANs support [[GH-4675](https://github.com/hashicorp/vault/pull/4675)] + * secrets/ssh: Allow standard SSH command arguments to be used, without + requiring username@hostname syntax [[GH-4710](https://github.com/hashicorp/vault/pull/4710)] + * storage/consul: Add context support so that requests are cancelable + [[GH-4739](https://github.com/hashicorp/vault/pull/4739)] + * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` + API [[GH-4827](https://github.com/hashicorp/vault/pull/4827)] + * ui: Secret values are obfuscated by default and visibility is toggleable [[GH-4422](https://github.com/hashicorp/vault/pull/4422)] + +BUG FIXES: + + * auth/approle: Fix panic due to metadata being nil [[GH-4719](https://github.com/hashicorp/vault/pull/4719)] + * auth/aws: Fix delete path for tidy operations [[GH-4799](https://github.com/hashicorp/vault/pull/4799)] + * core: Optimizations to remove some speed regressions due to the + security-related changes in 0.10.2 + * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [[GH-4721](https://github.com/hashicorp/vault/pull/4721)] + * secrets/database: Fix default MySQL root rotation statement [[GH-4748](https://github.com/hashicorp/vault/pull/4748)] + * secrets/gcp: Fix renewal for GCP account keys + * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands + incorrectly operating on a root+mount path instead of being an error + [[GH-4726](https://github.com/hashicorp/vault/pull/4726)] + * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC + keys, fixing lookup on some Thales devices + * replication: Fix issue enabling replication when a non-auth mount and auth + mount have the same name + * auth/kubernetes: Fix issue verifying ECDSA signed JWTs + * ui: add missing edit mode for auth method configs [[GH-4770](https://github.com/hashicorp/vault/pull/4770)] + +## 0.10.2 (June 6th, 2018) + +SECURITY: + + * Tokens: A race condition was identified that could occur if a token's + lease expired while Vault was not running. In this case, when Vault came + back online, sometimes it would properly revoke the lease but other times it + would not, leading to a Vault token that no longer had an expiration and had + essentially unlimited lifetime. This race was per-token, not all-or-nothing + for all tokens that may have expired during Vault's downtime. We have fixed + the behavior and put extra checks in place to help prevent any similar + future issues. 
In addition, the logic we have put in place ensures that such
+ lease-less tokens can no longer be used (unless they are root tokens that
+ never had an expiration to begin with).
+ * Convergent Encryption: The version 2 algorithm used in `transit`'s
+ convergent encryption feature is susceptible to offline
+ plaintext-confirmation attacks. As a result, we are introducing a version 3
+ algorithm that mitigates this. If you are currently using convergent
+ encryption, we recommend upgrading, rotating your encryption key (the new
+ key version will use the new algorithm), and rewrapping your data (the
+ `rewrap` endpoint can be used to allow a relatively non-privileged user to
+ perform the rewrapping while never divulging the plaintext). A sketch of
+ this flow appears after the FEATURES list below.
+ * AppRole case-sensitive role name secret-id leaking: When using a mixed-case
+ role name via AppRole, deleting a secret-id via accessor or other operations
+ could end up leaving the secret-id behind, still valid but without an
+ accessor. This has now been fixed, and we have put checks in place to
+ prevent these secret-ids from being used.
+
+DEPRECATIONS/CHANGES:
+
+ * PKI duration return types: The PKI backend now returns durations (e.g. when
+ reading a role) as an integer number of seconds instead of a Go-style
+ string, in line with how the rest of Vault's API returns durations.
+
+FEATURES:
+
+ * Active Directory Secrets Engine: A new `ad` secrets engine has been created
+ which allows Vault to rotate and provide credentials for configured AD
+ accounts.
+ * Rekey Verification: Rekey operations can now require verification. This
+ turns on a two-phase process where the existing key shares authorize
+ generating a new master key, and a threshold of the new, returned key shares
+ must be provided to verify that they have been successfully received in
+ order for the actual master key to be rotated.
+ * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods:
+ You can now limit authentication to specific CIDRs; these will also be
+ encoded in resultant tokens to limit their use.
+ * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete
+ commands in a CLI that can be accessed from the nav bar. Complex inputs such
+ as JSON files are not currently supported. This surfaces features otherwise
+ unsupported in Vault's UI.
+ * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault
+ can now be used as a supported seal for Auto Unseal and Seal Wrapping.
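+
+A minimal sketch of the rotate-then-rewrap flow recommended in the SECURITY
+notes above, using the Go API client (`github.com/hashicorp/vault/api`); the
+mount path `transit`, key name `my-key`, and the stored ciphertext are
+illustrative assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Rotate the key so new encryptions use the newest key version.
+	if _, err := client.Logical().Write("transit/keys/my-key/rotate", nil); err != nil {
+		log.Fatal(err)
+	}
+
+	// Rewrap existing ciphertext under the newest key version; the plaintext
+	// is never returned to the caller. For convergent/derived keys, the same
+	// base64-encoded "context" used at encryption time must also be supplied.
+	secret, err := client.Logical().Write("transit/rewrap/my-key",
+		map[string]interface{}{"ciphertext": "vault:v1:..."}) // previously stored ciphertext
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(secret.Data["ciphertext"])
+}
+```
+
+Because `rewrap` never divulges the plaintext, this step can be delegated to a
+relatively non-privileged user or batch job.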
+
+IMPROVEMENTS:
+
+ * api: Close renewer's doneCh when the renewer is stopped, so that programs
+ expecting a final value through doneCh behave correctly [[GH-4472](https://github.com/hashicorp/vault/pull/4472)]
+ * auth/cert: Break out `allowed_names` into component parts and add
+ `allowed_uri_sans` [[GH-4231](https://github.com/hashicorp/vault/pull/4231)]
+ * auth/ldap: Obfuscate error messages pre-bind for greater security [[GH-4700](https://github.com/hashicorp/vault/pull/4700)]
+ * cli: `vault login` now supports a `-no-print` flag to suppress printing
+ token information but still allow storing into the token helper [[GH-4454](https://github.com/hashicorp/vault/pull/4454)]
+ * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and
+ CKM_RSA_PKCS_OAEP mechanisms
+ * core/pkcs11 (enterprise): HSM slots can now be selected by token label
+ instead of just slot number
+ * core/token: Optimize token revocation by removing unnecessary list call
+ against the storage backend when calling revoke-orphan on tokens [[GH-4465](https://github.com/hashicorp/vault/pull/4465)]
+ * core/token: Refactor token revocation logic to not block on the call when
+ underlying leases are pending revocation by moving the expiration logic to
+ the expiration manager [[GH-4512](https://github.com/hashicorp/vault/pull/4512)]
+ * expiration: Allow revoke-prefix and revoke-force to work on single leases as
+ well as prefixes [[GH-4450](https://github.com/hashicorp/vault/pull/4450)]
+ * identity: Return parent group info when reading a group [[GH-4648](https://github.com/hashicorp/vault/pull/4648)]
+ * identity: Provide more contextual key information when listing entities,
+ groups, and aliases
+ * identity: Passthrough EntityID to backends [[GH-4663](https://github.com/hashicorp/vault/pull/4663)]
+ * identity: Adds ability to request entity information through system view
+ [[GH-4681](https://github.com/hashicorp/vault/pull/4681)]
+ * secret/pki: Add custom extended key usages [[GH-4667](https://github.com/hashicorp/vault/pull/4667)]
+ * secret/pki: Add custom PKIX serial numbers [[GH-4694](https://github.com/hashicorp/vault/pull/4694)]
+ * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode
+ [[GH-4673](https://github.com/hashicorp/vault/pull/4673)]
+ * storage/file: Attempt in some error conditions to do more cleanup [[GH-4684](https://github.com/hashicorp/vault/pull/4684)]
+ * ui: Wrapping lookup now displays the path [[GH-4644](https://github.com/hashicorp/vault/pull/4644)]
+ * ui: Identity interface now has more inline actions to make editing and adding
+ aliases to an entity or group easier [[GH-4502](https://github.com/hashicorp/vault/pull/4502)]
+ * ui: Identity interface now lists groups by name [[GH-4655](https://github.com/hashicorp/vault/pull/4655)]
+ * ui: Permission denied errors still render the sidebar in the Access section
+ [[GH-4658](https://github.com/hashicorp/vault/pull/4658)]
+ * replication: Improve performance of index page flushes and WAL garbage
+ collecting
+
+BUG FIXES:
+
+ * auth/approle: Make invalid role_id a 400 error instead of 500 [[GH-4470](https://github.com/hashicorp/vault/pull/4470)]
+ * auth/cert: Fix Identity alias using serial number instead of common name
+ [[GH-4475](https://github.com/hashicorp/vault/pull/4475)]
+ * cli: Fix panic running `vault token capabilities` with multiple paths
+ [[GH-4552](https://github.com/hashicorp/vault/pull/4552)]
+ * core: When using the `use_always` option with PROXY protocol support, do not
+ require `authorized_addrs` to be set
[[GH-4065](https://github.com/hashicorp/vault/pull/4065)]
+ * core: Fix panic when certain combinations of policy paths and allowed/denied
+ parameters were used [[GH-4582](https://github.com/hashicorp/vault/pull/4582)]
+ * secret/gcp: Make `bound_region` able to use short names
+ * secret/kv: Fix response wrapping for KV v2 [[GH-4511](https://github.com/hashicorp/vault/pull/4511)]
+ * secret/kv: Fix address flag not being honored correctly [[GH-4617](https://github.com/hashicorp/vault/pull/4617)]
+ * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative,
+ clearing all certs [[GH-4641](https://github.com/hashicorp/vault/pull/4641)]
+ * secret/pki: Fix `key_type` not being allowed to be set to `any` [[GH-4595](https://github.com/hashicorp/vault/pull/4595)]
+ * secret/pki: Fix path length parameter being ignored when using
+ `use_csr_values` and signing an intermediate CA cert [[GH-4459](https://github.com/hashicorp/vault/pull/4459)]
+ * secret/ssh: Only append UserKnownHostsFile to args when configured with a
+ value [[GH-4674](https://github.com/hashicorp/vault/pull/4674)]
+ * storage/dynamodb: Fix listing when one child is left within a nested path
+ [[GH-4570](https://github.com/hashicorp/vault/pull/4570)]
+ * storage/gcs: Fix swallowing an error on connection close [[GH-4691](https://github.com/hashicorp/vault/pull/4691)]
+ * ui: Fix HMAC algorithm in transit [[GH-4604](https://github.com/hashicorp/vault/pull/4604)]
+ * ui: Fix unwrap of auth responses via the UI's unwrap tool [[GH-4611](https://github.com/hashicorp/vault/pull/4611)]
+ * ui (enterprise): Fix parsing of version string that blocked some users from seeing
+ enterprise-specific pages in the UI [[GH-4547](https://github.com/hashicorp/vault/pull/4547)]
+ * ui: Fix incorrect capabilities path check when viewing policies [[GH-4566](https://github.com/hashicorp/vault/pull/4566)]
+ * replication: Fix error while running plugins on a newly created replication
+ secondary
+ * replication: Fix issue with token store lookups after a secondary's mount table
+ is invalidated.
+ * replication: Improve startup time when a large merkle index is in use.
+ * replication: Fix panic when storage becomes unreachable during unseal.
+
+## 0.10.1/0.9.7 (April 25th, 2018)
+
+The following two items are in both 0.9.7 and 0.10.1. They only affect
+Enterprise, and as such 0.9.7 is an Enterprise-only release:
+
+SECURITY:
+
+ * EGPs: A regression affecting 0.9.6 and 0.10.0 caused EGPs to not be applied
+ correctly if an EGP is updated in a running Vault after initial write or
+ after it is loaded on unseal. This has been fixed.
+
+BUG FIXES:
+
+ * Fixed an upgrade issue affecting performance secondaries when migrating from
+ a version that did not include Identity to one that did.
+
+All other content in this release is for 0.10.1 only.
+
+DEPRECATIONS/CHANGES:
+
+ * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against
+ v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server
+ and CLI versions is required.
+ * Mount information visibility: Users that have access to any path within a
+ mount can now see information about that mount, such as its type and
+ options, via some API calls (see the sketch below).
+ * Identity and Local Mounts: Local mounts would allow creating Identity
+ entities, but these could not be used successfully (even locally) in
+ replicated scenarios. We have now disallowed entities and groups from
+ being created for local mounts in the first place.
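+
+For the mount information visibility change above, here is a sketch of one
+such call using the Go client (`github.com/hashicorp/vault/api`); the
+`sys/internal/ui/mounts` endpoint is an assumption based on the UI behavior
+described in these notes, and the mount path `secret/my-app` is illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// A token with access to some path inside a mount can read that mount's
+	// type and options, even without read access to sys/mounts itself.
+	secret, err := client.Logical().Read("sys/internal/ui/mounts/secret/my-app")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("type=%v options=%v\n", secret.Data["type"], secret.Data["options"])
+}
+```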
+ +FEATURES: + + * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the + client IP seen by Vault. See the [TCP listener configuration + page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for + details. + * CIDR IP Binding for Tokens: Tokens now support being bound to specific + CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be + expanded to other authentication backends over time. + * `vault kv patch` command: A new `kv patch` helper command that allows + modifying only some values in existing data at a K/V path, but uses + check-and-set to ensure that this modification happens safely. + * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs + local to the cluster. This enables performance secondaries to generate and + consume secret IDs without contacting the primary. + * AES-GCM Support for PKCS#11 [BETA] (Enterprise): For supporting HSMs, + AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently + only been fully tested on AWS CloudHSM. + * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal + mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, + and migration between key and encryption types, such as from AES-CBC to + AES-GCM, can be performed at the same time (where supported). + +IMPROVEMENTS: + + * auth/approle: Support for cluster local secret IDs. This enables secondaries + to generate secret IDs without contacting the primary [[GH-4427](https://github.com/hashicorp/vault/pull/4427)] + * auth/token: Add to the token lookup response, the policies inherited due to + identity associations [[GH-4366](https://github.com/hashicorp/vault/pull/4366)] + * auth/token: Add CIDR binding to token roles [[GH-815](https://github.com/hashicorp/vault/pull/815)] + * cli: Add `vault kv patch` [[GH-4432](https://github.com/hashicorp/vault/pull/4432)] + * core: Add X-Forwarded-For support [[GH-4380](https://github.com/hashicorp/vault/pull/4380)] + * core: Add token CIDR-binding support [[GH-815](https://github.com/hashicorp/vault/pull/815)] + * identity: Add the ability to disable an entity. Disabling an entity does not + revoke associated tokens, but while the entity is disabled they cannot be + used. 
[[GH-4353](https://github.com/hashicorp/vault/pull/4353)]
+ * physical/consul: Allow tuning of session TTL and lock wait time [[GH-4352](https://github.com/hashicorp/vault/pull/4352)]
+ * replication: Dynamically adjust WAL cleanup over a period of time based on
+ the rate of writes committed
+ * secret/ssh: Update dynamic key install script to use shell locking to avoid
+ concurrent modifications [[GH-4358](https://github.com/hashicorp/vault/pull/4358)]
+ * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of
+ engines will show you the ones you implicitly have access to (because you have
+ access to secrets in those engines) [[GH-4439](https://github.com/hashicorp/vault/pull/4439)]
+
+BUG FIXES:
+
+ * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts
+ [[GH-4430](https://github.com/hashicorp/vault/pull/4430)]
+ * identity: Persist entity memberships in external identity groups across
+ mounts [[GH-4365](https://github.com/hashicorp/vault/pull/4365)]
+ * identity: Fix error preventing authentication using local mounts on
+ performance secondary replication clusters [[GH-4407](https://github.com/hashicorp/vault/pull/4407)]
+ * replication: Fix issue causing secondaries to not connect properly to a
+ pre-0.10 primary until the primary was upgraded
+ * secret/gcp: Fix panic on rollback when a roleset wasn't created properly
+ [[GH-4344](https://github.com/hashicorp/vault/pull/4344)]
+ * secret/gcp: Fix panic on renewal
+ * ui: Fix IE11 form submissions in a few parts of the application [[GH-4378](https://github.com/hashicorp/vault/pull/4378)]
+ * ui: Fix IE file saving on policy pages and init screens [[GH-4376](https://github.com/hashicorp/vault/pull/4376)]
+ * ui: Fixed an issue where the AWS secret backend would show the wrong menu
+ [[GH-4371](https://github.com/hashicorp/vault/pull/4371)]
+ * ui: Fixed an issue where policies with commas would not render in the
+ interface properly [[GH-4398](https://github.com/hashicorp/vault/pull/4398)]
+ * ui: Corrected the saving of mount tune ttls for auth methods [[GH-4431](https://github.com/hashicorp/vault/pull/4431)]
+ * ui: Credentials generation no longer checks capabilities before making
+ API calls. This should fix needing "update" capabilities to read IAM
+ credentials in the AWS secrets engine [[GH-4446](https://github.com/hashicorp/vault/pull/4446)]
+
+## 0.10.0 (April 10th, 2018)
+
+SECURITY:
+
+ * Log sanitization for Combined Database Secret Engine: In certain failure
+ scenarios with incorrectly formatted connection URLs, the raw connection
+ errors were being returned to the user with the configured database
+ credentials. Errors are now sanitized before being returned to the user.
+
+DEPRECATIONS/CHANGES:
+
+ * Database plugin compatibility: The database plugin interface was enhanced to
+ support some additional functionality related to root credential rotation
+ and supporting templated URL strings. The changes were made in a
+ backwards-compatible way and all builtin plugins were updated with the new
+ features. Custom plugins not built into Vault will need to be upgraded to
+ support templated URL strings and root rotation. Additionally, the
+ Initialize method was deprecated in favor of a new Init method that supports
+ persisting configuration modifications made within the plugin back to the
+ primary data store.
+ * Removal of returned secret information: For a long time Vault has returned + configuration given to various secret engines and auth methods with secret + values (such as secret API keys or passwords) still intact, and with a + warning to the user on write that anyone with read access could see the + secret. This was mostly done to make it easy for tools like Terraform to + judge whether state had drifted. However, it also feels quite un-Vault-y to + do this and we've never felt very comfortable doing so. In 0.10 we have gone + through and removed this behavior from the various backends; fields which + contained secret values are simply no longer returned on read. We are + working with the Terraform team to make changes to their provider to + accommodate this as best as possible, and users of other tools may have to + make adjustments, but in the end we felt that the ends did not justify the + means and we needed to prioritize security over operational convenience. + * LDAP auth method case sensitivity: We now treat usernames and groups + configured locally for policy assignment in a case insensitive fashion by + default. Existing configurations will continue to work as they do now; + however, the next time a configuration is written `case_sensitive_names` + will need to be explicitly set to `true`. + * TTL handling within core: All lease TTL handling has been centralized within + the core of Vault to ensure consistency across all backends. Since this was + previously delegated to individual backends, there may be some slight + differences in TTLs generated from some backends. + * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` + by default at initialization time (it will still be available in `dev` + mode). + +FEATURES: + + * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some + features are only available with a supporting version of Vault, but the code + base is entirely open. + * Versioned K/V: The `kv` backend has been completely revamped, featuring + flexible versioning of values, check-and-set protections, and more. A new + `vault kv` subcommand allows friendly interactions with it. Existing mounts + of the `kv` backend can be upgraded to the new versioned mode (downgrades + are not currently supported). The old "passthrough" mode is still the + default for new mounts; versioning can be turned on by setting the + `-version=2` flag for the `vault secrets enable` command. + * Database Root Credential Rotation: Database configurations can now rotate + their own configured admin/root credentials, allowing configured credentials + for a database connection to be rotated immediately after sending them into + Vault, invalidating the old credentials and ensuring only Vault knows the + actual valid values. + * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that + allows authenticating Azure machines to Vault using Azure's Managed Service + Identity credentials. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more + information. + * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows + generating secrets to allow access to GCP. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more + information. + * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit + logs can be turned off for specific keys in the request input map and + response `data` map on a per-mount basis. 
+ * Passthrough Request Headers: Request headers can now be selectively passed
+ through to backends on a per-mount basis. This is useful in various cases
+ when plugins are interacting with external services.
+ * HA for Google Cloud Storage: The GCS storage type now supports HA.
+ * UI support for identity: Add and edit entities, groups, and their associated
+ aliases.
+ * UI auth method support: Enable, disable, and configure all of the built-in
+ authentication methods.
+ * UI (Enterprise): View and edit Sentinel policies.
+
+IMPROVEMENTS:
+
+ * core: Centralize TTL generation for leases in core [[GH-4230](https://github.com/hashicorp/vault/pull/4230)]
+ * identity: API to update group-alias by ID [[GH-4237](https://github.com/hashicorp/vault/pull/4237)]
+ * secret/cassandra: Update Cassandra storage delete function to not use batch
+ operations [[GH-4054](https://github.com/hashicorp/vault/pull/4054)]
+ * storage/mysql: Allow setting max idle connections and connection lifetime
+ [[GH-4211](https://github.com/hashicorp/vault/pull/4211)]
+ * storage/gcs: Add HA support [[GH-4226](https://github.com/hashicorp/vault/pull/4226)]
+ * ui: Add Nomad to the list of available secret engines
+ * ui: Adds ability to set static headers to be returned by the UI
+
+BUG FIXES:
+
+ * api: Fix retries not working [[GH-4322](https://github.com/hashicorp/vault/pull/4322)]
+ * auth/gcp: Invalidate clients on config change
+ * auth/token: Revoke-orphan and tidy operations now correctly clean up the
+ parent prefix entry in the underlying storage backend. These operations also
+ mark corresponding child tokens as orphans by removing the parent/secondary
+ index from the entries. [[GH-4193](https://github.com/hashicorp/vault/pull/4193)]
+ * command: Re-add `-mfa` flag and migrate to OSS binary [[GH-4223](https://github.com/hashicorp/vault/pull/4223)]
+ * core: Fix issue occurring from mounting two auth backends with the same path
+ with one mount having `auth/` in front [[GH-4206](https://github.com/hashicorp/vault/pull/4206)]
+ * mfa: Invalidation of MFA configurations (Enterprise)
+ * replication: Fix a panic on some non-64-bit platforms
+ * replication: Fix invalidation of policies on performance secondaries
+ * secret/pki: When tidying if a value is unexpectedly nil, delete it and move
+ on [[GH-4214](https://github.com/hashicorp/vault/pull/4214)]
+ * storage/s3: Fix panic if S3 returns no Content-Length header [[GH-4222](https://github.com/hashicorp/vault/pull/4222)]
+ * ui: Fixed an issue where the UI was checking incorrect paths when operating
+ on transit keys. Capabilities are now checked when attempting to encrypt /
+ decrypt, etc.
+ * ui: Fixed IE 11 layout issues and JS errors that would stop the application
+ from running.
+ * ui: Fixed the link that gets rendered when a user doesn't have permissions
+ to view the root of a secret engine. The link now sends them back to the list
+ of secret engines.
+ * replication: Fix issue with DR secondaries when using mount-specified local
+ paths.
+ * cli: Fix an issue where generating a DR operation token would not output the
+ token [[GH-4328](https://github.com/hashicorp/vault/pull/4328)]
+
+## 0.9.6 (March 20th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * The AWS authentication backend now allows binds for inputs as either a
+ comma-delimited string or a string array. However, to keep consistency with
+ input and output, when reading a role the binds will now be returned as
+ string arrays rather than strings.
+ * In order to prefix-match IAM role and instance profile ARNs in the AWS auth
+ backend, you now must explicitly opt in by adding a `*` to the end of the
+ ARN. Existing configurations will be upgraded automatically, but when
+ writing a new role configuration the updated behavior will be used.
+
+FEATURES:
+
+ * Replication Activation Enhancements: When activating a replication
+ secondary, a public key can now be fetched first from the target cluster.
+ This public key can be provided to the primary when requesting the
+ activation token. If provided, the public key will be used to perform a
+ Diffie-Hellman key exchange resulting in a shared key that encrypts the
+ contents of the activation token. The purpose is to protect against
+ accidental disclosure of the contents of the token if unwrapped by the wrong
+ party, given that the contents of the token are highly sensitive. If
+ accidentally unwrapped, the contents of the token are not usable by the
+ unwrapping party. It is important to note that just as a malicious operator
+ could unwrap the contents of the token, a malicious operator can pretend to
+ be a secondary and complete the Diffie-Hellman exchange on their own; this
+ feature provides defense in depth but still requires due diligence around
+ replication activation, including multiple eyes on the commands/tokens and
+ proper auditing.
+
+IMPROVEMENTS:
+
+ * api: Update renewer grace period logic. It is no longer static; the renewer
+ now dynamically calculates a grace period based on the current lease
+ duration after each renew. [[GH-4090](https://github.com/hashicorp/vault/pull/4090)]
+ * auth/approle: Allow array input for bound_cidr_list [[GH-4078](https://github.com/hashicorp/vault/pull/4078)]
+ * auth/aws: Allow using lists in role bind parameters [[GH-3907](https://github.com/hashicorp/vault/pull/3907)]
+ * auth/aws: Allow binding by EC2 instance IDs [[GH-3816](https://github.com/hashicorp/vault/pull/3816)]
+ * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs
+ [[GH-4071](https://github.com/hashicorp/vault/pull/4071)]
+ * auth/ldap: Set a very large size limit on queries [[GH-4169](https://github.com/hashicorp/vault/pull/4169)]
+ * core: Log info notifications of revoked leases for all leases/reasons, not
+ just expirations [[GH-4164](https://github.com/hashicorp/vault/pull/4164)]
+ * physical/couchdb: Removed limit on the listing of items [[GH-4149](https://github.com/hashicorp/vault/pull/4149)]
+ * secret/pki: Support certificate policies [[GH-4125](https://github.com/hashicorp/vault/pull/4125)]
+ * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to
+ improve compatibility with some ADFS scenarios [[GH-3883](https://github.com/hashicorp/vault/pull/3883)]
+ * secret/transit: Allow selecting signature algorithm as well as hash
+ algorithm when signing/verifying [[GH-4018](https://github.com/hashicorp/vault/pull/4018)]
+ * server: Make sure `tls_disable_client_cert` is actually a true value rather
+ than just set [[GH-4049](https://github.com/hashicorp/vault/pull/4049)]
+ * storage/dynamodb: Allow specifying max retries for dynamo client [[GH-4115](https://github.com/hashicorp/vault/pull/4115)]
+ * storage/gcs: Allow specifying chunk size for transfers, which can reduce
+ memory utilization [[GH-4060](https://github.com/hashicorp/vault/pull/4060)]
+ * sys/capabilities: Add the ability to use multiple paths for capability
+ checking [[GH-3663](https://github.com/hashicorp/vault/pull/3663)]
+
+BUG FIXES:
+
+ * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also
+ set
[[GH-4107](https://github.com/hashicorp/vault/pull/4107)] + * auth/okta: Fix honoring configured `max_ttl` value [[GH-4110](https://github.com/hashicorp/vault/pull/4110)] + * auth/token: If a periodic token being issued has a period greater than the + max_lease_ttl configured on the token store mount, truncate it. This matches + renewal behavior; before it was inconsistent between issuance and renewal. + [[GH-4112](https://github.com/hashicorp/vault/pull/4112)] + * cli: Improve error messages around `vault auth help` when there is no CLI + helper for a particular method [[GH-4056](https://github.com/hashicorp/vault/pull/4056)] + * cli: Fix autocomplete installation when using Fish as the shell [[GH-4094](https://github.com/hashicorp/vault/pull/4094)] + * secret/database: Properly honor mount-tuned max TTL [[GH-4051](https://github.com/hashicorp/vault/pull/4051)] + * secret/ssh: Return `key_bits` value when reading a role [[GH-4098](https://github.com/hashicorp/vault/pull/4098)] + * sys: When writing policies on a performance replication secondary, properly + forward requests to the primary [[GH-4129](https://github.com/hashicorp/vault/pull/4129)] + +## 0.9.5 (February 26th, 2018) + +IMPROVEMENTS: + + * auth: Allow sending default_lease_ttl and max_lease_ttl values when enabling + auth methods. [[GH-4019](https://github.com/hashicorp/vault/pull/4019)] + * secret/database: Add list functionality to `database/config` endpoint + [[GH-4026](https://github.com/hashicorp/vault/pull/4026)] + * physical/consul: Allow setting a specific service address [[GH-3971](https://github.com/hashicorp/vault/pull/3971)] + * replication: When bootstrapping a new secondary, if the initial cluster + connection fails, Vault will attempt to roll back state so that + bootstrapping can be tried again, rather than having to recreate the + downstream cluster. This will still require fetching a new secondary + activation token. + +BUG FIXES: + + * auth/aws: Update libraries to fix regression verifying PKCS#7 identity + documents [[GH-4014](https://github.com/hashicorp/vault/pull/4014)] + * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names + in their DNS SANs to be used for Vault's TLS connections [[GH-4028](https://github.com/hashicorp/vault/pull/4028)] + * replication: Fix issue with a performance secondary/DR primary node losing + its DR primary status when performing an update-primary operation + * replication: Fix issue where performance secondaries could be unable to + automatically connect to a performance primary after that performance + primary has been promoted to a DR primary from a DR secondary + * ui: Fix behavior when a value contains a `.` + +## 0.9.4 (February 20th, 2018) + +SECURITY: + + * Role Tags used with the EC2 style of AWS auth were being improperly parsed; + as a result they were not being used to properly restrict values. + Implementations following our suggestion of using these as defense-in-depth + rather than the only source of restriction should not have significant + impact. + +FEATURES: + + * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt + with ChaCha20-Poly1305 in `transit`. Key derivation and convergent + encryption is also supported. + * **Okta Push support in Okta Auth Backend**: If a user account has MFA + required within Okta, an Okta Push MFA flow can be used to successfully + finish authentication. 
+ * **PKI Improvements**: Custom OID subject alternate names can now be set,
+ subject to allow restrictions that support globbing. Additionally, Country,
+ Locality, Province, Street Address, and Postal Code can now be set in
+ certificate subjects.
+ * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage
+ * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for
+ Vault storage
+
+IMPROVEMENTS:
+
+ * auth/centrify: Add CLI helper
+ * audit: Always log failure metrics, even if zero, to ensure the values appear
+ on dashboards [[GH-3937](https://github.com/hashicorp/vault/pull/3937)]
+ * cli: Disable color when output is not a TTY [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * cli: Add `-format` flag to all subcommands [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * cli: Do not display deprecation warnings when the format is not table
+ [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * core: If over a predefined lease count (256k), log a warning not more than
+ once a minute. Too many leases can be problematic for many of the storage
+ backends and often this number of leases is indicative of a need for
+ workflow improvements. [[GH-3957](https://github.com/hashicorp/vault/pull/3957)]
+ * secret/nomad: Have generated ACL tokens cap out at 64 characters [[GH-4009](https://github.com/hashicorp/vault/pull/4009)]
+ * secret/pki: Country, Locality, Province, Street Address, and Postal Code can
+ now be set on certificates [[GH-3992](https://github.com/hashicorp/vault/pull/3992)]
+ * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in
+ issued certs; allowed values can be set per role and support globbing
+ [[GH-3889](https://github.com/hashicorp/vault/pull/3889)]
+ * secret/pki: Add a flag to make the common name optional on certs [[GH-3940](https://github.com/hashicorp/vault/pull/3940)]
+ * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally,
+ properly handle IDNA transformations for these DNS names [[GH-3953](https://github.com/hashicorp/vault/pull/3953)]
+ * secret/ssh: Add `valid-principals` flag to CLI for CA mode [[GH-3922](https://github.com/hashicorp/vault/pull/3922)]
+ * storage/manta: Add Manta storage [[GH-3270](https://github.com/hashicorp/vault/pull/3270)]
+ * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine.
+
+BUG FIXES:
+ * api/renewer: Honor increment value in renew auth calls [[GH-3904](https://github.com/hashicorp/vault/pull/3904)]
+ * auth/approle: Fix inability to use limited-use-count secret IDs on
+ replication performance secondaries
+ * auth/approle: Cleanup of secret ID accessors during tidy and removal of
+ dangling accessor entries [[GH-3924](https://github.com/hashicorp/vault/pull/3924)]
+ * auth/aws-ec2: Avoid masking of role tag response [[GH-3941](https://github.com/hashicorp/vault/pull/3941)]
+ * auth/cert: Verify DNS SANs in the authenticating certificate [[GH-3982](https://github.com/hashicorp/vault/pull/3982)]
+ * auth/okta: Return configured durations as seconds, not nanoseconds [[GH-3871](https://github.com/hashicorp/vault/pull/3871)]
+ * auth/okta: Get all okta groups for a user vs. default 200 limit [[GH-4034](https://github.com/hashicorp/vault/pull/4034)]
+ * auth/token: Token creation via the CLI no longer forces periodic token
+ creation. Passing an explicit zero value for the period no longer creates
+ periodic tokens.
[[GH-3880](https://github.com/hashicorp/vault/pull/3880)]
+ * command: Fix interpreted formatting directives when printing raw fields
+ [[GH-4005](https://github.com/hashicorp/vault/pull/4005)]
+ * command: Correctly format output when using -field and -format flags at the
+ same time [[GH-3987](https://github.com/hashicorp/vault/pull/3987)]
+ * command/rekey: Re-add lost `stored-shares` parameter [[GH-3974](https://github.com/hashicorp/vault/pull/3974)]
+ * command/ssh: Create and reuse the api client [[GH-3909](https://github.com/hashicorp/vault/pull/3909)]
+ * command/status: Fix panic when status returns 500 from leadership lookup
+ [[GH-3998](https://github.com/hashicorp/vault/pull/3998)]
+ * identity: Fix race when creating entities [[GH-3932](https://github.com/hashicorp/vault/pull/3932)]
+ * plugin/gRPC: Fixed an issue with list requests and raw responses coming from
+ plugins using gRPC transport [[GH-3881](https://github.com/hashicorp/vault/pull/3881)]
+ * plugin/gRPC: Fix panic when special paths are not set [[GH-3946](https://github.com/hashicorp/vault/pull/3946)]
+ * secret/pki: Verify a name is a valid hostname before adding to DNS SANs
+ [[GH-3918](https://github.com/hashicorp/vault/pull/3918)]
+ * secret/transit: Fix auditing when reading a key after it has been backed up
+ or restored [[GH-3919](https://github.com/hashicorp/vault/pull/3919)]
+ * secret/transit: Fix storage/memory consistency when persistence fails
+ [[GH-3959](https://github.com/hashicorp/vault/pull/3959)]
+ * storage/consul: Validate that service names are RFC 1123 compliant [[GH-3960](https://github.com/hashicorp/vault/pull/3960)]
+ * storage/etcd3: Fix memory ballooning with standby instances [[GH-3798](https://github.com/hashicorp/vault/pull/3798)]
+ * storage/etcd3: Fix large lists (like token loading at startup) not being
+ handled [[GH-3772](https://github.com/hashicorp/vault/pull/3772)]
+ * storage/postgresql: Fix compatibility with versions using custom string
+ version tags [[GH-3949](https://github.com/hashicorp/vault/pull/3949)]
+ * storage/zookeeper: Update vendoring to fix freezing issues [[GH-3896](https://github.com/hashicorp/vault/pull/3896)]
+ * ui (Enterprise): Decoding the replication token should no longer error and
+ prevent enabling of a secondary replication cluster via the UI.
+ * plugin/gRPC: Add connection info to the request object [[GH-3997](https://github.com/hashicorp/vault/pull/3997)]
+
+## 0.9.3 (January 28th, 2018)
+
+A regression from a feature merge disabled the Nomad secrets backend in 0.9.2.
+This release re-enables the Nomad secrets backend; it is otherwise identical to
+0.9.2.
+
+## 0.9.2 (January 26th, 2018)
+
+SECURITY:
+
+ * Okta Auth Backend: While the Okta auth backend was successfully verifying
+ usernames and passwords, it was not checking the returned state of the
+ account, so accounts that had been marked locked out could still be used to
+ log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed.
+ * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by
+ the AppRole, AWS, and Cert auth backends would expire when the max TTL for
+ the backend/mount/system was hit instead of their stated behavior of living
+ as long as they are renewed. This is now fixed; existing tokens do not have
+ to be reissued as this was purely a regression in the renewal logic.
+ * Seal Wrapping: During certain replication states, values marked for seal
+ wrapping may not have been wrapped on the secondaries.
This has been fixed, + and existing values will be wrapped on next read or write. This does not + affect the barrier keys. + +DEPRECATIONS/CHANGES: + + * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool + returned by `sys/health` could be misleading since it would be `false` both + when a cluster was not a DR secondary but also when the node is a standby in + the cluster and has not yet fully received state from the active node. This + could cause health checks on LBs to decide that the node was acceptable for + traffic even though DR secondaries cannot handle normal Vault traffic. (In + other words, the bool could only convey "yes" or "no" but not "not sure + yet".) This has been replaced by `replication_dr_mode` and + `replication_perf_mode` which are string values that convey the current + state of the node; a value of `disabled` indicates that replication is + disabled or the state is still being discovered. As a result, an LB check + can positively verify that the node is both not `disabled` and is not a DR + secondary, and avoid sending traffic to it if either is true. + * PKI Secret Backend Roles parameter types: For `ou` and `organization` + in role definitions in the PKI secret backend, input can now be a + comma-separated string or an array of strings. Reading a role will + now return arrays for these parameters. + * Plugin API Changes: The plugin API has been updated to utilize golang's + context.Context package. Many function signatures now accept a context + object as the first parameter. Existing plugins will need to pull in the + latest Vault code and update their function signatures to begin using + context and the new gRPC transport. + +FEATURES: + + * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport, + allowing them to be written in other languages. + * **Brand New CLI**: Vault has a brand new CLI interface that is significantly + streamlined, supports autocomplete, and is almost entirely backwards + compatible. + * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends, + create and browse roles and certificates, and issue and sign certificates via + the listed roles. + +IMPROVEMENTS: + + * auth/aws: Handle IAM headers produced by clients that formulate numbers as + ints rather than strings [[GH-3763](https://github.com/hashicorp/vault/pull/3763)] + * auth/okta: Support JSON lists when specifying groups and policies [[GH-3801](https://github.com/hashicorp/vault/pull/3801)] + * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues, + including HA scenarios for some Gemalto HSMs. 
+ (Enterprise)
+ * cli: Output password prompts to stderr to make it easier to pipe an output
+ token to another command [[GH-3782](https://github.com/hashicorp/vault/pull/3782)]
+ * core: Report replication status in `sys/health` [[GH-3810](https://github.com/hashicorp/vault/pull/3810)]
+ * physical/s3: Allow using paths with S3 for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)]
+ * physical/s3: Add ability to disable SSL for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)]
+ * plugins: Args for plugins can now be specified separately from the command,
+ allowing the same output format and input format for plugin information
+ [[GH-3778](https://github.com/hashicorp/vault/pull/3778)]
+ * secret/pki: `ou` and `organization` can now be specified as a
+ comma-separated string or an array of strings [[GH-3804](https://github.com/hashicorp/vault/pull/3804)]
+ * plugins: Plugins will fall back to using netrpc as the communication protocol
+ on older versions of Vault [[GH-3833](https://github.com/hashicorp/vault/pull/3833)]
+
+BUG FIXES:
+
+ * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by
+ these backends could not have their TTL renewed beyond the system/mount max
+ TTL value [[GH-3803](https://github.com/hashicorp/vault/pull/3803)]
+ * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an
+ existing role update [[GH-3843](https://github.com/hashicorp/vault/pull/3843)]
+ * core/sealwrap: Speed improvements and bug fixes (Enterprise)
+ * identity: Delete group alias when an external group is deleted [[GH-3773](https://github.com/hashicorp/vault/pull/3773)]
+ * legacymfa/duo: Fix intermittent panic when Duo could not be reached
+ [[GH-2030](https://github.com/hashicorp/vault/pull/2030)]
+ * secret/database: Fix a location where a lock could potentially not be
+ released, leading to deadlock [[GH-3774](https://github.com/hashicorp/vault/pull/3774)]
+ * secret/(all databases): Fix behavior where if a max TTL was specified but no
+ default TTL was specified the system/mount default TTL would be used but not
+ be capped by the local max TTL [[GH-3814](https://github.com/hashicorp/vault/pull/3814)]
+ * secret/database: Fix an issue where plugins were not closed properly if they
+ failed to initialize [[GH-3768](https://github.com/hashicorp/vault/pull/3768)]
+ * ui: Mounting a secret backend will now properly set `max_lease_ttl` and
+ `default_lease_ttl` when specified - previously both fields set
+ `default_lease_ttl`.
+
+## 0.9.1 (December 21st, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * AppRole Case Sensitivity: In prior versions of Vault, `list` operations
+ against AppRole roles would require preserving case in the role name, even
+ though most other operations within AppRole are case-insensitive with
+ respect to the role name. This has been fixed; existing roles will behave as
+ they have in the past, but new roles will act case-insensitively in these
+ cases.
+ * Token Auth Backend Roles parameter types: For `allowed_policies` and
+ `disallowed_policies` in role definitions in the token auth backend, input
+ can now be a comma-separated string or an array of strings. Reading a role
+ will now return arrays for these parameters.
+ * Transit key exporting: You can now mark a key in the `transit` backend as
+ `exportable` at any time, rather than just at creation time; however, once
+ this value is set, it still cannot be unset.
+ * PKI Secret Backend Roles parameter types: For `allowed_domains` and
+ `key_usage` in role definitions in the PKI secret backend, input
+ can now be a comma-separated string or an array of strings. Reading a role
+ will now return arrays for these parameters.
+ * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic
+ key method in the SSH backend, the default is now to use 2048-bit keys if no
+ specific key bit size is specified.
+ * Consul Secret Backend lease handling: The `consul` secret backend can now
+ accept both strings and integer numbers of seconds for its lease value. The
+ value returned on a role read will be an integer number of seconds instead
+ of a human-friendly string.
+ * Unprintable characters not allowed in API paths: Unprintable characters are
+ no longer allowed in names in the API (paths and path parameters), with an
+ extra restriction on whitespace characters. Allowed characters are those
+ that are considered printable by Unicode plus spaces.
+
+FEATURES:
+
+ * **Transit Backup/Restore**: The `transit` backend now supports a backup
+ operation that can export a given key, including all key versions and
+ configuration, as well as a restore operation allowing import into another
+ Vault.
+ * **gRPC Database Plugins**: Database plugins now use gRPC for transport,
+ allowing them to be written in other languages.
+ * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked
+ using Vault.
+ * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now
+ match against custom certificate extensions via exact or glob matching, and
+ additionally supports max_ttl and periodic token toggles.
+
+IMPROVEMENTS:
+
+ * auth/cert: Support custom certificate constraints [[GH-3634](https://github.com/hashicorp/vault/pull/3634)]
+ * auth/cert: Support setting `max_ttl` and `period` [[GH-3642](https://github.com/hashicorp/vault/pull/3642)]
+ * audit/file: Setting a file mode of `0000` will now prevent Vault from
+ automatically `chmod`ing the log file [[GH-3649](https://github.com/hashicorp/vault/pull/3649)]
+ * auth/github: The legacy MFA system can now be used with the GitHub auth
+ backend [[GH-3696](https://github.com/hashicorp/vault/pull/3696)]
+ * auth/okta: The legacy MFA system can now be used with the Okta auth backend
+ [[GH-3653](https://github.com/hashicorp/vault/pull/3653)]
+ * auth/token: `allowed_policies` and `disallowed_policies` can now be specified
+ as a comma-separated string or an array of strings [[GH-3641](https://github.com/hashicorp/vault/pull/3641)]
+ * command/server: The log level can now be specified with `VAULT_LOG_LEVEL`
+ [[GH-3721](https://github.com/hashicorp/vault/pull/3721)]
+ * core: Period values from auth backends will now be checked and applied to the
+ TTL value directly by core on login and renewal requests [[GH-3677](https://github.com/hashicorp/vault/pull/3677)]
+ * database/mongodb: Add optional `write_concern` parameter, which can be set
+ during database configuration.
This establishes a session-wide [write
+ concern](https://docs.mongodb.com/manual/reference/write-concern/) for the
+ lifecycle of the mount [[GH-3646](https://github.com/hashicorp/vault/pull/3646)]
+ * http: Request paths containing non-printable characters will return 400 - Bad
+ Request [[GH-3697](https://github.com/hashicorp/vault/pull/3697)]
+ * mfa/okta: Use a given email address as a login filter, allowing operation
+ when the login email and account email are different
+ * plugins: Make Vault more resilient when unsealing when plugins are
+ unavailable [[GH-3686](https://github.com/hashicorp/vault/pull/3686)]
+ * secret/pki: `allowed_domains` and `key_usage` can now be specified
+ as a comma-separated string or an array of strings [[GH-3642](https://github.com/hashicorp/vault/pull/3642)]
+ * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [[GH-3593](https://github.com/hashicorp/vault/pull/3593)]
+ * secret/consul: The Consul secret backend now uses the value of `lease` set
+ on the role, if set, when renewing a secret. [[GH-3796](https://github.com/hashicorp/vault/pull/3796)]
+ * storage/mysql: Don't attempt database creation if it exists, which can help
+ under certain permissions constraints [[GH-3716](https://github.com/hashicorp/vault/pull/3716)]
+
+BUG FIXES:
+
+ * api/status (enterprise): Fix status reporting when using an auto seal
+ * auth/approle: Fix case-sensitive/insensitive comparison issue [[GH-3665](https://github.com/hashicorp/vault/pull/3665)]
+ * auth/cert: Return `allowed_names` on role read [[GH-3654](https://github.com/hashicorp/vault/pull/3654)]
+ * auth/ldap: Fix incorrect control information being sent [[GH-3402](https://github.com/hashicorp/vault/pull/3402)] [[GH-3496](https://github.com/hashicorp/vault/pull/3496)]
+ [[GH-3625](https://github.com/hashicorp/vault/pull/3625)] [[GH-3656](https://github.com/hashicorp/vault/pull/3656)]
+ * core: Fix seal status reporting when using an autoseal
+ * core: Add creation path to wrap info for a control group token
+ * core: Fix potential panic that could occur using plugins when a node
+ transitioned from active to standby [[GH-3638](https://github.com/hashicorp/vault/pull/3638)]
+ * core: Fix memory ballooning when a connection would connect to the cluster
+ port and then go away -- redux!
[[GH-3680](https://github.com/hashicorp/vault/pull/3680)]
+ * core: Replace recursive token revocation logic with depth-first logic, which
+ can avoid hitting stack depth limits in extreme cases [[GH-2348](https://github.com/hashicorp/vault/pull/2348)]
+ * core: When doing a read on configured audited-headers, properly handle case
+ insensitivity [[GH-3701](https://github.com/hashicorp/vault/pull/3701)]
+ * core/pkcs11 (enterprise): Fix panic when PKCS#11 library is not readable
+ * database/mysql: Allow the creation statement to use commands that are not yet
+ supported by the prepare statement protocol [[GH-3619](https://github.com/hashicorp/vault/pull/3619)]
+ * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19]
+
+## 0.9.0.1 (November 21st, 2017) (Enterprise Only)
+
+IMPROVEMENTS:
+
+ * auth/gcp: Support seal wrapping of configuration parameters
+ * auth/kubernetes: Support seal wrapping of configuration parameters
+
+BUG FIXES:
+
+ * Fix an upgrade issue with some physical backends when migrating from legacy
+ HSM stored key support to the new Seal Wrap mechanism (Enterprise)
+ * mfa: Add the 'mfa' flag that was removed by mistake [[GH-4223](https://github.com/hashicorp/vault/pull/4223)]
+
+## 0.9.0 (November 14th, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * HSM config parameter requirements: When using Vault with an HSM, a new
+ parameter is required: `hmac_key_label`. This performs a similar function to
+ `key_label` but for the HMAC key Vault will use. Vault will generate a
+ suitable key if this value is specified and `generate_key` is set to true.
+ * API HTTP client behavior: When calling `NewClient` the API no longer
+ modifies the provided client/transport. In particular this means it will no
+ longer enable redirection limiting and HTTP/2 support on custom clients. It
+ is suggested that if you want to make changes to an HTTP client that you use
+ one created by `DefaultConfig` as a starting point.
+ * AWS EC2 client nonce behavior: The client nonce generated by the backend
+ that gets returned along with the authentication response will be audited in
+ plaintext. If this is undesired, the clients can choose to supply a custom
+ nonce to the login endpoint. The custom nonce set by the client will, from
+ now on, not be returned with the authentication response, and hence not
+ audit logged.
+ * AWS Auth role options: The API will now error when trying to create or
+ update a role with the mutually-exclusive options
+ `disallow_reauthentication` and `allow_instance_migration`.
+ * SSH CA role read changes: When reading back a role from the `ssh` backend,
+ the TTL/max TTL values will now be an integer number of seconds rather than
+ a string. This better matches the API elsewhere in Vault.
+ * SSH role list changes: When listing roles from the `ssh` backend via the API,
+ the response data will additionally return a `key_info` map that will contain
+ a map of each key with a corresponding object containing the `key_type`.
+ * More granularity in audit logs: Audit request and response entries are still
+ in RFC3339 format but now have a granularity of nanoseconds.
+ * High availability related values have been moved out of the `storage` and
+ `ha_storage` stanzas, and into the top-level configuration. `redirect_addr`
+ has been renamed to `api_addr`. The stanzas still support accepting
+ HA-related values to maintain backward compatibility, but top-level values
+ will take precedence.
+ * A new `seal` stanza has been added to the configuration file, which is
+ optional and enables configuration of the seal type to use for additional
+ data protection, such as using HSM or Cloud KMS solutions to encrypt and
+ decrypt data.
+
+FEATURES:
+
+ * **RSA Support for Transit Backend**: Transit backend can now generate RSA
+ keys which can be used for encryption and signing. [[GH-3489](https://github.com/hashicorp/vault/pull/3489)]
+ * **Identity System**: Now in open source and with significant enhancements,
+ Identity is an integrated system for understanding users across tokens and
+ enabling easier management of users directly and via groups.
+ * **External Groups in Identity**: Vault can now automatically assign users
+ and systems to groups in Identity based on their membership in external
+ groups.
+ * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take
+ advantage of FIPS 140-2-certified HSMs to ensure that Critical Security
+ Parameters are protected in a compliant fashion. Vault's implementation has
+ received a statement of compliance from Leidos.
+ * **Control Groups (Enterprise)**: Require multiple members of an Identity
+ group to authorize a requested action before it is allowed to run.
+ * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS
+ and GCP CKMS.
+ * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel
+ to create extremely flexible access control policies -- even on
+ unauthenticated endpoints.
+ * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing
+ functionality, the `rekey` operation is now supported; it uses recovery keys
+ to authorize the master key rekey.
+ * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using
+ Disaster Recovery replication, a token can be created that can be used to
+ authorize actions such as promotion and updating primary information, rather
+ than using recovery keys.
+ * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using
+ auto-unsealing, a request to unseal Vault can be triggered by a threshold of
+ recovery keys, rather than requiring the Vault process to be restarted.
+ * **UI Redesign (Enterprise)**: All new experience for the Vault Enterprise
+ UI. The look and feel has been completely redesigned to give users a better
+ experience and make managing secrets fast and easy.
+ * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend,
+ create and browse roles, and use them to sign keys or generate one-time
+ passwords.
+ * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS
+ backend via the Vault Enterprise UI. In addition, you can create roles,
+ browse them, and generate IAM credentials from them in the UI.
+
+IMPROVEMENTS:
+
+ * api: Add ability to set custom headers on each call [[GH-3394](https://github.com/hashicorp/vault/pull/3394)]
+ * command/server: Add config option to disable requesting client certificates
+ [[GH-3373](https://github.com/hashicorp/vault/pull/3373)]
+ * auth/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)]
+ * core: Disallow mounting underneath an existing path, not just over [[GH-2919](https://github.com/hashicorp/vault/pull/2919)]
+ * physical/file: Use `700` as permissions when creating directories. The files
+ themselves were `600` and are all encrypted, but this doesn't hurt.
+ * secret/aws: Add ability to use custom IAM/STS endpoints [[GH-3416](https://github.com/hashicorp/vault/pull/3416)] + * secret/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] + * secret/cassandra: Work around Cassandra ignoring consistency levels for a + user listing query [[GH-3469](https://github.com/hashicorp/vault/pull/3469)] + * secret/pki: Private keys can now be marshalled as PKCS#8 [[GH-3518](https://github.com/hashicorp/vault/pull/3518)] + * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON + arrays [[GH-3409](https://github.com/hashicorp/vault/pull/3409)] + * secret/ssh: Role TTL/max TTL can now be specified as either a string or an + integer [[GH-3507](https://github.com/hashicorp/vault/pull/3507)] + * secret/transit: Sign and verify operations now support a `none` hash + algorithm to allow signing/verifying pre-hashed data [[GH-3448](https://github.com/hashicorp/vault/pull/3448)] + * secret/database: Add the ability to glob allowed roles in the Database Backend [[GH-3387](https://github.com/hashicorp/vault/pull/3387)] + * ui (enterprise): Support for RSA keys in the transit backend + * ui (enterprise): Support for DR Operation Token generation, promoting, and + updating primary on DR Secondary clusters + +BUG FIXES: + + * api: Fix panic when setting a custom HTTP client but with a nil transport + [[GH-3435](https://github.com/hashicorp/vault/pull/3435)] [[GH-3437](https://github.com/hashicorp/vault/pull/3437)] + * api: Fix authing to the `cert` backend when the CA for the client cert is + not known to the server's listener [[GH-2946](https://github.com/hashicorp/vault/pull/2946)] + * auth/approle: Create role ID index during read if a role is missing one [[GH-3561](https://github.com/hashicorp/vault/pull/3561)] + * auth/aws: Don't allow mutually exclusive options [[GH-3291](https://github.com/hashicorp/vault/pull/3291)] + * auth/radius: Fix logging in in some situations [[GH-3461](https://github.com/hashicorp/vault/pull/3461)] + * core: Fix memleak when a connection would connect to the cluster port and + then go away [[GH-3513](https://github.com/hashicorp/vault/pull/3513)] + * core: Fix panic if a single-use token is used to step-down or seal [[GH-3497](https://github.com/hashicorp/vault/pull/3497)] + * core: Set rather than add headers to prevent some duplicated headers in + responses when requests were forwarded to the active node [[GH-3485](https://github.com/hashicorp/vault/pull/3485)] + * physical/etcd3: Fix some listing issues due to how etcd3 does prefix + matching [[GH-3406](https://github.com/hashicorp/vault/pull/3406)] + * physical/etcd3: Fix case where standbys can lose their etcd client lease + [[GH-3031](https://github.com/hashicorp/vault/pull/3031)] + * physical/file: Fix listing when underscores are the first component of a + path [[GH-3476](https://github.com/hashicorp/vault/pull/3476)] + * plugins: Allow response errors to be returned from backend plugins [[GH-3412](https://github.com/hashicorp/vault/pull/3412)] + * secret/transit: Fix panic if the length of the input ciphertext was less + than the expected nonce length [[GH-3521](https://github.com/hashicorp/vault/pull/3521)] + * ui (enterprise): Reinstate support for generic secret backends - this was + erroneously removed in a previous release + +## 0.8.3 (September 19th, 2017) + +CHANGES: + + * Policy input/output standardization: For all built-in authentication + backends, policies can now be specified 
+
+## 0.8.3 (September 19th, 2017)
+
+CHANGES:
+
+ * Policy input/output standardization: For all built-in authentication
+   backends, policies can now be specified as a comma-delimited string or an
+   array if using JSON as API input; on read, policies will be returned as an
+   array; and the `default` policy will not be forcefully added to policies
+   saved in configurations. Please note that the `default` policy will continue
+   to be added to generated tokens; however, rather than backends adding
+   `default` to the given set of input policies (in some cases, and not in
+   others), the stored set will reflect the user-specified set.
+ * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the
+   endpoint would not modify the Issuer in the generated certificate, leaving
+   the output self-issued. Although theoretically valid, in practice crypto
+   stacks were unhappy validating paths containing such certs. As a result,
+   `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer
+   DN of the generated certificate.
+ * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely
+   useful in break-glass or support scenarios, it is also extremely dangerous.
+   As of now, a configuration file option `raw_storage_endpoint` must be set in
+   order to enable this API endpoint. Once set, the available functionality has
+   been enhanced slightly; it now supports listing and decrypting most of
+   Vault's core data structures, except for the encryption keyring itself.
+ * `generic` is now `kv`: To better reflect its actual use, the `generic`
+   backend is now `kv`. Using `generic` will still work for backwards
+   compatibility.
+
+FEATURES:
+
+ * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
+   using machine credentials.
+ * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
+   can now authenticate to Vault using JWT tokens (see the sketch at the end of
+   this section).
+
+IMPROVEMENTS:
+
+ * configuration: Provide a config option to store Vault server's process ID
+   (PID) in a file [[GH-3321](https://github.com/hashicorp/vault/pull/3321)]
+ * mfa (Enterprise): Add the ability to use identity metadata in username format
+ * mfa/okta (Enterprise): Add support for configuring base_url for API calls
+ * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
+   longer than the signing CA certificate's NotAfter value. [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
+ * sys/raw: Raw storage access is now disabled by default [[GH-3329](https://github.com/hashicorp/vault/pull/3329)]
+
+BUG FIXES:
+
+ * auth/okta: Fix regression that removed the ability to set base_url [[GH-3313](https://github.com/hashicorp/vault/pull/3313)]
+ * core: Fix panic while loading leases at startup on ARM processors
+   [[GH-3314](https://github.com/hashicorp/vault/pull/3314)]
+ * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
+   [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
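+
+A minimal sketch (illustrative, not from the release itself) of the new
+Kubernetes Service Account login via the Go API client; the mount path
+`kubernetes` and role name `demo` are assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Assumes VAULT_ADDR is set; no token is needed to log in.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Kubernetes mounts the service account JWT into the pod.
+	jwt, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	secret, err := client.Logical().Write("auth/kubernetes/login", map[string]interface{}{
+		"role": "demo",
+		"jwt":  string(jwt),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("vault token:", secret.Auth.ClientToken)
+}
+```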
+
+## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
+
+BUG FIXES:
+
+ * Fix an issue upgrading to 0.8.2 for Enterprise customers.
+
+## 0.8.2 (September 5th, 2017)
+
+SECURITY:
+
+* In prior versions of Vault, if authenticating via AWS IAM and requesting a
+  periodic token, the period was not properly respected. This could lead to
+  tokens expiring unexpectedly, or a token lifetime being longer than expected.
+  Upon token renewal with Vault 0.8.2 the period will be properly enforced.
+
+DEPRECATIONS/CHANGES:
+
+* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
+  API calls. A future version of Vault will mark these optional values as
+  required. Failure to supply `-mode` or `-role` will result in a warning.
+* Vault plugins will first briefly run a restricted version of the plugin to
+  fetch metadata, and then lazy-load the plugin on first request to prevent
+  crash/deadlock of Vault during the unseal process. Plugins will need to be
+  built with the latest changes in order for them to run properly.
+
+FEATURES:
+
+* **Lazy Lease Loading**: On startup, Vault will now load leases from storage
+  in a lazy fashion (token checks and revocation/renewal requests still force
+  an immediate load). For larger installations this can significantly reduce
+  downtime when switching active nodes or bringing Vault up from cold start.
+* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA
+  backend for authenticating to machines. It also supports remote host key
+  verification through the SSH CA backend, if enabled.
+* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports
+  signing self-issued CA certs. This is useful when switching root CAs.
+
+IMPROVEMENTS:
+
+ * audit/file: Allow specifying `stdout` as the `file_path` to log to standard
+   output [[GH-3235](https://github.com/hashicorp/vault/pull/3235)]
+ * auth/aws: Allow wildcards in `bound_iam_principal_arn` [[GH-3213](https://github.com/hashicorp/vault/pull/3213)]
+ * auth/okta: Compare groups case-insensitively since Okta is only
+   case-preserving [[GH-3240](https://github.com/hashicorp/vault/pull/3240)]
+ * auth/okta: Standardize Okta configuration APIs across backends [[GH-3245](https://github.com/hashicorp/vault/pull/3245)]
+ * cli: Add subcommand autocompletion that can be enabled with
+   `vault -autocomplete-install` [[GH-3223](https://github.com/hashicorp/vault/pull/3223)]
+ * cli: Add ability to handle wrapped responses when using `vault auth`. What
+   is output depends on the other given flags; see the help output for that
+   command for more information.
[[GH-3263](https://github.com/hashicorp/vault/pull/3263)] + * core: TLS cipher suites used for cluster behavior can now be set via + `cluster_cipher_suites` in configuration [[GH-3228](https://github.com/hashicorp/vault/pull/3228)] + * core: The `plugin_name` can now either be specified directly as part of the + parameter or within the `config` object when mounting a secret or auth backend + via `sys/mounts/:path` or `sys/auth/:path` respectively [[GH-3202](https://github.com/hashicorp/vault/pull/3202)] + * core: It is now possible to update the `description` of a mount when + mount-tuning, although this must be done through the HTTP layer [[GH-3285](https://github.com/hashicorp/vault/pull/3285)] + * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and + retrying the operation [[GH-3269](https://github.com/hashicorp/vault/pull/3269)] + * secret/pki: TTLs can now be specified as a string or an integer number of + seconds [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] + * secret/pki: Self-issued certs can now be signed via + `pki/root/sign-self-issued` [[GH-3274](https://github.com/hashicorp/vault/pull/3274)] + * storage/gcp: Use application default credentials if they exist [[GH-3248](https://github.com/hashicorp/vault/pull/3248)] + +BUG FIXES: + + * auth/aws: Properly use role-set period values for IAM-derived token renewals + [[GH-3220](https://github.com/hashicorp/vault/pull/3220)] + * auth/okta: Fix updating organization/ttl/max_ttl after initial setting + [[GH-3236](https://github.com/hashicorp/vault/pull/3236)] + * core: Fix PROXY when underlying connection is TLS [[GH-3195](https://github.com/hashicorp/vault/pull/3195)] + * core: Policy-related commands would sometimes fail to act case-insensitively + [[GH-3210](https://github.com/hashicorp/vault/pull/3210)] + * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address + [[GH-3268](https://github.com/hashicorp/vault/pull/3268)] + * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. + [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + * plugins: Skip mounting plugin-based secret and credential mounts when setting + up mounts if the plugin is no longer present in the catalog. [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + +## 0.8.1 (August 16th, 2017) + +DEPRECATIONS/CHANGES: + + * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already + exists will now return a `204` instead of overwriting an existing root. If + you want to recreate the root, first run a delete operation on `pki/root` + (requires `sudo` capability), then generate it again. + +FEATURES: + + * **Oracle Secret Backend**: There is now an external plugin to support leased + credentials for Oracle databases (distributed separately). + * **GCP IAM Auth Backend**: There is now an authentication backend that allows + using GCP IAM credentials to retrieve Vault tokens. This is available as + both a plugin and built-in to Vault. + * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can + now be used for MFA with the new path-based MFA introduced in Vault + Enterprise 0.8. + * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports + specifying permitted DNS domains for CA certificates, allowing you to + narrowly scope the set of domains for which a CA can issue or sign child + certificates. 
+ * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to
+   reload using the `sys/plugins/reload/backend` endpoint and providing either
+   the plugin name or the mounts to reload.
+ * **Self-Reloading Plugins**: The plugin system will now attempt to reload a
+   crashed or stopped plugin, once per request.
+
+IMPROVEMENTS:
+
+ * auth/approle: Allow array input for policies in addition to comma-delimited
+   strings [[GH-3163](https://github.com/hashicorp/vault/pull/3163)]
+ * plugins: Send logs through Vault's logger rather than stdout [[GH-3142](https://github.com/hashicorp/vault/pull/3142)]
+ * secret/pki: Add `pki/root` delete operation [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+ * secret/pki: Don't overwrite an existing root cert/key when calling generate
+   [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+
+BUG FIXES:
+
+ * aws: Don't prefer a nil HTTP client over an existing one [[GH-3159](https://github.com/hashicorp/vault/pull/3159)]
+ * core: If there is an error when checking for create/update existence, return
+   500 instead of 400 [[GH-3162](https://github.com/hashicorp/vault/pull/3162)]
+ * secret/database: Avoid creating usernames that are too long for legacy MySQL
+   [[GH-3138](https://github.com/hashicorp/vault/pull/3138)]
+
+## 0.8.0 (August 9th, 2017)
+
+SECURITY:
+
+ * We've added a note to the docs about the way the GitHub auth backend works
+   as it may not be readily apparent that GitHub personal access tokens, which
+   are used by the backend, can be used for unauthorized access if they are
+   stolen from third party services and access to Vault is public.
+
+DEPRECATIONS/CHANGES:
+
+ * Database Plugin Backends: Passwords generated for these backends now
+   enforce stricter password requirements, as opposed to the previous behavior
+   of returning a randomized UUID. Passwords are of length 20, and have the
+   characters `A1a-` prepended to ensure stricter requirements. No regressions
+   are expected from this change. (For database backends that were previously
+   substituting underscores for hyphens in passwords, this will remain the
+   case.)
+ * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
+   `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
+   Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
+   capability.
+ * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
+   is now unauthenticated. This allows introspection of the wrapping info by
+   clients that only have the wrapping token without then invalidating the
+   token. Validation functions/checks are still performed on the token (see the
+   sketch after the feature list below).
+
+FEATURES:
+
+ * **Cassandra Storage**: Cassandra can now be used for Vault storage
+ * **CockroachDB Storage**: CockroachDB can now be used for Vault storage
+ * **CouchDB Storage**: CouchDB can now be used for Vault storage
+ * **SAP HANA Database Plugin**: The `databases` backend can now manage users
+   for SAP HANA databases
+ * **Plugin Backends**: Vault now supports running secret and auth backends as
+   plugins. Plugins can be mounted like normal backends and can be developed
+   independently from Vault.
+ * **PROXY Protocol Support**: Vault listeners can now be configured to honor
+   PROXY protocol v1 information to allow passing real client IPs into Vault. A
+   list of authorized addresses (IPs or subnets) can be defined and
+   accept/reject behavior controlled.
+ * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI + now supports lookup and listing of leases and the associated actions from the + `sys/leases` endpoints in the API. These are located in the new top level + navigation item "Leases". + * **Filtered Mounts for Performance Mode Replication**: Whitelists or + blacklists of mounts can be defined per-secondary to control which mounts + are actually replicated to that secondary. This can allow targeted + replication of specific sets of data to specific geolocations/datacenters. + * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new + replication mode, Disaster Recovery (DR), that performs full real-time + replication (including tokens and leases) to DR secondaries. DR secondaries + cannot handle client requests, but can be promoted to primary as needed for + failover. + * **Manage New Replication Features in the Vault Enterprise UI**: Support for + Replication features in Vault Enterprise UI has expanded to include new DR + Replication mode and management of Filtered Mounts in Performance Replication + mode. + * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows + correlation of users across tokens. At present this is only used for MFA, + but will be the foundation of many other features going forward. + * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise + Only)**: A brand new MFA system built on top of Identity allows MFA + (currently Duo Push, Okta Push, and TOTP) for any authenticated path within + Vault. MFA methods can be configured centrally, and TOTP keys live within + the user's Identity information to allow using the same key across tokens. + Specific MFA method(s) required for any given path within Vault can be + specified in normal ACL path statements. 
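+
+A minimal sketch (assumptions noted inline, not from the release notes) of the
+now-unauthenticated `sys/wrapping/lookup` described under DEPRECATIONS/CHANGES
+above, using the Go API client:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Deliberately clear any token: the lookup endpoint no longer requires one.
+	client.ClearToken()
+
+	secret, err := client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{
+		"token": "WRAPPING_TOKEN_HERE", // the wrapping token being inspected
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	// The wrapping token itself is not invalidated by this call.
+	fmt.Println(secret.Data["creation_time"], secret.Data["creation_ttl"])
+}
+```
+
+Because validation checks still run against the token, a bad or expired
+wrapping token fails the lookup rather than leaking information.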
+ +IMPROVEMENTS: + + * api: Add client method for a secret renewer background process [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Add `RenewTokenAsSelf` [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var or with a new API function [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * api/cli: Client will now attempt to look up SRV records for the given Vault + hostname [[GH-3035](https://github.com/hashicorp/vault/pull/3035)] + * audit/socket: Enhance reconnection logic and don't require the connection to + be established at unseal time [[GH-2934](https://github.com/hashicorp/vault/pull/2934)] + * audit/file: Opportunistically try re-opening the file on error [[GH-2999](https://github.com/hashicorp/vault/pull/2999)] + * auth/approle: Add role name to token metadata [[GH-2985](https://github.com/hashicorp/vault/pull/2985)] + * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [[GH-2915](https://github.com/hashicorp/vault/pull/2915)] + * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * command/auth: Add `-token-only` flag to `vault auth` that returns only the + token on stdout and does not store it via the token helper [[GH-2855](https://github.com/hashicorp/vault/pull/2855)] + * core: CORS allowed origins can now be configured [[GH-2021](https://github.com/hashicorp/vault/pull/2021)] + * core: Add metrics counters for audit log failures [[GH-2863](https://github.com/hashicorp/vault/pull/2863)] + * cors: Allow setting allowed headers via the API instead of always using + wildcard [[GH-3023](https://github.com/hashicorp/vault/pull/3023)] + * secret/ssh: Allow specifying the key ID format using template values for CA + type [[GH-2888](https://github.com/hashicorp/vault/pull/2888)] + * server: Add `tls_client_ca_file` option for specifying a CA file to use for + client certificate verification when `tls_require_and_verify_client_cert` is + enabled [[GH-3034](https://github.com/hashicorp/vault/pull/3034)] + * storage/cockroachdb: Add CockroachDB storage backend [[GH-2713](https://github.com/hashicorp/vault/pull/2713)] + * storage/couchdb: Add CouchDB storage backend [[GH-2880](https://github.com/hashicorp/vault/pull/2880)] + * storage/mssql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Improve listing speed [[GH-2945](https://github.com/hashicorp/vault/pull/2945)] + * storage/s3: More efficient paging when an object has a lot of subobjects + [[GH-2780](https://github.com/hashicorp/vault/pull/2780)] + * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [[GH-3084](https://github.com/hashicorp/vault/pull/3084)] + * sys/wrapping: Wrapped tokens now store the original request path of the data + [[GH-3100](https://github.com/hashicorp/vault/pull/3100)] + * telemetry: Add support for DogStatsD [[GH-2490](https://github.com/hashicorp/vault/pull/2490)] + +BUG FIXES: + + * api/health: Don't treat standby `429` codes as an error [[GH-2850](https://github.com/hashicorp/vault/pull/2850)] + * api/leases: Fix lease lookup returning lease properties at the top level + * audit: Fix panic when audit logging a read operation on an asymmetric + `transit` key [[GH-2958](https://github.com/hashicorp/vault/pull/2958)] + * 
auth/approle: Fix panic when secret and CIDR list are not provided in the role
+   [[GH-3075](https://github.com/hashicorp/vault/pull/3075)]
+ * auth/aws: Look up proper account ID on token renew [[GH-3012](https://github.com/hashicorp/vault/pull/3012)]
+ * auth/aws: Store IAM header in all cases when it changes [[GH-3004](https://github.com/hashicorp/vault/pull/3004)]
+ * auth/ldap: Verify given certificate is PEM encoded instead of failing
+   silently [[GH-3016](https://github.com/hashicorp/vault/pull/3016)]
+ * auth/token: Don't allow using the same token ID twice when manually
+   specifying [[GH-2916](https://github.com/hashicorp/vault/pull/2916)]
+ * cli: Fix issue with parsing keys that start with special characters [[GH-2998](https://github.com/hashicorp/vault/pull/2998)]
+ * core: Relocated `sys/leases/renew` returns same payload as original
+   `sys/leases` endpoint [[GH-2891](https://github.com/hashicorp/vault/pull/2891)]
+ * secret/ssh: Fix panic when signing with incorrect key type [[GH-3072](https://github.com/hashicorp/vault/pull/3072)]
+ * secret/totp: Ensure codes can only be used once. This makes some automated
+   workflows harder but complies with the RFC. [[GH-2908](https://github.com/hashicorp/vault/pull/2908)]
+ * secret/transit: Fix locking when creating a key with unsupported options
+   [[GH-2974](https://github.com/hashicorp/vault/pull/2974)]
+
+## 0.7.3 (June 7th, 2017)
+
+SECURITY:
+
+ * Cert auth backend now checks validity of individual certificates: In
+   previous versions of Vault, validity (e.g. expiration) of individual leaf
+   certificates added for authentication was not checked. This was done to make
+   it easier for administrators to control lifecycles of individual
+   certificates added to the backend, e.g. the authentication material being
+   checked was access to that specific certificate's private key rather than
+   all private keys signed by a CA. However, this behavior is often unexpected
+   and as a result can lead to insecure deployments, so we are now validating
+   these certificates as well.
+ * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
+   caused the HMACing of any App-ID information stored in paths (including
+   actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
+   In 0.7.3 any such paths will be automatically changed to salted versions on
+   access (e.g. login or read); however, if you created new app-IDs or user-IDs
+   in 0.7.1/0.7.2, you may want to consider whether any users with access to
+   Vault's underlying data store may have intercepted these values, and
+   revoke/roll them.
+
+DEPRECATIONS/CHANGES:
+
+ * Step-Down is Forwarded: When a step-down is issued against a non-active node
+   in an HA cluster, it will now forward the request to the active node.
+
+FEATURES:
+
+ * **ed25519 Signing/Verification in Transit with Key Derivation**: The
+   `transit` backend now supports generating
+   [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
+   functionality. These keys support derivation, allowing you to modify the
+   actual encryption key used by supplying a `context` value (see the sketch
+   after this list).
+ * **Key Version Specification for Encryption in Transit**: You can now specify
+   the version of a key you wish to use to generate a signature, ciphertext, or
+   HMAC. This can be controlled by the `min_encryption_version` key
+   configuration property.
+ * **Replication Primary Discovery (Enterprise)**: Replication primaries will
+   now advertise the addresses of their local HA cluster members to replication
+   secondaries. This helps recovery if the primary active node goes down and
+   neither service discovery nor load balancers are in use to steer clients.
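+
+A minimal sketch (illustrative, not from the release notes) of a derived
+ed25519 transit key; the key name `app-key` and context value are assumptions,
+and transit is assumed to be mounted at `transit/`:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a derived ed25519 key; "app-key" is an illustrative name.
+	if _, err := client.Logical().Write("transit/keys/app-key", map[string]interface{}{
+		"type":    "ed25519",
+		"derived": true,
+	}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Derived keys require a context; each context derives a distinct key.
+	sig, err := client.Logical().Write("transit/sign/app-key", map[string]interface{}{
+		"input":   base64.StdEncoding.EncodeToString([]byte("payload")),
+		"context": base64.StdEncoding.EncodeToString([]byte("tenant-42")),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(sig.Data["signature"])
+}
+```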
+
+IMPROVEMENTS:
+
+ * api/health: Add Sys().Health() [[GH-2805](https://github.com/hashicorp/vault/pull/2805)]
+ * audit: Add auth information to requests that error out [[GH-2754](https://github.com/hashicorp/vault/pull/2754)]
+ * command/auth: Add `-no-store` option that prevents the auth command from
+   storing the returned token into the configured token helper [[GH-2809](https://github.com/hashicorp/vault/pull/2809)]
+ * core/forwarding: Request forwarding now heartbeats to prevent unused
+   connections from being terminated by firewalls or proxies
+ * plugins/databases: Add MongoDB as an internal database plugin [[GH-2698](https://github.com/hashicorp/vault/pull/2698)]
+ * storage/dynamodb: Add a method for checking the existence of children,
+   speeding up deletion operations in the DynamoDB storage backend [[GH-2722](https://github.com/hashicorp/vault/pull/2722)]
+ * storage/mysql: Add max_parallel parameter to MySQL backend [[GH-2760](https://github.com/hashicorp/vault/pull/2760)]
+ * secret/databases: Support listing connections [[GH-2823](https://github.com/hashicorp/vault/pull/2823)]
+ * secret/databases: Support custom renewal statements in Postgres database
+   plugin [[GH-2788](https://github.com/hashicorp/vault/pull/2788)]
+ * secret/databases: Use the role name as part of generated credentials
+   [[GH-2812](https://github.com/hashicorp/vault/pull/2812)]
+ * ui (Enterprise): Transit key and secret browsing UI handle large lists better
+ * ui (Enterprise): root tokens are no longer persisted
+ * ui (Enterprise): support for mounting Database and TOTP secret backends
+
+BUG FIXES:
+
+ * auth/app-id: Fix regression causing loading of salts to be skipped
+ * auth/aws: Improve EC2 describe instances performance [[GH-2766](https://github.com/hashicorp/vault/pull/2766)]
+ * auth/aws: Fix lookup of some instance profile ARNs [[GH-2802](https://github.com/hashicorp/vault/pull/2802)]
+ * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various
+   points (e.g.
renewal time) more robust [[GH-2814](https://github.com/hashicorp/vault/pull/2814)] + * auth/aws: Properly honor configured period when using IAM authentication + [[GH-2825](https://github.com/hashicorp/vault/pull/2825)] + * auth/aws: Check that a bound IAM principal is not empty (in the current + state of the role) before requiring it match the previously authenticated + client [[GH-2781](https://github.com/hashicorp/vault/pull/2781)] + * auth/cert: Fix panic on renewal [[GH-2749](https://github.com/hashicorp/vault/pull/2749)] + * auth/cert: Certificate verification for non-CA certs [[GH-2761](https://github.com/hashicorp/vault/pull/2761)] + * core/acl: Prevent race condition when compiling ACLs in some scenarios + [[GH-2826](https://github.com/hashicorp/vault/pull/2826)] + * secret/database: Increase wrapping token TTL; in a loaded scenario it could + be too short + * secret/generic: Allow integers to be set as the value of `ttl` field as the + documentation claims is supported [[GH-2699](https://github.com/hashicorp/vault/pull/2699)] + * secret/ssh: Added host key callback to ssh client config [[GH-2752](https://github.com/hashicorp/vault/pull/2752)] + * storage/s3: Avoid a panic when some bad data is returned [[GH-2785](https://github.com/hashicorp/vault/pull/2785)] + * storage/dynamodb: Fix list functions working improperly on Windows [[GH-2789](https://github.com/hashicorp/vault/pull/2789)] + * storage/file: Don't leak file descriptors in some error cases + * storage/swift: Fix pre-v3 project/tenant name reading [[GH-2803](https://github.com/hashicorp/vault/pull/2803)] + +## 0.7.2 (May 8th, 2017) + +BUG FIXES: + + * audit: Fix auditing entries containing certain kinds of time values + [[GH-2689](https://github.com/hashicorp/vault/pull/2689)] + +## 0.7.1 (May 5th, 2017) + +DEPRECATIONS/CHANGES: + + * LDAP Auth Backend: Group membership queries will now run as the `binddn` + user when `binddn`/`bindpass` are configured, rather than as the + authenticating user as was the case previously. + +FEATURES: + + * **AWS IAM Authentication**: IAM principals can get Vault tokens + automatically, opening AWS-based authentication to users, ECS containers, + Lambda instances, and more. Signed client identity information retrieved + using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS + service before issuing a Vault token. This backend is unified with the + `aws-ec2` authentication backend under the name `aws`, and allows additional + EC2-related restrictions to be applied during the IAM authentication; the + previous EC2 behavior is also still available. [[GH-2441](https://github.com/hashicorp/vault/pull/2441)] + * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your + Vault physical data store [[GH-2546](https://github.com/hashicorp/vault/pull/2546)] + * **Lease Listing and Lookup**: You can now introspect a lease to get its + creation and expiration properties via `sys/leases/lookup`; with `sudo` + capability you can also list leases for lookup, renewal, or revocation via + that endpoint. Various lease functions (renew, revoke, revoke-prefix, + revoke-force) have also been relocated to `sys/leases/`, but they also work + at the old paths for compatibility. Reading (but not listing) leases via + `sys/leases/lookup` is now a part of the current `default` policy. 
[[GH-2650](https://github.com/hashicorp/vault/pull/2650)]
+ * **TOTP Secret Backend**: You can now store multi-factor authentication keys
+   in Vault and use the API to retrieve time-based one-time use passwords on
+   demand. The backend can also be used to generate a new key and validate
+   passwords generated by that key (see the sketch at the end of this
+   section). [[GH-2492](https://github.com/hashicorp/vault/pull/2492)]
+ * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend
+   combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra
+   backends. It also provides a plugin interface for extendability through
+   custom databases. [[GH-2200](https://github.com/hashicorp/vault/pull/2200)]
+
+IMPROVEMENTS:
+
+ * auth/cert: Support for constraints on subject Common Name and DNS/email
+   Subject Alternate Names in certificates [[GH-2595](https://github.com/hashicorp/vault/pull/2595)]
+ * auth/ldap: Use the binding credentials to search group membership rather
+   than the user credentials [[GH-2534](https://github.com/hashicorp/vault/pull/2534)]
+ * cli/revoke: Add `-self` option to allow revoking the currently active token
+   [[GH-2596](https://github.com/hashicorp/vault/pull/2596)]
+ * core: Randomize x coordinate in Shamir shares [[GH-2621](https://github.com/hashicorp/vault/pull/2621)]
+ * replication: Fix a bug when enabling `approle` on a primary before
+   secondaries were connected
+ * replication: Add heartbeating to ensure firewalls don't kill connections to
+   primaries
+ * secret/pki: Add `no_store` option that allows certificates to be issued
+   without being stored. This removes the ability to look up and/or add to a
+   CRL but helps with scaling to very large numbers of certificates. [[GH-2565](https://github.com/hashicorp/vault/pull/2565)]
+ * secret/pki: When used with a role parameter, the `sign-verbatim` endpoint
+   honors the values of `generate_lease`, `no_store`, `ttl` and `max_ttl` from
+   the given role [[GH-2593](https://github.com/hashicorp/vault/pull/2593)]
+ * secret/pki: Add role parameter `allow_glob_domains` that enables defining
+   names in `allowed_domains` containing `*` glob patterns [[GH-2517](https://github.com/hashicorp/vault/pull/2517)]
+ * secret/pki: Update certificate storage to not use characters that are not
+   supported on some filesystems [[GH-2575](https://github.com/hashicorp/vault/pull/2575)]
+ * storage/etcd3: Add `discovery_srv` option to query for SRV records to find
+   servers [[GH-2521](https://github.com/hashicorp/vault/pull/2521)]
+ * storage/s3: Support `max_parallel` option to limit concurrent outstanding
+   requests [[GH-2466](https://github.com/hashicorp/vault/pull/2466)]
+ * storage/s3: Use pooled transport for http client [[GH-2481](https://github.com/hashicorp/vault/pull/2481)]
+ * storage/swift: Allow domain values for V3 authentication [[GH-2554](https://github.com/hashicorp/vault/pull/2554)]
+ * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more
+   cleanup cases [[GH-2452](https://github.com/hashicorp/vault/pull/2452)]
+
+BUG FIXES:
+
+ * api: Respect a configured path in Vault's address [[GH-2588](https://github.com/hashicorp/vault/pull/2588)]
+ * auth/aws-ec2: New bounds added as criteria to allow role creation [[GH-2600](https://github.com/hashicorp/vault/pull/2600)]
+ * auth/ldap: Don't lowercase groups attached to users [[GH-2613](https://github.com/hashicorp/vault/pull/2613)]
+ * cli: Don't panic if `vault write` is used with the `force` flag but no path
+   [[GH-2674](https://github.com/hashicorp/vault/pull/2674)]
+ * core: Help operations are now forwarded to the active node, since standbys
+   may not have the appropriate info [[GH-2677](https://github.com/hashicorp/vault/pull/2677)]
+ * replication: Fix enabling secondaries when certain mounts already existed on
+   the primary
+ * secret/mssql: Update mssql driver to support queries with colons [[GH-2610](https://github.com/hashicorp/vault/pull/2610)]
+ * secret/pki: Don't lowercase O/OU values in certs [[GH-2555](https://github.com/hashicorp/vault/pull/2555)]
+ * secret/pki: Don't attempt to validate IP SANs if none are provided [[GH-2574](https://github.com/hashicorp/vault/pull/2574)]
+ * secret/ssh: Don't automatically lowercase principals in issued SSH certs
+   [[GH-2591](https://github.com/hashicorp/vault/pull/2591)]
+ * storage/consul: Properly handle state events rather than timing out
+   [[GH-2548](https://github.com/hashicorp/vault/pull/2548)]
+ * storage/etcd3: Ensure locks are released if client is improperly shut down
+   [[GH-2526](https://github.com/hashicorp/vault/pull/2526)]
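+
+A minimal sketch (an assumption-laden illustration, not from the release
+notes) of the new TOTP backend via the Go API client; the mount path `totp`,
+key name `my-user`, and account details are all illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Mount the backend; the path and key name are illustrative.
+	if err := client.Sys().Mount("totp", &api.MountInput{Type: "totp"}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Ask Vault to generate a new key for this account.
+	if _, err := client.Logical().Write("totp/keys/my-user", map[string]interface{}{
+		"generate":     true,
+		"issuer":       "Vault",
+		"account_name": "user@example.com",
+	}); err != nil {
+		log.Fatal(err)
+	}
+
+	// Retrieve a current one-time password on demand.
+	code, err := client.Logical().Read("totp/code/my-user")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(code.Data["code"])
+}
+```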
+
+## 0.7.0 (March 21st, 2017)
+
+SECURITY:
+
+ * Common name not being validated when `exclude_cn_from_sans` option used in
+   `pki` backend: When using a role in the `pki` backend that specified the
+   `exclude_cn_from_sans` option, the common name would not then be properly
+   validated against the role's constraints. This has been fixed. We recommend
+   that any users of this feature upgrade to 0.7 as soon as feasible.
+
+DEPRECATIONS/CHANGES:
+
+ * List Operations Always Use Trailing Slash: Any list operation, whether via
+   the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
+   have a trailing slash. This makes policy writing more predictable, as it
+   means clients will no longer succeed or fail based on which client or HTTP
+   verb they're using. However, it also means that policies allowing `list`
+   capability must be carefully checked to ensure that they contain a trailing
+   slash; some policies may need to be split into multiple stanzas to
+   accommodate.
+ * PKI Defaults to Unleased Certificates: When issuing certificates from the
+   PKI backend, by default, no leases will be issued. If you want to manually
+   revoke a certificate, its serial number can be used with the `pki/revoke`
+   endpoint. Issuing leases is still possible by enabling the `generate_lease`
+   toggle in PKI role entries (this will default to `true` for upgrades, to
+   keep existing behavior), which will allow using lease IDs to revoke
+   certificates. For installations issuing large numbers of certificates (tens
+   to hundreds of thousands, or millions), this will significantly improve
+   Vault startup time since leases associated with these certificates will not
+   have to be loaded; however note that it also means that revocation of a
+   token used to issue certificates will no longer add these certificates to a
+   CRL. If this behavior is desired or needed, consider keeping leases enabled
+   and ensuring lifetimes are reasonable, and issue long-lived certificates via
+   a different role with leases disabled.
+
+FEATURES:
+
+ * **Replication (Enterprise)**: Vault Enterprise now has support for creating
+   a multi-datacenter replication set between clusters. The current replication
+   offering is based on an asynchronous primary/secondary (1:N) model that
+   replicates static data while keeping dynamic data (leases, tokens)
+   cluster-local, focusing on horizontal scaling for high-throughput and
+   high-fanout deployments.
+ * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault + Enterprise UI now supports looking up and rotating response wrapping tokens, + as well as creating tokens with arbitrary values inside. It also now + supports replication functionality, enabling the configuration of a + replication set in the UI. + * **Expanded Access Control Policies**: Access control policies can now + specify allowed and denied parameters -- and, optionally, their values -- to + control what a client can and cannot submit during an API call. Policies can + also specify minimum/maximum response wrapping TTLs to both enforce the use + of response wrapping and control the duration of resultant wrapping tokens. + See the [policies concepts + page](https://www.vaultproject.io/docs/concepts/policies.html) for more + information. + * **SSH Backend As Certificate Authority**: The SSH backend can now be + configured to sign host and user certificates. Each mount of the backend + acts as an independent signing authority. The CA key pair can be configured + for each mount and the public key is accessible via an unauthenticated API + call; additionally, the backend can generate a public/private key pair for + you. We recommend using separate mounts for signing host and user + certificates. + +IMPROVEMENTS: + + * api/request: Passing username and password information in API request + [GH-2469] + * audit: Logging the token's use count with authentication response and + logging the remaining uses of the client token with request [GH-2437] + * auth/approle: Support for restricting the number of uses on the tokens + issued [GH-2435] + * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID, + Subnet ID and Region [GH-2407] + * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the + username if not explicitly set on the command line when authenticating + [GH-2154] + * audit: Support adding a configurable prefix (such as `@cee`) before each + line [GH-2359] + * core: Canonicalize list operations to use a trailing slash [GH-2390] + * core: Add option to disable caching on a per-mount level [GH-2455] + * core: Add ability to require valid client certs in listener config [GH-2457] + * physical/dynamodb: Implement a session timeout to avoid having to use + recovery mode in the case of an unclean shutdown, which makes HA much safer + [GH-2141] + * secret/pki: O (Organization) values can now be set to role-defined values + for issued/signed certificates [GH-2369] + * secret/pki: Certificates issued/signed from PKI backend do not generate + leases by default [GH-2403] + * secret/pki: When using DER format, still return the private key type + [GH-2405] + * secret/pki: Add an intermediate to the CA chain even if it lacks an + authority key ID [GH-2465] + * secret/pki: Add role option to use CSR SANs [GH-2489] + * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208] + * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint + and also return it when CA key pair is generated [GH-2483] + +BUG FIXES: + + * audit: When auditing headers use case-insensitive comparisons [GH-2362] + * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374] + * auth/okta: Fix panic if user had no local groups and/or policies set + [GH-2367] + * command/server: Fix parsing of redirect address when port is not mentioned + [GH-2354] + * physical/postgresql: Fix listing returning incorrect results if there were + multiple levels of children [GH-2393] + +## 0.6.5 
(February 7th, 2017)
+
+FEATURES:
+
+ * **Okta Authentication**: A new Okta authentication backend allows you to use
+   Okta usernames and passwords to authenticate to Vault. If provided with an
+   appropriate Okta API token, group membership can be queried to assign
+   policies; users and groups can be defined locally as well.
+ * **RADIUS Authentication**: A new RADIUS authentication backend allows using
+   a RADIUS server to authenticate to Vault. Policies can be configured for
+   specific users or for any authenticated user.
+ * **Exportable Transit Keys**: Keys in `transit` can now be marked as
+   `exportable` at creation time. This allows a properly ACL'd user to retrieve
+   the associated signing key, encryption key, or HMAC key. The `exportable`
+   value is returned on a key policy read and cannot be changed, so if a key is
+   marked `exportable` it will always be exportable, and if it is not it will
+   never be exportable.
+ * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations
+   in the transit backend now support processing multiple input items in one
+   call, returning the output of each item in the response.
+ * **Configurable Audited HTTP Headers**: You can now specify headers that you
+   want to have included in each audit entry, along with whether each header
+   should be HMAC'd or kept plaintext. This can be useful for adding additional
+   client or network metadata to the audit logs.
+ * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
+   backend, allowing creation, viewing and editing of named keys as well as using
+   those keys to perform supported transit operations directly in the UI.
+ * **Socket Audit Backend**: A new socket audit backend allows audit logs to be
+   sent through TCP, UDP, or UNIX sockets.
+
+IMPROVEMENTS:
+
+ * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
+ * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
+ * auth/github: Support listing teams and users [GH-2261]
+ * auth/ldap: Support adding policies to local users directly, in addition to
+   local groups [GH-2152]
+ * command/server: Add ability to select and prefer server cipher suites
+   [GH-2293]
+ * core: Add a nonce to unseal operations as a check (useful mostly for
+   support, not as a security principle) [GH-2276]
+ * duo: Added ability to supply extra context to Duo pushes [GH-2118]
+ * physical/consul: Add option for setting consistency mode on Consul gets
+   [GH-2282]
+ * physical/etcd: Full v3 API support; code will autodetect which API version
+   to use. The v3 code path is significantly less complicated and may be much
+   more stable. [GH-2168]
+ * secret/pki: Allow specifying OU entries in generated certificate subjects
+   [GH-2251]
+ * secret mount ui (Enterprise): the secret mount list now shows all mounted
+   backends even if the UI cannot browse them. Additional backends can now be
+   mounted from the UI as well.
+
+BUG FIXES:
+
+ * auth/token: Fix regression in 0.6.4 where using token store roles as a
+   blacklist (with only `disallowed_policies` set) would not work in most
+   circumstances [GH-2286]
+ * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
+ * secret/cassandra: Stop a connection leak that could occur on active node
+   failover [GH-2313]
+ * secret/pki: When using `sign-verbatim`, don't require a role and use the
+   CSR's common name [GH-2243]
+
+## 0.6.4 (December 16, 2016)
+
+SECURITY:
+
+Further details about these security issues can be found in the 0.6.4 upgrade
+guide.
+ + * `default` Policy Privilege Escalation: If a parent token did not have the + `default` policy attached to its token, it could still create children with + the `default` policy. This is no longer allowed (unless the parent has + `sudo` capability for the creation path). In most cases this is low severity + since the access grants in the `default` policy are meant to be access + grants that are acceptable for all tokens to have. + * Leases Not Expired When Limited Use Token Runs Out of Uses: When using + limited-use tokens to create leased secrets, if the limited-use token was + revoked due to running out of uses (rather than due to TTL expiration or + explicit revocation) it would fail to revoke the leased secrets. These + secrets would still be revoked when their TTL expired, limiting the severity + of this issue. An endpoint has been added (`auth/token/tidy`) that can + perform housekeeping tasks on the token store; one of its tasks can detect + this situation and revoke the associated leases. + +FEATURES: + + * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing, + creating, and editing policies. + +IMPROVEMENTS: + + * http: Vault now sets a `no-store` cache control header to make it more + secure in setups that are not end-to-end encrypted [GH-2183] + +BUG FIXES: + + * auth/ldap: Don't panic if dialing returns an error and starttls is enabled; + instead, return the error [GH-2188] + * ui (Enterprise): Submitting an unseal key now properly resets the + form so a browser refresh isn't required to continue. + +## 0.6.3 (December 6, 2016) + +DEPRECATIONS/CHANGES: + + * Request size limitation: A maximum request size of 32MB is imposed to + prevent a denial of service attack with arbitrarily large requests [GH-2108] + * LDAP denies passwordless binds by default: In new LDAP mounts, or when + existing LDAP mounts are rewritten, passwordless binds will be denied by + default. The new `deny_null_bind` parameter can be set to `false` to allow + these. [GH-2103] + * Any audit backend activated satisfies conditions: Previously, when a new + Vault node was taking over service in an HA cluster, all audit backends were + required to be loaded successfully to take over active duty. This behavior + now matches the behavior of the audit logging system itself: at least one + audit backend must successfully be loaded. The server log contains an error + when this occurs. This helps keep a Vault HA cluster working when there is a + misconfiguration on a standby node. [GH-2083] + +FEATURES: + + * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI + that offers access to a number of features, including init/unsealing/sealing, + authentication via userpass or LDAP, and K/V reading/writing. The capability + set of the UI will be expanding rapidly in further releases. To enable it, + set `ui = true` in the top level of Vault's configuration file and point a + web browser at your Vault address. + * **Google Cloud Storage Physical Backend**: You can now use GCS for storing + Vault data [GH-2099] + +IMPROVEMENTS: + + * auth/github: Policies can now be assigned to users as well as to teams + [GH-2079] + * cli: Set the number of retries on 500 down to 0 by default (no retrying). It + can be very confusing to users when there is a pause while the retries + happen if they haven't explicitly set it. With request forwarding the need + for this is lessened anyways. 
[GH-2093]
+ * core: Response wrapping is now allowed to be specified by backend responses
+   (requires backends gaining support) [GH-2088]
+ * physical/consul: When announcing service, use the scheme of the Vault server
+   rather than the Consul client [GH-2146]
+ * secret/consul: Added listing functionality to roles [GH-2065]
+ * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
+   enable customization of user revocation SQL statements [GH-2033]
+ * secret/transit: Add listing of keys [GH-1987]
+
+BUG FIXES:
+
+ * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
+   Vault 0.6.1 and older [GH-2014]
+ * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
+ * auth/approle: Create the index for the role_id properly [GH-2004]
+ * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
+   instance-profile ARN [GH-2035]
+ * auth/ldap: Avoid leaking connections on login [GH-2130]
+ * command/path-help: Use the actual error generated by Vault rather than
+   always using 500 when there is a path help error [GH-2153]
+ * command/ssh: Use temporary file for identity and ensure its deletion before
+   the command returns [GH-2016]
+ * cli: Fix error printing values with `-field` if the values contained
+   formatting directives [GH-2109]
+ * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
+ * core: Fix bug where a failure to come up as active node (e.g. if an audit
+   backend failed) could lead to deadlock [GH-2083]
+ * physical/mysql: Fix potential crash during setup due to a query failure
+   [GH-2105]
+ * secret/consul: Fix panic on user error [GH-2145]
+
+## 0.6.2 (October 5, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Convergent Encryption v2: New keys in `transit` using convergent mode will
+   use a new nonce derivation mechanism rather than require the user to supply
+   a nonce. While not explicitly increasing security, it minimizes the
+   likelihood that a user will use the mode improperly and impact the security
+   of their keys. Keys in convergent mode that were created in v0.6.1 will
+   continue to work with the same mechanism (user-supplied nonce).
+ * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
+   `etcd` storage backend now requires that `ha_enabled` be explicitly
+   specified in the configuration file. The backend currently has known broken
+   HA behavior, so this flag discourages use by default without explicitly
+   enabling it. If you are using this functionality, when upgrading, you should
+   set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
+ * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
+   the default was 30 days, but moving it to 32 days allows some operations
+   (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
+   job.
+ * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
+   no longer part of request URLs. The GET and DELETE operations are now moved
+   to new endpoints (`/lookup` and `/destroy`) which consume the input from
+   the body and not the URL.
+ * AppRole requires at least one constraint: previously it was sufficient to
+   turn off all AppRole authentication constraints (secret ID, CIDR block) and
+   use the role ID only. It is now required that at least one additional
+   constraint is enabled. Existing roles are unaffected, but any new roles or
+   updated roles will require this.
+ * Reading wrapped responses from `cubbyhole/response` is deprecated.
The
+   `sys/wrapping/unwrap` endpoint should be used instead as it provides
+   additional security, auditing, and other benefits. The ability to read
+   directly will be removed in a future release.
+ * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
+   but is now enabled by default. This can be disabled via the
+   `"disable_clustering"` parameter in Vault's
+   [config](https://www.vaultproject.io/docs/config/index.html), or per-request
+   with the `X-Vault-No-Request-Forwarding` header.
+ * In prior versions a bug caused the `bound_iam_role_arn` value in the
+   `aws-ec2` authentication backend to actually use the instance profile ARN.
+   This has been corrected, but as a result there is a behavior change. To
+   match using the instance profile ARN, a new parameter
+   `bound_iam_instance_profile_arn` has been added. Existing roles will
+   automatically transfer the value over to the correct parameter, but the next
+   time the role is updated, the new meanings will take effect.
+
+FEATURES:
+
+ * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
+   approle can now specify a list of CIDR blocks from which requests to
+   generate secret IDs must originate. If an approle already has CIDR
+   restrictions specified, the CIDR restrictions on the secret ID should be a
+   subset of those specified on the role [GH-1910]
+ * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
+   token created at initialization time can now be PGP encrypted [GH-1883]
+ * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
+   when a CA cert is being supplied as a signed root or intermediate, a trust
+   chain of arbitrary length. The chain is returned as a parameter at
+   certificate issue/sign time and is retrievable independently as well.
+   [GH-1694]
+ * **Response Wrapping Enhancements**: There are new endpoints to look up
+   response wrapped token parameters; wrap arbitrary values; rotate wrapping
+   tokens; and unwrap with enhanced validation. In addition, list operations
+   can now be response-wrapped. [GH-1927]
+ * **Transit Features**: The `transit` backend now supports generating random
+   bytes and SHA sums; HMACs; and signing and verification functionality using
+   EC keys (P-256 curve). A sketch of the new HMAC workflow follows this list.
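+
+A minimal sketch (illustrative, not from the release notes) of the new HMAC
+generate/verify workflow via the Go API client; the key name `my-key` is an
+assumption and transit is assumed to be mounted at `transit/`:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	input := base64.StdEncoding.EncodeToString([]byte("important message"))
+
+	// "my-key" is an illustrative transit key name.
+	h, err := client.Logical().Write("transit/hmac/my-key", map[string]interface{}{
+		"input": input,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Verify the HMAC we just produced against the same input.
+	v, err := client.Logical().Write("transit/verify/my-key", map[string]interface{}{
+		"input": input,
+		"hmac":  h.Data["hmac"],
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("valid:", v.Data["valid"])
+}
+```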
+
+IMPROVEMENTS:
+
+ * api: Return error when an invalid (as opposed to incorrect) unseal key is
+   submitted, rather than ignoring it [GH-1782]
+ * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
+ * api: Rekey operation now redirects from standbys to master [GH-1862]
+ * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
+   re-open the log file, making it easier to rotate audit logs [GH-1953]
+ * auth/aws-ec2: EC2 instances can now be authenticated by presenting the
+   identity document and its SHA256 RSA digest [GH-1961]
+ * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
+   prefix match instead of exact match [GH-1943]
+ * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
+   refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
+   to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
+ * auth/aws-ec2: Backend generates the nonce by default and clients can
+   explicitly disable reauthentication by setting empty nonce [GH-1889]
+ * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
+ * command/format: The `format` flag on select CLI commands takes `yml` as an
+   alias for `yaml` [GH-1899]
+ * core: Allow the size of the read cache to be set via the config file, and
+   change the default value to 1MB (from 32KB) [GH-1784]
+ * core: Allow single and two-character path parameters for most places
+   [GH-1811]
+ * core: Allow list operations to be response-wrapped [GH-1814]
+ * core: Provide better protection against timing attacks in Shamir code
+   [GH-1877]
+ * core: Unmounting/disabling backends no longer returns an error if the mount
+   didn't exist. This is in line with elsewhere in Vault's API where `DELETE`
+   is an idempotent operation.
[GH-1903]
+ * credential/approle: At least one constraint is required to be enabled while
+   creating and updating a role [GH-1882]
+ * secret/cassandra: Added consistency level for use with roles [GH-1931]
+ * secret/mysql: SQL for revoking user can be configured on the role [GH-1914]
+ * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
+   keys [GH-1812]
+ * secret/transit: Empty plaintext values are now allowed [GH-1874]
+
+BUG FIXES:
+
+ * audit: Fix panic being caused by some values logging as underlying Go types
+   instead of formatted strings [GH-1912]
+ * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920]
+ * auth/approle: Prevent secret IDs and secret ID accessors from being logged
+   in plaintext in audit logs [GH-1947]
+ * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
+   but the instance is running [GH-1884]
+ * auth/token: Fixed metadata being omitted from the token lookup response by
+   gracefully handling token entry upgrade [GH-1924]
+ * cli: Don't error on newline in token file [GH-1774]
+ * core: Pass back content-type header for forwarded requests [GH-1791]
+ * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
+ * core: Fix potential deadlock on unmount/remount [GH-1793]
+ * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
+ * physical/zookeeper: Remove empty directories from the `zookeeper` storage
+   backend and add a fix to the `file` storage backend's logic [GH-1964]
+ * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
+   parameter [39b75c6]
+ * secret/aws: Mark STS secrets as non-renewable [GH-1804]
+ * secret/cassandra: Properly store session for re-use [GH-1802]
+ * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
+
+## 0.6.1 (August 22, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to
+   connect to the HA cluster. We recommend following our [general upgrade
+   instructions](https://www.vaultproject.io/docs/install/upgrade.html) in
+   addition to 0.6.1-specific upgrade instructions to ensure that this is not
+   an issue.
+ * Status codes for sealed/uninitialized Vaults have changed to `503`/`501`
+   respectively. See the [version-specific upgrade
+   guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for
+   more details.
+ * Root tokens (tokens with the `root` policy) can no longer be created except
+   by another root token or the `generate-root` endpoint.
+ * Issued certificates from the `pki` backend against new roles created or
+   modified after upgrading will contain a set of default key usages.
+ * The `dynamodb` physical data store no longer supports HA by default. It has
+   some non-ideal behavior around failover that was causing confusion. See the
+   [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled)
+   for information on enabling HA mode. It is very important that this
+   configuration is added _before upgrading_.
+ * The `ldap` backend no longer searches for `memberOf` groups as part of its
+   normal flow. Instead, the desired group filter must be specified. This fixes
+   some errors and increases speed for directories with different structures,
+   but if this behavior has been relied upon, ensure that you see the upgrade
+   notes _before upgrading_.
+ * `app-id` is now deprecated with the addition of the new AppRole backend.
+ There are no plans to remove it, but we encourage using AppRole whenever + possible, as it offers enhanced functionality and can accommodate many more + types of authentication paradigms. + +FEATURES: + + * **AppRole Authentication Backend**: The `approle` backend is a + machine-oriented authentication backend that provides a similar concept to + App-ID while adding many missing features, including a pull model that + allows for the backend to generate authentication credentials rather than + requiring operators or other systems to push credentials in. It should be + useful in many more situations than App-ID. The inclusion of this backend + deprecates App-ID. [GH-1426] + * **Request Forwarding**: Vault servers can now forward requests to each other + rather than redirecting clients. This feature is off by default in 0.6.1 but + will be on by default in the next release. See the [HA concepts + page](https://www.vaultproject.io/docs/concepts/ha.html) for information on + enabling and configuring it. [GH-443] + * **Convergent Encryption in `Transit`**: The `transit` backend now supports a + convergent encryption mode where the same plaintext will produce the same + ciphertext. Although very useful in some situations, this has potential + security implications, which are mostly mitigated by requiring the use of + key derivation when convergent encryption is enabled. See [the `transit` + backend + documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) + for more details. [GH-1537] + * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates + to define group filters, providing the capability to support some + directories that could not easily be supported before (especially specific + Active Directory setups with nested groups). [GH-1388] + * **Key Usage Control in `PKI`**: Issued certificates from roles created or + modified after upgrading contain a set of default key usages for increased + compatibility with OpenVPN and some other software. This set can be changed + when writing a role definition. Existing roles are unaffected. [GH-1552] + * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` + error code will now retry after a backoff. The maximum total number of + retries (including disabling this functionality) can be set with an + environment variable. See the [environment variable + documentation](https://www.vaultproject.io/docs/commands/environment.html) + for more details. [GH-1594] + * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` + will perform service discovery using Consul. When only one node is discovered, + it will be initialized and when more than one node is discovered, they will + be output for easy selection. See `vault init --help` for more details. [GH-1642] + * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database + credentials based on configured roles. Sponsored by + [CommerceHub](http://www.commercehub.com/). [GH-1414] + * **Circonus Metrics Integration**: Vault can now send metrics to + [Circonus](http://www.circonus.com/). See the [configuration + documentation](https://www.vaultproject.io/docs/config/index.html) for + details. [GH-1646] + +IMPROVEMENTS: + + * audit: Added a unique identifier to each request which will also be found in + the request portion of the response. 
[GH-1650] + * auth/aws-ec2: Added a new constraint `bound_account_id` to the role + [GH-1523] + * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role + [GH-1522] + * auth/aws-ec2: Added `ttl` field for the role [GH-1703] + * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` + have the minimum TLS version set to 1.2 by default. This is configurable. + * auth/token: Added endpoint to list accessors [GH-1676] + * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] + * auth/token: `root` or `sudo` tokens can now create periodic tokens via + `auth/token/create`; additionally, the same token can now be periodic and + have an explicit max TTL [GH-1725] + * build: Add support for building on Solaris/Illumos [GH-1726] + * cli: Output formatting in the presence of warnings in the response object + [GH-1533] + * cli: `vault auth` command supports a `-path` option to take in the path at + which the auth backend is enabled, thereby allowing authenticating against + different paths using the command options [GH-1532] + * cli: `vault auth -methods` will now display the config settings of the mount + [GH-1531] + * cli: `vault read/write/unwrap -field` now allows selecting token response + fields [GH-1567] + * cli: `vault write -field` now allows selecting wrapped response fields + [GH-1567] + * command/status: Version information and cluster details added to the output + of `vault status` command [GH-1671] + * core: Response wrapping is now enabled for login endpoints [GH-1588] + * core: The duration of leadership is now exported via events through + telemetry [GH-1625] + * core: `sys/capabilities-self` is now accessible as part of the `default` + policy [GH-1695] + * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] + * core: Unseal keys will now be returned in both hex and base64 forms, and + either can be used [GH-1734] + * core: Responses from most `/sys` endpoints now return normal `api.Secret` + structs in addition to the values they carried before. 
This means that + response wrapping can now be used with most authenticated `/sys` operations + [GH-1699] + * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576] + * physical/consul: Allowing additional tags to be added to Consul service + registration via `service_tags` option [GH-1643] + * secret/aws: Listing of roles is now supported [GH-1546] + * secret/cassandra: Add `connect_timeout` value for Cassandra connection + configuration [GH-1581] + * secret/mssql,mysql,postgresql: Reading of connection settings is supported + in all the SQL backends [GH-1515] + * secret/mysql: Added optional maximum idle connections value to MySQL + connection configuration [GH-1635] + * secret/mysql: Use a combination of the role name and token display name in + generated user names and allow the length to be controlled [GH-1604] + * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed + in via one of four ways: a semicolon-delimited string, a base64-delimited + string, a serialized JSON string array, or a base64-encoded serialized JSON + string array [GH-1686] + * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config; the role + name is now returned as part of the `verify` API response + * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680] + * sys/health: Added version information to the response of health status + endpoint [GH-1647] + * sys/health: Cluster information is now returned as part of health status when + Vault is unsealed [GH-1671] + * sys/mounts: MountTable data is compressed before serializing to accommodate + thousands of mounts [GH-1693] + * website: The [token + concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has + been completely rewritten [GH-1725] + +BUG FIXES: + + * auth/aws-ec2: Added a nil check for stored whitelist identity object + during renewal [GH-1542] + * auth/cert: Fix panic if no client certificate is supplied [GH-1637] + * auth/token: Don't report that a non-expiring root token is renewable, as + attempting to renew it results in an error [GH-1692] + * cli: Don't retry a command when a redirection is received [GH-1724] + * core: Fix regression causing status codes to be `400` in most non-5xx error + cases [GH-1553] + * core: Fix panic that could occur during a leadership transition [GH-1627] + * physical/postgres: Remove use of prepared statements as this causes + connection multiplexing software to break [GH-1548] + * physical/consul: Multiple Vault nodes on the same machine leading to check ID + collisions were resulting in incorrect health check responses [GH-1628] + * physical/consul: Fix deregistration of health checks on exit [GH-1678] + * secret/postgresql: Check for existence of role before attempting deletion + [GH-1575] + * secret/postgresql: Handle revoking roles that have privileges on sequences + [GH-1573] + * secret/postgresql(,mysql,mssql): Fix incorrect use of database over + transaction object which could lead to connection exhaustion [GH-1572] + * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634] + * secret/pki: Fix adding email addresses as SANs [GH-1688] + * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727] + * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715] + +## 0.6.0 (June 14th, 2016) + +SECURITY: + + * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via + lease IDs, which incorporate path information) and + `auth/token/revoke-prefix` was intended to revoke 
prefixes of tokens (using + the tokens' paths and, since 0.5.2, role information), in implementation + they both behaved exactly the same way since a single component in Vault is + responsible for managing lifetimes of both, and the type of the tracked + lifetime was not being checked. The end result was that either endpoint + could revoke both secret leases and tokens. We consider this a very minor + security issue as there are a number of mitigating factors: both endpoints + require `sudo` capability in addition to write capability, preventing + blanket ACL path globs from providing access; both work by using the prefix + to revoke as a part of the endpoint path, allowing them to be properly + ACL'd; and both are intended for emergency scenarios and users should + already not generally have access to either one. In order to prevent + confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and + `sys/revoke-prefix` will be meant for both leases and tokens instead. + +DEPRECATIONS/CHANGES: + + * `auth/token/revoke-prefix` has been removed. See the security notice for + details. [GH-1280] + * Vault will now automatically register itself as the `vault` service when + using the `consul` backend and will perform its own health checks. See + the Consul backend documentation for information on how to disable + auto-registration and service checks. + * List operations that do not find any keys now return a `404` status code + rather than an empty response object [GH-1365] + * CA certificates issued from the `pki` backend no longer have associated + leases, and any CA certs already issued will ignore revocation requests from + the lease manager. This is to prevent CA certificates from being revoked + when the token used to issue the certificate expires; it was not obvious + to users that the token lifetime needed to be at least as long as that of a + potentially very long-lived CA cert. + +FEATURES: + + * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS + EC2 instances allowing automated retrieval of Vault tokens. Unlike most + Vault authentication backends, this backend does not require first deploying + or provisioning security-sensitive credentials (tokens, username/password, + client certificates, etc). Instead, it treats AWS as a Trusted Third Party + and uses the cryptographically signed dynamic metadata information that + uniquely represents each EC2 instance. [Vault + Enterprise](https://www.hashicorp.com/vault.html) customers have access to a + turnkey client that speaks the backend API and makes access to a Vault token + easy. + * **Response Wrapping**: Nearly any response within Vault can now be wrapped + inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole + Authentication + Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) + mechanism to its logical conclusion. Retrieving the original response is as + simple as a single API command or the new `vault unwrap` command. This makes + secret distribution easier and more secure, including secure introduction. + * **Azure Physical Backend**: You can now use Azure blob object storage as + your Vault physical data store [GH-1266] + * **Swift Physical Backend**: You can now use Swift blob object storage as + your Vault physical data store [GH-1425] + * **Consul Backend Health Checks**: The Consul backend will automatically + register a `vault` service and perform its own health checking. 
By default + the active node can be found at `active.vault.service.consul` and standby + nodes at `standby.vault.service.consul`. Sealed vaults are marked + critical and are not listed by default in Consul's service discovery. See + the documentation for details. [GH-1349] + * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on + tokens that do not honor changes in the system- or mount-set values. This is + useful, for instance, when the max TTL of the system or the `auth/token` + mount must be set high to accommodate certain needs but you want more + granular restrictions on tokens being issued directly from the Token + authentication backend at `auth/token`. [GH-1399] + * **Non-Renewable Tokens**: When creating tokens directly through the token + authentication backend, you can now specify in both token store roles and + the API whether or not a token should be renewable, defaulting to `true`. + * **RabbitMQ Secret Backend**: Vault can now generate credentials for + RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] + +IMPROVEMENTS: + + * audit: Add the DisplayName value to the copy of the Request object embedded + in the associated Response, to match the original Request object [GH-1387] + * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] + * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. + A particular exception is the set of current MFA paths. A few paths in `token` and + `sys` also require `root` or `sudo`. [GH-1478] + * command/auth: Restore the previous authenticated token if the `auth` command + fails to authenticate the provided token [GH-1233] + * command/write: `-format` and `-field` can now be used with the `write` + command [GH-1228] + * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] + * core: Don't keep lease timers around when tokens are revoked [GH-1277] + * core: If using the `disable_cache` option, caches for the policy store and + the `transit` backend are now disabled as well [GH-1346] + * credential/cert: Renewal requests are rejected if the set of policies has + changed since the token was issued [GH-477] + * credential/cert: Check CRLs for specific non-CA certs configured in the + backend [GH-1404] + * credential/ldap: If `groupdn` is not configured, skip searching LDAP and + only return policies for local groups, plus a warning [GH-1283] + * credential/ldap: `vault list` support for users and groups [GH-1270] + * credential/ldap: Support for the `memberOf` attribute for group membership + searching [GH-1245] + * credential/userpass: Add list support for users [GH-911] + * credential/userpass: Remove user configuration paths from requiring sudo, in + favor of normal ACL mechanisms [GH-1312] + * credential/token: Sanitize policies and add `default` policies in appropriate + places [GH-1235] + * credential/token: Setting the renewable status of a token is now possible + via `vault token-create` and the API. The default is true, but tokens can be + specified as non-renewable. [GH-1499] + * secret/aws: Use chain credentials to allow environment/EC2 instance/shared + providers [GH-307] + * secret/aws: Support for STS AssumeRole functionality [GH-1318] + * secret/consul: Reading Consul access configuration is now supported. 
The response + will contain non-sensitive information only [GH-1445] + * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to + DNS or Email Subject Alternate Names [GH-1220] + * secret/pki: Added list support for certificates [GH-1466] + * sys/capabilities: Enforce ACL checks for requests that query the capabilities + of a token on a given path [GH-1221] + * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] + +BUG FIXES: + + * command/read: Fix panic when using `-field` with a non-string value [GH-1308] + * command/token-lookup: Fix TTL showing as 0 depending on how a token was + created. This only affected the value shown at lookup, not the token + behavior itself. [GH-1306] + * command/various: Tell the JSON decoder to not convert all numbers to floats; + fixes various places where numbers were showing up in scientific + notation + * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags + over their respective env vars [GH-1480] + * command/ssh: Provided option to disable host key checking. The automated + variant of `vault ssh` command uses `sshpass` which was failing to handle + host key checking presented by the `ssh` binary. [GH-1473] + * core: Properly persist mount-tuned TTLs for auth backends [GH-1371] + * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372] + * credential/github: Make organization comparison case-insensitive during + login [GH-1359] + * credential/github: Fix panic when renewing a token created with some earlier + versions of Vault [GH-1510] + * credential/github: The token used to log in via `vault auth` can now be + specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511] + * credential/ldap: Fix problem where certain error conditions when configuring + or opening LDAP connections would cause a panic instead of returning a useful + error message [GH-1262] + * credential/token: Fall back to normal parent-token semantics if + `allowed_policies` is empty for a role. Using `allowed_policies` of + `default` resulted in the same behavior anyway. [GH-1276] + * credential/token: Fix issues renewing tokens when using the "suffix" + capability of token roles [GH-1331] + * credential/token: Fix lookup via POST showing the request token instead of + the desired token [GH-1354] + * credential/various: Fix renewal conditions when `default` policy is not + contained in the backend config [GH-1256] + * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353] + * secret/consul: Use non-pooled Consul API client to avoid leaving files open + [GH-1428] + * secret/pki: Don't check whether a certificate is destined to be a CA + certificate if sign-verbatim endpoint is used [GH-1250] + +## 0.5.3 (May 27th, 2016) + +SECURITY: + + * Consul ACL Token Revocation: An issue was reported to us indicating that + generated Consul ACL tokens were not being properly revoked. Upon + investigation, we found that this behavior was reproducible in a specific + scenario: when a generated lease for a Consul ACL token had been renewed + prior to revocation. In this case, the generated token was not being + properly persisted internally through the renewal function, leading to an + error during revocation due to the missing token. Unfortunately, this was + coded as a user error rather than an internal error, and the revocation + logic was expecting internal errors if revocation failed. 
As a result, the + revocation logic believed the revocation to have succeeded when it in fact + failed, causing the lease to be dropped while the token was still valid + within Consul. In this release, the Consul backend properly persists the + token through renewals, and the revocation logic has been changed to + consider any error type to have been a failure to revoke, causing the lease + to persist and revocation to be attempted later. + +We have written an example shell script that searches through Consul's ACL +tokens and looks for those generated by Vault, which can be used as a template +for a revocation script as deemed necessary for any particular security +response. The script is available at +https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 + +Please note that any outstanding leases for Consul tokens produced prior to +0.5.3 that have been renewed will continue to exhibit this behavior. As a +result, we recommend revoking all tokens produced by the backend and +issuing new ones; or, if needed, a more advanced variant of the provided example +could use the timestamp embedded in each generated token's name to decide which +tokens are too old and should be deleted. This could then be run periodically +up until the maximum lease time for any outstanding pre-0.5.3 tokens has +expired. + +This is a security-only release. There are no other code changes since 0.5.2. +The binaries have one additional change: they are built against Go 1.6.1 rather +than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming +language itself. + +## 0.5.2 (March 16th, 2016) + +FEATURES: + + * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based + on configured roles [GH-998] + * **Token Accessors**: Vault now provides an accessor with each issued token. + This accessor is an identifier that can be used for a limited set of + actions, notably for token revocation. This value can be logged in + plaintext to audit logs, and in combination with the plaintext metadata + logged to audit logs, provides a searchable and straightforward way to + revoke particular users' or services' tokens in many cases. To enable + plaintext audit logging of these accessors, set `hmac_accessor=false` when + enabling an audit backend; a usage sketch follows this list. + * **Token Credential Backend Roles**: Roles can now be created in the `token` + credential backend that allow modifying token behavior in ways that are not + otherwise exposed or easily delegated. This allows creating tokens with a + fixed set (or subset) of policies (rather than a subset of the calling + token's), periodic tokens with a fixed TTL but no expiration, specified + prefixes, and orphans. + * **Listener Certificate Reloading**: Vault's configured listeners now reload + their TLS certificate and private key when the Vault process receives a + SIGHUP. 
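To make the token accessor workflow above concrete, here is a minimal sketch using the official Go API client (`github.com/hashicorp/vault/api`). It assumes `VAULT_ADDR` and `VAULT_TOKEN` are set in the environment, that the token is permitted to call the accessor endpoints, and that the accessor value (a placeholder here) was recovered from, e.g., a plaintext audit log entry:

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR; NewClient also picks up VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder accessor value, e.g. copied from an audit log entry.
	accessor := "2c84f488-2133-4ced-87b0-570f93a76830"

	// Inspect the token's properties (policies, TTL, ...) by accessor alone.
	secret, err := client.Auth().Token().LookupAccessor(accessor)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("policies:", secret.Data["policies"])

	// Revoke the token without ever handling the token value itself.
	if err := client.Auth().Token().RevokeAccessor(accessor); err != nil {
		log.Fatal(err)
	}
}
```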
+ +IMPROVEMENTS: + + * auth/token: Endpoints optionally accept tokens from the HTTP body rather + than just from the URLs [GH-1211] + * auth/token,sys/capabilities: Added new endpoints + `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and + `sys/capabilities-accessor`, which enable performing the respective actions + with just the accessor of the tokens, without having access to the actual + token [GH-1188] + * core: Ignore leading `/` in policy paths [GH-1170] + * core: Ignore leading `/` in mount paths [GH-1172] + * command/policy-write: Provided HCL is now validated for format violations, + with helpful information about where the violation occurred + [GH-1200] + * command/server: The initial root token ID when running in `-dev` mode can + now be specified via `-dev-root-token-id` or the environment variable + `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] + * command/server: The listen address when running in `-dev` mode can now be + specified via `-dev-listen-address` or the environment variable + `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] + * command/server: The configured listeners now reload their TLS + certificates/keys when Vault is SIGHUP'd [GH-1196] + * command/step-down: New `vault step-down` command and API endpoint to force + the targeted node to give up active status, but without sealing. The node + will wait ten seconds before attempting to grab the lock again. [GH-1146] + * command/token-renew: Allow no token to be passed in; use `renew-self` in + this case. Change the behavior for any token being passed in to use `renew`. + [GH-1150] + * credential/app-id: Allow `app-id` parameter to be given in the login path; + this causes the `app-id` to be part of the token path, making it easier to + use with `revoke-prefix` [GH-424] + * credential/cert: Non-CA certificates can be used for authentication. They + must be matched exactly (issuer and serial number) for authentication, and + the certificate must carry the client authentication or 'any' extended usage + attributes. [GH-1153] + * credential/cert: Subject and Authority key IDs are output in metadata; this + allows more flexible searching/revocation in the audit logs [GH-1183] + * credential/cert: Support listing configured certs [GH-1212] + * credential/userpass: Add support for `create`/`update` capability + distinction in user path, and add user-specific endpoints to allow changing + the password and policies [GH-1216] + * credential/token: Add roles [GH-1155] + * secret/mssql: Add MSSQL backend [GH-998] + * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` + endpoint [GH-1180] + * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some + other formats [GH-1187] + * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint. 
+ [GH-1154] + * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to + fetch the capabilities of a token on a given path [GH-1171] + * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors + when revoking a lease, necessary in some emergency/failure scenarios + [GH-1168] + * sys: The return codes from `sys/health` can now be user-specified via query + parameters [GH-1199] + +BUG FIXES: + + * logical/cassandra: Apply hyphen/underscore replacement to the entire + generated username, not just the UUID, in order to handle token display name + hyphens [GH-1140] + * physical/etcd: Output actual error when cluster sync fails [GH-1141] + * vault/expiration: Don't skip error responses from the backends + during renewals [GH-1176] + +## 0.5.1 (February 25th, 2016) + +DEPRECATIONS/CHANGES: + + * RSA keys less than 2048 bits are no longer supported in the PKI backend. + 1024-bit keys are considered unsafe and are disallowed in the Internet PKI. + The `pki` backend has enforced SHA256 hashes in signatures from the + beginning, and software that can handle these hashes should be able to + handle larger key sizes. [GH-1095] + * The PKI backend now does not automatically delete expired certificates, + including from the CRL. Doing so could lead to a situation where a time + mismatch between the Vault server and clients could result in a certificate + that would not be considered expired by a client being removed from the CRL. + The new `pki/tidy` endpoint can be used to expunge expired certificates. [GH-1129] + * The `cert` backend now performs a variant of channel binding at renewal time + for increased security. In order to not overly burden clients, a notion of + identity is used. This functionality can be disabled. See the 0.5.1 upgrade + guide for more specific information [GH-1127] + +FEATURES: + + * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of + the audit contract do not allow us to make the results public.) [GH-220] + +IMPROVEMENTS: + + * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control + the SNI header during TLS connections [GH-1131] + * api/health: Add the server's time in UTC to health responses [GH-1117] + * command/rekey and command/generate-root: These now return the status at + attempt initialization time, rather than requiring a separate fetch for the + nonce [GH-1054] + * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/` + paths; use normal ACL behavior instead [GH-468] + * credential/github: The validity of the token used for login will be checked + at renewal time [GH-1047] + * credential/github: The `config` endpoint no longer requires a root token; + normal ACL path matching applies + * deps: Use the standardized Go 1.6 vendoring system + * secret/aws: Inform users of AWS-imposed policy restrictions around STS + tokens if they attempt to use an invalid policy [GH-1113] + * secret/mysql: The MySQL backend now allows disabling verification of the + `connection_url` [GH-1096] + * secret/pki: Submitted CSRs are now verified to have the correct key type and + minimum number of bits according to the role. The exception is intermediate + CA signing and the `sign-verbatim` path [GH-1104] + * secret/pki: New `tidy` endpoint to allow expunging expired certificates. 
+ [GH-1129] + * secret/postgresql: The PostgreSQL backend now allows disabling verification + of the `connection_url` [GH-1096] + * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of + 204 [GH-1086] + * credential/app-id: App ID backend will check the validity of app-id and user-id + at renewal time [GH-1039] + * credential/cert: TLS Certificates backend, during renewal, will now match the + client identity with the client identity used during login [GH-1127] + +BUG FIXES: + + * credential/ldap: Properly escape values being provided to search filters + [GH-1100] + * secret/aws: Cap the length of usernames for both IAM and STS types + [GH-1102] + * secret/pki: If a cert is not found during lookup of a serial number, + respond with a 400 rather than a 500 [GH-1085] + * secret/postgresql: Add extra revocation statements to better handle more + permission scenarios [GH-1053] + * secret/postgresql: Make connection_url work properly [GH-1112] + +## 0.5.0 (February 10, 2016) + +SECURITY: + + * Previous versions of Vault could allow a malicious user to hijack the rekey + operation by canceling an operation in progress and starting a new one. The + practical application of this is very small. If the user was an unseal key + owner, they could attempt to do this in order to either receive unencrypted + unseal keys or to replace the PGP keys used for encryption with ones under + their control. However, since this would invalidate any rekey progress, they + would need other unseal key holders to resubmit, which would be rather + suspicious during this manual operation if they were not also the original + initiator of the rekey attempt. If the user was not an unseal key holder, + there is no benefit to be gained; the only outcome that could be attempted + would be a denial of service against a legitimate rekey operation by sending + cancel requests over and over. Thanks to Josh Snyder for the report! + +DEPRECATIONS/CHANGES: + + * `s3` physical backend: Environment variables are now preferred over + configuration values. This makes it behave similarly to the rest of Vault, + which, in increasing order of preference, uses values from the configuration + file, environment variables, and CLI flags. [GH-871] + * `etcd` physical backend: `sync` functionality is now supported and turned on + by default. This can be disabled. [GH-921] + * `transit`: If a client attempts to encrypt a value with a key that does not + yet exist, what happens now depends on the capabilities set in the client's + ACL policies. If the client has `create` (or `create` and `update`) + capability, the key will upsert as in the past. If the client has `update` + capability, they will receive an error. [GH-1012] + * `token-renew` CLI command: If the token given for renewal is the same as the + client token, the `renew-self` endpoint will be used in the API. Given that + the `default` policy (by default) allows all clients access to the + `renew-self` endpoint, this makes it much more likely that the intended + operation will be successful. [GH-894] + * Token `lookup`: the `ttl` value in the response now reflects the actual + remaining TTL rather than the original TTL specified when the token was + created; this value is now located in `creation_ttl` [GH-986] + * Vault no longer uses grace periods on leases or token TTLs. Uncertainty + about the length of the grace period for any given backend could cause + confusion. [GH-1002] + * `rekey`: Rekey now requires a nonce to be supplied with key shares. 
This + nonce is generated at the start of a rekey attempt and is unique for that + attempt. + * `status`: The exit code for the `status` CLI command is now `2` for an + uninitialized Vault instead of `1`. `1` is returned for errors. This better + matches the rest of the CLI. + +FEATURES: + + * **Split Data/High Availability Physical Backends**: You can now configure + two separate physical backends: one to be used for High Availability + coordination and another to be used for encrypted data storage. See the + [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-395] + * **Fine-Grained Access Control**: Policies can now use the `capabilities` set + to specify fine-grained control over operations allowed on a path, including + separation of `sudo` privileges from other privileges. These can be mixed + and matched in any way desired. The `policy` value is kept for backwards + compatibility. See the [updated policy + documentation](https://vaultproject.io/docs/concepts/policies.html) for + details (a policy sketch follows this list). [GH-914] + * **List Support**: Listing is now supported via the API and the new `vault + list` command. This currently supports listing keys in the `generic` and + `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS + section below). Different parts of the API and backends will need to + implement list capabilities in ways that make sense to particular endpoints, + so further support will appear over time. [GH-617] + * **Root Token Generation via Unseal Keys**: You can now use the + `generate-root` CLI command to generate new orphaned, non-expiring root + tokens in case the original is lost or revoked (accidentally or + purposefully). This requires a quorum of unseal key holders. The output + value is protected via any PGP key of the initiator's choosing or a one-time + pad known only to the initiator (a suitable pad can be generated via the + `-genotp` flag to the command). [GH-915] + * **Unseal Key Archiving**: You can now optionally have Vault store your + unseal keys in your chosen physical store for disaster recovery purposes. + This option is only available when the keys are encrypted with PGP. [GH-907] + * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase + users when passing in PGP keys to the `init`, `rekey`, and `generate-root` + CLI commands. Public keys for these users will be fetched automatically. + [GH-901] + * **DynamoDB HA Physical Backend**: There is now a new, community-supported + HA-enabled physical backend using Amazon DynamoDB. See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-878] + * **PostgreSQL Physical Backend**: There is now a new, community-supported + physical backend using PostgreSQL. See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + [GH-945] + * **STS Support in AWS Secret Backend**: You can now use the AWS secret + backend to fetch STS tokens rather than IAM users. [GH-927] + * **Speedups in the transit backend**: The `transit` backend has gained a + cache, and now loads only the working set of keys (e.g. from + `min_decryption_version` to the current key version) into memory. + This provides large speedups and potential memory savings when the `rotate` + feature of the backend is used heavily. 
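To make the fine-grained access control feature above concrete, here is a minimal sketch written through the Go API client, assuming a reachable Vault server (`VAULT_ADDR`/`VAULT_TOKEN` in the environment) and a caller allowed to write policies; the policy name `app-readwrite` and the paths are illustrative, not part of any release:

```
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// An illustrative policy using the new `capabilities` syntax: explicit,
// mix-and-match grants per path instead of the coarse legacy `policy` value.
const rules = `
path "secret/app/*" {
  capabilities = ["create", "update", "read", "list"]
}

path "secret/app/admin" {
  capabilities = ["deny"]
}
`

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Write (or overwrite) the policy under the illustrative name.
	if err := client.Sys().PutPolicy("app-readwrite", rules); err != nil {
		log.Fatal(err)
	}
}
```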
+ +IMPROVEMENTS: + + * cli: Output secrets sorted by key name [GH-830] + * cli: Support YAML as an output format [GH-832] + * cli: Show an error if the output format is incorrect, rather than falling + back to an empty table [GH-849] + * cli: Allow setting the `advertise_addr` for HA via the + `VAULT_ADVERTISE_ADDR` environment variable [GH-581] + * cli/generate-root: Add generate-root and associated functionality [GH-915] + * cli/init: Add `-check` flag that returns whether Vault is initialized + [GH-949] + * cli/server: Use internal functions for the token-helper rather than shelling + out, which fixes some problems with using a static binary in Docker or paths + with multiple spaces when launching in `-dev` mode [GH-850] + * cli/token-lookup: Add token-lookup command [GH-892] + * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for + `-pgp-keys` [GH-940] + * conf: Use normal bool values rather than empty/non-empty for the + `tls_disable` option [GH-802] + * credential/ldap: Add support for binding, both anonymously (to discover a + user DN) and via a username and password [GH-975] + * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] + * credential/token: Change `ttl` to reflect the current remaining TTL; the + original value is in `creation_ttl` [GH-1007] + * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] + * logical/aws: You can now get STS tokens instead of IAM users [GH-927] + * logical/cassandra: Add `protocol_version` parameter to set the CQL proto + version [GH-1005] + * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] + * logical/mysql: Add list support for roles path [GH-984] + * logical/pki: Fix up key usages being specified for CAs [GH-989] + * logical/pki: Add list support for roles path [GH-985] + * logical/pki: Allow `pem_bundle` to be specified as the format, which + provides a concatenated PEM bundle of returned values [GH-1008] + * logical/pki: Add 30 seconds of slack to the validity start period to + accommodate some clock skew in machines [GH-1036] + * logical/postgres: Add `max_idle_connections` parameter [GH-950] + * logical/postgres: Add list support for roles path + * logical/ssh: Add list support for roles path [GH-983] + * logical/transit: Keys are archived and only keys between the latest version + and `min_decryption_version` are loaded into the working set. This can + provide a very large speed increase when rotating keys very often. 
[GH-977] + * logical/transit: Keys are now cached, which should provide a large speedup + in most cases [GH-979] + * physical/cache: Use 2Q cache instead of straight LRU [GH-908] + * physical/etcd: Support basic auth [GH-859] + * physical/etcd: Support sync functionality and enable by default [GH-921] + +BUG FIXES: + + * api: Correct the HTTP verb used in the LookupSelf method [GH-887] + * api: Fix the output of `Sys().MountConfig(...)` to return proper values + [GH-1017] + * command/read: Fix panic when an empty argument was given [GH-923] + * command/ssh: Fix panic when username lookup fails [GH-886] + * core: When running in standalone mode, don't advertise that we are active + until post-unseal setup completes [GH-872] + * core: Update go-cleanhttp dependency to ensure idle connections aren't + leaked [GH-867] + * core: Don't allow tokens to have duplicate policies [GH-897] + * core: Fix regression in `sys/renew` that caused information stored in the + Secret part of the response to be lost [GH-912] + * physical: Use square brackets when setting an IPv6-based advertise address + as the auto-detected advertise address [GH-883] + * physical/s3: Use an initialized client when using IAM roles to fix a + regression introduced against newer versions of the AWS Go SDK [GH-836] + * secret/pki: Fix a condition where unmounting could fail if the CA + certificate was not properly loaded [GH-946] + * secret/ssh: Fix a problem where SSH connections were not always closed + properly [GH-942] + +MISC: + + * Clarified our stance on support for community-derived physical backends. + See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + * Add `vault-java` to libraries [GH-851] + * Various minor documentation fixes and improvements [GH-839] [GH-854] + [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] + [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] + +BUILD NOTE: + + * The HashiCorp-provided binary release of Vault 0.5.0 is built against a + patched version of Go 1.5.3 containing two specific bug fixes affecting TLS + certificate handling. These fixes are in the Go 1.6 tree and were + cherry-picked on top of stock Go 1.5.3. If you want to examine the way in + which the releases were built, please look at our [cross-compilation + Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). + +## 0.4.1 (January 13, 2016) + +SECURITY: + + * Build against Go 1.5.3 to mitigate a security vulnerability introduced in + Go 1.5. For more information, please see + https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 + +This is a security-only release; other than the version number and building +against Go 1.5.3, there are no changes from 0.4.0. + +## 0.4.0 (December 10, 2015) + +DEPRECATIONS/CHANGES: + + * Policy Name Casing: Policy names are now normalized to lower-case on write, + helping prevent accidental case mismatches. For backwards compatibility, + policy names are not currently normalized when reading or deleting. [GH-676] + * Default etcd port number: the default connection string for the `etcd` + physical store uses port 2379 instead of port 4001, which is the port used + by the supported version 2.x of etcd. [GH-753] + * As noted below in the FEATURES section, if your Vault installation contains + a policy called `default`, new tokens created will inherit this policy + automatically. 
+ * In the PKI backend there have been a few minor breaking changes: + * The token display name is no longer a valid option for providing a base + domain for issuance. Since this name is prepended with the name of the + authentication backend that issued it, it provided a faulty use-case at best + and a confusing experience at worst. We hope to figure out a better + per-token value in a future release. + * The `allowed_base_domain` parameter has been changed to `allowed_domains`, + which accepts a comma-separated list of domains. This allows issuing + certificates with DNS subjects across multiple domains. If you had a + configured `allowed_base_domain` parameter, it will be migrated + automatically when the role is read (either via a normal read, or via + issuing a certificate). + +FEATURES: + + * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate + and sign root CA certificates and intermediate CA CSRs. It can also now sign + submitted client CSRs, as well as a significant number of other + enhancements. See the updated documentation for the full API. [GH-666] + * **CRL Checking for Certificate Authentication**: The `cert` backend now + supports pushing CRLs into the mount and using the contained serial numbers + for revocation checking. See the documentation for the `cert` backend for + more info. [GH-330] + * **Default Policy**: Vault now ensures that a policy named `default` is added + to every token. This policy cannot be deleted, but it can be modified + (including to an empty policy). There are three endpoints allowed in the + default `default` policy, related to token self-management: `lookup-self`, + which allows a token to retrieve its own information, and `revoke-self` and + `renew-self`, which are self-explanatory. If your existing Vault + installation contains a policy called `default`, it will not be overridden, + but it will be added to each new token created. You can override this + behavior when using manual token creation (i.e. not via an authentication + backend) by setting the "no_default_policy" flag to true. [GH-732] + +IMPROVEMENTS: + + * api: API client now uses a 60 second timeout instead of indefinite [GH-681] + * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth + tokens [GH-739] + * api: Standardize environment variable reading logic inside the API; the CLI + now uses this but can still override via command-line parameters [GH-618] + * audit: HMAC-SHA256'd client tokens are now stored with each request entry. + Previously they were only displayed at creation time; this allows much + better traceability of client actions. 
[GH-713] + * audit: There is now a `sys/audit-hash` endpoint that can be used to generate + an HMAC-SHA256'd value from provided data using the given audit backend's + salt [GH-784] + * core: The physical storage read cache can now be disabled via + "disable_cache" [GH-674] + * core: The unsealing process can now be reset midway through (this feature + was documented before, but not enabled) [GH-695] + * core: Tokens can now renew themselves [GH-455] + * core: Base64-encoded PGP keys can be used with the CLI for `init` and + `rekey` operations [GH-653] + * core: Print version on startup [GH-765] + * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system + instead of requiring a root token [GH-769] + * credential/token: Display whether or not a token is an orphan in the output + of a lookup call [GH-766] + * logical: Allow `.` in path-based variables in many more locations [GH-244] + * logical: Responses now contain a "warnings" key containing a list of + warnings returned from the server. These are conditions that did not require + failing an operation, but of which the client should be aware. [GH-676] + * physical/(consul,etcd): Consul and etcd now use a connection pool to limit + the number of outstanding operations, improving behavior when a lot of + operations must happen at once [GH-677] [GH-780] + * physical/consul: The `datacenter` parameter was removed; it could not be + effective unless the Vault node (or the Consul node it was connecting to) + was in the datacenter specified, in which case it wasn't needed [GH-816] + * physical/etcd: Support TLS-encrypted connections and use a connection pool + to limit the number of outstanding operations [GH-780] + * physical/s3: The S3 endpoint can now be configured, allowing the use of + S3-API-compatible storage solutions [GH-750] + * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` + environment variable [GH-758] + * secret/consul: Management tokens can now be created [GH-714] + +BUG FIXES: + + * api: API client now checks for a 301 response for redirects. Vault doesn't + generate these, but in certain conditions Go's internal HTTP handler can + generate them, leading to client errors. + * cli: `token-create` now supports the `ttl` parameter in addition to the + deprecated `lease` parameter. [GH-688] + * core: Return data from `generic` backends on the last use of a limited-use + token [GH-615] + * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] + * core: Stale leader entries will now be reaped [GH-679] + * core: Using `mount-tune` on the auth/token path did not take effect. 
+ [GH-688] + * core: Fix a potential race condition when (un)sealing the vault with metrics + enabled [GH-694] + * core: Fix an error that could happen in some failure scenarios where Vault + could fail to revert to a clean state [GH-733] + * core: Ensure secondary indexes are removed when a lease is expired [GH-749] + * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] + * everywhere: Don't use http.DefaultClient, as it shares state implicitly and + is a source of hard-to-track-down bugs [GH-700] + * credential/token: Allow creating orphan tokens via an API path [GH-748] + * secret/generic: Validate given duration at write time, not just read time; + if stored durations are not parseable, return a warning and the default + duration rather than an error [GH-718] + * secret/generic: Return 400 instead of 500 when `generic` backend is written + to with no data fields [GH-825] + * secret/postgresql: Revoke permissions before dropping a user or revocation + may fail [GH-699] + +MISC: + + * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] + [GH-710] [GH-715] [GH-831] + +## 0.3.1 (October 6, 2015) + +SECURITY: + + * core: In certain failure scenarios, the full values of requests and + responses would be logged [GH-665] + +FEATURES: + + * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends + now allow setting the number of maximum open connections to the database, + which was previously capped to 2. [GH-661] + * **Renewable Tokens for GitHub**: The `github` backend now supports + specifying a TTL, enabling renewable tokens. [GH-664] + +BUG FIXES: + + * dist: linux-amd64 distribution was dynamically linked [GH-656] + * credential/github: Fix acceptance tests [GH-651] + +MISC: + + * Various minor documentation fixes and improvements [GH-649] [GH-650] + [GH-654] [GH-663] + +## 0.3.0 (September 28, 2015) + +DEPRECATIONS/CHANGES: + +Note: deprecations and breaking changes in upcoming releases are announced +ahead of time on the "vault-tool" mailing list. + + * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is + via the X-Vault-Token header. Cookie authentication was hard to properly + test, could result in browsers/tools/applications saving tokens in plaintext + on disk, and other issues. [GH-564] + * **Terminology/Field Names**: Vault is transitioning from overloading the + term "lease" to mean both "a set of metadata" and "the amount of time the + metadata is valid". The latter is now being referred to as TTL (or + "lease_duration" for backwards-compatibility); some parts of Vault have + already switched to using "ttl" and others will follow in upcoming releases. + In particular, the "token", "generic", and "pki" backends accept both "ttl" + and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] + * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, + values written by Vault 0.3+ will not be able to be read by prior versions + of Vault. There are no expected upgrade issues, however, as with all + critical infrastructure it is recommended to back up Vault's physical + storage before upgrading. + +FEATURES: + + * **SSH Backend**: Vault can now be used to delegate SSH access to machines, + via a (recommended) One-Time Password approach or by issuing dynamic keys. + [GH-385] + * **Cubbyhole Backend**: This backend works similarly to the "generic" backend + but provides a per-token workspace. 
This enables some additional + authentication workflows (especially for containers) and can be useful to + applications to e.g. store local credentials while being restarted or + upgraded, rather than persisting to disk. [GH-612] + * **Transit Backend Improvements**: The transit backend now allows key + rotation and datakey generation. For rotation, data encrypted with previous + versions of the keys can still be decrypted, down to a (configurable) + minimum previous version; there is a rewrap function for manual upgrades of + ciphertext to newer versions. Additionally, the backend now allows + generating and returning high-entropy keys of a configurable bitsize + suitable for AES and other functions; this is returned wrapped by a named + key, or optionally both wrapped and plaintext for immediate use. [GH-626] + * **Global and Per-Mount Default/Max TTL Support**: You can now set the + default and maximum Time To Live for leases both globally and per-mount. + Per-mount settings override global settings. Not all backends honor these + settings yet, but the maximum is a hard limit enforced outside the backend. + See the documentation for "/sys/mounts/" for details on configuring + per-mount TTLs. [GH-469] + * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's + master key, PGP/GPG public keys can now be provided. The output keys will be + encrypted with the given keys, in order. [GH-570] + * **Duo Multifactor Authentication Support**: Backends that support MFA can + now use Duo as the mechanism. [GH-464] + * **Performance Improvements**: Users of the "generic" backend will see a + significant performance improvement as the backend no longer creates leases, + although it does return TTLs (global/mount default, or set per-item) as + before. [GH-631] + * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the + audit contract do not allow us to make the results public.) [GH-220] + +IMPROVEMENTS: + + * audit: Log entries now contain a time field [GH-495] + * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] + * backends: Add ability for a cleanup function to be called on backend unmount + [GH-608] + * config: Allow specifying minimum acceptable TLS version [GH-447] + * core: If trying to mount in a location that is already mounted, be more + helpful about the error [GH-510] + * core: Be more explicit on failure if the issue is invalid JSON [GH-553] + * core: Tokens can now revoke themselves [GH-620] + * credential/app-id: Give a more specific error when sending a duplicate POST + to sys/auth/app-id [GH-392] + * credential/github: Support custom API endpoints (e.g. for Github Enterprise) + [GH-572] + * credential/ldap: Add per-user policies and option to login with + userPrincipalName [GH-420] + * credential/token: Allow root tokens to specify the ID of a token being + created from CLI [GH-502] + * credential/userpass: Enable renewals for login tokens [GH-623] + * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] + * scripts: Use godep for build scripts to use same environment as tests + [GH-404] + * secret/mysql: Allow reading configuration data [GH-529] + * secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to + allow for non-hostname values (e.g. 
for client certificates) [GH-555] + * storage/consul: Allow specifying certificates used to talk to Consul + [GH-384] + * storage/mysql: Allow SSL encrypted connections [GH-439] + * storage/s3: Allow using temporary security credentials [GH-433] + * telemetry: Put telemetry object in configuration to allow more flexibility + [GH-419] + * testing: Disable mlock for testing of logical backends so as not to require + root [GH-479] + +BUG FIXES: + + * audit/file: Do not enable auditing if file permissions are invalid [GH-550] + * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] + * cli: Fixed missing setup of client TLS certificates if no custom CA was + provided + * cli/read: Do not include a carriage return when using raw field output + [GH-624] + * core: Bad input data could lead to a panic for that session, rather than + returning an error [GH-503] + * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] + * core: Do not return a Secret if there are no uses left on a token (since it + will be unable to be used) [GH-615] + * core: Code paths that called lookup-self would decrement num_uses and + potentially immediately revoke a token [GH-552] + * core: Some /sys/ paths would not properly redirect from a standby to the + leader [GH-499] [GH-551] + * credential/aws: Translate spaces in a token's display name to avoid making + IAM unhappy [GH-567] + * credential/github: Integration failed if more than ten organizations or + teams [GH-489] + * credential/token: Tokens with sudo access to "auth/token/create" can now use + root-only options [GH-629] + * secret/cassandra: Work around backwards-incompatible change made in + Cassandra 2.2 preventing Vault from properly setting/revoking leases + [GH-549] + * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues + [GH-522] + * secret/postgres: Explicitly set timezone in connections [GH-597] + * storage/etcd: Renew semaphore periodically to prevent leadership flapping + [GH-606] + * storage/zk: Fix collisions in storage that could lead to data unavailability + [GH-411] + +MISC: + + * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] + [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] + [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] + * Less "armon" in stack traces [GH-453] + * Sourcegraph integration [GH-456] + +## 0.2.0 (July 13, 2015) + +FEATURES: + + * **Key Rotation Support**: The `rotate` command can be used to rotate the + master encryption key used to write data to the storage (physical) backend. + [GH-277] + * **Rekey Support**: Rekey can be used to rotate the master key and change the + configuration of the unseal keys (number of shares, threshold required). + [GH-277] + * **New secret backend: `pki`**: Enable Vault to be a certificate authority + and generate signed TLS certificates. [GH-310] + * **New secret backend: `cassandra`**: Generate dynamic credentials for + Cassandra [GH-363] + * **New storage backend: `etcd`**: store physical data in etcd [GH-259] + [GH-297] + * **New storage backend: `s3`**: store physical data in S3. Does not support + HA. [GH-242] + * **New storage backend: `MySQL`**: store physical data in MySQL. Does not + support HA. 
[GH-324] + * `transit` secret backend supports derived keys for per-transaction unique + keys [GH-399] + +IMPROVEMENTS: + + * cli/auth: Enable `cert` method [GH-380] + * cli/auth: read input from stdin [GH-250] + * cli/read: Ability to read a single field from a secret [GH-257] + * cli/write: Adding a force flag when no input required + * core: allow time duration format in place of seconds for some inputs + * core: audit log provides more useful information [GH-360] + * core: graceful shutdown for faster HA failover + * core: **change policy format** to use explicit globbing [GH-400] Any + existing policy in Vault is automatically upgraded to avoid issues. All + policy files must be updated for future writes. Adding the explicit glob + character `*` to the path specification is all that is required. + * core: policy merging to give deny highest precedence [GH-400] + * credential/app-id: Protect against timing attack on app-id + * credential/cert: Record the common name in the metadata [GH-342] + * credential/ldap: Allow TLS verification to be disabled [GH-372] + * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] + * credential/userpass: Protect against timing attack on password + * credential/userpass: Use bcrypt for password matching + * http: response codes improved to reflect error [GH-366] + * http: the `sys/health` endpoint supports `?standbyok` to return 200 on + standby [GH-389] + * secret/app-id: Support deleting AppID and UserIDs [GH-200] + * secret/consul: Fine grained lease control [GH-261] + * secret/transit: Decouple raw key from key management endpoint [GH-355] + * secret/transit: Upsert named key when encrypt is used [GH-355] + * storage/zk: Support for HA configuration [GH-252] + * storage/zk: Changing node representation. **Backwards incompatible**. + [GH-416] + +BUG FIXES: + + * audit/file: fix removing TLS connection state + * audit/syslog: fix removing TLS connection state + * command/*: commands accepting `k=v` allow blank values + * core: Allow building on FreeBSD [GH-365] + * core: Fixed various panics when audit logging enabled + * core: Lease renewal does not create redundant lease + * core: fixed leases with negative duration [GH-354] + * core: token renewal does not create child token + * core: fixing panic when lease increment is null [GH-408] + * credential/app-id: Salt the paths in storage backend to avoid information + leak + * credential/cert: Fixing client certificate not being requested + * credential/cert: Fixing panic when no certificate match found [GH-361] + * http: Accept PUT as POST for sys/auth + * http: Accept PUT as POST for sys/mounts [GH-349] + * http: Return 503 when sealed [GH-225] + * secret/postgres: Username length is capped to avoid exceeding the limit + * server: Do not panic if backend not configured [GH-222] + * server: Explicitly check value of tls_disable [GH-201] + * storage/zk: Fixed issues with version conflicts [GH-190] + +MISC: + + * cli/path-help: renamed from `help` to avoid confusion + +## 0.1.2 (May 11, 2015) + +FEATURES: + + * **New physical backend: `zookeeper`**: store physical data in Zookeeper. + HA not supported yet. + * **New credential backend: `ldap`**: authenticate using LDAP credentials + (a login sketch follows below). 
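As a sketch of logging in through the new `ldap` credential backend with the Go API client: this assumes the backend is enabled at the default `auth/ldap` path and uses placeholder credentials; it is illustrative rather than the only way to authenticate:

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig()) // reads VAULT_ADDR
	if err != nil {
		log.Fatal(err)
	}

	// Log in as a placeholder user against the default ldap mount path.
	secret, err := client.Logical().Write("auth/ldap/login/alice",
		map[string]interface{}{"password": "placeholder-password"})
	if err != nil {
		log.Fatal(err)
	}

	// A successful login returns a client token for subsequent requests.
	client.SetToken(secret.Auth.ClientToken)
	fmt.Println("token policies:", secret.Auth.Policies)
}
```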
+ +IMPROVEMENTS: + + * core: Auth backends can store internal data about auth creds + * audit: display name for auth is shown in logs [GH-176] + * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130] + * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162] + * command/server: environment variables are copy-pastable + * credential/app-id: hash of app and user ID are in metadata [GH-176] + * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124] + * logical/*: Generate help output even if no synopsis specified + +BUG FIXES: + + * core: login endpoints should never return secrets + * core: Internal data should never be returned from core endpoints + * core: defer barrier initialization to as late as possible to avoid error + cases during init that corrupt data (no data loss) + * core: guard against invalid init config earlier + * audit/file: create file if it doesn't exist [GH-148] + * command/*: ignore directories when traversing CA paths [GH-181] + * credential/*: all policy mapping keys are case insensitive [GH-163] + * physical/consul: Fixing path for locking so HA works in every case + +## 0.1.1 (May 2, 2015) + +SECURITY CHANGES: + + * physical/file: create the storage with 0600 permissions [GH-102] + * token/disk: write the token to disk with 0600 perms + +IMPROVEMENTS: + + * core: Very verbose error if mlock fails [GH-59] + * command/*: On error with TLS oversized record, show more human-friendly + error message. [GH-123] + * command/read: `lease_renewable` is now outputted along with the secret to + show whether it is renewable or not + * command/server: Add configuration option to disable mlock + * command/server: Disable mlock for dev mode so it works on more systems + +BUG FIXES: + + * core: if token helper isn't absolute, prepend with path to Vault + executable, not "vault" (which requires PATH) [GH-60] + * core: Any "mapping" routes allow hyphens in keys [GH-119] + * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] + * command/auth: Using an invalid token won't crash [GH-75] + * credential/app-id: app and user IDs can have hyphens in keys [GH-119] + * helper/password: import proper DLL for Windows to prompt for password [GH-83] + +## 0.1.0 (April 28, 2015) + + * Initial release diff --git a/CHANGELOG.md b/CHANGELOG.md index ba56fc9d002c..38d85f8f530d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8128 +1,3526 @@ -## 1.13.0 -### Unreleased +## Previous versions +- [v1.0.0 - v1.9.10](CHANGELOG-pre-v1.10.md) +- [v0.11.6 and earlier](CHANGELOG-v0.md) + +## 1.15.5 +### January 31, 2024 + +SECURITY: + +* audit: Fix bug where use of 'log_raw' option could result in other devices logging raw audit data [[GH-24968](https://github.com/hashicorp/vault/pull/24968)] [[HCSEC-2024-01](https://discuss.hashicorp.com/t/hcsec-2024-01-vault-may-expose-sensitive-information-when-configuring-an-audit-log-device/62311)] CHANGES: -* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] -* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. - This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] -* core: Bump Go version to 1.19.3. 
-* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)]
-* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
-* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)]
-* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
-* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
-* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)]
-* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)]
+* core: Bump Go version to 1.21.5.
+* database/snowflake: Update plugin to v0.9.1 [[GH-25020](https://github.com/hashicorp/vault/pull/25020)]
+* secrets/ad: Update plugin to v0.16.2 [[GH-25058](https://github.com/hashicorp/vault/pull/25058)]
+* secrets/openldap: Update plugin to v0.11.3 [[GH-25040](https://github.com/hashicorp/vault/pull/25040)]
+
+IMPROVEMENTS:
+
+* command/server: display logs on startup immediately if the disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)]
+* core/activity: Include secret_syncs in activity log responses [[GH-24710](https://github.com/hashicorp/vault/pull/24710)]
+* oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata [[GH-24979](https://github.com/hashicorp/vault/pull/24979)]
+* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)]
+* sys (enterprise): Adds the chroot_namespace field to the sys/internal/ui/resultant-acl endpoint, which exposes the value of the chroot namespace from the listener config.
+* ui: The latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)]
+
+BUG FIXES:
+
+* audit/socket: Provide socket-based audit backends with the 'prefix' configuration option when supplied. [[GH-25004](https://github.com/hashicorp/vault/pull/25004)]
+* auth/saml (enterprise): Fixes support for Microsoft Entra ID enterprise applications
+* core (enterprise): fix a potential deadlock if an error is received twice from underlying storage for the same key
+* core: upgrade github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 to support Azure workload identities.
[[GH-24954](https://github.com/hashicorp/vault/pull/24954)]
+* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)]
+* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4.
+* plugins: fix panic when registering containerized plugin with a custom runtime on a perf standby
+* ui: Allows users to dismiss the resultant-acl banner. [[GH-25106](https://github.com/hashicorp/vault/pull/25106)]
+* ui: Correctly handle redirects from pre-1.15.0 KV v2 edit, create, and show URLs. [[GH-24339](https://github.com/hashicorp/vault/pull/24339)]
+* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)]
+* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)]
+* ui: Fixes policy input toolbar scrolling by default [[GH-23297](https://github.com/hashicorp/vault/pull/23297)]
+* ui: The UI can now be used by an operator to create or update database roles without permission on the database connection. [[GH-24660](https://github.com/hashicorp/vault/pull/24660)]
+* ui: fix KV v2 details view defaulting to JSON view when secret value includes `{` [[GH-24513](https://github.com/hashicorp/vault/pull/24513)]
+* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)]
+* ui: fix issue where KV v2 capabilities checks were not passing in the full secret path if the secret was inside a directory. [[GH-24404](https://github.com/hashicorp/vault/pull/24404)]
+* ui: fix navigation items shown to user when chroot_namespace configured [[GH-24492](https://github.com/hashicorp/vault/pull/24492)]
+
+## 1.15.4
+### December 06, 2023
+
+SECURITY:
+
+* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741))
+
+CHANGES:
+
+* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)]
+
+BUG FIXES:
+
+* agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. [[GH-24252](https://github.com/hashicorp/vault/pull/24252)]
+* api: Fix deadlock on calls to sys/leader with a namespace configured on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)]
+* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)]
+* ui: Correctly handle directory redirects from pre-1.15.0 KV v2 list view URLs. [[GH-24281](https://github.com/hashicorp/vault/pull/24281)]
+* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)]
+* ui: When a KV v2 secret is an object, fix so the details view defaults to a read-only JSON editor. [[GH-24290](https://github.com/hashicorp/vault/pull/24290)]
+
+## 1.15.3
+### November 30, 2023
+
+CHANGES:
+
+* core: Bump Go version to 1.21.4.
+
+IMPROVEMENTS:
+
+* core (enterprise): Speed up unseal when using namespaces
+* core: update sys/seal-status (and CLI vault status) to report the type of the seal when unsealed, as well as the type of the recovery seal if using an auto-seal. [[GH-23022](https://github.com/hashicorp/vault/pull/23022)]
+* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)]
+* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)]
+* ui: capabilities-self is always called in the user's root namespace [[GH-24168](https://github.com/hashicorp/vault/pull/24168)]
+
+BUG FIXES:
+
+* activity log (enterprise): De-duplicate client count estimates for license utilization reporting.
+* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)]
+* core (enterprise): Treat multiple disabled HA seals as a migration to Shamir.
+* core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. [[GH-24238](https://github.com/hashicorp/vault/pull/24238)]
+* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24059](https://github.com/hashicorp/vault/pull/24059)]
+* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)]
+* core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while Vault is in seal migration mode. [[GH-24165](https://github.com/hashicorp/vault/pull/24165)]
+* replication (enterprise): disallow configuring paths filter for a mount path that does not exist
+* secrets-sync (enterprise): Fix panic when setting usage_gauge_period to none
+* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)]
+* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)]
+* ui: Fix JSON editor in KV v2 unable to handle pasted values [[GH-24224](https://github.com/hashicorp/vault/pull/24224)]
+* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)]
+* ui: show error from API when seal fails [[GH-23921](https://github.com/hashicorp/vault/pull/23921)]
+
+## 1.15.2
+### November 09, 2023
+
+SECURITY:
+
+* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HCSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)]
+
+CHANGES:
+
+* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)]
+* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)]

FEATURES:

-* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)]
-* logging: Vault agent and server commands support log file and log rotation.
[[GH-18031](https://github.com/hashicorp/vault/pull/18031)]
-* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)]
+* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)]

IMPROVEMENTS:

-* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)]
-* agent: Agent listeners can now be assigned the `metrics_only` role, serving only metrics, as part of the listener's new top-level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)]
-* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)]
-* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)]
-* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)]
-* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a Virtual Machine Scale Set (VMSS) in flexible orchestration mode. [[GH-17540](https://github.com/hashicorp/vault/pull/17540)]
-* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)]
-* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)]
-* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)]
-* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)]
-* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-17848](https://github.com/hashicorp/vault/pull/17848)]
-* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)]
-* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. [[GH-17750](https://github.com/hashicorp/vault/pull/17750)]
-* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)]
-* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)]
-* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)]
-* core/server: Added an environment variable to write goroutine stacktraces to a temporary file for SIGUSR2 signals. [[GH-17929](https://github.com/hashicorp/vault/pull/17929)]
-* core: Add RPCs to read and update userFailedLoginInfo map
-* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)]
-* core: Add a user lockout field to the config; it can be set for an auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)]
-* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used.
[[GH-17855](https://github.com/hashicorp/vault/pull/17855)]
-* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. [[GH-17265](https://github.com/hashicorp/vault/pull/17265)]
-* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)]
-* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)]
-* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)]
-* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)]
-* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. [[GH-17289](https://github.com/hashicorp/vault/pull/17289)]
-* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)]
-* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)]
-* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)]
-* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)]
-* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)]
-* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)]
-* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate APIs [[GH-17388](https://github.com/hashicorp/vault/pull/17388)]
-* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)]
-* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)]
-* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)]
-* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id from the existing certificate serial lookup API if the certificate is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)]
-* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)]
-* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)]
-* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)]
-* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)]
-* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)]
-* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)]
-* ui: Add algorithm-signer as an SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)]
-* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)]
-* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)]
-* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)]

BUG FIXES:

-* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
-* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
-* cli: Fix issue preventing kv commands from executing properly when the mount path provided by the `-mount` flag and the secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
-* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object in the API response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
-* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)]
-* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`.
-* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)]
-* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)]
-* core/managed-keys (enterprise): Return better error messages when encountering key creation failures
-* core/managed-keys (enterprise): Switch to using hash length as PSS salt length within the test/sign API for better PKCS#11 compatibility
-* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
-* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas.
-* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)]
-* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)]
-* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes.
[[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] -* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] -* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] -* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. 
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] +* ui: fix broken GUI when accessing from listener with chroot_namespace defined [[GH-23942](https://github.com/hashicorp/vault/pull/23942)] -## 1.12.2 -### November 30, 2022 +## 1.15.1 +### October 25, 2023 CHANGES: -* core: Bump Go version to 1.19.3. -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* core: Bump Go version to 1.21.3. + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)] +* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type. 
+* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)]
+* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)]
+* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)]
+* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. [[GH-23503](https://github.com/hashicorp/vault/pull/23503)]
+
+BUG FIXES:
+
+* Seal HA (enterprise/beta): Fix rejection of a seal configuration change from two to one auto seal due to persistence of the previous seal type being "multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)]
+* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)]
+* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)]
+* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)]
+* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)]
+* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)]
+* kmip (enterprise): Improve handling of failures due to storage replication issues.
+* kmip (enterprise): Return a structure in the response for query function Query Server Information.
+* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)]
+* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
+* replication (enterprise): Fix a missing unlock when changing replication state
+* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret
+* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)]
+* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)]
+* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)]
+* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
+* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled; only signing operations are supported.
+* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
+* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
+* storage/consul: fix a bug where an active node in a specific sort of network partition could continue to write data to Consul after a new leader is elected, potentially causing data loss or corruption for keys with many concurrent writers. For Enterprise clusters this could cause corruption of the merkle trees, leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)]
+* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)]
+* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)]
+* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)]
+* ui: Fix bug where auth items were not listed when within a namespace. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)]
+* ui: Fix regression that broke the oktaNumberChallenge in the UI. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)]
+* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)]
+* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)]
+* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
+* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)]
+
+## 1.15.0
+### September 27, 2023
+
+SECURITY:
+
+* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HCSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)]
+* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HCSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)]
+
+CHANGES:
+
+* auth/alicloud: Update plugin to v0.16.0 [[GH-22646](https://github.com/hashicorp/vault/pull/22646)]
+* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)]
+* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)]
+* auth/azure: Update plugin to v0.16.2 [[GH-23060](https://github.com/hashicorp/vault/pull/23060)]
+* auth/cf: Update plugin to v0.15.1 [[GH-22758](https://github.com/hashicorp/vault/pull/22758)]
+* auth/gcp: Update plugin to v0.16.1 [[GH-22612](https://github.com/hashicorp/vault/pull/22612)]
+* auth/jwt: Update plugin to v0.17.0 [[GH-22678](https://github.com/hashicorp/vault/pull/22678)]
+* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)]
+* auth/kubernetes: Update plugin to v0.17.0 [[GH-22709](https://github.com/hashicorp/vault/pull/22709)]
+* auth/kubernetes: Update plugin to v0.17.1 [[GH-22879](https://github.com/hashicorp/vault/pull/22879)]
+* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)]
+* auth/oci: Update plugin to v0.14.2 [[GH-22805](https://github.com/hashicorp/vault/pull/22805)]
+* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy
+* core/namespace (enterprise): Introduce the concept of a high-privilege namespace (administrative namespace), which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)]
+* core: Bump Go version to 1.21.1.
+* database/couchbase: Update plugin to v0.9.3 [[GH-22854](https://github.com/hashicorp/vault/pull/22854)]
+* database/couchbase: Update plugin to v0.9.4 [[GH-22871](https://github.com/hashicorp/vault/pull/22871)]
+* database/elasticsearch: Update plugin to v0.13.3 [[GH-22696](https://github.com/hashicorp/vault/pull/22696)]
+* database/mongodbatlas: Update plugin to v0.10.1 [[GH-22655](https://github.com/hashicorp/vault/pull/22655)]
+* database/redis-elasticache: Update plugin to v0.2.2 [[GH-22584](https://github.com/hashicorp/vault/pull/22584)]
+* database/redis-elasticache: Update plugin to v0.2.3 [[GH-22598](https://github.com/hashicorp/vault/pull/22598)]
+* database/redis: Update plugin to v0.2.2 [[GH-22654](https://github.com/hashicorp/vault/pull/22654)]
+* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)]
+* events: Log level for processing an event dropped from info to debug. [[GH-22997](https://github.com/hashicorp/vault/pull/22997)]
+* events: `data_path` will include full data path of secret, including name. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
+* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host
+* sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`.
[[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
+* secrets/ad: Update plugin to v0.16.1 [[GH-22856](https://github.com/hashicorp/vault/pull/22856)]
+* secrets/alicloud: Update plugin to v0.15.1 [[GH-22533](https://github.com/hashicorp/vault/pull/22533)]
+* secrets/azure: Update plugin to v0.16.2 [[GH-22799](https://github.com/hashicorp/vault/pull/22799)]
+* secrets/azure: Update plugin to v0.16.3 [[GH-22824](https://github.com/hashicorp/vault/pull/22824)]
+* secrets/gcp: Update plugin to v0.17.0 [[GH-22746](https://github.com/hashicorp/vault/pull/22746)]
+* secrets/gcpkms: Update plugin to v0.15.1 [[GH-22757](https://github.com/hashicorp/vault/pull/22757)]
+* secrets/keymgmt: Update plugin to v0.9.3
+* secrets/kubernetes: Update plugin to v0.6.0 [[GH-22823](https://github.com/hashicorp/vault/pull/22823)]
+* secrets/kv: Update plugin to v0.16.1 [[GH-22716](https://github.com/hashicorp/vault/pull/22716)]
+* secrets/mongodbatlas: Update plugin to v0.10.1 [[GH-22748](https://github.com/hashicorp/vault/pull/22748)]
+* secrets/openldap: Update plugin to v0.11.2 [[GH-22734](https://github.com/hashicorp/vault/pull/22734)]
+* secrets/terraform: Update plugin to v0.7.3 [[GH-22907](https://github.com/hashicorp/vault/pull/22907)]
+* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
+* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)]
+* telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback` metrics by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes vault to emit the metrics with mount points in their names. [[GH-22400](https://github.com/hashicorp/vault/pull/22400)]
+
+FEATURES:
+
+* **Certificate Issuance External Policy Service (CIEPS) (enterprise)**: Allow highly-customizable operator control of certificate validation and generation through the PKI Secrets Engine.
+* **Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls [[GH-22551](https://github.com/hashicorp/vault/pull/22551)]
+* **Dashboard UI**: Dashboard is now available in the UI as the new landing page. [[GH-21057](https://github.com/hashicorp/vault/pull/21057)]
+* **Database Static Role Advanced TTL Management**: Adds the ability to rotate static roles on a defined schedule. [[GH-22484](https://github.com/hashicorp/vault/pull/22484)]
+* **Event System**: Add subscribe capability and subscribe_event_types to policies for events. [[GH-22474](https://github.com/hashicorp/vault/pull/22474)]
+* **GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. [[GH-22445](https://github.com/hashicorp/vault/pull/22445)]
+* **Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) [[GH-22559](https://github.com/hashicorp/vault/pull/22559)]
+* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption.
+* **Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux.
[[GH-22712](https://github.com/hashicorp/vault/pull/22712)]
+* **SAML Auth Method (enterprise)**: Enable users to authenticate with Vault using their identity in a SAML Identity Provider.
+* **Seal High Availability Beta (enterprise)**: Operators can try out configuring more than one automatic seal for resilience against seal provider outages. Not for production use at this time.
+* **Secrets Sync (enterprise)**: Add the ability to synchronize KV v2 secrets with external secrets manager solutions.
+* **UI LDAP secrets engine**: Add LDAP secrets engine to the UI. [[GH-20790](https://github.com/hashicorp/vault/pull/20790)]
+
+IMPROVEMENTS:
+
+* Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 [[GH-20966](https://github.com/hashicorp/vault/pull/20966)]
+* api: add support for cloning a Client's tls.Config. [[GH-21424](https://github.com/hashicorp/vault/pull/21424)]
+* api: adding a new api sys method for replication status [[GH-20995](https://github.com/hashicorp/vault/pull/20995)]
+* audit: add core audit events experiment [[GH-21628](https://github.com/hashicorp/vault/pull/21628)]
+* auth/aws: Added support for signed GET requests for authenticating to Vault using the AWS IAM method. [[GH-10961](https://github.com/hashicorp/vault/pull/10961)]
+* auth/azure: Add support for Azure workload identity authentication (see issue #18257). Update go-kms-wrapping dependency to include [PR #155](https://github.com/hashicorp/go-kms-wrapping/pull/155) [[GH-22994](https://github.com/hashicorp/vault/pull/22994)]
+* auth/azure: Added Azure API configurable retry options [[GH-23059](https://github.com/hashicorp/vault/pull/23059)]
+* auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values [[GH-21830](https://github.com/hashicorp/vault/pull/21830)]
+* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
+* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)]
+* auto-auth: added support for LDAP auto-auth [[GH-21641](https://github.com/hashicorp/vault/pull/21641)]
+* aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional sts endpoints based on Authorization header when using IAM-based authentication. [[GH-21960](https://github.com/hashicorp/vault/pull/21960)]
+* command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. [[GH-22657](https://github.com/hashicorp/vault/pull/22657)]
+* core (ent): Add field that allows lease-count namespace quotas to be inherited by child namespaces.
+* core: Add field that allows rate-limit namespace quotas to be inherited by child namespaces. [[GH-22452](https://github.com/hashicorp/vault/pull/22452)]
+* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise.
+* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)]
+* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`.
[[GH-21010](https://github.com/hashicorp/vault/pull/21010)]
+* core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths [[GH-21772](https://github.com/hashicorp/vault/pull/21772)]
+* core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions [[GH-21760](https://github.com/hashicorp/vault/pull/21760)]
+* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
+* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)]
+* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy [[GH-22304](https://github.com/hashicorp/vault/pull/22304)]
+* core: remove unnecessary *BarrierView field from backendEntry struct [[GH-20933](https://github.com/hashicorp/vault/pull/20933)]
+* core: use Go stdlib functionalities instead of explicit byte/string conversions [[GH-21854](https://github.com/hashicorp/vault/pull/21854)]
+* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)]
+* events: Allow subscriptions to multiple namespaces [[GH-22540](https://github.com/hashicorp/vault/pull/22540)]
+* events: Enabled by default [[GH-22815](https://github.com/hashicorp/vault/pull/22815)]
+* events: WebSocket subscriptions add support for boolean filter expressions [[GH-22835](https://github.com/hashicorp/vault/pull/22835)]
+* framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI [[GH-18492](https://github.com/hashicorp/vault/pull/18492)]
+* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)]
+* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)]
+* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)]
+* openapi: Fix generation of correct fields in some rarer cases [[GH-21942](https://github.com/hashicorp/vault/pull/21942)]
+* openapi: Fix response definitions for list operations [[GH-21934](https://github.com/hashicorp/vault/pull/21934)]
+* openapi: List operations are now given first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path [[GH-21723](https://github.com/hashicorp/vault/pull/21723)]
+* plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
+* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary
+* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase.
+* sdk/framework: Adds replication state helper for backends to check for read-only storage [[GH-21743](https://github.com/hashicorp/vault/pull/21743)]
+* secrets/database: Improves error logging for static role rotations by including the database and role names.
[[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* secrets/db: Remove the `service_account_json` parameter when reading DB connection details [[GH-23256](https://github.com/hashicorp/vault/pull/23256)] +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates [[GH-21081](https://github.com/hashicorp/vault/pull/21081)] +* storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. [[GH-21742](https://github.com/hashicorp/vault/pull/21742)] +* storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable [[GH-12666](https://github.com/hashicorp/vault/pull/12666)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] +* ui: Add API Explorer link to Sidebar, under Tools. [[GH-21578](https://github.com/hashicorp/vault/pull/21578)] +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds mount configuration details to Kubernetes secrets engine configuration view [[GH-22926](https://github.com/hashicorp/vault/pull/22926)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] +* ui: Display minus icon for empty MaskedInput value. 
Show MaskedInput for KV secrets without values [[GH-22039](https://github.com/hashicorp/vault/pull/22039)] +* ui: JSON diff view available in "Create New Version" form for KV v2 [[GH-22593](https://github.com/hashicorp/vault/pull/22593)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: Move access to KV V2 version diff view to toolbar in Version History [[GH-23200](https://github.com/hashicorp/vault/pull/23200)] +* ui: Update pki mount configuration details to match the new mount configuration details pattern [[GH-23166](https://github.com/hashicorp/vault/pull/23166)] +* ui: add example modal to policy form [[GH-21583](https://github.com/hashicorp/vault/pull/21583)] +* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] +* ui: display CertificateCard instead of MaskedInput for certificates in PKI [[GH-22160](https://github.com/hashicorp/vault/pull/22160)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] +* ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component [[GH-21375](https://github.com/hashicorp/vault/pull/21375)] +* ui: update detail views that render ttl durations to display full unit instead of letter (i.e. 'days' instead of 'd') [[GH-20697](https://github.com/hashicorp/vault/pull/20697)] +* ui: update unseal and DR operation token flow components [[GH-21871](https://github.com/hashicorp/vault/pull/21871)] +* ui: upgrade Ember to 4.12 [[GH-22122](https://github.com/hashicorp/vault/pull/22122)] + +DEPRECATIONS: + +* auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 [[GH-23050](https://github.com/hashicorp/vault/pull/23050)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] +* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] +* api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. [[GH-22410](https://github.com/hashicorp/vault/pull/22410)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* audit: Prevent panic due to nil pointer receiver for audit header formatting. [[GH-22694](https://github.com/hashicorp/vault/pull/22694)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21800](https://github.com/hashicorp/vault/pull/21800)] +* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] +* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] +* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. 
[[GH-21951](https://github.com/hashicorp/vault/pull/21951)] +* cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. [[GH-21546](https://github.com/hashicorp/vault/pull/21546)] +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core (enterprise): Fix sentinel policy check logic so that sentinel +policies are not used when Sentinel feature isn't licensed. +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. +* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* core: fix race when updating a mount's route entry tainted status and incoming requests [[GH-21640](https://github.com/hashicorp/vault/pull/21640)] +* events: Ensure subscription resources are cleaned up on close. [[GH-23042](https://github.com/hashicorp/vault/pull/23042)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs [[GH-20879](https://github.com/hashicorp/vault/pull/20879)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. 
[[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
+* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)]
+* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)]
+* plugins: Containerized plugins can be run with mlock enabled. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
+* plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
+* plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
+* plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
+* plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
+* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
+* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath
+* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
+* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs
+* replication (enterprise): Fix a bug by which the atomicity of a merkle diff result could be affected, which could cause a merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
+* replication (enterprise): Sort cluster addresses returned by echo requests, so that primary-addrs only gets persisted when the set of addrs changes.
+* replication (enterprise): update primary cluster address after DR failover
+* sdk/ldaputil: Properly escape user filters when using UPN domains; use the EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
+* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)]
+* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)]
+* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
+* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
+* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)]
+* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this fixes the error: `no managed key found with uuid`.
[[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: `allowed_domains` are now compared in a case-insensitive manner if they use glob patterns [[GH-22126](https://github.com/hashicorp/vault/pull/22126)] +* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode API will now contain the `decoded_value` element +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* secrets/transit: fix panic when providing a non-PEM-formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* storage/consul: Consul service registration tags are now case-sensitive. [[GH-6483](https://github.com/hashicorp/vault/pull/6483)] +* storage/raft: Fix race where a newly joining follower can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: Fix display of the "Last Vault Rotation" timestamp for static database roles, which was not rendering and could not be copied [[GH-22519](https://github.com/hashicorp/vault/pull/22519)] +* ui: Fix styling for username input when editing a user [[GH-21771](https://github.com/hashicorp/vault/pull/21771)] +* ui: Fix styling for viewing certificate in kubernetes configuration [[GH-21968](https://github.com/hashicorp/vault/pull/21968)] +* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] +* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] +* ui: Fixes form field label tooltip alignment [[GH-22832](https://github.com/hashicorp/vault/pull/22832)] +* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] +* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] +* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (e.g. ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] +* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] +* ui: correct doctype for index.html [[GH-22153](https://github.com/hashicorp/vault/pull/22153)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes long namespace names overflowing in the sidebar +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] +* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)]
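As a brief illustration of the `-field` fix in the list above, a minimal sketch of the corrected behavior; the mount, role, and field names here are hypothetical. With `-field`, `vault write` now prints only the raw field value, with no "Success" banner:

```
# Hypothetical SSH secrets mount and role; only the requested field is printed.
$ vault write -field=signed_key ssh/sign/deploy public_key=@id_rsa.pub
ssh-rsa AAAAB3NzaC1yc2E...
```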
+ +## 1.14.9 +### January 31, 2024 + +CHANGES: + +* core: Bump Go version to 1.20.12. +* database/snowflake: Update plugin to v0.9.2 [[GH-25057](https://github.com/hashicorp/vault/pull/25057)] IMPROVEMENTS: -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)] +* command/server: display logs on startup immediately if the `disable-gated-logs` flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)] +* oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata (a sample query follows this section) [[GH-24979](https://github.com/hashicorp/vault/pull/24979)] +* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)] +* ui: The latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)] BUG FIXES: -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)] +* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4. +* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)] +* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)] +* ui: The UI can now be used to create or update database roles by operator without permission on the database connection. [[GH-24660](https://github.com/hashicorp/vault/pull/24660)] +* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)]
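As a sketch of the new `code_challenge_methods_supported` metadata noted in the improvements above, assuming an OIDC provider named `default`; the provider name and the abridged output are illustrative:

```
# The provider's discovery document is served unauthenticated.
$ curl -s $VAULT_ADDR/v1/identity/oidc/provider/default/.well-known/openid-configuration \
    | jq .code_challenge_methods_supported
[
  "plain",
  "S256"
]
```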
-## 1.12.1 -### November 2, 2022 +## 1.14.8 +### December 06, 2023 + +SECURITY: + +* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741)) + +CHANGES: + +* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)] + +BUG FIXES: + +* agent/logging: Agent should now honor correct `-log-format` and `-log-file` settings in logs generated by the consul-template library. [[GH-24252](https://github.com/hashicorp/vault/pull/24252)] +* api: Fix deadlock on calls to sys/leader with a namespace configured on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)] +* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)] +* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)] + +## 1.14.7 +### November 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.11.
IMPROVEMENTS: -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] +* core (enterprise): Speed up unseal when using namespaces +* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)] +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] +* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)] BUG FIXES: -* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. -* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. 
[[GH-24058](https://github.com/hashicorp/vault/pull/24058)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] -## 1.12.0 -### October 13, 2022 +## 1.14.6 +### November 09, 2023 + +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] CHANGES: -* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] -* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] -* core: Bump Go version to 1.19.2. -* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] -* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. -* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. 
[[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `plugin list` now accepts a `-detailed` flag, which display deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] -* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] -* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] FEATURES: -* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys -* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] -* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] -* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a stand alone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] -* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)] -* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] -* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations -* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature -* ui: UI support for Okta Number Challenge. 
[[GH-15998](https://github.com/hashicorp/vault/pull/15998)] -* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] IMPROVEMENTS: -* :core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api -* activity (enterprise): Added new clients unit tests to test accuracy of estimates -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] -* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] -* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] -* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] -* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] -* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)] -* auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses. -When either the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)] -* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] -* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)] -* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] -* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)] -* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)] -* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] -* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)] -* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. 
[[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)] -* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)] -* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] -* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] -* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)] -* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)] -* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command -* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported. -* core (enterprise): Add custom metadata support for namespaces -* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] -* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] -* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] -* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas -* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role -* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] -* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] -* core: Activity log goroutine management improvements to allow tests to be more deterministic. 
[[GH-17028](https://github.com/hashicorp/vault/pull/17028)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] -* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] -* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] -* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] -* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)] -* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] -* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] -* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] -* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)] -* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)] -* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)] -* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)] -* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer. -* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] -* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] -* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] -* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)] -* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] -* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). 
[[GH-15742](https://github.com/hashicorp/vault/pull/15742)] -* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] -* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)] -* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] -* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)] -* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)] -* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)] -* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)] -* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] -* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] -* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] -* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] -* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] -* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] -* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] -* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)] -* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] -* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] -* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] -* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)] -* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. 
[[GH-16773](https://github.com/hashicorp/vault/pull/16773)] -* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] -* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] -* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] -* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] -* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. [[GH-16549](https://github.com/hashicorp/vault/pull/16549)] -* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] -* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] -* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)] -* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] -* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] -* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] -* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)] -* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] -* website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)] -* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)] -* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] -* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] -* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] BUG FIXES: -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Agent will now respect `max_retries` retry configuration even when caching is set. 
[[GH-16970](https://github.com/hashicorp/vault/pull/16970)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)] -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)] -* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts. -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases -* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)] -* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. 
[[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] -* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* replication (enterprise): Fix data race in saveCheckpoint. -* replication (enterprise): Fix possible data race during merkle diff/sync -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] -* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] -* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. 
[[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails; log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix a 500 error code returned from the secondary when reading an "auth" mount using "sys/internal/ui/mounts/" while filter paths are enforced [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas; these can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] -## 1.11.6 -### November 30, 2022 +## 1.14.5 +### October 25, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.10. +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host IMPROVEMENTS: -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)] +* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] BUG FIXES: -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a mount loop if default policies are not yet synced from the active node.
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +* command/server: Fix bug with SIGUSR2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow a non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. +* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] +* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] +* secrets/transit (enterprise): Address an issue where sign/verify operations with managed keys returned an error about them not containing a private key +* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled; only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types (see the sketch after this list) [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] +* storage/consul: fix a bug where an active node in a specific sort of network partition could continue to write data to Consul after a new leader is elected, potentially causing data loss or corruption for keys with many concurrent writers. For Enterprise clusters this could corrupt the merkle trees, leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)] +* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] +* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] +* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)] +* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
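For the Transit auto-rotation change above, a minimal sketch of the now-rejected configuration; the key name is hypothetical and the exact error text may differ:

```
# Hypothetical Transit key backed by a managed key; enabling auto rotation is refused.
$ vault write transit/keys/ext-managed-key/config auto_rotate_period=24h
Error writing data to transit/keys/ext-managed-key/config: ...
```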
-## 1.11.5 -### November 2, 2022 +## 1.14.4 +### September 27, 2023 -IMPROVEMENTS: +SECURITY: -* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] -BUG FIXES: +CHANGES: -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.11.4 -### September 30, 2022 +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy IMPROVEMENTS: -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] -* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] BUG FIXES: -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit.
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] +* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] -## 1.11.3 -### August 31, 2022 +## 1.14.3 +### September 13, 2023 + +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] CHANGES: -* core: Bump Go version to 1.17.13. +* core: Bump Go version to 1.20.8. + +FEATURES: + +* ** Merkle Tree Corruption Detection (enterprise) **: Add a new endpoint to check merkle tree corruption. IMPROVEMENTS: -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the -Kerberos config in Vault. This removes any instance names found in the keytab -service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. 
[[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling BUG FIXES: -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. 
[[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable +* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] +* ui: fixes long namespace names overflow in the sidebar -SECURITY: +## 1.14.2 +### August 30, 2023 -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] +CHANGES: -## 1.11.2 -### August 2, 2022 +* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] +* core: Bump Go version to 1.20.7. +* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] + +IMPROVEMENTS: + +* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)] +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
[[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] +* website/docs: Fix link formatting in Vault lambda extension docs [[GH-22396](https://github.com/hashicorp/vault/pull/22396)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22330](https://github.com/hashicorp/vault/pull/22330)] +* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element +* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] +* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] + +## 1.14.1 +### July 25, 2023 + +SECURITY: + +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] -IMPROVEMENTS: +CHANGES: -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] + +IMPROVEMENTS: + +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] +* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME.
[[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] + +BUG FIXES: + +* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21800](https://github.com/hashicorp/vault/pull/21800)] +* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] +* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] +* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] +* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] +* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] +* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`.
[[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] +* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] +* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] +* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] +* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] + +## 1.14.0 +### June 21, 2023 -BUG FIXES: +SECURITY: -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] -## 1.11.1 -### July 21, 2022 +BREAKING CHANGES: + +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config; to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics (a minimal sketch follows below). [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] CHANGES: -* core: Bump Go version to 1.17.12.
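The breaking change above names the two tidy-config toggles that restore certificate counting. As a minimal sketch, assuming a PKI engine mounted at `pki` and using the `config/auto-tidy` endpoint referenced elsewhere in these notes, opting back in via the official Go client might look like this:

```go
// Minimal sketch: re-enable stored certificate counts after the 1.14.0
// breaking change. The "pki" mount path is an assumption; the endpoint and
// field names come from the changelog entries above.
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Both toggles now default to off; enable them explicitly.
	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"maintain_stored_certificate_counts":       true, // counts on tidy-status
		"publish_stored_certificate_count_metrics": true, // also emit as metrics
	})
	if err != nil {
		log.Fatal(err)
	}
}
```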
+* auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 [[GH-20758](https://github.com/hashicorp/vault/pull/20758)] +* auth/azure: Updated plugin from v0.13.0 to v0.15.0 [[GH-20816](https://github.com/hashicorp/vault/pull/20816)] +* auth/centrify: Updated plugin from v0.14.0 to v0.15.1 [[GH-20745](https://github.com/hashicorp/vault/pull/20745)] +* auth/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20725](https://github.com/hashicorp/vault/pull/20725)] +* auth/jwt: Updated plugin from v0.15.0 to v0.16.0 [[GH-20799](https://github.com/hashicorp/vault/pull/20799)] +* auth/kubernetes: Update plugin to v0.16.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* core: Bump Go version to 1.20.5. +* core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. [[GH-20834](https://github.com/hashicorp/vault/pull/20834)] +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* database/couchbase: Updated plugin from v0.9.0 to v0.9.2 [[GH-20764](https://github.com/hashicorp/vault/pull/20764)] +* database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 [[GH-20751](https://github.com/hashicorp/vault/pull/20751)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of +via a token. +* secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 [[GH-20750](https://github.com/hashicorp/vault/pull/20750)] +* secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 [[GH-20787](https://github.com/hashicorp/vault/pull/20787)] +* secrets/azure: Updated plugin from v0.15.0 to v0.16.0 [[GH-20777](https://github.com/hashicorp/vault/pull/20777)] +* secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 [[GH-20807](https://github.com/hashicorp/vault/pull/20807)] +* secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20818](https://github.com/hashicorp/vault/pull/20818)] +* secrets/keymgmt: Updated plugin to v0.9.1 +* secrets/kubernetes: Update plugin to v0.5.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 [[GH-20742](https://github.com/hashicorp/vault/pull/20742)] +* secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. [[GH-21209](https://github.com/hashicorp/vault/pull/21209)] +* secrets/pki: Warning when issuing leafs from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. [[GH-20654](https://github.com/hashicorp/vault/pull/20654)] + +FEATURES: + +* **AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users (a minimal API sketch follows below). [[GH-20536](https://github.com/hashicorp/vault/pull/20536)] +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them.
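A hedged illustration of the AWS Static Roles feature above: the mount path (`aws`), the `static-roles`/`static-creds` endpoints, and the field names are assumptions for the sketch, not confirmed by this changelog.

```go
// Minimal sketch: manage an AWS static role via the Go client.
// Paths and field names ("aws/static-roles", "username", "rotation_period",
// "aws/static-creds") are assumptions for illustration only.
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Bind the Vault role to an existing (hypothetical) IAM user whose
	// access key Vault should rotate on a fixed schedule.
	_, err = client.Logical().Write("aws/static-roles/my-app", map[string]interface{}{
		"username":        "my-iam-user",
		"rotation_period": "24h",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reading the assumed static-creds path would return the current key pair.
	secret, err := client.Logical().Read("aws/static-creds/my-app")
	if err != nil {
		log.Fatal(err)
	}
	if secret != nil {
		log.Println(secret.Data["access_key"])
	}
}
```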
+* **Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. [[GH-20530](https://github.com/hashicorp/vault/pull/20530)] +* **MongoDB Atlas Database Secrets**: Adds support for client certificate credentials [[GH-20425](https://github.com/hashicorp/vault/pull/20425)] +* **MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* **NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience [[GH-pki-ui-improvements](https://github.com/hashicorp/vault/pull/pki-ui-improvements)] +* **Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run +as an external plugin by vault versions that support secrets/auth plugin +multiplexing (> 1.12) [[GH-19215](https://github.com/hashicorp/vault/pull/19215)] +* **Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. [[GH-19296](https://github.com/hashicorp/vault/pull/19296)] +* **Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. [[GH-20752](https://github.com/hashicorp/vault/pull/20752)] +* **Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. [[GH-20548](https://github.com/hashicorp/vault/pull/20548)] +* **OCI Auto-Auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method [[GH-19260](https://github.com/hashicorp/vault/pull/19260)] + +IMPROVEMENTS: + +* api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. [[GH-20265](https://github.com/hashicorp/vault/pull/20265)] +* physical/etcd: Upgrade etcd3 client to v3.5.7 [[GH-20261](https://github.com/hashicorp/vault/pull/20261)] +* activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. [[GH-20626](https://github.com/hashicorp/vault/pull/20626)] +* agent: Add integration tests for agent running in process supervisor mode [[GH-20741](https://github.com/hashicorp/vault/pull/20741)] +* agent: Add logic to validate env_template entries in configuration [[GH-20569](https://github.com/hashicorp/vault/pull/20569)] +* agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs.
[[GH-19002](https://github.com/hashicorp/vault/pull/19002)] +* agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false), that, if set to true, will remove the JWT itself rather than the symlink to it when a symlink to a JWT was provided in the `path` option and the `remove_jwt_after_reading` config option is set to true (the default). [[GH-18863](https://github.com/hashicorp/vault/pull/18863)] +* agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. [[GH-19776](https://github.com/hashicorp/vault/pull/19776)] +* agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent [[GH-20628](https://github.com/hashicorp/vault/pull/20628)] +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* api: property based testing for LifetimeWatcher sleep duration calculation [[GH-17919](https://github.com/hashicorp/vault/pull/17919)] +* audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging [[GH-19814](https://github.com/hashicorp/vault/pull/19814)] +* audit: forwarded requests can now contain host metadata on the node it was sent 'from' or a flag to indicate that it was forwarded. +* auth/cert: Better return OCSP validation errors during login to the caller. [[GH-20234](https://github.com/hashicorp/vault/pull/20234)] +* auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies [[GH-20771](https://github.com/hashicorp/vault/pull/20771)] +* auth/ldap: allow configuration of alias dereferencing in LDAP search [[GH-18230](https://github.com/hashicorp/vault/pull/18230)] +* auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI [[GH-18225](https://github.com/hashicorp/vault/pull/18225)] +* auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. [[GH-19247](https://github.com/hashicorp/vault/pull/19247)] +* build: Prefer GOBIN when set over GOPATH/bin when building the binary [[GH-19862](https://github.com/hashicorp/vault/pull/19862)] +* cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path [[GH-20464](https://github.com/hashicorp/vault/pull/20464)] +* cli: Improve addPrefixToKVPath helper [[GH-20488](https://github.com/hashicorp/vault/pull/20488)] +* command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. [[GH-20629](https://github.com/hashicorp/vault/pull/20629)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests.
[[GH-20224](https://github.com/hashicorp/vault/pull/20224)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. [[GH-20559](https://github.com/hashicorp/vault/pull/20559)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: include reason for ErrReadOnly on PBPWF writing failures +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* core: provide more descriptive error message when calling enterprise feature paths in open-source [[GH-18870](https://github.com/hashicorp/vault/pull/18870)] +* database/elasticsearch: Upgrade plugin dependencies [[GH-20767](https://github.com/hashicorp/vault/pull/20767)] +* database/mongodb: upgrade mongo driver to 1.11 [[GH-19954](https://github.com/hashicorp/vault/pull/19954)] +* database/redis: Upgrade plugin dependencies [[GH-20763](https://github.com/hashicorp/vault/pull/20763)] +* http: Support responding to HEAD operation from plugins [[GH-19520](https://github.com/hashicorp/vault/pull/19520)] +* openapi: Add openapi response definitions to /sys defined endpoints. [[GH-18633](https://github.com/hashicorp/vault/pull/18633)] +* openapi: Add openapi response definitions to pki/config_*.go [[GH-18376](https://github.com/hashicorp/vault/pull/18376)] +* openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints.
[[GH-18515](https://github.com/hashicorp/vault/pull/18515)] +* openapi: Consistently stop Vault server on exit in gen_openapi.sh [[GH-19252](https://github.com/hashicorp/vault/pull/19252)] +* openapi: Improve operationId/request/response naming strategy [[GH-19319](https://github.com/hashicorp/vault/pull/19319)] +* openapi: add openapi response definitions to /sys/internal endpoints [[GH-18542](https://github.com/hashicorp/vault/pull/18542)] +* openapi: add openapi response definitions to /sys/rotate endpoints [[GH-18624](https://github.com/hashicorp/vault/pull/18624)] +* openapi: add openapi response definitions to /sys/seal endpoints [[GH-18625](https://github.com/hashicorp/vault/pull/18625)] +* openapi: add openapi response definitions to /sys/tool endpoints [[GH-18626](https://github.com/hashicorp/vault/pull/18626)] +* openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req [[GH-18628](https://github.com/hashicorp/vault/pull/18628)] +* openapi: add openapi response definitions to /sys/wrapping endpoints [[GH-18627](https://github.com/hashicorp/vault/pull/18627)] +* openapi: add openapi response definitions to /sys/auth endpoints [[GH-18465](https://github.com/hashicorp/vault/pull/18465)] +* openapi: add openapi response definitions to /sys/capabilities endpoints [[GH-18468](https://github.com/hashicorp/vault/pull/18468)] +* openapi: add openapi response definitions to /sys/config and /sys/generate-root endpoints [[GH-18472](https://github.com/hashicorp/vault/pull/18472)] +* openapi: added ability to validate response structures against openapi schema for test clusters [[GH-19043](https://github.com/hashicorp/vault/pull/19043)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* sdk: Add new docker-based cluster testing framework to the sdk. [[GH-20247](https://github.com/hashicorp/vault/pull/20247)] +* secrets/ad: upgrades dependencies [[GH-19829](https://github.com/hashicorp/vault/pull/19829)] +* secrets/alicloud: upgrades dependencies [[GH-19846](https://github.com/hashicorp/vault/pull/19846)] +* secrets/consul: Improve error message when ACL bootstrapping fails. [[GH-20891](https://github.com/hashicorp/vault/pull/20891)] +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies [[GH-20784](https://github.com/hashicorp/vault/pull/20784)] +* secrets/mongodbatlas: upgrades dependencies [[GH-19861](https://github.com/hashicorp/vault/pull/19861)] +* secrets/openldap: upgrades dependencies [[GH-19993](https://github.com/hashicorp/vault/pull/19993)] +* secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. [[GH-20442](https://github.com/hashicorp/vault/pull/20442)] +* secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. [[GH-20253](https://github.com/hashicorp/vault/pull/20253)] +* secrets/pki: Allow determining existing issuers and keys on import. [[GH-20441](https://github.com/hashicorp/vault/pull/20441)] +* secrets/pki: Include CA serial number, key UUID on issuers list endpoint.
[[GH-20276](https://github.com/hashicorp/vault/pull/20276)] +* secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days [[GH-20981](https://github.com/hashicorp/vault/pull/20981)] +* secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. [[GH-20943](https://github.com/hashicorp/vault/pull/20943)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL [[GH-19616](https://github.com/hashicorp/vault/pull/19616)] +* secrets/terraform: upgrades dependencies [[GH-19798](https://github.com/hashicorp/vault/pull/19798)] +* secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data [[GH-17934](https://github.com/hashicorp/vault/pull/17934)] +* secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. [[GH-19519](https://github.com/hashicorp/vault/pull/19519)] +* secrets/transit: Respond to writes with updated key policy, cache configuration. [[GH-20652](https://github.com/hashicorp/vault/pull/20652)] +* secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. [[GH-20736](https://github.com/hashicorp/vault/pull/20736)] +* ui: Add download button for each secret value in KV v2 [[GH-20431](https://github.com/hashicorp/vault/pull/20431)] +* ui: Add filtering by auth type and auth name to the Authentication Method list view. [[GH-20747](https://github.com/hashicorp/vault/pull/20747)] +* ui: Add filtering by engine type and engine name to the Secret Engine list view. [[GH-20481](https://github.com/hashicorp/vault/pull/20481)] +* ui: Adds whitespace warning to secrets engine and auth method path inputs [[GH-19913](https://github.com/hashicorp/vault/pull/19913)] +* ui: Remove the Bulma CSS framework. [[GH-19878](https://github.com/hashicorp/vault/pull/19878)] +* ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata [[GH-20590](https://github.com/hashicorp/vault/pull/20590)] +* ui: Updates UI javascript dependencies [[GH-19901](https://github.com/hashicorp/vault/pull/19901)] +* ui: add allowed_managed_keys field to secret engine mount options [[GH-19791](https://github.com/hashicorp/vault/pull/19791)] +* ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation [[GH-20163](https://github.com/hashicorp/vault/pull/20163)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] +* website/docs: Add rotate root documentation for azure secrets engine [[GH-19187](https://github.com/hashicorp/vault/pull/19187)] +* website/docs: fix database static-user sample payload [[GH-19170](https://github.com/hashicorp/vault/pull/19170)] + +BUG FIXES: + +* agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. [[GH-21297](https://github.com/hashicorp/vault/pull/21297)] +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. 
Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. [[GH-19160](https://github.com/hashicorp/vault/pull/19160)] +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* auth/token: Fix cubbyhole and revocation for legacy service tokens [[GH-19416](https://github.com/hashicorp/vault/pull/19416)] +* cli/kv: add -mount flag to kv list [[GH-19378](https://github.com/hashicorp/vault/pull/19378)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming failing silently when replication setup happens at a bad time. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* openapi: Small fixes for OpenAPI display attributes.
Changed "log-in" to "login" [[GH-20285](https://github.com/hashicorp/vault/pull/20285)] +* plugin/reload: Fix a possible data race with rollback manager and plugin reload [[GH-19468](https://github.com/hashicorp/vault/pull/19468)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and a "invalid value" error being returned from the API. +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. [[GH-18729](https://github.com/hashicorp/vault/pull/18729)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +* secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +* sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* shamir: change mul and div implementations to be constant-time [[GH-19495](https://github.com/hashicorp/vault/pull/19495)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix secret render when path includes %. Resolves #11616. 
[[GH-20430](https://github.com/hashicorp/vault/pull/20430)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes auto_rotate_period ttl input for transit keys [[GH-20731](https://github.com/hashicorp/vault/pull/20731)] +* ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes [[GH-19139](https://github.com/hashicorp/vault/pull/19139)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] +* ui: wait for wanted message event during OIDC callback instead of using the first message event [[GH-18521](https://github.com/hashicorp/vault/pull/18521)] + +## 1.13.13 +### January 31, 2024 + +CHANGES: + +* core: Bump Go version to 1.20.12. +* database/snowflake: Update plugin to v0.7.4 [[GH-25059](https://github.com/hashicorp/vault/pull/25059)] IMPROVEMENTS: -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* command/server: display logs on startup immediately if disable-gated-logs flag is set [[GH-24280](https://github.com/hashicorp/vault/pull/24280)] +* storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. [[GH-24010](https://github.com/hashicorp/vault/pull/24010)] +* ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a redirect link back to the app was added. [[GH-18513](https://github.com/hashicorp/vault/pull/18513)] BUG FIXES: -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* kmip (enterprise): Return SecretData as supported Object Type. -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations.
-* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] +* helper/pkcs7: Fix slice out-of-bounds panic [[GH-24891](https://github.com/hashicorp/vault/pull/24891)] +* kmip (enterprise): Only return a Server Correlation Value to clients using KMIP version 1.4. +* ui: Fixed minor bugs with database secrets engine [[GH-24947](https://github.com/hashicorp/vault/pull/24947)] +* ui: Fixes input for jwks_ca_pem when configuring a JWT auth method [[GH-24697](https://github.com/hashicorp/vault/pull/24697)] +* ui: fix incorrectly calculated capabilities on PKI issuer endpoints [[GH-24686](https://github.com/hashicorp/vault/pull/24686)] + +## 1.13.12 +### December 06, 2023 SECURITY: -* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - -## 1.11.0 -### June 20, 2022 +* core: Fixes an issue present in both Vault and Vault Enterprise since Vault 1.12.0, where Vault is vulnerable to a denial of service through memory exhaustion of the host when handling large HTTP requests from a client. (see [CVE-2023-6337](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-6337) & [HCSEC-2023-34](https://discuss.hashicorp.com/t/hcsec-2023-34-vault-vulnerable-to-denial-of-service-through-memory-exhaustion-when-handling-large-http-requests/60741)) CHANGES: -* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] -* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth: Remove support for legacy MFA -(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] -* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] -* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` -endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). -* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. 
-* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] -* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] -* secrets/pki: A new aliased api path (/pki/issuer/:issuer_ref/sign-self-issued) -providing the same functionality as the existing API(/pki/root/sign-self-issued) -does not require sudo capabilities but the latter still requires it in an -effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] -* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)] -* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead -of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] -* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) -and signing APIs will now include the root CA certificate if the mount is -aware of it. [[GH-15155](https://github.com/hashicorp/vault/pull/15155)] -* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers -and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] -* secrets/pki: existing Generate Root (pki/root/generate/:type), -Set Signed Intermediate (/pki/intermediate/set-signed) APIs will -add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] -* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain -response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] -* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] +* identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. [[GH-24325](https://github.com/hashicorp/vault/pull/24325)] -FEATURES: +BUG FIXES: -* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. -* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] -* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] -* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows -import, generation and configuration of any number of keys and/or issuers -within a PKI mount, providing operators the ability to rotate certificates -in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] -* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. 
[[GH-14899](https://github.com/hashicorp/vault/pull/14899)] -* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] -* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] -* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] -* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)] +* api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. [[GH-24256](https://github.com/hashicorp/vault/pull/24256)] +* core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. [[GH-24336](https://github.com/hashicorp/vault/pull/24336)] +* ui: Fix payload sent when disabling replication [[GH-24292](https://github.com/hashicorp/vault/pull/24292)] -IMPROVEMENTS: +## 1.13.11 +### November 30, 2023 -* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] -* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)] -* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] -* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] -* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] -* api: Added MFALogin() for handling MFA flow when using login helpers. [[GH-14900](https://github.com/hashicorp/vault/pull/14900)] -* api: If the parameters supplied over the API payload are ignored due to not -being what the endpoints were expecting, or if the parameters supplied get -replaced by the values in the endpoint's path itself, warnings will be added to -the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] -* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] -* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] -* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] -* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] -* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] -* audit: Add a policy_results block into the audit log that contains the set of -policies that granted this request access. 
[[GH-15457](https://github.com/hashicorp/vault/pull/15457)] -* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] -* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] -* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] -* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. [[GH-15593](https://github.com/hashicorp/vault/pull/15593)] -* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] -* auth/okta: Add support for performing [the number -challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) -during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] -* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] -* core (enterprise): Include `termination_time` in `sys/license/status` response -* core (enterprise): Include termination time in `license inspect` command output -* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. 
[[GH-15213](https://github.com/hashicorp/vault/pull/15213)] -* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] -* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] -* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. -* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] -* core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)] -* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. [[GH-14957](https://github.com/hashicorp/vault/pull/14957)] -* core: Upgrade golang.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] -* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. -* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] -* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] -* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] -* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] -* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] -* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. [[GH-14178](https://github.com/hashicorp/vault/pull/14178)] -* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] -* secrets/pki: Allow operators to control the issuing certificate behavior when -the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] -* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] -* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] -* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] -* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] -* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls).
[[GH-15509](https://github.com/hashicorp/vault/pull/15509)] -* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] -* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] -* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. [[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] -* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] -* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] -* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] -* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] -* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] -* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] -* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] +CHANGES: -DEPRECATIONS: +* core: Bump Go version to 1.20.11. 
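The KV helper methods added in the 1.11.0 improvements above (GH-15305) are easiest to see in client code. A minimal sketch using the Go client's KVv2 helpers, assuming a KV v2 engine mounted at `secret/` and `VAULT_ADDR`/`VAULT_TOKEN` set in the environment:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Write a KV v2 secret without hand-building the /data path prefix.
	if _, err := client.KVv2("secret").Put(ctx, "my-app", map[string]interface{}{
		"password": "example",
	}); err != nil {
		log.Fatal(err)
	}

	// Read it back; Data holds the secret's key/value pairs.
	s, err := client.KVv2("secret").Get(ctx, "my-app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Data["password"])
}
```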
-* docs: Document removal of X.509 certificates with signatures that use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] -* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] -* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] +IMPROVEMENTS: + +* core (enterprise): Speed up unseal when using namespaces +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] BUG FIXES: -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches.
[[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] -* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. [[GH-15579](https://github.com/hashicorp/vault/pull/15579)] -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] -* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] -* secrets/pki: CRLs on performance secondary clusters are now automatically -rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. 
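The time.After() entry above (GH-14814) refers to a well-known Go pitfall: inside a loop, each `select` that evaluates `time.After` allocates a fresh timer that is not released until it fires, so a steady stream of events keeps piling them up. A sketch of the leak and one common remedy — illustrative, not Vault's exact change:

```go
package main

import "time"

// leaky allocates a new one-hour timer on every iteration; under a steady
// stream of events, none of those timers fire and they accumulate.
func leaky(events <-chan int) {
	for {
		select {
		case <-events:
			// handle event
		case <-time.After(time.Hour):
			return
		}
	}
}

// fixed reuses a single timer, giving one overall deadline for the loop
// instead of allocating a timer per iteration.
func fixed(events <-chan int) {
	timer := time.NewTimer(time.Hour)
	defer timer.Stop()
	for {
		select {
		case <-events:
			// handle event
		case <-timer.C:
			return
		}
	}
}

// Both loops are for illustration; neither is invoked here.
func main() {}
```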
-* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] -* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Fix issue with KV not recomputing model when you changed versions. [[GH-14941](https://github.com/hashicorp/vault/pull/14941)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] -* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24057](https://github.com/hashicorp/vault/pull/24057)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] -## 1.10.9 -### November 30, 2022 +## 1.13.10 +### November 09, 2023 -BUG FIXES: +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. 
A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HCSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +CHANGES: + +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.9.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] + +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] + +IMPROVEMENTS: + +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] -## 1.10.8 -### November 2, 2022 - BUG FIXES: -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue.
+* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] -## 1.10.7 -### September 30, 2022 +## 1.13.9 +### October 25, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.10. +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)] BUG FIXES: -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. 
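The "concurrent map iteration and map write" fatal error mentioned in the expiration fix above (GH-24027) is what the Go runtime raises when one goroutine ranges over a map while another writes to it. A generic sketch of the guard pattern, not Vault's implementation:

```go
package main

import (
	"fmt"
	"sync"
)

// leaseSet guards a map so writers and metric collectors cannot race.
type leaseSet struct {
	mu     sync.RWMutex
	leases map[string]int
}

func (s *leaseSet) add(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.leases[id]++
}

// snapshot copies the map under a read lock so callers can iterate the
// copy at leisure without holding the lock or racing concurrent writers.
func (s *leaseSet) snapshot() map[string]int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make(map[string]int, len(s.leases))
	for k, v := range s.leases {
		out[k] = v
	}
	return out
}

func main() {
	s := &leaseSet{leases: make(map[string]int)}
	s.add("lease-1")
	fmt.Println(s.snapshot())
}
```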
+* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. +* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key +* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] -## 1.10.6 -### August 31, 2022 CHANGES: -* core: Bump Go version to 1.17.13. IMPROVEMENTS: -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] BUG FIXES: -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters.
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)]
+## 1.13.8 +### September 27, 2023 SECURITY: -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.10.5 -### July 21, 2022 +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HCSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] CHANGES: -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: Bump Go version to 1.17.12. +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy IMPROVEMENTS: -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] BUG FIXES: -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations.
-* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fixes old pki's filter and search roles page bug [[GH-22810](https://github.com/hashicorp/vault/pull/22810)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] + +## 1.13.7 +### September 13, 2023 -## 1.10.4 -### June 10, 2022 +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HCSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] CHANGES: -* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] +* core: Bump Go version to 1.20.8. +* database/snowflake: Update plugin to v0.7.3 [[GH-22591](https://github.com/hashicorp/vault/pull/22591)] + +FEATURES: + +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. IMPROVEMENTS: -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections.
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling BUG FIXES: -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. 
[[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -## 1.10.3 -### May 11, 2022 +## 1.13.6 +### August 30, 2023 -SECURITY: -* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. +CHANGES: -BUG FIXES: +* core: Bump Go version to 1.20.7. -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +IMPROVEMENTS: -## 1.10.2 -### April 29, 2022 +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] BUG FIXES: -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. 
+Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node are observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)] +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] -## 1.10.1 -### April 22, 2022 +## 1.13.5 +### July 25, 2023 + +SECURITY: + +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HCSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HCSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] CHANGES: -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500.
[[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.9. [[GH-15044](https://github.com/hashicorp/vault/pull/15044)] +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. IMPROVEMENTS: -* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. 
[[GH-21681](https://github.com/hashicorp/vault/pull/21681)] BUG FIXES: -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field.
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21799](https://github.com/hashicorp/vault/pull/21799)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21632](https://github.com/hashicorp/vault/pull/21632)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] -## 1.10.0 -### March 23, 2022 +## 1.13.4 +### June 21, 2023 +BREAKING CHANGES: + +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config; to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics. [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] CHANGES: -* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x.
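The 1.13.4 breaking change above turns the running certificate count off by default. A minimal Go sketch of re-enabling it with the `github.com/hashicorp/vault/api` client follows; it assumes a PKI engine mounted at `pki/`, that both flags are settable on the `pki/config/auto-tidy` endpoint, and that the tidy status endpoint reports a `current_cert_store_count` field — treat those path and field names as assumptions, not confirmed API.

```
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Re-enable the running certificate count and its metrics publication,
	// both of which 1.13.4 turns off by default. The auto-tidy path is an
	// assumption based on where the other tidy flags live.
	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"maintain_stored_certificate_counts":       true,
		"publish_stored_certificate_count_metrics": true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The count should then be visible again on the tidy status endpoint
	// ("current_cert_store_count" is an assumed field name).
	status, err := client.Logical().Read("pki/tidy-status")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(status.Data["current_cert_store_count"])
}
```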
-* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by -the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)] -* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] -* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft -Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* storage/etcd: Remove support for v2. [[GH-14193](https://github.com/hashicorp/vault/pull/14193)] -* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] +* core: Bump Go version to 1.20.5. FEATURES: -* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. [[GH-14025](https://github.com/hashicorp/vault/pull/14025)] -* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. -* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] -* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] -* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] -* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] -* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] -* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] -* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: + +* api: GET ... 
/sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.13.3 +### June 08, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.4. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. 
[[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of +via a token. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] + +IMPROVEMENTS: + +* Add debug symbols back to builds to fix Dynatrace support [[GH-20519](https://github.com/hashicorp/vault/pull/20519)] +* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] +* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-19472](https://github.com/hashicorp/vault/pull/19472)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] + +BUG FIXES: + +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl-like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* cli: disable printing flag warning messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] +* command/server: fixes panic in Vault server command when running in recovery mode [[GH-20418](https://github.com/hashicorp/vault/pull/20418)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32-bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/identity: Allow updates of only the custom-metadata for entity alias. [[GH-20368](https://github.com/hashicorp/vault/pull/20368)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary.
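The 1.13.3 improvements above add REST decoding of the encoded root token produced by `vault operator generate-root` ([[GH-20595](https://github.com/hashicorp/vault/pull/20595)]). A hedged Go sketch follows; the `sys/decode-token` path and the `encoded_token`/`otp`/`token` field names are assumptions drawn from the generate-root workflow rather than confirmed by this changelog.

```
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// decodeRootToken asks the server to decode an encoded root token,
// instead of decoding it locally with the OTP as the CLI does.
func decodeRootToken(client *vault.Client, encoded, otp string) (string, error) {
	resp, err := client.Logical().Write("sys/decode-token", map[string]interface{}{
		"encoded_token": encoded, // assumed parameter name
		"otp":           otp,     // assumed parameter name
	})
	if err != nil {
		return "", err
	}
	// "token" is an assumed response field name.
	token, _ := resp.Data["token"].(string)
	return token, nil
}

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder values; use the output of `vault operator generate-root`.
	token, err := decodeRootToken(client, "EncodedTokenHere", "OTPHere")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token)
}
```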
+* replication (enterprise): fix bug where secondary grpc connections would time out when connecting to a primary host that no longer exists. +* secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. [[GH-20354](https://github.com/hashicorp/vault/pull/20354)] +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes issue creating mfa login enforcement from method enforcements tab [[GH-20603](https://github.com/hashicorp/vault/pull/20603)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] + +## 1.13.2 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.3. + +SECURITY: + +* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. [[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)] + +IMPROVEMENTS: + +* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)] +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* core: include reason for ErrReadOnly on PBPWF writing failures +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] +* sys/wrapping: Add an example of how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)] +* ui: Allows license-banners to be dismissed. Saves preferences in localStorage.
[[GH-19116](https://github.com/hashicorp/vault/pull/19116)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. +* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and an "invalid value" error being returned from the API. +* replication (enterprise): Fix replication status for Primary clusters showing their primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] +* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] +* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] +* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers.
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] +* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] + +## 1.13.1 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] IMPROVEMENTS: -* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. [[GH-14214](https://github.com/hashicorp/vault/pull/14214)] -* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] -* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. 
[[GH-13515](https://github.com/hashicorp/vault/pull/13515)] -* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] -* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] -* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] -* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] -* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] -* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] -* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] -* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] -* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] -* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] -* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] -* auth/ldap: Add a response warning and server log whenever the config is accessed -if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] -* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] -* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will -not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] -* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] -* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] -* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status -* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] -* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] -* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] -* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] -* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] -* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] -* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] -* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] -* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] -* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] -* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] -* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections -has also been added to the PROXY protocol v1. [[GH-13540](https://github.com/hashicorp/vault/pull/13540)] -* http (enterprise): Serve /sys/license/status endpoint within namespaces -* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] -* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] -* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] -* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] -* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] -* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] -* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)] -* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)] -* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)] -* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)] -* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)] -* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)] -* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)] -* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)] -* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)] -* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)] -* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] -* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)] -* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)] -* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] -* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] -* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] -* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] -* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] -* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] -* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] -* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] -* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json.
[[GH-13537](https://github.com/hashicorp/vault/pull/13537)] -* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] -* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] -* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] -* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] -* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] -* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] -* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] -* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] -* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases -for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] -* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] -* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] -* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] - -BUG FIXES: - -* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] -* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] -* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token -* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] -* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] -* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)] -* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)] -* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] -* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] -* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] -* core (enterprise): Fix a data race in logshipper. -* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions -* core/api: Fix overwriting of request headers when using JSONMergePatch. 
[[GH-14222](https://github.com/hashicorp/vault/pull/14222)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] -* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)] -* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)] -* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] -* core: `-output-curl-string` now properly sets cURL options for client and CA -certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)] -* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] -* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)] -* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] -* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] -* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] -* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] -* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)] -* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] -* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] -* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)] -* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)] -* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] -* kmip (enterprise): Fix locate by name operations failing to find key after a rekey operation. -* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. -* metrics/autosnapshots (enterprise): Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error.
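For the `cluster_addr` entry above ([[GH-13678](https://github.com/hashicorp/vault/pull/13678)]): go-sockaddr templates can be resolved with the same `github.com/hashicorp/go-sockaddr/template` package, as in this illustrative sketch (not Vault's internal code path).

```
package main

import (
	"fmt"
	"log"

	socktmpl "github.com/hashicorp/go-sockaddr/template"
)

func main() {
	// A config line such as
	//   cluster_addr = "https://{{ GetPrivateIP }}:8201"
	// is rendered by the go-sockaddr template engine before use.
	addr, err := socktmpl.Parse(`https://{{ GetPrivateIP }}:8201`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(addr) // e.g. https://10.0.0.5:8201
}
```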
-* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] -* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] -* replication (enterprise): When using encrypted secondary tokens, only clear the -private key after a successful connection to the primary cluster -* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] -* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] -* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] -* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] -* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] -* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] -* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) -operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] -* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] -* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] -* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] -* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] -* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] -* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. 
[[GH-12872](https://github.com/hashicorp/vault/pull/12872)] -* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] -* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] -* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] -* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] -* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] -* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] -* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] -* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] -* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] -* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] -* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] -* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] -* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] -* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] -* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] -* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] -* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue 
with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] -* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] -* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] -* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] -* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] -* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] -* ui: trigger token renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] - -## 1.9.10 -### September 30, 2022 - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - -## 1.9.9 -### August 31, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.13. - -BUG FIXES: - -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.9.8 -### July 21, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.12. - -IMPROVEMENTS: - -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. 
[[GH-16018](https://github.com/hashicorp/vault/pull/16018)] - -BUG FIXES: - -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - -## 1.9.7 -### June 10, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] - -IMPROVEMENTS: - -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] - -BUG FIXES: - -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
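The 1.9.5 notes below include the LifetimeWatcher grace-period fix ([[GH-14836](https://github.com/hashicorp/vault/pull/14836)]). For context, here is a minimal sketch of the renewal loop in which that `Increment` value is consumed, using the public `github.com/hashicorp/vault/api` LifetimeWatcher (usage pattern only; lease acquisition omitted).

```
// Package example sketches the api.LifetimeWatcher renewal loop.
package example

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func renewLoop(client *vault.Client, secret *vault.Secret) {
	// The Increment requested here feeds the grace-period calculation
	// that GH-14836 fixed.
	watcher, err := client.NewLifetimeWatcher(&vault.LifetimeWatcherInput{
		Secret:    secret,
		Increment: 3600, // ask for one-hour renewals
	})
	if err != nil {
		log.Fatal(err)
	}

	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal stopped: lease expired, was revoked, or is not renewable.
			// Re-read the secret or re-authenticate here.
			if err != nil {
				log.Println("watcher done:", err)
			}
			return
		case renewal := <-watcher.RenewCh():
			log.Println("renewed at", renewal.RenewedAt)
		}
	}
}
```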
-* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] - -## 1.9.6 -### April 29, 2022 - -BUG FIXES: - -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] - - -## 1.9.5 -### April 22, 2022 - -CHANGES: - -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.9. [[GH-15045](https://github.com/hashicorp/vault/pull/15045)] - -IMPROVEMENTS: - -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer -* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] - -BUG FIXES: - -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values.
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] -* metrics/autosnapshots (enterprise): Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error. -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] -* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] - - -## 1.9.4 -### March 3, 2022 - -SECURITY: -* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. - -CHANGES: - -* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft -Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] - -IMPROVEMENTS: - -* core: Bump Go version to 1.17.7.
-* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)]
-
-BUG FIXES:
-
-* Fixed bug where the Azure auth method only considered the system identity when multiple identities were available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)]
-* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)]
-* auth/kubernetes: ensure valid entity alias names are created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)]
-* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)]
-* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)]
-* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)]
-* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root)
-operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)]
-* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)]
-* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)]
-* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)]
-* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)]
-* ui: Fix default TTL display and setting on database roles [[GH-14224](https://github.com/hashicorp/vault/pull/14224)]
-* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)]
-* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)]
-* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)]
-* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)]
-
-## 1.9.3
-### January 27, 2022
-
-IMPROVEMENTS:
-
-* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13698](https://github.com/hashicorp/vault/pull/13698)]
-* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)]
-* core/identity: Support updating an alias' `custom_metadata` to be empty. [[GH-13395](https://github.com/hashicorp/vault/pull/13395)]
-* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)]
-* http (enterprise): Serve /sys/license/status endpoint within namespaces
-
-BUG FIXES:
-
-* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)]
-* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)]
-* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions
-* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)]
-* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)]
-* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)]
-* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation.
-* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)]
-* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)]
-* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)]
-* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)]
-* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)]
-* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)]
-* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)]
-
-## 1.9.2
-### December 21, 2021
-
-CHANGES:
-
-* go: Update go version to 1.17.5 [[GH-13408](https://github.com/hashicorp/vault/pull/13408)]
-
-IMPROVEMENTS:
-
-* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)]
-
-BUG FIXES:
-
-* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)]
-
-## 1.9.1
-### December 9, 2021
-
-SECURITY:
-
-* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1.
-
-IMPROVEMENTS:
-
-* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)]
-
-BUG FIXES:
-
-* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)]
-* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
-* http: Fix /sys/monitor endpoint returning "streaming not supported" [[GH-13200](https://github.com/hashicorp/vault/pull/13200)]
-* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)]
-* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)]
-* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)]
-* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)]
-* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)]
-* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)]
-* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)]
-* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)]
-* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)]
-* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)]
-* ui: Fixes issue with automated secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)]
-* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)]
-* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)]
-
-## 1.9.0
-### November 17, 2021
-
-CHANGES:
-
-* auth/kubernetes: `disable_iss_validation` defaults to true. [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)]
-* expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)]
-* expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has
-been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)]
-* go: Update go version to 1.17.2
-* secrets/ssh: Roles with empty allowed_extensions will now forbid end-users
-specifying extensions when requesting ssh key signing. Update roles setting
-allowed_extensions to `*` to permit any extension to be specified by an end-user. [[GH-12847](https://github.com/hashicorp/vault/pull/12847)]
-
-FEATURES:
-
-* **Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) [[GH-12485](https://github.com/hashicorp/vault/pull/12485)]
-* **Deduplicate Token With Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log [[GH-12820](https://github.com/hashicorp/vault/pull/12820)]
-* **Elasticsearch Database UI**: The UI now supports adding and editing Elasticsearch connections in the database secret engine. [[GH-12672](https://github.com/hashicorp/vault/pull/12672)]
-* **KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the
-metadata endpoint. The data will be present in responses made to the data endpoint independent of the
-calling token's `read` access to the metadata endpoint. [[GH-12907](https://github.com/hashicorp/vault/pull/12907)]
-* **KV patch (Tech Preview)**: Add partial update support for the `/<mount>/data/:path` kv-v2
-endpoint through HTTP `PATCH`. A new `patch` ACL capability has been added and
-is required to make such requests. [[GH-12687](https://github.com/hashicorp/vault/pull/12687)]
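-
-A minimal sketch of the new capability and CLI flow, assuming a kv-v2 mount at `secret/`; the policy name, secret path, and field values below are illustrative, not taken from this entry:
-
-```shell
-# Grant the new patch capability on a kv-v2 data path.
-cat > kv-patch-demo.hcl <<'EOF'
-path "secret/data/my-app" {
-  capabilities = ["patch"]
-}
-EOF
-vault policy write kv-patch-demo kv-patch-demo.hcl
-
-# Partially update an existing secret; keys not named here are left intact.
-vault kv patch secret/my-app api_key=new-value
-```
-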
-* **Key Management Secrets Engine (Enterprise)**: Adds support for distributing and managing keys in GCP Cloud KMS.
-* **Local Auth Mount Entities (enterprise)**: Logins on `local` auth mounts will
-generate identity entities for the tokens issued. The aliases of the entity
-resulting from local auth mounts (local-aliases) will be scoped by the cluster.
-This means that the local-aliases will never leave the geographical boundary of
-the cluster where they were issued. This is something to be mindful of for
-those who have implemented local auth mounts for complying with GDPR guidelines.
-* **Namespaces (Enterprise)**: Adds support for locking the Vault API for particular namespaces.
-* **OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. [[GH-12932](https://github.com/hashicorp/vault/pull/12932)]
-* **Oracle Database UI**: The UI now supports adding and editing Oracle connections in the database secret engine. [[GH-12752](https://github.com/hashicorp/vault/pull/12752)]
-* **Postgres Database UI**: The UI now supports adding and editing Postgres connections in the database secret engine. [[GH-12945](https://github.com/hashicorp/vault/pull/12945)]
-
-SECURITY:
-
-* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5, 1.8.4, and 1.9.0.
-* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
-
-IMPROVEMENTS:
-
-* agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored [[GH-12843](https://github.com/hashicorp/vault/pull/12843)]
-* agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined [[GH-12762](https://github.com/hashicorp/vault/pull/12762)]
-* agent/cache: tolerate partial restore failure from persistent cache [[GH-12718](https://github.com/hashicorp/vault/pull/12718)]
-* agent/template: add support for new 'writeToFile' template function [[GH-12505](https://github.com/hashicorp/vault/pull/12505)]
-* api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests. [[GH-12814](https://github.com/hashicorp/vault/pull/12814)]
-* api: adds native Login method to Go client module with different auth method interfaces to support easier authentication [[GH-12796](https://github.com/hashicorp/vault/pull/12796)]
-* api: Move mergeStates and other required utils from agent to api module [[GH-12731](https://github.com/hashicorp/vault/pull/12731)]
-* api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy [[GH-12582](https://github.com/hashicorp/vault/pull/12582)]
-* auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found [[GH-12788](https://github.com/hashicorp/vault/pull/12788)]
-* auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id. [[GH-12425](https://github.com/hashicorp/vault/pull/12425)]
-* auth/aws: add profile support for AWS credentials when using the AWS auth method [[GH-12621](https://github.com/hashicorp/vault/pull/12621)]
-* auth/kubernetes: validate JWT against the provided role on alias lookahead operations [[GH-12688](https://github.com/hashicorp/vault/pull/12688)]
-* auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) [[GH-12633](https://github.com/hashicorp/vault/pull/12633)]
-* auth/ldap: include support for an optional user filter field when searching for users [[GH-11000](https://github.com/hashicorp/vault/pull/11000)]
-* auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow. [[GH-12876](https://github.com/hashicorp/vault/pull/12876)]
-* auth/okta: Send x-forwarded-for in Okta Push Factor request [[GH-12320](https://github.com/hashicorp/vault/pull/12320)]
-* auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies [[GH-7277](https://github.com/hashicorp/vault/pull/7277)]
-* cli: Operator diagnose now tests for missing or partial telemetry configurations. [[GH-12802](https://github.com/hashicorp/vault/pull/12802)]
-* cli: Add new HTTP option `-header`, which enables sending arbitrary headers with the CLI [[GH-12508](https://github.com/hashicorp/vault/pull/12508)]
-* command: operator generate-root -decode: allow passing encoded token via stdin [[GH-12881](https://github.com/hashicorp/vault/pull/12881)]
-* core/token: Return the token_no_default_policy config on token role read if set [[GH-12565](https://github.com/hashicorp/vault/pull/12565)]
-* core: Add support for go-sockaddr templated addresses in config. [[GH-9109](https://github.com/hashicorp/vault/pull/9109)]
-* core: adds custom_metadata field for aliases [[GH-12502](https://github.com/hashicorp/vault/pull/12502)]
-* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)]
-* core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27. [[GH-12253](https://github.com/hashicorp/vault/pull/12253)]
-* core: Upgrade github.com/gogo/protobuf [[GH-12255](https://github.com/hashicorp/vault/pull/12255)]
-* core: Build with Go 1.17, and mitigate a breaking change in Go 1.17 that could impact how approle and ssh interpret IPs/CIDRs [[GH-12868](https://github.com/hashicorp/vault/pull/12868)]
-* core: observe the client counts broken down by namespace for partial month client count [[GH-12393](https://github.com/hashicorp/vault/pull/12393)]
-* core: Artifact builds will now only run on merges to the release branches or to `main`
-* core: The [dockerfile](https://github.com/hashicorp/vault/blob/main/Dockerfile) that is used to build the vault docker image available at [hashicorp/vault](https://hub.docker.com/repository/docker/hashicorp/vault) now lives in the root of this repo, and the entrypoint is available under [.release/docker/docker-entrypoint.sh](https://github.com/hashicorp/vault/blob/main/.release/docker/docker-entrypoint.sh)
-* core: The vault linux packaging service configs and pre/post install scripts are now available under [.release/linux](https://github.com/hashicorp/vault/blob/main/.release/linux)
-* core: Vault linux packages are now available for all supported linux architectures including arm, arm64, 386, and amd64
-* db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations [[GH-12903](https://github.com/hashicorp/vault/pull/12903)]
-* identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens. [[GH-12780](https://github.com/hashicorp/vault/pull/12780)]
-* identity: fix issue where the Cache-Control header caused a stampede of requests for JWKS keys [[GH-12414](https://github.com/hashicorp/vault/pull/12414)]
-* physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. [[GH-11980](https://github.com/hashicorp/vault/pull/11980)]
-* pki: adds signature_bits field to customize the signature algorithm on CAs and certs signed by Vault [[GH-11245](https://github.com/hashicorp/vault/pull/11245)]
-* plugin: update the couchbase gocb version in the couchbase plugin [[GH-12483](https://github.com/hashicorp/vault/pull/12483)]
-* replication (enterprise): Add merkle.flushDirty.num_pages_outstanding metric which specifies the number of
-outstanding dirty pages that were not flushed. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)]
-* sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths. [[GH-12668](https://github.com/hashicorp/vault/pull/12668)]
-* secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases [[GH-12185](https://github.com/hashicorp/vault/pull/12185)]
-* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)]
-* secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) [[GH-12629](https://github.com/hashicorp/vault/pull/12629)]
-* secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin [[GH-12839](https://github.com/hashicorp/vault/pull/12839)]
-* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)]
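-
-A sketch of how this might be exercised, assuming a pki mount at `pki/` and a `require_matching_certificate_algorithms` parameter on the `root/sign-self-issued` endpoint; the mount and parameter names are assumptions for illustration, not confirmed by this entry:
-
-```shell
-# Sign a self-issued CA cert whose signature algorithm differs from the
-# signing CA's; setting the (assumed) flag to false permits the mismatch.
-vault write pki/root/sign-self-issued \
-    certificate=@self-issued-ca.pem \
-    require_matching_certificate_algorithms=false
-```
-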
-* secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. [[GH-12559](https://github.com/hashicorp/vault/pull/12559)]
-* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)]
-* secrets/pki: Support ed25519 as a key for the pki backend [[GH-11780](https://github.com/hashicorp/vault/pull/11780)]
-* secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 [[GH-12877](https://github.com/hashicorp/vault/pull/12877)]
-* secrets/ssh: Let allowed_users template mix templated and non-templated parts. [[GH-10886](https://github.com/hashicorp/vault/pull/10886)]
-* secrets/ssh: Use entropy augmentation when available for generation of the signing key. [[GH-12560](https://github.com/hashicorp/vault/pull/12560)]
-* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)]
-* storage/raft: Best-effort handling of cancelled contexts. [[GH-12162](https://github.com/hashicorp/vault/pull/12162)]
-* transform (enterprise): Add advanced features for encoding and decoding for Transform FPE
-* transform (enterprise): Add a `reference` field to batch items, and propagate it to the response
-* ui: Add KV secret search box when there is no metadata list access. [[GH-12626](https://github.com/hashicorp/vault/pull/12626)]
-* ui: Add custom metadata to KV secret engine and metadata to config [[GH-12169](https://github.com/hashicorp/vault/pull/12169)]
-* ui: Creates new StatText component [[GH-12295](https://github.com/hashicorp/vault/pull/12295)]
-* ui: client count monthly view [[GH-12554](https://github.com/hashicorp/vault/pull/12554)]
-* ui: creates bar chart component for displaying client count data by namespace [[GH-12437](https://github.com/hashicorp/vault/pull/12437)]
-* ui: Add creation time to KV 2 version history and version view [[GH-12663](https://github.com/hashicorp/vault/pull/12663)]
-* ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)]
-* ui: Adds warning about white space in KV secret engine. [[GH-12921](https://github.com/hashicorp/vault/pull/12921)]
-* ui: Click to copy database static role last rotation value in tooltip [[GH-12890](https://github.com/hashicorp/vault/pull/12890)]
-* ui: Filter DB connection attributes so only relevant attrs POST to backend [[GH-12770](https://github.com/hashicorp/vault/pull/12770)]
-* ui: Removes empty rows from DB config views [[GH-12819](https://github.com/hashicorp/vault/pull/12819)]
-* ui: Standardizes toolbar presentation of destructive actions [[GH-12895](https://github.com/hashicorp/vault/pull/12895)]
-* ui: Updates font for table row value fields [[GH-12908](https://github.com/hashicorp/vault/pull/12908)]
-* ui: namespace search in client count views [[GH-12577](https://github.com/hashicorp/vault/pull/12577)]
-* ui: parse and display pki cert metadata [[GH-12541](https://github.com/hashicorp/vault/pull/12541)]
-* ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package [[GH-11208](https://github.com/hashicorp/vault/pull/11208)]
-* ui: updated client tracking config view [[GH-12422](https://github.com/hashicorp/vault/pull/12422)]
-
-DEPRECATIONS:
-
-* auth/kubernetes: deprecate `disable_iss_validation` and `issuer` configuration fields [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)]
-
-BUG FIXES:
-
-* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)]
-* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)]
-* api: Fixes storage APIs returning incorrect error when parsing responses [[GH-12338](https://github.com/hashicorp/vault/pull/12338)]
-* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)]
-* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)]
-* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)]
-* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12265](https://github.com/hashicorp/vault/pull/12265)]
-* cli/api: Provide consistency for the use of comma-separated parameters in auth/secret enable/tune [[GH-12126](https://github.com/hashicorp/vault/pull/12126)]
-* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12720](https://github.com/hashicorp/vault/pull/12720)]
-* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12911](https://github.com/hashicorp/vault/pull/12911)]
-* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)]
-* core (enterprise): Allow deletion of stored licenses on DR secondary nodes
-* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified
-* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)]
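-
-For context, password policies are defined under `sys/policies/password`; a minimal sketch (the policy name and rules here are illustrative):
-
-```shell
-# Define a password policy from an HCL rules file.
-cat > example-pw-policy.hcl <<'EOF'
-length = 20
-
-rule "charset" {
-  charset   = "abcdefghijklmnopqrstuvwxyz0123456789"
-  min-chars = 1
-}
-EOF
-vault write sys/policies/password/example policy=@example-pw-policy.hcl
-
-# Generate a password that satisfies the policy.
-vault read sys/policies/password/example/generate
-```
-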
-* core (enterprise): Fix data race during perf standby sealing
-* core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node [[GH-12317](https://github.com/hashicorp/vault/pull/12317)]
-* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)]
-* core (enterprise): namespace header included in responses, Go client uses it when displaying error messages [[GH-12196](https://github.com/hashicorp/vault/pull/12196)]
-* core/api: Fix an arm64 bug converting a negative int to an unsigned int [[GH-12372](https://github.com/hashicorp/vault/pull/12372)]
-* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)]
-* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)]
-* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)]
-* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)]
-* core: Fix warnings logged on perf standbys regarding stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)]
-* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)]
-* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)]
-* database/couchbase: change default template to truncate username at 128 characters [[GH-12301](https://github.com/hashicorp/vault/pull/12301)]
-* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
-* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
-* http: removed `unpublished: true` from the logical_system path, making the OpenAPI spec consistent with documentation [[GH-12713](https://github.com/hashicorp/vault/pull/12713)]
-* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)]
-* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)]
-* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)]
-* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)]
-* identity: dedup from_entity_ids when merging two entities [[GH-10101](https://github.com/hashicorp/vault/pull/10101)]
-* identity: disallow creation of role without a key parameter [[GH-12208](https://github.com/hashicorp/vault/pull/12208)]
-* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)]
-* identity: merge associated entity groups when merging entities [[GH-10085](https://github.com/hashicorp/vault/pull/10085)]
-* identity: suppress duplicate policies on entities [[GH-12812](https://github.com/hashicorp/vault/pull/12812)]
-* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests
-* kmip (enterprise): Fix handling of invalid role parameters within various Vault API calls
-* kmip (enterprise): Forward KMIP register operations to the active node
-* license: ignore stored terminated license while autoloading is enabled [[GH-2104](https://github.com/hashicorp/vault/pull/2104)]
-* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build.
-* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)]
-* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)]
-* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)]
-* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)]
-* raft (enterprise): Fix panic when updating auto-snapshot config
-* replication (enterprise): Fix issue where merkle.flushDirty.num_pages metric is not emitted if number
-of dirty pages is 0. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)]
-* replication (enterprise): Fix merkle.saveCheckpoint.num_dirty metric to accurately specify the number
-of dirty pages in the merkle tree at time of checkpoint creation. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)]
-* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)]
-* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)]
-* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)]
-* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12934](https://github.com/hashicorp/vault/pull/12934)]
-* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12600](https://github.com/hashicorp/vault/pull/12600)]
-* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)]
-* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
-* storage/raft (enterprise): Ensure that raft autosnapshot backoff retry duration never hits 0s
-* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)]
-* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)]
-* storage/raft: Support `addr_type=public_v6` in auto-join [[GH-12366](https://github.com/hashicorp/vault/pull/12366)]
-* transform (enterprise): Enforce minimum cache size for Transform backend and reset cache size without a restart
-* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error.
-* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)]
-* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)]
-* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)]
-* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)]
-* ui: Fixed text overflow in flash messages [[GH-12357](https://github.com/hashicorp/vault/pull/12357)]
-* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)]
-* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)]
-* ui: Remove spinner after token renew [[GH-12887](https://github.com/hashicorp/vault/pull/12887)]
-* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)]
-* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)]
-* ui: fix issue where tabbing in MaskedInput on auth methods would clear the value [[GH-12409](https://github.com/hashicorp/vault/pull/12409)]
-* ui: fix missing navbar items on login to namespace [[GH-12478](https://github.com/hashicorp/vault/pull/12478)]
-* ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)]
-* ui: update database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)]
-
-## 1.8.12
-### June 10, 2022
-
-BUG FIXES:
-
-* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)]
-* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)]
-* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)]
-* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)]
-* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in the path prefix for the local storage type. Leading prefixes are stripped from the AWS path prefix, and error handling/reporting has been improved.
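-
-A sketch of an auto-snapshot configuration consistent with these constraints, assuming the enterprise `sys/storage/raft/snapshot-auto/config/:name` endpoint and its `storage_type`, `file_prefix`, and `path_prefix` fields; the field names and values here are assumptions for illustration:
-
-```shell
-# Per this change, file_prefix must not contain "/" for any storage type,
-# and for storage_type=local the path prefix must avoid "/" as well.
-vault write sys/storage/raft/snapshot-auto/config/daily \
-    interval=24h \
-    retain=7 \
-    storage_type=local \
-    path_prefix=snapshots \
-    file_prefix=vault-snapshot
-```
-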
-* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend.
-
-## 1.8.11
-### April 29, 2022
-
-BUG FIXES:
-
-* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)]
-* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)]
-
-## 1.8.10
-### April 22, 2022
-
-CHANGES:
-
-* core: A request that fails path validation due to the relative path check will now receive a 400 response rather than a 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
-* core: Bump Go version to 1.16.15. [[GH-go-ver-1810](https://github.com/hashicorp/vault/pull/go-ver-1810)]
-
-IMPROVEMENTS:
-
-* auth/ldap: Add `username_as_alias` configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)]
-* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)]
-* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer
-
-BUG FIXES:
-
-* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
-* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
-* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)]
-* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)]
-* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)]
-* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)]
-* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)]
-* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)]
-* core: Fix excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)]
-* core: Fix excessive unix file permissions on directories, files, and archives created by the vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)]
-* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)]
-* core: Fix potential memory leak from time.After() used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)]
-* metrics/autosnapshots (enterprise): Fix bug that could cause
-vault.autosnapshots.save.errors to not be incremented when there is an
-autosnapshot save error.
-* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)]
-* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
-* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)]
-* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)]
-* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
-* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
-
-
-## 1.8.9
-### March 3, 2022
-
-SECURITY:
-
-* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4.
-* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4.
-
-IMPROVEMENTS:
-
-* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)]
-
-BUG FIXES:
-
-* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)]
-* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)]
-* secrets/openldap: Fix panic from nil logger in backend [[GH-14170](https://github.com/hashicorp/vault/pull/14170)]
-* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)]
-* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)]
-* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)]
-
-## 1.8.8
-### January 27, 2022
-
-IMPROVEMENTS:
-
-* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)]
-
-BUG FIXES:
-
-* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13494](https://github.com/hashicorp/vault/pull/13494)]
-* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions
-* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation.
-* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13549](https://github.com/hashicorp/vault/pull/13549)]
-* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)]
-* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)]
-* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)]
-* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)]
-* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)]
-* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)]
-
-## 1.8.7
-### December 21, 2021
-
-CHANGES:
-
-* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)]
-
-## 1.8.6
-### December 9, 2021
-
-CHANGES:
-
-* go: Update go version to 1.16.9 [[GH-13029](https://github.com/hashicorp/vault/pull/13029)]
-
-SECURITY:
-
-* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1.
-
-BUG FIXES:
-
-* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
-* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)]
-* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)]
-* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)]
-* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)]
-* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)]
-* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)]
-* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)]
-
-## 1.8.5
-### November 4, 2021
-
-SECURITY:
-
-* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
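-
-For reference, a templated ACL policy interpolates identity metadata into paths; a minimal sketch of one (the mount accessor and path below are illustrative):
-
-```shell
-# Each authenticated entity gets its own kv-v2 subtree, keyed off the
-# alias name for the auth mount with accessor auth_userpass_b2c3d4e5.
-cat > per-user.hcl <<'EOF'
-path "secret/data/{{identity.entity.aliases.auth_userpass_b2c3d4e5.name}}/*" {
-  capabilities = ["create", "read", "update", "delete", "list"]
-}
-EOF
-vault policy write per-user per-user.hcl
-```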
-
-BUG FIXES:
-
-* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)]
-* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)]
-* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)]
-* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
-* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)]
-* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests
-* kmip (enterprise): Fix handling of invalid role parameters within various Vault API calls
-* kmip (enterprise): Forward KMIP register operations to the active node
-* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12952](https://github.com/hashicorp/vault/pull/12952)]
-* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error.
-
-## 1.8.4
-### October 6, 2021
-
-SECURITY:
-
-* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4.
-
-IMPROVEMENTS:
-
-* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)]
-
-BUG FIXES:
-
-* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)]
-* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
-* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)]
-* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
-* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)]
-
-## 1.8.3
-### September 29, 2021
-
-IMPROVEMENTS:
-
-* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)]
-
-BUG FIXES:
-
-* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)]
-* core (enterprise): Allow deletion of stored licenses on DR secondary nodes
-* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)]
-* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)]
-* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)]
-* raft (enterprise): Fix panic when updating auto-snapshot config
-* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)]
-* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12599](https://github.com/hashicorp/vault/pull/12599)]
-* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)]
-* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)]
-* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)]
-* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)]
-
-## 1.8.2
-### August 26, 2021
-
-CHANGES:
-
-* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159
-* go: Update go version to 1.16.7 [[GH-12408](https://github.com/hashicorp/vault/pull/12408)]
-
-BUG FIXES:
-
-* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)]
-* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)]
-* database/couchbase: change default template to truncate username at 128 characters [[GH-12300](https://github.com/hashicorp/vault/pull/12300)]
-* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)]
-* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)]
-* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)]
-* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)]
-* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)]
-* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)]
-* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)]
-* ui: fix issue where tabbing in MaskedInput on auth methods would clear the value [[GH-12409](https://github.com/hashicorp/vault/pull/12409)]
-
-## 1.8.1
-### August 5, 2021
-
-CHANGES:
-
-* go: Update go version to 1.16.6 [[GH-12245](https://github.com/hashicorp/vault/pull/12245)]
-
-IMPROVEMENTS:
-
-* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)]
-
-BUG FIXES:
-
-* auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL. [[GH-12026](https://github.com/hashicorp/vault/pull/12026)]
-* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12258](https://github.com/hashicorp/vault/pull/12258)]
-* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified
-* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)]
-* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)]
-
-## 1.8.0
-### July 28, 2021
-
-CHANGES:
-
-* agent: Errors in the template engine will no longer cause agent to exit unless
-explicitly defined to do so. A new configuration parameter,
-`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can
-be set to `true` in order to cause agent to exit. Note that for agent to exit if
-`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must
-also be set to `true`. Otherwise, the template engine will log an error but then
-restart its internal runner. [[GH-11775](https://github.com/hashicorp/vault/pull/11775)]
-* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
-when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
-* core (enterprise): License/EULA changes that ensure the presence of a valid HashiCorp license to
-start Vault. More information is available in the [Vault License FAQ](https://www.vaultproject.io/docs/enterprise/license/faqs)
-
-FEATURES:
-
-* **GCP Secrets Engine Static Accounts**: Adds ability to use existing service accounts for generation
- of service account keys and access tokens. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)]
-* **Key Management Secrets Engine (Enterprise)**: Adds general availability for distributing and managing keys in AWS KMS. [[GH-11958](https://github.com/hashicorp/vault/pull/11958)]
-* **License Autoloading (Enterprise)**: Licenses may now be automatically loaded from the environment or disk.
-* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532](https://github.com/hashicorp/vault/pull/11532)]
-* **Vault Diagnose**: A new `vault operator` command to detect common issues with vault server setups.
-
-SECURITY:
-
-* storage/raft: When initializing Vault’s Integrated Storage backend, excessively broad filesystem permissions may be set for the underlying Bolt database used by Vault’s Raft implementation. This vulnerability, CVE-2021-38553, was fixed in Vault 1.8.0.
-* ui: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases.
-
-IMPROVEMENTS:
-
-* agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets [[GH-11934](https://github.com/hashicorp/vault/pull/11934)]
-* agent: Allow Agent auto auth to read symlinked JWT files [[GH-11502](https://github.com/hashicorp/vault/pull/11502)]
-* api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. [[GH-11696](https://github.com/hashicorp/vault/pull/11696)]
-* auth/aws: Underlying error included in validation failure message. [[GH-11638](https://github.com/hashicorp/vault/pull/11638)]
-* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)]
-* core: Add `prefix_filter` to telemetry config [[GH-12025](https://github.com/hashicorp/vault/pull/12025)]
-* core: Add a darwin/arm64 binary release supporting the Apple M1 CPU [[GH-12071](https://github.com/hashicorp/vault/pull/12071)]
-* core: Add a small (<1s) exponential backoff to TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)]
-* core (enterprise): Add controlled capabilities to control group policy stanza
-* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)]
-* core: Add metrics to report if a node is a perf standby, if a node is a dr secondary or primary, and if a node is a perf secondary or primary. [[GH-11472](https://github.com/hashicorp/vault/pull/11472)]
-* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)]
-* core: add irrevocable lease list and count apis [[GH-11607](https://github.com/hashicorp/vault/pull/11607)]
-* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
-* core: Improve renew/revoke performance using per-lease locks [[GH-11122](https://github.com/hashicorp/vault/pull/11122)]
-* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)]
-* go: Update to Go 1.16.5 [[GH-11802](https://github.com/hashicorp/vault/pull/11802)]
-* replication: Delay evaluation of X-Vault-Index headers until merkle sync completes.
-* secrets/rabbitmq: Add ability to customize dynamic usernames [[GH-11899](https://github.com/hashicorp/vault/pull/11899)]
-* secrets/ad: Add `rotate-role` endpoint to allow rotations of service accounts. [[GH-11942](https://github.com/hashicorp/vault/pull/11942)]
-* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)]
-* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)]
-* secrets/database/elasticsearch: Add ability to customize dynamic usernames [[GH-11957](https://github.com/hashicorp/vault/pull/11957)]
-* secrets/database/influxdb: Add ability to customize dynamic usernames [[GH-11796](https://github.com/hashicorp/vault/pull/11796)]
-* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
-* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)]
-* secrets/database/mongodbatlas: Adds the ability to customize username generation for dynamic users in MongoDB Atlas. [[GH-11956](https://github.com/hashicorp/vault/pull/11956)]
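-
-A sketch of such customization, assuming the database engine's `username_template` field and the Go-template syntax used by database plugins; the connection parameters shown are illustrative, not from this entry:
-
-```shell
-# Configure a MongoDB Atlas connection whose dynamic usernames follow a
-# custom pattern; {{.RoleName}} and {{random 8}} come from the database
-# plugins' username templating.
-vault write database/config/my-atlas \
-    plugin_name=mongodbatlas-database-plugin \
-    allowed_roles="my-role" \
-    public_key="$ATLAS_PUBLIC_KEY" \
-    private_key="$ATLAS_PRIVATE_KEY" \
-    project_id="$ATLAS_PROJECT_ID" \
-    username_template='v-{{.RoleName}}-{{random 8}}'
-```
-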
-* secrets/database/redshift: Add ability to customize dynamic usernames [[GH-12016](https://github.com/hashicorp/vault/pull/12016)] -* secrets/database/snowflake: Add ability to customize dynamic usernames [[GH-11997](https://github.com/hashicorp/vault/pull/11997)] -* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] -* storage/raft: Improve raft batch size selection [[GH-11907](https://github.com/hashicorp/vault/pull/11907)] -* storage/raft: change freelist type to map and set nofreelistsync to true [[GH-11895](https://github.com/hashicorp/vault/pull/11895)] -* storage/raft: Switch to shared raft-boltdb library and add boltdb metrics [[GH-11269](https://github.com/hashicorp/vault/pull/11269)] -* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] -* storage/raft (enterprise): Enable Autopilot on DR secondary clusters -* ui: Add Validation to KV secret engine [[GH-11785](https://github.com/hashicorp/vault/pull/11785)] -* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] -* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] -* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] -* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] -* ui: Add validation support for open api form fields [[GH-11963](https://github.com/hashicorp/vault/pull/11963)] -* ui: Added auth method descriptions to UI login page [[GH-11795](https://github.com/hashicorp/vault/pull/11795)] -* ui: JSON fields on database can be cleared on edit [[GH-11708](https://github.com/hashicorp/vault/pull/11708)] -* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] -* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] -* ui: Replace tool partials with components.
[[GH-11672](https://github.com/hashicorp/vault/pull/11672)] -* ui: Show description on secret engine list [[GH-11995](https://github.com/hashicorp/vault/pull/11995)] -* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)] -* ui: Update partials to components [[GH-11680](https://github.com/hashicorp/vault/pull/11680)] -* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)] -* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] -* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)] -* ui: add transform secrets engine to features list [[GH-12003](https://github.com/hashicorp/vault/pull/12003)] -* ui: add validations for duplicate path kv engine [[GH-11878](https://github.com/hashicorp/vault/pull/11878)] -* ui: show site-wide banners for license warnings if applicable [[GH-11759](https://github.com/hashicorp/vault/pull/11759)] -* ui: update license page with relevant autoload info [[GH-11778](https://github.com/hashicorp/vault/pull/11778)] - -DEPRECATIONS: - -* secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating - secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)] - -BUG FIXES: - -* activity: Omit wrapping tokens and control groups from client counts [[GH-11826](https://github.com/hashicorp/vault/pull/11826)] -* agent/cert: Fix issue where the API client on agent was not honoring certificate - information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] -* agent/template: fix command shell quoting issue [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] -* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] -* agent: fix timestamp format in log messages from the templating engine [[GH-11838](https://github.com/hashicorp/vault/pull/11838)] -* auth/approle: fixing dereference of nil pointer [[GH-11864](https://github.com/hashicorp/vault/pull/11864)] -* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to - bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] -* auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs [[GH-12073](https://github.com/hashicorp/vault/pull/12073)] -* auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. [[GH-11975](https://github.com/hashicorp/vault/pull/11975)] -* cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. [[GH-12031](https://github.com/hashicorp/vault/pull/12031)] -* cli: vault delete and vault kv delete should support the same output options (e.g. -format) as vault write. [[GH-11992](https://github.com/hashicorp/vault/pull/11992)] -* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes. 
-* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] -* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. -* core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric [[GH-12020](https://github.com/hashicorp/vault/pull/12020)] -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)] -* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] -* core (enterprise): Fix panic on DR secondary when there are lease count quotas [[GH-11742](https://github.com/hashicorp/vault/pull/11742)] -* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)] -* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] -* core: Fixed double counting of http requests after operator stepdown [[GH-11970](https://github.com/hashicorp/vault/pull/11970)] -* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] -* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] -* mongo-db: default username template now strips invalid '.' characters [[GH-11872](https://github.com/hashicorp/vault/pull/11872)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* replication: Fix panic trying to update walState during identity group invalidation. -* replication: Fix: mounts created within a namespace that was part of an Allow - filtering rule would not appear on performance secondary if created after rule - was defined. -* secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3 -* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] -* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] -* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] -* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] -* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] -* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) [[GH-12019](https://github.com/hashicorp/vault/pull/12019)] -* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] -* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] -* storage/raft: Tweak creation of vault.db file [[GH-12034](https://github.com/hashicorp/vault/pull/12034)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] -* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] -* transform (enterprise): Fix an issue with malformed transform configuration - storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. -* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] -* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fix Version History queryParams on LinkedBlock [[GH-12079](https://github.com/hashicorp/vault/pull/12079)] -* ui: Fix bug where database secret engines with custom names cannot delete connections [[GH-11127](https://github.com/hashicorp/vault/pull/11127)] -* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] -* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] -* ui: Fix date display on expired token notice [[GH-11142](https://github.com/hashicorp/vault/pull/11142)] -* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] -* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] -* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)] -* ui: Fix issue where logging in without namespace input causes error [[GH-11094](https://github.com/hashicorp/vault/pull/11094)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] -* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] -* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] -* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)] -* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] -* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.7.10 -### March 3, 2022 - -SECURITY: - -* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. - -BUG FIXES: - -* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)] -* ui: Fix issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] - -## 1.7.9 -### January 27, 2022 - -IMPROVEMENTS: - -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] - -BUG FIXES: - -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13493](https://github.com/hashicorp/vault/pull/13493)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13735](https://github.com/hashicorp/vault/pull/13735)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] - -## 1.7.8 -### December 21, 2021 - -CHANGES: - -* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)] - -BUG FIXES: - -* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] -* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] - -## 1.7.7 -### December 9, 2021 - -SECURITY: - -* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. - -BUG FIXES: - -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction.
[[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] - -## 1.7.6 -### November 4, 2021 - -SECURITY: - -* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. - -BUG FIXES: - -* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] -* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] -* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] -* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests -* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls -* kmip (enterprise): Forward KMIP register operations to the active node -* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12957](https://github.com/hashicorp/vault/pull/12957)] -* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] -* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. - -## 1.7.5 -### 29 September 2021 - -SECURITY: - -* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4. - -IMPROVEMENTS: - -* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)] - -BUG FIXES: - -* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] -* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin.
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] -* raft (enterprise): Fix panic when updating auto-snapshot config -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12598](https://github.com/hashicorp/vault/pull/12598)] -* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] -* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] - -## 1.7.4 -### 26 August 2021 - -SECURITY: - -* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. - -CHANGES: - -* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 -* go: Update go version to 1.15.15 [[GH-12411](https://github.com/hashicorp/vault/pull/12411)] - -IMPROVEMENTS: - -* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] - -BUG FIXES: - -* replication (enterprise): Fix a panic that could occur when checking the last wal and the log shipper buffer is empty. -* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] -* database/couchbase: change default template to truncate username at 128 characters [[GH-12299](https://github.com/hashicorp/vault/pull/12299)] -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.7.3 -### June 16th, 2021 - -CHANGES: - -* go: Update go version to 1.15.13 [[GH-11857](https://github.com/hashicorp/vault/pull/11857)] - -IMPROVEMENTS: - -* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] -* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] - -BUG FIXES: - -* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to -bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] -* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. -* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] -* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] -* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] - -## 1.7.2 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for -signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] - -IMPROVEMENTS: - -* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] -* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] -* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] -* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] -* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] - -BUG FIXES: - -* agent/cert: Fix issue where the API client on agent was not honoring certificate -information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] -* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] -* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] -* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] -* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] -* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] -* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. -* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] -* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] -* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] - -## 1.7.1 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) -* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all - versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) - -CHANGES: - -* go: Update to Go 1.15.11 [[GH-11395](https://github.com/hashicorp/vault/pull/11395)] - -IMPROVEMENTS: - -* auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. [[GH-11388](https://github.com/hashicorp/vault/pull/11388)] -* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
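The `tls_max_version` option above joins the existing `tls_min_version` listener setting. The block below is a hedged sketch of pinning a listener to exactly TLS 1.2; the address and certificate paths are placeholders.

```hcl
# Sketch of a tcp listener using tls_max_version alongside tls_min_version.
# Values follow the existing convention: "tls10", "tls11", "tls12", "tls13".
listener "tcp" {
  address       = "0.0.0.0:8200"
  tls_cert_file = "/etc/vault/tls/vault.crt"
  tls_key_file  = "/etc/vault/tls/vault.key"

  # Pin the listener to exactly TLS 1.2.
  tls_min_version = "tls12"
  tls_max_version = "tls12"
}
```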
-* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] -* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] -* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] - -BUG FIXES: - -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] -* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] -* core: requests forwarded by standby weren't always timed out. [[GH-11322](https://github.com/hashicorp/vault/pull/11322)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* replication: Fix: mounts created within a namespace that was part of an Allow - filtering rule would not appear on performance secondary if created after rule - was defined. -* replication: Perf standby nodes on newly enabled DR secondary sometimes couldn't connect to active node with TLS errors. [[GH-1823](https://github.com/hashicorp/vault/pull/1823)] -* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] -* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)] -* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] -* storage/raft: using raft for ha_storage with a different storage backend was broken in 1.7.0, now fixed. [[GH-11340](https://github.com/hashicorp/vault/pull/11340)] -* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] -* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] -* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] -* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)] -* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)] - -## 1.7.0 -### 24 March 2021 - -CHANGES: - -* agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the -~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter, -which defaults to 5 minutes. [[GH-10964](https://github.com/hashicorp/vault/pull/10964)]
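As a sketch of the new `max_backoff` parameter in context: it sits on the auto-auth method stanza, capping the exponential backoff described above. The AppRole method and its file paths are assumptions for illustration.

```hcl
# Agent auto_auth block using the 1.7.0 max_backoff parameter. Failed
# auto-auth attempts back off exponentially up to this cap (default 5m);
# the method type and paths are illustrative.
auto_auth {
  method "approle" {
    max_backoff = "2m"

    config = {
      role_id_file_path   = "/etc/vault/role-id"
      secret_id_file_path = "/etc/vault/secret-id"
    }
  }
}
```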
-* aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms -have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been -updated to `/auth/aws/identity-accesslist`). The old and new endpoints are aliases, -sharing the same underlying data. The legacy endpoint names are considered **deprecated** -and will be removed in a future release (not before Vault 1.9). The complete list of -endpoint changes is available in the [AWS Auth API docs](/api-docs/auth/aws#deprecations-effective-in-vault-1-7). -* go: Update Go version to 1.15.10 [[GH-11114](https://github.com/hashicorp/vault/pull/11114)] [[GH-11173](https://github.com/hashicorp/vault/pull/11173)] - -FEATURES: - -* **Aerospike Storage Backend**: Add support for using Aerospike as a storage backend [[GH-10131](https://github.com/hashicorp/vault/pull/10131)] -* **Autopilot for Integrated Storage**: A set of features has been added to allow for automatic operator-friendly management of Vault servers. This is only applicable when integrated storage is in use. - * **Dead Server Cleanup**: Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections. - * **Server Health Checking**: An API has been added to track the state of servers, including their health. - * **New Server Stabilization**: When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member. -* **Tokenization Secrets Engine (Enterprise)**: The Tokenization Secrets Engine is now generally available. We have added support for MySQL, key rotation, and snapshot/restore. -* replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured. -* agent: Support for persisting the agent cache to disk [[GH-10938](https://github.com/hashicorp/vault/pull/10938)] -* auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* core (enterprise): X-Vault-Index and related headers can be used by clients to manage eventual consistency. -* kmip (enterprise): Use entropy augmentation to generate kmip certificates -* sdk: Private key generation in the certutil package now allows custom io.Readers to be used.
[[GH-10653](https://github.com/hashicorp/vault/pull/10653)] -* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] -* secrets/database/cassandra: Add ability to customize dynamic usernames [[GH-10906](https://github.com/hashicorp/vault/pull/10906)] -* secrets/database/couchbase: Add ability to customize dynamic usernames [[GH-10995](https://github.com/hashicorp/vault/pull/10995)] -* secrets/database/mongodb: Add ability to customize dynamic usernames [[GH-10858](https://github.com/hashicorp/vault/pull/10858)] -* secrets/database/mssql: Add ability to customize dynamic usernames [[GH-10767](https://github.com/hashicorp/vault/pull/10767)] -* secrets/database/mysql: Add ability to customize dynamic usernames [[GH-10834](https://github.com/hashicorp/vault/pull/10834)] -* secrets/database/postgresql: Add ability to customize dynamic usernames [[GH-10766](https://github.com/hashicorp/vault/pull/10766)] -* secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine [[GH-10603](https://github.com/hashicorp/vault/pull/10603)] -* secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS. -* secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault. -* secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine [[GH-10996](https://github.com/hashicorp/vault/pull/10996)] -* secrets/terraform: New secret engine for managing Terraform Cloud API tokens [[GH-10931](https://github.com/hashicorp/vault/pull/10931)] -* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] -* ui: Adds the wizard to the Database Secret Engine [[GH-10982](https://github.com/hashicorp/vault/pull/10982)] -* ui: Database secrets engine, supporting MongoDB only [[GH-10655](https://github.com/hashicorp/vault/pull/10655)] - -IMPROVEMENTS: - -* agent: Add a `vault.retry` stanza that allows specifying number of retries on failure; this applies both to templating and proxied requests (see the sketch below). [[GH-11113](https://github.com/hashicorp/vault/pull/11113)] -* agent: Agent can now run as a Windows service. [[GH-10231](https://github.com/hashicorp/vault/pull/10231)] -* agent: Better concurrent request handling on identical requests proxied through Agent. [[GH-10705](https://github.com/hashicorp/vault/pull/10705)] -* agent: Route templating server through cache when persistent cache is enabled. [[GH-10927](https://github.com/hashicorp/vault/pull/10927)] -* agent: change auto-auth to preload an existing token on start [[GH-10850](https://github.com/hashicorp/vault/pull/10850)] -* auth/approle: Secrets ID generation endpoint now returns `secret_id_ttl` as part of its response. [[GH-10826](https://github.com/hashicorp/vault/pull/10826)] -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] -* auth/okta: Adds support for Okta Verify TOTP MFA. [[GH-10942](https://github.com/hashicorp/vault/pull/10942)] -* changelog: Add dependencies listed in dependencies/2-25-21 [[GH-11015](https://github.com/hashicorp/vault/pull/11015)] -* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] -* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)]
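A hedged sketch of the `vault.retry` stanza flagged above: retries apply to both template rendering and requests proxied through the agent. The address is a placeholder and the retry count shown is arbitrary.

```hcl
# Vault Agent snippet using the 1.7.0 vault.retry stanza to bound how many
# times failed templating and proxied requests are retried.
vault {
  address = "https://vault.example.com:8200"

  retry {
    # Number of retries on failure before giving up.
    num_retries = 5
  }
}
```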
-* core (enterprise): Update Trial Enterprise license from 30 minutes to 6 hours -* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] -* core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace [[GH-10375](https://github.com/hashicorp/vault/pull/10375)] -* core: Added active since timestamp to the status output of active nodes. [[GH-10489](https://github.com/hashicorp/vault/pull/10489)] -* core: Check audit device with a test message before adding it. [[GH-10520](https://github.com/hashicorp/vault/pull/10520)] -* core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule [[GH-10774](https://github.com/hashicorp/vault/pull/10774)] -* core: add metrics for active entity count [[GH-10514](https://github.com/hashicorp/vault/pull/10514)] -* core: add partial month client count api [[GH-11022](https://github.com/hashicorp/vault/pull/11022)] -* core: dev mode listener allows unauthenticated sys/metrics requests [[GH-10992](https://github.com/hashicorp/vault/pull/10992)] -* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] -* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in a DR operation token for -authenticating the request. -* transform (enterprise): Improve FPE transformation performance -* transform (enterprise): Use transactions with batch tokenization operations for improved performance -* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] -* ui: Customize MongoDB input fields on Database Secrets Engine [[GH-10949](https://github.com/hashicorp/vault/pull/10949)] -* ui: Upgrade Ember-cli from 3.8 to 3.22. [[GH-9972](https://github.com/hashicorp/vault/pull/9972)] -* ui: Upgrade Storybook from 5.3.19 to 6.1.17. [[GH-10904](https://github.com/hashicorp/vault/pull/10904)] -* ui: Upgrade date-fns from 1.3.0 to 2.16.1. [[GH-10848](https://github.com/hashicorp/vault/pull/10848)] -* ui: Upgrade dependencies to resolve potential JS vulnerabilities [[GH-10677](https://github.com/hashicorp/vault/pull/10677)] -* ui: better errors on Database secrets engine role create [[GH-10980](https://github.com/hashicorp/vault/pull/10980)] - -BUG FIXES: - -* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] -* agent: Set TokenParent correctly in the Index to be cached. [[GH-10833](https://github.com/hashicorp/vault/pull/10833)] -* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] -* api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used [[GH-10490](https://github.com/hashicorp/vault/pull/10490)] -* api: Fixes CORS API methods that were outdated and invalid [[GH-10444](https://github.com/hashicorp/vault/pull/10444)] -* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] -* auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change.
[[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using -`jwks_url` and `jwt_validation_pubkeys`. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)] -* auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)] [[GH-10952](https://github.com/hashicorp/vault/pull/10952)] -* consul-template: Update consul-template vendor version and associated dependencies to master, -pulling in https://github.com/hashicorp/consul-template/pull/1447 [[GH-10756](https://github.com/hashicorp/vault/pull/10756)] -* core (enterprise): Limit entropy augmentation during token generation to root tokens. [[GH-10487](https://github.com/hashicorp/vault/pull/10487)] -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* core: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] -* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and -`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] -* core: Make all APIs that report init status consistent, and make them report -initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] -* core: Turn off case sensitivity for allowed entity alias check during token create operation. 
[[GH-10743](https://github.com/hashicorp/vault/pull/10743)] -* http: change max_request_size to be unlimited when the config value is less than 0 [[GH-10072](https://github.com/hashicorp/vault/pull/10072)] -* license: Fix license caching issue that prevents new licenses from being picked up by the license manager [[GH-10424](https://github.com/hashicorp/vault/pull/10424)] -* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] -* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] -* replication (enterprise): Fix bug with not starting merkle sync while requests are in progress -* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)] -* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)] -* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)] -* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] -* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] -* serviceregistration: Fix race during shutdown of Consul service registration. [[GH-10901](https://github.com/hashicorp/vault/pull/10901)] -* storage/raft (enterprise): Automated snapshots with Azure required specifying -`azure_blob_environment`, which should have defaulted to `AZUREPUBLICCLOUD`. -* storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404. -* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and -didn't work. It has been renamed to aws_s3_kms_key; when provided, -the given key will be used to encrypt the snapshot using AWS KMS. -* transform (enterprise): Fix bug in tokenization handling of metadata on exportable stores -* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect -* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path -* transform (enterprise): Make expiration timestamps human readable -* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error -* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] -* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)] -* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] -* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] -* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)] - -DEPRECATIONS: -* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated.
- -Refer to the CHANGES section for additional details. - -## 1.6.7 -### 29 September 2021 - -BUG FIXES: - -* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)] -* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] -* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] -* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12597](https://github.com/hashicorp/vault/pull/12597)] - -## 1.6.6 -### 26 August 2021 - -SECURITY: - -* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. - -CHANGES: - -* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 -* go: Update go version to 1.15.15 [[GH-12423](https://github.com/hashicorp/vault/pull/12423)] - -IMPROVEMENTS: - -* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] - -BUG FIXES: - -* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] -* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] -* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)] -* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] -* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] -* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] - -## 1.6.5 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for -signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)] - -BUG FIXES: - -* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] -* core: correct logic for renewal of leases nearing their expiration time.
[[GH-11650](https://github.com/hashicorp/vault/pull/11650)] -* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] -* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] -* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)] - -## 1.6.4 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) -* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all - versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) - -CHANGES: - -* go: Update to Go 1.15.11 [[GH-11396](https://github.com/hashicorp/vault/pull/11396)] - -IMPROVEMENTS: - -* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)] -* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] -* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] - -BUG FIXES: - -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)] -* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* pki: Preserve ordering of all DN attribute values when issuing certificates [[GH-11259](https://github.com/hashicorp/vault/pull/11259)] -* replication: Fix: mounts created within a namespace that was part of an Allow - filtering rule would not appear on performance secondary if created after rule - was defined. -* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)] -* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] - - -## 1.6.3 -### February 25, 2021 - -SECURITY: - -* Limited Unauthenticated License Metadata Read: We addressed a security vulnerability that allowed for the unauthenticated -reading of Vault license metadata from DR Secondaries. This vulnerability affects Vault Enterprise and is -fixed in 1.6.3 (CVE-2021-27668). 
- -CHANGES: - -* secrets/mongodbatlas: Move from whitelist to access list API [[GH-10966](https://github.com/hashicorp/vault/pull/10966)] - -IMPROVEMENTS: - -* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)] - -BUG FIXES: - -* auth/kubernetes: Cancel API calls to TokenReview endpoint when request context -is closed [[GH-10930](https://github.com/hashicorp/vault/pull/10930)] -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* quotas: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)] -* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)] -* replication (enterprise): Don't write request count data on DR Secondaries. -Fixes DR Secondaries becoming out of sync approximately every 30s. [[GH-10970](https://github.com/hashicorp/vault/pull/10970)] -* secrets/azure (enterprise): Forward service principal credential creation to the -primary cluster if called on a performance standby or performance secondary. [[GH-10902](https://github.com/hashicorp/vault/pull/10902)] - -## 1.6.2 -### January 29, 2021 - -SECURITY: - -* IP Address Disclosure: We fixed a vulnerability where, under some error -conditions, Vault would return an error message disclosing internal IP -addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in -1.6.2 (CVE-2021-3024). -* Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command -on DR secondaries did not require authentication. This issue impacts the -stability of HA architecture, as a bad actor could remove all standby -nodes from a DR -secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1, and is fixed in -1.6.2 (CVE-2021-3282). -* Mount Path Disclosure: Vault previously returned different HTTP status codes for -existent and non-existent mount paths. This behavior would allow unauthenticated -brute force attacks to reveal which paths had valid mounts. This issue affects -Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594). - -CHANGES: - -* go: Update go version to 1.15.7 [[GH-10730](https://github.com/hashicorp/vault/pull/10730)] - -FEATURES: - -* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)] - -IMPROVEMENTS: - -* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)] -* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)] -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in a DR operation token for -authenticating the request. -* core: allow setting tls_servername for raft retry/auto-join (see the sketch below) [[GH-10698](https://github.com/hashicorp/vault/pull/10698)] - -BUG FIXES: - -* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
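As a sketch of the raft retry/auto-join entry flagged above: the improvement corresponds to the `leader_tls_servername` field referenced elsewhere in this changelog, which lets the leader's certificate be validated against an explicit name. Addresses and file paths below are placeholders.

```hcl
# Raft storage stanza whose retry_join block presents an explicit TLS
# server name (1.6.2). Addresses and file paths are placeholders.
storage "raft" {
  path    = "/opt/vault/data"
  node_id = "node-1"

  retry_join {
    leader_api_addr     = "https://10.0.1.10:8200"
    leader_ca_cert_file = "/etc/vault/tls/ca.pem"

    # Validate the leader's certificate against this name rather than the
    # host portion of leader_api_addr.
    leader_tls_servername = "vault.example.internal"
  }
}
```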
-* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)] -* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)] -* storage/raft (enterprise): Automated snapshots with Azure required specifying -`azure_blob_environment`, which should have defaulted to `AZUREPUBLICCLOUD`. -* storage/raft (enterprise): Autosnapshots config and storage weren't excluded from -performance replication, causing conflicts and errors. -* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)] -* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)] - -## 1.6.1 -### December 16, 2020 - -SECURITY: - -* LDAP Auth Method: We addressed an issue where error messages returned by the - LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault - Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). -* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent - users within namespaces from applying Sentinel EGP policies to paths above - their namespace. This vulnerability affects Vault Enterprise and is fixed in - 1.5.6 and 1.6.1 (CVE-2020-35453). - -IMPROVEMENTS: - -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] -* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)] -* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)] - -BUG FIXES: - -* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)] -* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)] -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and -`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)] -* core: Make all APIs that report init status consistent, and make them report -initialized=true when a Raft join is in progress.
[[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
-* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
-* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
-* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
-* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
-* storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and didn't work. It has been renamed to aws_s3_kms_key and now works: when provided, the given key will be used to encrypt the snapshot using AWS KMS.
-* transform (enterprise): Fix tokenization bug in handling metadata on exportable stores
-* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
-* transform (enterprise): Make expiration timestamps human readable
-* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
-* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
-* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
-* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
-* ui: Fix radio click on secrets and auth list pages. [[GH-10586](https://github.com/hashicorp/vault/pull/10586)]
-
-## 1.6.0
-### November 11th, 2020
-
-NOTE:
-
-Binaries for 32-bit macOS (i.e. the `darwin_386` build) will no longer be published. This target was dropped in the latest version of the Go compiler.
-
-CHANGES:
-
-* agent: Agent now properly returns a non-zero exit code on error, such as one due to template rendering failure. Using `error_on_missing_key` in the template config will cause agent to immediately exit on failure. In order to make agent properly exit due to continuous failure from template rendering errors, the old behavior of indefinitely restarting the template server is now changed to exit once the default retry attempt of 12 times (with exponential backoff) gets exhausted. [[GH-9670](https://github.com/hashicorp/vault/pull/9670)]
-* token: Periodic tokens generated by auth methods will have the period value stored in their token entry. [[GH-7885](https://github.com/hashicorp/vault/pull/7885)]
-* core: New telemetry metrics reporting mount table size and number of entries [[GH-10201](https://github.com/hashicorp/vault/pull/10201)]
-* go: Updated Go version to 1.15.4 [[GH-10366](https://github.com/hashicorp/vault/pull/10366)]
-
-FEATURES:
-
-* **Couchbase Secrets**: Vault can now manage static and dynamic credentials for Couchbase. [[GH-9664](https://github.com/hashicorp/vault/pull/9664)]
-* **Expanded Password Policy Support**: Custom password policies are now supported for all database engines.
-* **Integrated Storage Auto Snapshots (Enterprise)**: This feature enables an operator to schedule snapshots of the integrated storage backend and ensure those snapshots are persisted elsewhere.
-* **Integrated Storage Cloud Auto Join**: This feature for integrated storage enables Vault nodes running in the cloud to automatically discover and join a Vault cluster via operator-supplied metadata.
-* **Key Management Secrets Engine (Enterprise; Tech Preview)**: This new secret engine allows securely distributing and managing keys to Azure cloud KMS services.
-* **Seal Migration**: With Vault 1.6, we will support migrating from an auto unseal mechanism to a different mechanism of the same type. For example, if you were using an AWS KMS key to automatically unseal, you can now migrate to a different AWS KMS key.
-* **Tokenization (Enterprise; Tech Preview)**: Tokenization supports creating irreversible “tokens” from sensitive data. Tokens can be used in less secure environments, protecting the original data.
-* **Vault Client Count**: Vault now counts the number of active entities (and non-entity tokens) per month and makes this information available via the "Metrics" section of the UI.
-
-IMPROVEMENTS:
-
-* auth/approle: Role names can now be referenced in templated policies through the `approle.metadata.role_name` property [[GH-9529](https://github.com/hashicorp/vault/pull/9529)]
-* auth/aws: Improve logic check on wildcard `BoundIamPrincipalARNs` and include role name on error messages on check failure [[GH-10036](https://github.com/hashicorp/vault/pull/10036)]
-* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-123](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/123)]
-* auth/jwt: Adding EdDSA (ed25519) to supported algorithms [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
-* auth/jwt: Improve cli authorization error [[GH-137](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/137)]
-* auth/jwt: Add OIDC namespace_in_state option [[GH-140](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/140)]
-* secrets/transit: fix missing plaintext in bulk decrypt response [[GH-9991](https://github.com/hashicorp/vault/pull/9991)]
-* command/server: Delay informational messages in -dev mode until logs have settled. [[GH-9702](https://github.com/hashicorp/vault/pull/9702)]
-* command/server: Add environment variable support for `disable_mlock`. [[GH-9931](https://github.com/hashicorp/vault/pull/9931)]
-* core/metrics: Add metrics for storage cache [[GH-10079](https://github.com/hashicorp/vault/pull/10079)]
-* core/metrics: Add metrics for leader status [[GH-10147](https://github.com/hashicorp/vault/pull/10147)]
-* physical/azure: Add the ability to use Azure Instance Metadata Service to set the credentials for Azure Blob storage on the backend. [[GH-10189](https://github.com/hashicorp/vault/pull/10189)]
-* sdk/framework: Add a time type for API fields.
[[GH-9911](https://github.com/hashicorp/vault/pull/9911)] -* secrets/database: Added support for password policies to all databases [[GH-9641](https://github.com/hashicorp/vault/pull/9641), - [and more](https://github.com/hashicorp/vault/pulls?q=is%3Apr+is%3Amerged+dbpw)] -* secrets/database/cassandra: Added support for static credential rotation [[GH-10051](https://github.com/hashicorp/vault/pull/10051)] -* secrets/database/elasticsearch: Added support for static credential rotation [[GH-19](https://github.com/hashicorp/vault-plugin-database-elasticsearch/pull/19)] -* secrets/database/hanadb: Added support for root credential & static credential rotation [[GH-10142](https://github.com/hashicorp/vault/pull/10142)] -* secrets/database/hanadb: Default password generation now includes dashes. Custom statements may need to be updated - to include quotes around the password field [[GH-10142](https://github.com/hashicorp/vault/pull/10142)] -* secrets/database/influxdb: Added support for static credential rotation [[GH-10118](https://github.com/hashicorp/vault/pull/10118)] -* secrets/database/mongodbatlas: Added support for root credential rotation [[GH-14](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/14)] -* secrets/database/mongodbatlas: Support scopes field in creations statements for MongoDB Atlas database plugin [[GH-15](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/15)] -* seal/awskms: Add logging during awskms auto-unseal [[GH-9794](https://github.com/hashicorp/vault/pull/9794)] -* storage/azure: Update SDK library to use [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) since previous library has been deprecated. [[GH-9577](https://github.com/hashicorp/vault/pull/9577/)] -* secrets/ad: `rotate-root` now supports POST requests like other secret engines [[GH-70](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/70)] -* ui: Add ui functionality for the Transform Secret Engine [[GH-9665](https://github.com/hashicorp/vault/pull/9665)] -* ui: Pricing metrics dashboard [[GH-10049](https://github.com/hashicorp/vault/pull/10049)] - -BUG FIXES: - -* auth/jwt: Fix bug preventing config edit UI from rendering [[GH-141](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/141)] -* cli: Don't open or overwrite a raft snapshot file on an unsuccessful `vault operator raft snapshot` [[GH-9894](https://github.com/hashicorp/vault/pull/9894)] -* core: Implement constant time version of shamir GF(2^8) math [[GH-9932](https://github.com/hashicorp/vault/pull/9932)] -* core: Fix resource leak in plugin API (plugin-dependent, not all plugins impacted) [[GH-9557](https://github.com/hashicorp/vault/pull/9557)] -* core: Fix race involved in enabling certain features via a license change -* core: Fix error handling in HCL parsing of objects with invalid syntax [[GH-410](https://github.com/hashicorp/hcl/pull/410)] -* identity: Check for timeouts in entity API [[GH-9925](https://github.com/hashicorp/vault/pull/9925)] -* secrets/database: Fix handling of TLS options in mongodb connection strings [[GH-9519](https://github.com/hashicorp/vault/pull/9519)] -* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. 
[[GH-93](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/93)] -* ui: Mask LDAP bindpass while typing [[GH-10087](https://github.com/hashicorp/vault/pull/10087)] -* ui: Update language in promote dr modal flow [[GH-10155](https://github.com/hashicorp/vault/pull/10155)] -* ui: Update language on replication primary dashboard for clarity [[GH-10205](https://github.com/hashicorp/vault/pull/10217)] -* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] - -## 1.5.9 -### May 20th, 2021 - -SECURITY: - -* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token -leases and dynamic secret leases with a zero-second TTL, causing them to be -treated as non-expiring, and never revoked. This issue affects Vault and Vault -Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and -1.7.2 (CVE-2021-32923). - -CHANGES: - -* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs -when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] -* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for -signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)] - -BUG FIXES: - -* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] - -## 1.5.8 -### 21 April 2021 - -SECURITY: - -* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the - Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions - 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) - -CHANGES: - -* go: Update to Go 1.14.15 [[GH-11397](https://github.com/hashicorp/vault/pull/11397)] - -IMPROVEMENTS: - -* core: Add tls_max_version listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)] - -BUG FIXES: - -* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)] -* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)] -* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)] -* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)] - -## 1.5.7 -### January 29, 2021 - -SECURITY: - -* IP Address Disclosure: We fixed a vulnerability where, under some error -conditions, Vault would return an error message disclosing internal IP -addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in -1.6.2 and 1.5.7 (CVE-2021-3024). -* Mount Path Disclosure: Vault previously returned different HTTP status codes for -existent and non-existent mount paths. This behavior would allow unauthenticated -brute force attacks to reveal which paths had valid mounts. This issue affects -Vault and Vault Enterprise and is fixed in 1.6.2 and 1.5.7 (CVE-2020-25594). - -IMPROVEMENTS: - -* storage/raft (enterprise): Listing of peers is now allowed on DR secondary -cluster nodes, as an update operation that takes in DR operation token for -authenticating the request. 
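The DR-secondary peer-listing improvement above is exposed through `vault operator raft list-peers`. A minimal sketch of how an operator might use it, assuming a DR operation token has already been generated and exported as `DR_TOKEN` (the `-dr-token` flag name is taken from the raft CLI docs; verify it against your Vault version):

```shell
# On a DR secondary, peer listing is an update operation authorized by a
# DR operation token rather than a normal client token.
vault operator raft list-peers -dr-token="$DR_TOKEN"
```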
- -BUG FIXES: - -* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)] -* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)] - -## 1.5.6 -### December 16, 2020 - -SECURITY: - -* LDAP Auth Method: We addressed an issue where error messages returned by the - LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault - Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177). -* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent - users within namespaces from applying Sentinel EGP policies to paths above - their namespace. This vulnerability affects Vault Enterprise and is fixed in - 1.5.6 and 1.6.1. - -IMPROVEMENTS: - -* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)] - -BUG FIXES: - -* core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. -* core: Fix bug where updating an existing path quota could introduce a conflict [[GH-10285](https://github.com/hashicorp/vault/pull/10285)] -* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)] -* quotas (enterprise): Reset cache before loading quotas in the db during startup -* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)] - -## 1.5.5 -### October 21, 2020 - -IMPROVEMENTS: - -* auth/aws, core/seal, secret/aws: Set default IMDS timeouts to match AWS SDK [[GH-10133](https://github.com/hashicorp/vault/pull/10133)] - -BUG FIXES: - -* auth/aws: Restrict region selection when in the aws-us-gov partition to avoid IAM errors [[GH-9947](https://github.com/hashicorp/vault/pull/9947)] -* core (enterprise): Allow operators to add and remove (Raft) peers in a DR secondary cluster using Integrated Storage. -* core (enterprise): Add DR operation token to the remove peer API and CLI command (when DR secondary). -* core (enterprise): Fix deadlock in handling EGP policies -* core (enterprise): Fix extraneous error messages in DR Cluster -* secrets/mysql: Conditionally overwrite TLS parameters for MySQL secrets engine [[GH-9729](https://github.com/hashicorp/vault/pull/9729)] -* secrets/ad: Fix bug where `password_policy` setting was not using correct key when `ad/config` was read [[GH-71](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/71)] -* ui: Fix issue with listing roles and methods on the same auth methods with different names [[GH-10122](https://github.com/hashicorp/vault/pull/10122)] - -## 1.5.4 -### September 24th, 2020 - -SECURITY: - -* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816). 
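One quick way to confirm the corrected lease scheduling after upgrading is to mint a short-lived batch token and watch it expire on time. A rough sketch against a dev server (the TTL value and the `jq` usage are illustrative only):

```shell
# Create a batch token with a 60-second TTL and capture its value.
TOKEN=$(vault token create -type=batch -ttl=60s -format=json | jq -r '.auth.client_token')

# Immediately after creation the lookup succeeds...
vault token lookup "$TOKEN"

# ...and once the TTL elapses, the same lookup should fail, confirming the
# token was not treated as non-expiring.
sleep 65 && vault token lookup "$TOKEN"
```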
- -IMPROVEMENTS: - -* secrets/pki: Handle expiration of a cert not in storage as a success [[GH-9880](https://github.com/hashicorp/vault/pull/9880)] -* auth/kubernetes: Add an option to disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod [[GH-97]](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/97) -* secrets/gcp: Add check for 403 during rollback to prevent repeated deletion calls [[GH-97](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/97)] -* core: Disable usage metrics collection on performance standby nodes. [[GH-9966](https://github.com/hashicorp/vault/pull/9966)] -* credential/aws: Added X-Amz-Content-Sha256 as a default STS request header [[GH-10009](https://github.com/hashicorp/vault/pull/10009)] - -BUG FIXES: - -* agent: Fix `disable_fast_negotiation` not being set on the auth method when configured by user. [[GH-9892](https://github.com/hashicorp/vault/pull/9892)] -* core (enterprise): Fix hang when cluster-wide plugin reload cleanup is slow on unseal -* core (enterprise): Fix an error in cluster-wide plugin reload cleanup following such a reload -* core: Fix crash when metrics collection encounters zero-length keys in KV store [[GH-9811](https://github.com/hashicorp/vault/pull/9881)] -* mfa (enterprise): Fix incorrect handling of PingID responses that could result in auth requests failing -* replication (enterprise): Improve race condition when using a newly created token on a performance standby node -* replication (enterprise): Only write failover cluster addresses if they've changed -* ui: fix bug where dropdown for identity/entity management is not reflective of actual policy [[GH-9958](https://github.com/hashicorp/vault/pull/9958)] - -## 1.5.3 -### August 27th, 2020 - -NOTE: - -All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users. - -BUG FIXES: - -* auth/aws: Made header handling for IAM authentication more robust -* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing - -## 1.5.2.1 -### August 21st, 2020 -### Enterprise Only - -NOTE: - -Includes correct license in the HSM binary. - -## 1.5.2 -### August 20th, 2020 - -NOTE: - -OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. - -KNOWN ISSUES: - -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.5.2 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.2) -* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise - customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. - - -## 1.5.1 -### August 20th, 2020 - -SECURITY: - -* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) -* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. 
This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) -* When using Vault Agent with cert auto-auth and caching enabled, under certain circumstances, clients without permission to access agent's token may retrieve the token without login credentials. This vulnerability affects Vault Agent 1.1.0 and newer and is fixed in 1.5.1 (CVE-2020-17455) - -KNOWN ISSUES: - -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.5.1 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.1) - -CHANGES: - -* pki: The tidy operation will now remove revoked certificates if the parameter `tidy_revoked_certs` is set to `true`. This will result in certificate entries being immediately removed, as opposed to awaiting until its NotAfter time. Note that this only affects certificates that have been already revoked. [[GH-9609](https://github.com/hashicorp/vault/pull/9609)] -* go: Updated Go version to 1.14.7 - -IMPROVEMENTS: - -* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-9574](https://github.com/hashicorp/vault/pull/9574)] -* auth/jwt: Add EdDSA to supported algorithms. [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)] -* secrets/openldap: Add "ad" schema that allows the engine to correctly rotate AD passwords. [[GH-9740](https://github.com/hashicorp/vault/pull/9740)] -* pki: Add a `allowed_domains_template` parameter that enables the use of identity templating within the `allowed_domains` parameter. [[GH-8509](https://github.com/hashicorp/vault/pull/8509)] -* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)] -* ui: Wrap TTL option on transit engine export action is updated to a new component. [[GH-9632](https://github.com/hashicorp/vault/pull/9632)] -* ui: Wrap Tool uses newest version of TTL Picker component. [[GH-9691](https://github.com/hashicorp/vault/pull/9691)] - -BUG FIXES: - -* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-9603](https://github.com/hashicorp/vault/pull/9603)] -* replication (enterprise): Fix status API output incorrectly stating replication is in `idle` state. -* replication (enterprise): Use PrimaryClusterAddr if it's been set -* core: Fix panic when printing over-long info fields at startup [[GH-9681](https://github.com/hashicorp/vault/pull/9681)] -* core: Seal migration using the new minimal-downtime strategy didn't work properly with performance standbys. [[GH-9690](https://github.com/hashicorp/vault/pull/9690)] -* core: Vault failed to start when there were non-string values in seal configuration [[GH-9555](https://github.com/hashicorp/vault/pull/9555)] -* core: Handle a trailing slash in the API address used for enabling replication - -## 1.5.0 -### July 21st, 2020 - -CHANGES: - -* audit: Token TTL and issue time are now provided in the auth portion of audit logs. [[GH-9091](https://github.com/hashicorp/vault/pull/9091)] -* auth/gcp: Changes the default name of the entity alias that gets created to be the role ID for both IAM and GCE authentication. 
[[GH-99](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/99)] -* core: Remove the addition of newlines to parsed configuration when using integer/boolean values [[GH-8928](https://github.com/hashicorp/vault/pull/8928)] -* cubbyhole: Reject reads and writes to an empty ("") path. [[GH-8971](https://github.com/hashicorp/vault/pull/8971)] -* secrets/azure: Default password generation changed from uuid to cryptographically secure randomized string [[GH-40](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/40)] -* storage/gcs: The `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` environment variable - or default credentials may be used instead [[GH-9424](https://github.com/hashicorp/vault/pull/9424)] -* storage/raft: The storage configuration now accepts a new `max_entry_size` config that will limit - the total size in bytes of any entry committed via raft. It defaults to `"1048576"` (1MiB). [[GH-9027](https://github.com/hashicorp/vault/pull/9027)] -* token: Token creation with custom token ID via `id` will no longer allow periods (`.`) as part of the input string. - The final generated token value may contain periods, such as the `s.` prefix for service token - indication. [[GH-8646](https://github.com/hashicorp/vault/pull/8646/files)] -* token: Token renewals will now return token policies within the `token_policies` , identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)] -* go: Updated Go version to 1.14.4 - -FEATURES: - -* **Monitoring**: We have released a Splunk App [9] for Enterprise customers. The app is accompanied by an updated monitoring guide and a few new metrics to enable OSS users to effectively monitor Vault. -* **Password Policies**: Allows operators to customize how passwords are generated for select secret engines (OpenLDAP, Active Directory, Azure, and RabbitMQ). -* **Replication UI Improvements**: We have redesigned the replication UI to highlight the state and relationship between primaries and secondaries and improved management workflows, enabling a more holistic understanding of multiple Vault clusters. -* **Resource Quotas**: As of 1.5, Vault supports specifying a quota to rate limit requests on OSS and Enterprise. Enterprise customers also have access to set quotas on the number of leases that can be generated on a path. -* **OpenShift Support**: We have updated the Helm charts to allow users to install Vault onto their OpenShift clusters. -* **Seal Migration**: We have made updates to allow migrations from auto unseal to Shamir unseal on Enterprise. -* **AWS Auth Web Identity Support**: We've added support for AWS Web Identities, which will be used in the credentials chain if present. -* **Vault Monitor**: Similar to the monitor command for Consul and Nomad, we have added the ability for Vault to stream logs from other Vault servers at varying log levels. -* **AWS Secrets Groups Support**: IAM users generated by Vault may now be added to IAM Groups. -* **Integrated Storage as HA Storage**: In Vault 1.5, it is possible to use Integrated Storage as HA Storage with a different storage backend as regular storage. -* **OIDC Auth Provider Extensions**: We've added support to OIDC Auth to incorporate IdP-specific extensions. Currently this includes expanded Azure AD groups support. -* **GCP Secrets**: Support BigQuery dataset ACLs in absence of IAM endpoints. 
-* **KMIP**: Add support for signing client certificates requests (CSRs) rather than having them be generated entirely within Vault. - -IMPROVEMENTS: - -* audit: Replication status requests are no longer audited. [[GH-8877](https://github.com/hashicorp/vault/pull/8877)] -* audit: Added mount_type field to requests and responses. [[GH-9167](https://github.com/hashicorp/vault/pull/9167)] -* auth/aws: Add support for Web Identity credentials [[GH-7738](https://github.com/hashicorp/vault/pull/7738)] -* auth/jwt: Support users that are members of more than 200 groups on Azure [[GH-120](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/120)] -* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)] -* auth/kubernetes: Allow disabling `iss` validation [[GH-91](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/91)] -* auth/kubernetes: Try reading the ca.crt and TokenReviewer JWT from the default service account [[GH-83](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/83)] -* cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)] -* cli: Add a new subcommand, `vault monitor`, for tailing server logs in the console. [[GH-8477](https://github.com/hashicorp/vault/pull/8477)] -* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)] -* core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)] -* core: New telemetry metrics covering token counts, token creation, KV secret counts, lease creation. [[GH-9239](https://github.com/hashicorp/vault/pull/9239)] [[GH-9250](https://github.com/hashicorp/vault/pull/9250)] [[GH-9244](https://github.com/hashicorp/vault/pull/9244)] [[GH-9052](https://github.com/hashicorp/vault/pull/9052)] -* physical/gcs: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9424](https://github.com/hashicorp/vault/pull/9424)] -* physical/spanner: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9423](https://github.com/hashicorp/vault/pull/9423)] -* plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)] -* plugin (enterprise): Add a scope field to plugin reload, which when global, reloads the plugin anywhere in a cluster. 
[[GH-9347](https://github.com/hashicorp/vault/pull/9347)] -* sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)] -* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)] -* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)] -* secrets/database: Add static role rotation for MSSQL database plugin [[GH-9062](https://github.com/hashicorp/vault/pull/9062)] -* secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)] -* secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)] -* secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)] -* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)] -* secrets/ssh: The [Vault SSH Helper](https://github.com/hashicorp/vault-ssh-helper) can now be configured to reference a mount in a namespace [[GH-44](https://github.com/hashicorp/vault-ssh-helper/pull/44)] -* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)] -* secrets/transit: Improving transit batch encrypt and decrypt latencies [[GH-8775](https://github.com/hashicorp/vault/pull/8775)] -* sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults. -* ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)] -* ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)] -* ui: Add replication dashboards. Improve replication management workflows. [[GH-8705]](https://github.com/hashicorp/vault/pull/8705). -* ui: Update alert banners to match design systems black text. [[GH-9463]](https://github.com/hashicorp/vault/pull/9463). - -BUG FIXES: - -* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. 
[[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
-* core: Extend replicated cubbyhole fix in 1.4.0 to cover case where a performance primary is also a DR primary [[GH-9148](https://github.com/hashicorp/vault/pull/9148)]
-* replication (enterprise): Use the PrimaryClusterAddr if it's been set
-* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
-* sentinel: fix panic due to concurrent map access when rules iterate over metadata maps
-* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
-* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
-* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
-* secrets/database: Fix parsing of multi-line PostgreSQL statements [[GH-8512](https://github.com/hashicorp/vault/pull/8512)]
-* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-90](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/90)]
-* secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
-* ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines. [[GH-8913]](https://github.com/hashicorp/vault/pull/8913)
-* ui: Disallow max versions value larger than 9999999999999999 on kv2 secrets engine. [[GH-9242](https://github.com/hashicorp/vault/pull/9242)]
-* ui: Add and upgrade missing dependencies to resolve a failure with `make static-dist`. [[GH-9277](https://github.com/hashicorp/vault/pull/9371)]
-
-## 1.4.7.1
-### October 15th, 2020
-### Enterprise Only
-
-BUG FIXES:
-* replication (enterprise): Fix panic when old filter path evaluation fails
-
-## 1.4.7
-### September 24th, 2020
-
-SECURITY:
-
-* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
-
-IMPROVEMENTS:
-
-* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
-
-BUG FIXES:
-* replication (enterprise): Don't stop replication if old filter path evaluation fails
-
-## 1.4.6
-### August 27th, 2020
-
-NOTE:
-
-All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
-
-BUG FIXES:
-
-* auth/aws: Made header handling for IAM authentication more robust
-* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing [[GH-9824](https://github.com/hashicorp/vault/pull/9824)]
-
-## 1.4.5.1
-### August 21st, 2020
-### Enterprise Only
-
-NOTE:
-
-Includes correct license in the HSM binary.
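Because the git tags listed above now build correctly for open source users, building one of these patch releases from source is straightforward. A rough sketch, assuming the `bootstrap` and `dev` Makefile targets present in the repository at the time (adjust the tag as needed):

```shell
git clone https://github.com/hashicorp/vault.git
cd vault
git checkout v1.4.6   # or v1.5.3, v1.3.10, v1.2.7
make bootstrap        # install the Go build tooling the Makefile expects
make dev              # compile a local binary
bin/vault version     # confirm the expected version string
```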
- -## 1.4.5 -### August 20th, 2020 - -NOTE: - -OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. - -KNOWN ISSUES: - -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.4.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.5) -* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise - customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. - - -## 1.4.4 -### August 20th, 2020 - -SECURITY: - -* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) -* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) - -KNOWN ISSUES: - -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.4.4 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.4) - -BUG FIXES: - -* auth/okta: fix bug introduced in 1.4.0: only 200 external groups were fetched even if user belonged to more [[GH-9580](https://github.com/hashicorp/vault/pull/9580)] -* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)] -* secrets/aws: Fix possible issue creating access keys when using Performance Standbys [[GH-9606](https://github.com/hashicorp/vault/pull/9606)] - -IMPROVEMENTS: -* auth/aws: Retry on transient failures during AWS IAM auth login attempts [[GH-8727](https://github.com/hashicorp/vault/pull/8727)] -* ui: Add transit key algorithms aes128-gcm96, ecdsa-p384, ecdsa-p521 to the UI. [[GH-9070](https://github.com/hashicorp/vault/pull/9070)] & [[GH-9520](https://github.com/hashicorp/vault/pull/9520)] - -## 1.4.3 -### July 2nd, 2020 - -IMPROVEMENTS: - -* auth/aws: Add support for Web Identity credentials [[GH-9251](https://github.com/hashicorp/vault/pull/9251)] -* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)] -* core: Add the Go version used to build a Vault binary to the server message output. 
[[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
-* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-9311](https://github.com/hashicorp/vault/pull/9311)]
-* physical/mysql: Require TLS or plaintext flagging in MySQL configuration [[GH-9012](https://github.com/hashicorp/vault/pull/9012)]
-* ui: Link to the Vault Changelog in the UI footer [[GH-9216](https://github.com/hashicorp/vault/pull/9216)]
+* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id
+website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)]
+* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch
+option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)]
+* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)]
+* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)]
+* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)]

BUG FIXES:
-* agent: Restart template server when it shuts down [[GH-9200](https://github.com/hashicorp/vault/pull/9200)]
-* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-9278](https://github.com/hashicorp/vault/pull/9278)]
-* replication: The issue causing cubbyholes in namespaces on performance secondaries to not work, which was fixed in 1.4.0, was still an issue when the primary was both a performance primary and DR primary.
-* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
-* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9207](https://github.com/hashicorp/vault/pull/9207)]
-* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9208](https://github.com/hashicorp/vault/pull/9208)]
-* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-9277](https://github.com/hashicorp/vault/pull/9277)]
+* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. [[GH-19483](https://github.com/hashicorp/vault/pull/19483)]
+* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error.
+* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)]
+* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects.
+* kmip (enterprise): Fix a problem forwarding some requests to the active node.
+* openapi: Fix logic for labeling unauthenticated/sudo paths.
[[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)] +* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)] +* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)] +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted [[GH-19541](https://github.com/hashicorp/vault/pull/19541)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.4.2 (May 21st, 2020) +## 1.13.0 +### March 01, 2023 SECURITY: -* core: Proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4.0 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)] -* secrets/gcp: Fix a regression in 1.4.0 where the system TTLs were being used instead of the configured backend TTLs for dynamic service accounts. This vulnerability is CVE-2020-12757. [[GH-85](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/85)] - -IMPROVEMENTS: - -* storage/raft: The storage stanza now accepts `leader_ca_cert_file`, `leader_client_cert_file`, and - `leader_client_key_file` parameters to read and parse TLS certificate information from paths on disk. - Existing non-path based parameters will continue to work, but their values will need to be provided as a - single-line string with newlines delimited by `\n`. [[GH-8894](https://github.com/hashicorp/vault/pull/8894)] -* storage/raft: The `vault status` CLI command and the `sys/leader` API now contain the committed and applied - raft indexes. [[GH-9011](https://github.com/hashicorp/vault/pull/9011)] - -BUG FIXES: - -* auth/aws: Fix token renewal issues caused by the metadata changes in 1.4.1 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)] -* auth/ldap: Fix 1.4.0 regression that could result in auth failures when LDAP auth config includes upndomain. 
[[GH-9041](https://github.com/hashicorp/vault/pull/9041)] -* secrets/ad: Forward rotation requests from standbys to active clusters [[GH-66](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/66)] -* secrets/database: Prevent generation of usernames that are not allowed by the MongoDB Atlas API [[GH-9](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/9)] -* secrets/database: Return an error if a manual rotation of static account credentials fails [[GH-9035](https://github.com/hashicorp/vault/pull/9035)] -* secrets/openldap: Forward all rotation requests from standbys to active clusters [[GH-9028](https://github.com/hashicorp/vault/pull/9028)] -* secrets/transform (enterprise): Fix panic that could occur when accessing cached template entries, such as a requests - that accessed templates directly or indirectly from a performance standby node. -* serviceregistration: Fix a regression for Consul service registration that ignored using the listener address as - the redirect address unless api_addr was provided. It now properly uses the same redirect address as the one - used by Vault's Core object. [[GH-8976](https://github.com/hashicorp/vault/pull/8976)] -* storage/raft: Advertise the configured cluster address to the rest of the nodes in the raft cluster. This fixes - an issue where a node advertising 0.0.0.0 is not using a unique hostname. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)] -* storage/raft: Fix panic when multiple nodes attempt to join the cluster at once. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)] -* sys: The path provided in `sys/internal/ui/mounts/:path` is now namespace-aware. This fixes an issue - with `vault kv` subcommands that had namespaces provided in the path returning permission denied all the time. - [[GH-8962](https://github.com/hashicorp/vault/pull/8962)] -* ui: Fix snowman that appears when namespaces have more than one period [[GH-8910](https://github.com/hashicorp/vault/pull/8910)] - -## 1.4.1 (April 30th, 2020) - -CHANGES: - -* auth/aws: The default set of metadata fields added in 1.4.1 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] -* storage/raft: Disallow `ha_storage` to be specified if `raft` is set as the `storage` type. 
[[GH-8707](https://github.com/hashicorp/vault/pull/8707)]
-IMPROVEMENTS:
-
-* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
-* auth/aws: Improve region selection to avoid errors seen if the account hasn't enabled some newer AWS regions [[GH-8679](https://github.com/hashicorp/vault/pull/8679)]
-* auth/azure: Enable login from Azure VMs with user-assigned identities [[GH-33](https://github.com/hashicorp/vault-plugin-auth-azure/pull/33)]
-* auth/gcp: The set of metadata stored during login is now configurable [[GH-92](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/92)]
-* auth/gcp: The type of alias name used during login is now configurable [[GH-95](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/95)]
-* auth/ldap: Improve error messages during LDAP operation failures [[GH-8740](https://github.com/hashicorp/vault/pull/8740)]
-* identity: Add a batch delete API for identity entities [[GH-8785]](https://github.com/hashicorp/vault/pull/8785)
-* identity: Improve performance of logins when no group updates are needed [[GH-8795]](https://github.com/hashicorp/vault/pull/8795)
-* metrics: Add `vault.identity.num_entities` metric [[GH-8816]](https://github.com/hashicorp/vault/pull/8816)
-* secrets/kv: Allow `delete-version-after` to be reset to 0 via the CLI [[GH-8635](https://github.com/hashicorp/vault/pull/8635)]
-* secrets/rabbitmq: Improve error handling and reporting [[GH-8619](https://github.com/hashicorp/vault/pull/8619)]
-* ui: Provide One Time Password during Operation Token generation process [[GH-8630]](https://github.com/hashicorp/vault/pull/8630)
-
-BUG FIXES:
-
-* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
-* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
-* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
-* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
-* core: Fix blocked requests if a SIGHUP is issued while a long-running request holds the state lock. Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time. [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
-* core: Always rewrite the .vault-token file as part of a `vault login` to ensure permissions and ownership are set correctly [[GH-8867](https://github.com/hashicorp/vault/pull/8867)]
-* database/mongodb: Fix context deadline error that may result due to retry attempts on failed commands [[GH-8863](https://github.com/hashicorp/vault/pull/8863)]
-* http: Fix superfluous call messages from the http package on logs caused by missing returns after `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
-* namespace (enterprise): Fix namespace listing to return `key_info` when a scoping namespace is also provided.
-* seal/gcpkms: Fix panic that could occur if all seal parameters were provided via environment - variables [[GH-8840](https://github.com/hashicorp/vault/pull/8840)] -* storage/raft: Fix memory allocation and incorrect metadata tracking issues with snapshots [[GH-8793](https://github.com/hashicorp/vault/pull/8793)] -* storage/raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)] -* storage/raft: Handle errors returned from the API during snapshot operations [[GH-8861](https://github.com/hashicorp/vault/pull/8861)] -* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)] - -## 1.4.0 (April 7th, 2020) +* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] CHANGES: -* cli: The raft configuration command has been renamed to list-peers to avoid - confusion. +* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] +* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] +* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. +This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] +* core: Bump Go version to 1.20.1. +* core: Vault version has been moved out of sdk and into main vault module. +Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. [[GH-14229](https://github.com/hashicorp/vault/pull/14229)] +* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] +* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. 
[[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
+* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
+* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)]
+* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)]
+* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)]
+* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on.
+* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)]

FEATURES:

-* **Kerberos Authentication**: Vault now supports Kerberos authentication using a SPNEGO token. Login can be performed using the Vault CLI, API, or agent.
-* **Kubernetes Service Discovery**: A new Kubernetes service discovery feature where, if configured, Vault will tag Vault pods with their current health status. For more, see [#8249](https://github.com/hashicorp/vault/pull/8249).
-* **MongoDB Atlas Secrets**: Vault can now generate dynamic credentials for both MongoDB Atlas databases as well as the [Atlas programmatic interface](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/).
-* **OpenLDAP Secrets Engine**: We now support password management of existing OpenLDAP user entries. For more, see [#8360](https://github.com/hashicorp/vault/pull/8360/).
-* **Redshift Database Secrets Engine**: The database secrets engine now supports static and dynamic secrets for the Amazon Web Services (AWS) Redshift service.
-* **Service Registration Config**: A newly introduced `service_registration` configuration stanza, that allows for service registration to be configured separately from the storage backend. For more, see [#7887](https://github.com/hashicorp/vault/pull/7887/).
-* **Transform Secrets Engine (Enterprise)**: A new secrets engine that handles secure data transformations against provided input values.
-* **Integrated Storage**: Promoted out of beta and into general availability for both open-source and enterprise workloads.
-
-IMPROVEMENTS:
-
-* agent: add option to force the use of the auto-auth token, and ignore the Vault token in the request [[GH-8101](https://github.com/hashicorp/vault/pull/8101)]
-* api: Restore and fix DNS SRV Lookup [[GH-8520](https://github.com/hashicorp/vault/pull/8520)]
-* audit: HMAC http_raw_body in audit log; this ensures that large authenticated Prometheus metrics responses get replaced with short HMAC values [[GH-8130](https://github.com/hashicorp/vault/pull/8130)]
-* audit: Generate-root, generate-recovery-token, and generate-dr-operation-token requests and responses are now audited.
[[GH-8301](https://github.com/hashicorp/vault/pull/8301)] -* auth/aws: Reduce the number of simultaneous STS client credentials needed [[GH-8161](https://github.com/hashicorp/vault/pull/8161)] -* auth/azure: subscription ID, resource group, vm and vmss names are now stored in alias metadata [[GH-30](https://github.com/hashicorp/vault-plugin-auth-azure/pull/30)] -* auth/jwt: Additional OIDC callback parameters available for CLI logins [[GH-80](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/80) & [GH-86](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/86)] -* auth/jwt: Bound claims may be optionally configured using globs [[GH-89](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/89)] -* auth/jwt: Timeout during OIDC CLI login if process doesn't complete within 2 minutes [[GH-97](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/97)] -* auth/jwt: Add support for the `form_post` response mode [[GH-98](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/98)] -* auth/jwt: add optional client_nonce to authorization flow [[GH-104](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/104)] -* auth/okta: Upgrade okta sdk lib, which should improve handling of groups [[GH-8143](https://github.com/hashicorp/vault/pull/8143)] -* aws: Add support for v2 of the instance metadata service (see [issue 7924](https://github.com/hashicorp/vault/issues/7924) for all linked PRs) -* core: Separate out service discovery interface from storage interface to allow - new types of service discovery not coupled to storage [[GH-7887](https://github.com/hashicorp/vault/pull/7887)] -* core: Add support for telemetry option `metrics_prefix` [[GH-8340](https://github.com/hashicorp/vault/pull/8340)] -* core: Entropy Augmentation can now be used with AWS KMS and Vault Transit seals -* core: Allow tls_min_version to be set to TLS 1.3 [[GH-8305](https://github.com/hashicorp/vault/pull/8305)] -* cli: Incorrect TLS configuration will now correctly fail [[GH-8025](https://github.com/hashicorp/vault/pull/8025)] -* identity: Allow specifying a custom `client_id` for identity tokens [[GH-8165](https://github.com/hashicorp/vault/pull/8165)] -* metrics/prometheus: improve performance with high volume of metrics updates [[GH-8507](https://github.com/hashicorp/vault/pull/8507)] -* replication (enterprise): Fix race condition causing clusters with high throughput writes to sometimes - fail to enter streaming-wal mode -* replication (enterprise): Secondary clusters can now perform an extra gRPC call to all nodes in a primary - cluster in an attempt to resolve the active node's address -* replication (enterprise): The replication status API now outputs `last_performance_wal`, `last_dr_wal`, - and `connection_state` values -* replication (enterprise): DR secondary clusters can now be recovered by the `replication/dr/secondary/recover` - API -* replication (enterprise): We now allow for an alternate means to create a Disaster Recovery token, by using a batch - token that is created with an ACL that allows for access to one or more of the DR endpoints. 
-* secrets/database/mongodb: Switched internal MongoDB driver to mongo-driver [[GH-8140](https://github.com/hashicorp/vault/pull/8140)] -* secrets/database/mongodb: Add support for x509 client authorization to MongoDB [[GH-8329](https://github.com/hashicorp/vault/pull/8329)] -* secrets/database/oracle: Add support for static credential rotation [[GH-26](https://github.com/hashicorp/vault-plugin-database-oracle/pull/26)] -* secrets/consul: Add support to specify TLS options per Consul backend [[GH-4800](https://github.com/hashicorp/vault/pull/4800)] -* secrets/gcp: Allow specifying the TTL for a service key [[GH-54](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/54)] -* secrets/gcp: Add support for rotating root keys [[GH-53](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/53)] -* secrets/gcp: Handle version 3 policies for Resource Manager IAM requests [[GH-77](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/77)] -* secrets/nomad: Add support to specify TLS options per Nomad backend [[GH-8083](https://github.com/hashicorp/vault/pull/8083)] -* secrets/ssh: Allowed users can now be templated with identity information [[GH-7548](https://github.com/hashicorp/vault/pull/7548)] -* secrets/transit: Adding RSA3072 key support [[GH-8151](https://github.com/hashicorp/vault/pull/8151)] -* storage/consul: Vault returns now a more descriptive error message when only a client cert or - a client key has been provided [[GH-4930]](https://github.com/hashicorp/vault/pull/8084) -* storage/raft: Nodes in the raft cluster can all be given possible leader - addresses for them to continuously try and join one of them, thus automating - the process of join to a greater extent [[GH-7856](https://github.com/hashicorp/vault/pull/7856)] -* storage/raft: Fix a potential deadlock that could occur on leadership transition [[GH-8547](https://github.com/hashicorp/vault/pull/8547)] -* storage/raft: Refresh TLS keyring on snapshot restore [[GH-8546](https://github.com/hashicorp/vault/pull/8546)] -* storage/etcd: Bumped etcd client API SDK [[GH-7931](https://github.com/hashicorp/vault/pull/7931) & [GH-4961](https://github.com/hashicorp/vault/pull/4961) & [GH-4349](https://github.com/hashicorp/vault/pull/4349) & [GH-7582](https://github.com/hashicorp/vault/pull/7582)] -* ui: Make Transit Key actions more prominent [[GH-8304](https://github.com/hashicorp/vault/pull/8304)] -* ui: Add Core Usage Metrics [[GH-8347](https://github.com/hashicorp/vault/pull/8347)] -* ui: Add refresh Namespace list on the Namespace dropdown, and redesign of Namespace dropdown menu [[GH-8442](https://github.com/hashicorp/vault/pull/8442)] -* ui: Update transit actions to codeblocks & automatically encode plaintext unless indicated [[GH-8462](https://github.com/hashicorp/vault/pull/8462)] -* ui: Display the results of transit key actions in a modal window [[GH-8462](https://github.com/hashicorp/vault/pull/8575)] -* ui: Transit key version styling updates & ability to copy key from dropdown [[GH-8480](https://github.com/hashicorp/vault/pull/8480)] - -BUG FIXES: - -* agent: Fix issue where TLS options are ignored for agent template feature [[GH-7889](https://github.com/hashicorp/vault/pull/7889)] -* auth/jwt: Use lower case role names for `default_role` to match the `role` case convention [[GH-100](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/100)] -* auth/ldap: Fix a bug where the UPNDOMAIN parameter was wrongly used to lookup the group - membership of the given user 
[[GH-6325]](https://github.com/hashicorp/vault/pull/8333) -* cli: Support autocompletion for nested mounts [[GH-8303](https://github.com/hashicorp/vault/pull/8303)] -* cli: Fix CLI namespace autocompletion [[GH-8315](https://github.com/hashicorp/vault/pull/8315)] -* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)] -* metrics/stackdriver: Fix issue that prevents the stackdriver metrics library to create unnecessary stackdriver descriptors [[GH-8073](https://github.com/hashicorp/vault/pull/8073)] -* replication (enterprise): Fix issue causing cubbyholes in namespaces on performance secondaries to not work. -* replication (enterprise): Unmounting a dynamic secrets backend could sometimes lead to replication errors. Change the order of operations to prevent that. -* seal (enterprise): Fix seal migration when transactional seal wrap backend is in use. -* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)] -* secrets/database/mysql: Ensures default static credential rotation statements are used [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] -* secrets/database/mysql: Fix inconsistent query parameter names: {{name}} or {{username}} for - different queries. Now it allows for either for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] -* secrets/database/postgres: Fix inconsistent query parameter names: {{name}} or {{username}} for - different queries. Now it allows for either for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)] -* secrets/pki: Support FQDNs in DNS Name [[GH-8288](https://github.com/hashicorp/vault/pull/8288)] -* storage/raft: Allow seal migration to be performed on Vault clusters using raft storage [[GH-8103](https://github.com/hashicorp/vault/pull/8103)] -* telemetry: Prometheus requests on standby nodes will now return an error instead of forwarding - the request to the active node [[GH-8280](https://github.com/hashicorp/vault/pull/8280)] -* ui: Fix broken popup menu on the transit secrets list page [[GH-8348](https://github.com/hashicorp/vault/pull/8348)] -* ui: Update headless Chrome flag to fix `yarn run test:oss` [[GH-8035](https://github.com/hashicorp/vault/pull/8035)] -* ui: Update CLI to accept empty strings as param value to reset previously-set values -* ui: Fix bug where error states don't clear when moving between action tabs on Transit [[GH-8354](https://github.com/hashicorp/vault/pull/8354)] - -## 1.3.10 -### August 27th, 2020 - -NOTE: - -All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users. - -BUG FIXES: - -* auth/aws: Made header handling for IAM authentication more robust - -## 1.3.9.1 -### August 21st, 2020 -### Enterprise Only - -NOTE: - -Includes correct license in the HSM binary. - -## 1.3.9 -### August 20th, 2020 - -NOTE: - -OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. - -KNOWN ISSUES: - -* AWS IAM logins may return an error depending on the headers sent with the request. 
- For more details and a workaround, see the [1.3.9 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.9) -* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise - customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. - -## 1.3.8 -### August 20th, 2020 - -SECURITY: - -* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) -* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) - -KNOWN ISSUES: +* **User lockout**: Ignore repeated bad credentials from the same user for a configured period of time. Enabled by default. +* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] +* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] +* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] +* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] +* **PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. [[GH-19196](https://github.com/hashicorp/vault/pull/19196)] +* **Server UDS Listener**: Adds a listener to the Vault server to serve HTTP requests via a Unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] +* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys +* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent +brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] +* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing, as sketched below.
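+
+A minimal sketch of opting into the new behaviour, assuming the `group_policy_application_mode` parameter and the `any` / `within_namespace_hierarchy` modes named in the IMPROVEMENTS entry further below:
+
+```shell
+# Hedged sketch: allow group policies to apply to groups in any namespace,
+# rather than only within the namespace hierarchy (the default mode).
+vault write sys/config/group-policy-application \
+    group_policy_application_mode="any"
+```
+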
+* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. + +IMPROVEMENTS: + +* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] +* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] +* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] +* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. [[GH-18403](https://github.com/hashicorp/vault/pull/18403)] +* agent: Add note in logs when starting Vault Agent indicating if the version differs from the Vault server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] +* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent, as sketched below. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)] +* agent: Agent listeners can now be assigned the `metrics_only` role, serving only metrics, as part of the listener's new top-level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] +* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] +* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] +* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] +* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] +* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a +Virtual Machine Scale Set (VMSS) in flexible orchestration mode. [[GH-17540](https://github.com/hashicorp/vault/pull/17540)] +* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] +* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] +* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)]
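+
+A hedged sketch of an agent configuration combining the `token_file` auto-auth method and the listener `role` option from the entries above; the `token_file_path` parameter name and the exact listener shape are assumptions, not confirmed syntax:
+
+```shell
+# Hypothetical agent.hcl exercising the two new options.
+cat > agent.hcl <<'EOF'
+auto_auth {
+  method "token_file" {
+    config = {
+      token_file_path = "/etc/vault/agent-token"  # pre-existing token (assumed parameter name)
+    }
+  }
+}
+
+listener "tcp" {
+  address     = "127.0.0.1:8100"
+  tls_disable = true
+  role        = "metrics_only"  # this listener serves only metrics
+}
+EOF
+vault agent -config=agent.hcl
+```
+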
+* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] +* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] +* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/token (enterprise): Allow batch token creation in perfStandby nodes +* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. +Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] +* auth: Provide the IP address of requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] +* autopilot: Update version to v0.2.0 to add better support for respecting min quorum +* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] +* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] +* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. [[GH-17750](https://github.com/hashicorp/vault/pull/17750)] +* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] +* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] +* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] +* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] +* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] +* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies, as sketched below. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] +* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] +* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] +* core/server: Added an environment variable to write goroutine stacktraces to a +temporary file for SIGUSR2 signals. [[GH-17929](https://github.com/hashicorp/vault/pull/17929)]
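+
+A small sketch of the `-format=raw` flag from the cli entry above; the endpoint choice is illustrative:
+
+```shell
+# Print the original response body unmodified, instead of the
+# default table/JSON rendering.
+vault read -format=raw sys/health
+```
+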
+* core: Add RPCs to read and update userFailedLoginInfo map +* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* core: Add user lockout fields to the server config, which can also be configured per auth mount using auth tune, to prevent brute forcing in auth methods (see the sketch below) [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] +* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] +* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from +sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] +* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user +with the given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] +* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] +* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)] +* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. [[GH-17265](https://github.com/hashicorp/vault/pull/17265)] +* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] +* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] +* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] +* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] +* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)]
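+
+A hedged sketch of the user lockout workflow described above; the `user_lockout` stanza shape and field names are assumptions, while the `sys/locked-users` paths follow the entries above:
+
+```shell
+# Hypothetical server-config stanza tuning lockout for userpass mounts.
+cat >> vault.hcl <<'EOF'
+user_lockout "userpass" {
+  lockout_threshold     = 5      # failed attempts before lockout (assumed field)
+  lockout_duration      = "15m"  # how long a locked user stays locked (assumed field)
+  lockout_counter_reset = "15m"  # window after which the failure count resets (assumed field)
+}
+EOF
+
+# List locked users, then unlock one, per the endpoints above.
+vault read sys/locked-users
+vault write -f sys/locked-users/<mount_accessor>/unlock/<alias_identifier>
+```
+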
+* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] +* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] +* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] +* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] +* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] +* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] +* logging: Vault agent and server commands support log file and log rotation. [[GH-18031](https://github.com/hashicorp/vault/pull/18031)] +* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] +* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable +to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour. +* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] +* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] +* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] +* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] +* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] +* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] +* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] +* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] +* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. [[GH-17289](https://github.com/hashicorp/vault/pull/17289)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade.
[[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] +* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] +* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)] +* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)] +* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)] +* secrets/azure: Adds ability to persist an application for the lifetime of a role. [[GH-19096](https://github.com/hashicorp/vault/pull/19096)] +* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] +* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] +* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] +* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] +* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] +* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when +allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] +* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] +* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. 
[[GH-18645](https://github.com/hashicorp/vault/pull/18645)] +* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] +* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] +* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] +* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] +* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] +* secrets/transit: Add an optional reference field to batch operation items +which is repeated on batch responses to help more easily correlate inputs with outputs, as sketched below. [[GH-18243](https://github.com/hashicorp/vault/pull/18243)] +* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)] +* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)] +* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)] +* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)] +* ui: Add algorithm-signer as a SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)] +* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)]
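+
+A hedged sketch of the transit batch `reference` field from the entry above; the key name and base64 plaintexts are illustrative:
+
+```shell
+# Each batch item carries a caller-chosen "reference"; per the entry above it
+# is repeated on the matching response item to correlate inputs with outputs.
+cat > payload.json <<'EOF'
+{
+  "batch_input": [
+    {"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "item-a"},
+    {"plaintext": "anVtcHMgb3ZlciB0aGUgbGF6eSBkb2c=", "reference": "item-b"}
+  ]
+}
+EOF
+vault write transit/encrypt/orders @payload.json
+```
+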
+* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] +* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] +* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)] +* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)] +* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] +* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)] +* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] +* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)] +* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)] -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.3.8 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.8) +DEPRECATIONS: -## 1.3.7 -### July 2nd, 2020 +* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated.
[[GH-19334](https://github.com/hashicorp/vault/pull/19334)] BUG FIXES: -* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values -* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9363](https://github.com/hashicorp/vault/pull/9363)] +* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] +* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)] +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)] +* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)] +* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)] +* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same, as sketched below. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix missing quotation mark in error message +* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation. +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming failing silently when replication setup happens at a bad time.
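+
+A small sketch of the kv `-mount` case fixed above, where the mount and the secret key share the same name:
+
+```shell
+# Previously failed when both the mount and the key were named "secret";
+# now reads the key "secret" from the KV mount "secret".
+vault kv get -mount=secret secret
+```
+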
+* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`. +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] +* core: Fix vault operator init command to show the right curl string with -output-curl-string and the right policy HCL with -output-policy, as sketched below [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: Linux packages now have a vendor label and set the default label to HashiCorp. +This fix is implemented for any future releases, but will not be updated for historical releases.
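+
+A small sketch of the `vault operator init` output flags fixed above; the flags come from the entry, the behaviour notes are illustrative:
+
+```shell
+# Print the equivalent curl command instead of running the request.
+vault operator init -output-curl-string
+
+# Print an example policy that would authorize the request.
+vault operator init -output-policy
+```
+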
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] +* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] +* core: fix a start up race condition where performance standbys could go into a +mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] +* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. +* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. +* license (enterprise): Fix bug where license would update even if the license didn't change. +* licensing (enterprise): update autoloaded license cache after reload +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] +* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Only report deprecation status for builtin plugins. 
[[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)] +* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] +* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)] +* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] +* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] +* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] +* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)] +* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] +* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum.
[[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)] +* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] +* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)] +* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] -## 1.3.6 (May 21st, 2020) +## 1.12.11 +### September 13, 2023 SECURITY: -* core: proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)] - -BUG FIXES: -* auth/aws: Fix token renewal issues caused by the metadata changes in 1.3.5 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)] -* replication: Fix mount filter bug that allowed replication filters to hide local mounts on a performance secondary - -## 1.3.5 (April 28th, 2020) - -CHANGES: - -* auth/aws: The default set of metadata fields added in 1.3.2 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. [[GH-22852](https://github.com/hashicorp/vault/pull/22852)] IMPROVEMENTS: -* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] - -## 1.3.4 (March 19th, 2020) - -SECURITY: - -* A vulnerability was identified in Vault and Vault Enterprise such that, under certain circumstances, an Entity's Group membership may inadvertently include Groups the Entity no longer has permissions to. 
This vulnerability, CVE-2020-10660, affects Vault and Vault Enterprise versions 0.9.0 and newer, and is fixed in 1.3.4. [[GH-8606](https://github.com/hashicorp/vault/pull/8606)] -* A vulnerability was identified in Vault Enterprise such that, under certain circumstances, existing nested-path policies may give access to Namespaces created after-the-fact. This vulnerability, CVE-2020-10661, affects Vault Enterprise versions 0.11 and newer, and is fixed in 1.3.4. - -## 1.3.3 (March 5th, 2020) +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* kmip (enterprise): reduce latency of KMIP operation handling BUG FIXES: -* approle: Fix excessive locking during tidy, which could potentially block new approle logins for long enough to cause an outage [[GH-8418](https://github.com/hashicorp/vault/pull/8418)] -* cli: Fix issue where Raft snapshots from standby nodes created an empty backup file [[GH-8097](https://github.com/hashicorp/vault/pull/8097)] -* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)] -* kmip: role read now returns tls_client_ttl -* kmip: fix panic when templateattr not provided in rekey request -* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)] -* storage/mysql: Fix potential crash when using MySQL as coordination for high availability [[GH-8300](https://github.com/hashicorp/vault/pull/8300)] -* storage/raft: Fix potential crash when using Raft as coordination for high availability [[GH-8356](https://github.com/hashicorp/vault/pull/8356)] -* ui: Fix missing License menu item [[GH-8230](https://github.com/hashicorp/vault/pull/8230)] -* ui: Fix bug where default auth method on login is defaulted to auth method that is listing-visibility=unauth instead of “other” [[GH-8218](https://github.com/hashicorp/vault/pull/8218)] -* ui: Fix bug where KMIP details were not shown in the UI Wizard [[GH-8255](https://github.com/hashicorp/vault/pull/8255)] -* ui: Show Error messages on Auth Configuration page when you hit permission errors [[GH-8500](https://github.com/hashicorp/vault/pull/8500)] -* ui: Remove duplicate form inputs for the GitHub config [[GH-8519](https://github.com/hashicorp/vault/pull/8519)] -* ui: Correct HMAC capitalization [[GH-8528](https://github.com/hashicorp/vault/pull/8528)] -* ui: Fix danger message in DR [[GH-8555](https://github.com/hashicorp/vault/pull/8555)] -* ui: Fix certificate field for LDAP config [[GH-8573](https://github.com/hashicorp/vault/pull/8573)] - -## 1.3.2 (January 22nd, 2020) +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. 
[[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -SECURITY: - * When deleting a namespace on Vault Enterprise, in certain circumstances, the deletion - process will fail to revoke dynamic secrets for a mount in that namespace. This will - leave any dynamic secrets in remote systems alive and will fail to clean them up. This - vulnerability, CVE-2020-7220, affects Vault Enterprise 0.11.0 and newer. - -IMPROVEMENTS: - * auth/aws: Add aws metadata to identity alias [[GH-7985](https://github.com/hashicorp/vault/pull/7985)] - * auth/kubernetes: Allow both names and namespaces to be set to "*" [[GH-78](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/78)] +## 1.12.10 +### August 30, 2023 -BUG FIXES: +CHANGES: -* auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* auth/ldap: Fix renewal of tokens without configured policies that are - generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* auth/okta: Fix renewal of tokens without configured policies that are - generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] -* core: Fix seal migration error when attempting to migrate from auto unseal to shamir [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] -* core: Fix seal migration config issue when migrating from auto unseal to auto unseal [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] -* plugin: Fix issue where a plugin unwrap request potentially used an expired token [[GH-8058](https://github.com/hashicorp/vault/pull/8058)] -* replication: Fix issue where a forwarded request from a performance/standby node could run into - a timeout -* secrets/database: Fix issue where a manual static role rotation could potentially panic [[GH-8098](https://github.com/hashicorp/vault/pull/8098)] -* secrets/database: Fix issue where a manual root credential rotation request is not forwarded - to the primary node [[GH-8125](https://github.com/hashicorp/vault/pull/8125)] -* secrets/database: Fix issue where a manual static role rotation request is not forwarded - to the primary node [[GH-8126](https://github.com/hashicorp/vault/pull/8126)] -* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [[GH-8040](https://github.com/hashicorp/vault/pull/8040)] -* ui: Fix deleting namespaces [[GH-8132](https://github.com/hashicorp/vault/pull/8132)] -* ui: Fix Error handler on kv-secret edit and kv-secret view pages [[GH-8133](https://github.com/hashicorp/vault/pull/8133)] -* ui: Fix OIDC callback to check storage [[GH-7929](https://github.com/hashicorp/vault/pull/7929)]. -* ui: Change `.box-radio` height to min-height to prevent overflow issues [[GH-8065](https://github.com/hashicorp/vault/pull/8065)] - -## 1.3.1 (December 18th, 2019) +* core: Bump Go version to 1.19.12. IMPROVEMENTS: -* agent: Add ability to set `exit-after-auth` via the CLI [[GH-7920](https://github.com/hashicorp/vault/pull/7920)] -* auth/ldap: Add a `request_timeout` configuration option to prevent connection - requests from hanging [[GH-7909](https://github.com/hashicorp/vault/pull/7909)] -* auth/kubernetes: Add audience to tokenreview API request for Kube deployments where issuer - is not Kube. 
[[GH-74](https://github.com/hashicorp/vault/pull/74)]
-* secrets/ad: Add a `request_timeout` configuration option to prevent connection requests from hanging [[GH-59](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/59)]
-* storage/postgresql: Add support for setting `connection_url` from environment variable `VAULT_PG_CONNECTION_URL` [[GH-7937](https://github.com/hashicorp/vault/pull/7937)]
-* telemetry: Add `enable_hostname_label` option to telemetry stanza [[GH-7902](https://github.com/hashicorp/vault/pull/7902)]
-* telemetry: Add accept header check for prometheus mime type [[GH-7958](https://github.com/hashicorp/vault/pull/7958)]
+* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
+* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase.
+* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)]
+* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)]

BUG FIXES:

-* agent: Fix issue where Agent exits before all templates are rendered when using `exit_after_auth` [[GH-7899](https://github.com/hashicorp/vault/pull/7899)]
-* auth/aws: Fixes region-related issues when using a custom `sts_endpoint` by adding an `sts_region` parameter [[GH-7922](https://github.com/hashicorp/vault/pull/7922)]
-* auth/token: Fix panic when getting batch tokens on a performance standby from a role that does not exist [[GH-8027](https://github.com/hashicorp/vault/pull/8027)]
-* core: Improve warning message for lease TTLs [[GH-7901](https://github.com/hashicorp/vault/pull/7901)]
-* identity: Fix identity token panic during invalidation [[GH-8043](https://github.com/hashicorp/vault/pull/8043)]
-* plugin: Fix a panic that could occur if a mount/auth entry was unable to mount the plugin backend and a request that required the system view to be retrieved was made [[GH-7991](https://github.com/hashicorp/vault/pull/7991)]
-* replication: Add `generate-public-key` endpoint to list of allowed endpoints for existing DR secondaries
-* secrets/gcp: Fix panic if bindings aren't provided in roleset create/update. [[GH-56](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/56)]
-* secrets/pki: Prevent generating certificate on performance standby when storing [[GH-7904](https://github.com/hashicorp/vault/pull/7904)]
-* secrets/transit: Prevent restoring keys to new names that are sub paths [[GH-7998](https://github.com/hashicorp/vault/pull/7998)]
-* storage/s3: Fix a bug in configurable S3 paths that was preventing use of S3 as a source during `operator migrate` operations [[GH-7966](https://github.com/hashicorp/vault/pull/7966)]
-* ui: Ensure secrets with a period in their key can be viewed and copied [[GH-7926](https://github.com/hashicorp/vault/pull/7926)]
-* ui: Fix status menu after demotion [[GH-7997](https://github.com/hashicorp/vault/pull/7997)]
-* ui: Fix select dropdowns in Safari when running Mojave [[GH-8023](https://github.com/hashicorp/vault/pull/8023)]
-
-## 1.3 (November 14th, 2019)
-
-CHANGES:
+* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)]
+* core (enterprise): Remove MFA Configuration for namespace when deleting namespace
+* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)]
+* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)]
+* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)]
+* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)]
+* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)]
+* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node are observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
+* replication (enterprise): Fix a bug with sync invalidation of CoreReplicatedClusterInfoPath
+* replication (enterprise): Fix a bug that could affect the atomicity of a merkle diff result, which in turn could cause the merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
+* sdk/ldaputil: Properly escape user filters when using UPN domains
+sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
+* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22332](https://github.com/hashicorp/vault/pull/22332)]
+* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute
+* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
+* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]

- * Secondary cluster activation: There has been a change to the way that activating performance and DR secondary clusters works when using public keys for encryption of the parameters rather than a wrapping token. This flow was experimental and never documented. It is now officially supported and documented but is not backwards compatible with older Vault releases.
- * Cluster cipher suites: On its cluster port, Vault will no longer advertise the full TLS 1.2 cipher suite list by default. Although this port is only used for Vault-to-Vault communication and would always pick a strong cipher, it could cause false flags on port scanners and other security utilities that assumed insecure ciphers were being used. The previous behavior can be achieved by setting the value of the (undocumented) `cluster_cipher_suites` config flag to `tls12`.
- * API/Agent Renewal behavior: The API now allows multiple options for how it deals with renewals. The legacy behavior in the Agent/API is for the renewer (now called the lifetime watcher) to exit on a renew error, leading to a reauthentication. The new default behavior is for the lifetime watcher to ignore 5XX errors and simply retry as scheduled, using the existing lease duration. It is also possible, within custom code, to disable renewals entirely, which allows the lifetime watcher to simply return when it believes it is time for your code to renew or reauthenticate.

+## 1.12.9
+### July 25, 2023

-FEATURES:
+SECURITY:

- * **Vault Debug**: A new top-level subcommand, `debug`, is added that allows operators to retrieve debugging information related to a particular Vault node. Operators can use this simple workflow to capture triaging information, which can then be consumed programmatically or by support and engineering teams. It has the ability to probe for config, host, metrics, pprof, server status, and replication status (a usage sketch follows below).
- * **Recovery Mode**: Vault server can be brought up in recovery mode to resolve outages caused by the data store being in a bad state. This is a privileged mode that allows `sys/raw` API calls to perform surgical corrections to the data store. Bad storage state can be caused by bugs. However, this is usually observed when known (and fixed) bugs are hit by older versions of Vault.
- * **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from an external source for critical security parameters. Currently an HSM that supports PKCS#11 is the only supported source.
- * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets engine, users or applications can check out a service account for use, and its password will be rotated when it's checked back in.
- * **Vault Agent Template**: Vault Agent now supports rendering templates containing Vault secrets to disk, similar to Consul Template [[GH-7652](https://github.com/hashicorp/vault/pull/7652)]
- * **Transit Key Type Support**: Signing and verification are now supported with the P-384 (secp384r1) and P-521 (secp521r1) ECDSA curves [[GH-7551](https://github.com/hashicorp/vault/pull/7551)] and encryption and decryption are now supported via AES128-GCM96 [[GH-7555](https://github.com/hashicorp/vault/pull/7555)]
- * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to require a specific header before allowing requests [[GH-7627](https://github.com/hashicorp/vault/pull/7627)]
- * **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can now be rotated, to ensure that only Vault knows the credentials it is using [[GH-7131](https://github.com/hashicorp/vault/pull/7131)]
- * **New UI Features**: The UI now supports managing users and groups for the Userpass, Cert, Okta, and Radius auth methods.
- * **Shamir with Stored Master Key**: The on-disk format for Shamir seals has changed, allowing for a secondary cluster using Shamir downstream from a primary cluster using Auto Unseal. [[GH-7694](https://github.com/hashicorp/vault/pull/7694)]
- * **Stackdriver Metrics Sink**: Vault can now send metrics to [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration documentation](https://www.vaultproject.io/docs/config/index.html) for details. [[GH-6957](https://github.com/hashicorp/vault/pull/6957)]
- * **Filtered Paths Replication (Enterprise)**: Based on the predecessor Filtered Mount Replication, Filtered Paths Replication now allows filtering of namespaces in addition to mounts. With this feature, Filtered Mount Replication should be considered deprecated.
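As a rough, hedged illustration of the `debug` subcommand described in the feature list above (the flag values and the choice of targets here are illustrative, not prescriptive):

```shell
# Capture ~2 minutes of diagnostics at 30-second intervals, restricted to
# the metrics and pprof targets; output lands in a timestamped archive.
vault debug -duration=2m -interval=30s \
    -target=metrics -target=pprof
```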
- * **Token Renewal via Accessor**: Tokens can now be renewed via the accessor value through the new `auth/token/renew-accessor` endpoint if the caller's token has permission to access that endpoint (a usage sketch appears below).
- * **Improved Integrated Storage (Beta)**: Improved raft write performance, added support for non-voter nodes, along with UI support for: using raft storage, joining a raft cluster, and downloading and restoring a snapshot.
+* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HCSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)]

-IMPROVEMENTS:
+CHANGES:

- * agent: Add ability to set the TLS SNI name used by Agent [[GH-7519](https://github.com/hashicorp/vault/pull/7519)]
- * agent & api: Change default renewer behavior to ignore 5XX errors [[GH-7733](https://github.com/hashicorp/vault/pull/7733)]
- * auth/jwt: The redirect callback host may now be specified for CLI logins [[GH-71](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/71)]
- * auth/jwt: Bound claims may now contain boolean values [[GH-73](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/73)]
- * auth/jwt: CLI logins can now open the browser when running in WSL [[GH-77](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/77)]
- * core: Exit ScanView if context has been cancelled [[GH-7419](https://github.com/hashicorp/vault/pull/7419)]
- * core: re-encrypt barrier and recovery keys if the unseal key is updated [[GH-7493](https://github.com/hashicorp/vault/pull/7493)]
- * core: Don't advertise the full set of TLS 1.2 cipher suites on the cluster port, even though only strong ciphers were used [[GH-7487](https://github.com/hashicorp/vault/pull/7487)]
- * core (enterprise): Add background seal re-wrap
- * core/metrics: Add config parameter to allow unauthenticated sys/metrics access. [[GH-7550](https://github.com/hashicorp/vault/pull/7550)]
- * metrics: Upgrade DataDog library to improve performance [[GH-7794](https://github.com/hashicorp/vault/pull/7794)]
- * replication (enterprise): Write-Ahead-Log entries will not duplicate the data belonging to the encompassing physical entries of the transaction, thereby improving performance and storage capacity.
- * replication (enterprise): Added more replication metrics
- * replication (enterprise): Reindex process now compares subpages for a more accurate indexing process.
- * replication (enterprise): Reindex API now accepts a new `skip_flush` parameter indicating all the changes should not be flushed while the tree is locked.
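A minimal sketch of the accessor-based renewal flow noted in the feature list above; the accessor value is illustrative:

```shell
# Renew a token by its accessor; the caller needs permission on the
# renew-accessor endpoint rather than possession of the token itself.
vault write auth/token/renew-accessor \
    accessor="8609694a-cdbc-db9b-d345-e782dbb562ed"
```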
- * secrets/aws: The root config can now be read [[GH-7245](https://github.com/hashicorp/vault/pull/7245)]
- * secrets/aws: Role paths may now contain the '@' character [[GH-7553](https://github.com/hashicorp/vault/pull/7553)]
- * secrets/database/cassandra: Add ability to skip verification of connection [[GH-7614](https://github.com/hashicorp/vault/pull/7614)]
- * secrets/gcp: Fix panic during rollback if the roleset has been deleted [[GH-52](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/52)]
- * storage/azure: Add config parameter to Azure storage backend to allow specifying the ARM endpoint [[GH-7567](https://github.com/hashicorp/vault/pull/7567)]
- * storage/cassandra: Improve storage efficiency by eliminating unnecessary copies of value data [[GH-7199](https://github.com/hashicorp/vault/pull/7199)]
- * storage/raft: Improve raft write performance by utilizing FSM Batching [[GH-7527](https://github.com/hashicorp/vault/pull/7527)]
- * storage/raft: Add support for non-voter nodes [[GH-7634](https://github.com/hashicorp/vault/pull/7634)]
- * sys: Add a new `sys/host-info` endpoint for querying information about the host [[GH-7330](https://github.com/hashicorp/vault/pull/7330)]
- * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling information to be extracted [[GH-7473](https://github.com/hashicorp/vault/pull/7473)]
- * sys: Add endpoint that counts the total number of active identity entities [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
- * sys: `sys/seal-status` now has a `storage_type` field denoting what type of storage the cluster is configured to use
- * sys: Add a new `sys/internal/counters/tokens` endpoint, that counts the total number of active service token accessors in the shared token storage. [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
- * sys/config: Add a new endpoint under `sys/config/state/sanitized` that returns the configuration state of the server. It excludes config values from `storage`, `ha_storage`, and `seal` stanzas and some values from `telemetry` due to potential sensitive entries in those fields.
- * ui: when using raft storage, you can now join a raft cluster, download a snapshot, and restore a snapshot from the UI [[GH-7410](https://github.com/hashicorp/vault/pull/7410)]
- * ui: clarify when secret version is deleted in the secret version history dropdown [[GH-7714](https://github.com/hashicorp/vault/pull/7714)]
+* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.

-BUG FIXES:
+IMPROVEMENTS:

- * agent: Fix a data race on the token value for inmemsink [[GH-7707](https://github.com/hashicorp/vault/pull/7707)]
- * api: Fix Go API using lease revocation via URL instead of body [[GH-7777](https://github.com/hashicorp/vault/pull/7777)]
- * api: Allow setting a function to control retry behavior [[GH-7331](https://github.com/hashicorp/vault/pull/7331)]
- * auth/gcp: Fix a bug where region information in instance groups names could cause an authorization attempt to fail [[GH-74](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/74)]
- * cli: Fix a bug where a token of an unknown format (e.g. in ~/.vault-token) could cause confusing error messages during `vault login` [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
- * cli: Fix a bug where the `namespace list` command with JSON formatting always returned an empty object [[GH-7705](https://github.com/hashicorp/vault/pull/7705)]
- * cli: Command timeouts are now always specified solely by the `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
- * core: Don't allow registering a non-root zero TTL token lease. This is purely defense in depth as the lease would be revoked immediately anyways, but there's no real reason to allow registration. [[GH-7524](https://github.com/hashicorp/vault/pull/7524)]
- * core: Correctly revoke the token that's present in the response auth from an auth/token/ request if there's partial failure during the process. [[GH-7835](https://github.com/hashicorp/vault/pull/7835)]
- * identity (enterprise): Fixed case-sensitive identity loading in secondary cluster [[GH-7327](https://github.com/hashicorp/vault/pull/7327)]
- * identity: Ensure only replication primary stores the identity case sensitivity state [[GH-7820](https://github.com/hashicorp/vault/pull/7820)]
- * raft: Fixed VAULT_CLUSTER_ADDR env being ignored at startup [[GH-7619](https://github.com/hashicorp/vault/pull/7619)]
- * secrets/pki: Don't allow duplicate SAN names in issued certs [[GH-7605](https://github.com/hashicorp/vault/pull/7605)]
- * sys/health: Pay attention to the values provided for `standbyok` and `perfstandbyok` rather than simply using their presence as a key to flip on that behavior [[GH-7323](https://github.com/hashicorp/vault/pull/7323)]
- * ui: using the `wrapped_token` query param will work with `redirect_to` and will automatically log in as intended [[GH-7398](https://github.com/hashicorp/vault/pull/7398)]
- * ui: fix an error when initializing from the UI using PGP keys [[GH-7542](https://github.com/hashicorp/vault/pull/7542)]
- * ui: show all active kv v2 secret versions even when `delete_version_after` is configured [[GH-7685](https://github.com/hashicorp/vault/pull/7685)]
- * ui: Ensure that items in the top navigation link to pages that users have access to [[GH-7590](https://github.com/hashicorp/vault/pull/7590)]
-
-## 1.2.7
-### August 27th, 2020
-
-NOTE:
-
-All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise.
+* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary
+* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling

BUG FIXES:

-* auth/aws: Made header handling for IAM authentication more robust
-
-## 1.2.6.1
-### August 21st, 2020
-### Enterprise Only
-
-NOTE:
-
-Includes correct license in the HSM binary.
+* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)]
+* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)]
+* replication (enterprise): update primary cluster address after DR failover
+* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21633](https://github.com/hashicorp/vault/pull/21633)]
+* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this fixes the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)]
+* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)]
+* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present
+* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required
+* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)]
+* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)]

-## 1.2.6
-### August 20th, 2020
+## 1.12.8
+### June 21, 2023

+BREAKING CHANGES:

-NOTE:
+* secrets/pki: Maintaining a running count of certificates will be turned off by default. To re-enable keeping these metrics available on the tidy status endpoint, enable maintain_stored_certificate_counts on tidy-config; to also publish them to the metrics consumer, enable publish_stored_certificate_count_metrics. [[GH-18186](https://github.com/hashicorp/vault/pull/18186)]

-OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
-
-KNOWN ISSUES:
+CHANGES:

-* AWS IAM logins may return an error depending on the headers sent with the request. For more details and a workaround, see the [1.2.6 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.6)
-* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+* core: Bump Go version to 1.19.10.

-## 1.2.5
-### August 20th, 2020
+FEATURES:

-SECURITY:
+* **Automated License Utilization Reporting**: Added automated license utilization reporting, which sends minimal product-license [metering data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) to HashiCorp without requiring you to manually collect and report them.
+* core (enterprise): Add background worker for automatic reporting of billing information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)]

- * When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed.
This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) - * When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) +IMPROVEMENTS: -KNOWN ISSUES: +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] -* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. -* AWS IAM logins may return an error depending on the headers sent with the request. - For more details and a workaround, see the [1.2.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.5) - BUG FIXES: -* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values -## 1.2.4 (November 7th, 2019) +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. 
[[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.12.7 +### June 08, 2023 SECURITY: - * In a non-root namespace, revocation of a token scoped to a non-root - namespace did not trigger the expected revocation of dynamic secret leases - associated with that token. As a result, dynamic secret leases in non-root - namespaces may outlive the token that created them. This vulnerability, - CVE-2019-18616, affects Vault Enterprise 0.11.0 and newer. - * Disaster Recovery secondary clusters did not delete already-replicated data - after a mount filter has been created on an upstream Performance secondary - cluster. As a result, encrypted secrets may remain replicated on a Disaster - Recovery secondary cluster after application of a mount filter excluding - those secrets from replication. This vulnerability, CVE-2019-18617, affects - Vault Enterprise 0.8 and newer. - * Update version of Go to 1.12.12 to fix Go bug golang.org/issue/34960 which - corresponds to CVE-2019-17596. +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. 
[[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)]

CHANGES:

- * auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI should provide the corresponding region via the `region` parameter (which already existed as a CLI parameter, and has now been added to Agent). The automatic region detection added to the CLI and Agent in 1.2 has been removed.
+* core: Bump Go version to 1.19.9.
+* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)]

IMPROVEMENTS:

- * cli: Ignore existing token during CLI login [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
- * core: Log proxy settings from environment on startup [[GH-7528](https://github.com/hashicorp/vault/pull/7528)]
- * core: Cache whether we've been initialized to reduce load on storage [[GH-7549](https://github.com/hashicorp/vault/pull/7549)]
+* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)]
+* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when `VAULT_PPROF_WRITE_TO_FILE=true` is set on the server (see the sketch below). [[GH-20609](https://github.com/hashicorp/vault/pull/20609)]
+* core: include namespace path in granting_policies block of audit log
+* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)]
+* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)]
+* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)]
+* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)]

BUG FIXES:

- * agent: Fix handling of gzipped responses [[GH-7470](https://github.com/hashicorp/vault/pull/7470)]
- * cli: Fix panic when pgp keys list is empty [[GH-7546](https://github.com/hashicorp/vault/pull/7546)]
- * cli: Command timeouts are now always specified solely by the `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
- * core: add hook for initializing seals for migration [[GH-7666](https://github.com/hashicorp/vault/pull/7666)]
- * core (enterprise): Migrating from one auto unseal method to another never worked on enterprise, now it does.
- * identity: Add required field `response_types_supported` to identity token `.well-known/openid-configuration` response [[GH-7533](https://github.com/hashicorp/vault/pull/7533)]
- * identity: Fixed nil pointer panic when merging entities [[GH-7712](https://github.com/hashicorp/vault/pull/7712)]
- * replication (Enterprise): Fix issue causing performance standby nodes to disconnect under high load.
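A sketch of the SIGUSR2 pprof dump mentioned in the improvements above; the config path and process lookup are illustrative:

```shell
# Opt in before startup, then signal the running server to write pprof
# profiles to disk rather than only logging goroutine stacks.
export VAULT_PPROF_WRITE_TO_FILE=true
vault server -config=/etc/vault/config.hcl &

kill -USR2 "$(pgrep -x vault)"
```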
- * secrets/azure: Fix panic that could occur if client retries timeout [[GH-7793](https://github.com/hashicorp/vault/pull/7793)]
- * secrets/database: Fix bug in combined DB secrets engine that can result in writes to static-roles endpoints timing out [[GH-7518](https://github.com/hashicorp/vault/pull/7518)]
- * secrets/pki: Improve tidy to continue when value is nil [[GH-7589](https://github.com/hashicorp/vault/pull/7589)]
- * ui (Enterprise): Allow kv v2 secrets that are gated by Control Groups to be viewed in the UI [[GH-7504](https://github.com/hashicorp/vault/pull/7504)]
-
-## 1.2.3 (September 12, 2019)
-
-FEATURES:
-
-* **Oracle Cloud (OCI) Integration**: Vault now supports using Oracle Cloud for storage, auto unseal, and authentication.
-
-IMPROVEMENTS:
-
- * auth/jwt: Groups claim matching now treats a string response as a single element list [[GH-63](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/63)]
- * auth/kubernetes: enable better support for projected tokens API by allowing user to specify issuer [[GH-65](https://github.com/hashicorp/vault/pull/65)]
- * auth/pcf: The PCF auth plugin was renamed to the CF auth plugin, maintaining full backwards compatibility [[GH-7346](https://github.com/hashicorp/vault/pull/7346)]
- * replication: Premium packages now come with unlimited performance standby nodes
-
-BUG FIXES:
+* api: Properly handle nil identity_policies in secret data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)]
+* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)]
+* cli: CLI should take days as a unit of time for ttl-like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)]
+* cli: disable printing flag warning messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)]
+* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture.
+* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero.
+* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace
+* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)]
+* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary.
+* replication (enterprise): fix bug where secondary grpc connections would time out when connecting to a primary host that no longer exists.
+* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation
+* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions.
+secrets/transit: Fix bug related to shorter dedicated HMAC key sizing.
+sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import.
[[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] - * agent: Allow batch tokens and other non-renewable tokens to be used for - agent operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)] - * auth/jwt: Fix an error where newer (v1.2) token_* configuration parameters - were not being applied to tokens generated using the OIDC login flow - [[GH-67](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/67)] - * raft: Fix an incorrect JSON tag on `leader_ca_cert` in the join request [[GH-7393](https://github.com/hashicorp/vault/pull/7393)] - * seal/transit: Allow using Vault Agent for transit seal operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)] - * storage/couchdb: Fix a file descriptor leak [[GH-7345](https://github.com/hashicorp/vault/pull/7345)] - * ui: Fix a bug where the status menu would disappear when trying to revoke a - token [[GH-7337](https://github.com/hashicorp/vault/pull/7337)] - * ui: Fix a regression that prevented input of custom items in search-select - [[GH-7338](https://github.com/hashicorp/vault/pull/7338)] - * ui: Fix an issue with the namespace picker being unable to render nested - namespaces named with numbers and sorting of namespaces in the picker - [[GH-7333](https://github.com/hashicorp/vault/pull/7333)] - -## 1.2.2 (August 15, 2019) +## 1.12.6 +### April 26, 2023 CHANGES: - * auth/pcf: The signature format has been updated to use the standard Base64 - encoding instead of the URL-safe variant. Signatures created using the - previous format will continue to be accepted [PCF-27] - * core: The http response code returned when an identity token key is not found - has been changed from 400 to 404 +* core: Bump Go version to 1.19.8. + +IMPROVEMENTS: + +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. 
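A quick sketch of the `-detailed` namespace flag from the 1.12.6 improvements above (namespaces are an Enterprise feature, and the exact output columns may vary by version):

```shell
# List child namespaces along with their IDs and custom metadata.
vault namespace list -detailed
```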
+* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur
+* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` resulting in 412 errors.
+* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)]
+* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)]
+* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute.
+* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)]
+* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)]
+* replication (enterprise): Fix a caching issue when replicating filtered data to a performance secondary. This resulted in the data being set to nil in the cache and an "invalid value" error being returned from the API.
+* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in the DR case) in the secondaries field when the known_secondaries field is nil
+* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)]
+* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens
+* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)]
+* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)]
+* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)]
+* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)]

+## 1.12.5
+### March 29, 2023

SECURITY:

* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)]
* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)]
* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)]

- * auth/approle: Fix an error where an empty `token_type` string was not being correctly handled as `TokenTypeDefault` [[GH-7273](https://github.com/hashicorp/vault/pull/7273)]
- * auth/radius: Fix panic when logging in [[GH-7286](https://github.com/hashicorp/vault/pull/7286)]
- * ui: the string-list widget will now honor multiline input [[GH-7254](https://github.com/hashicorp/vault/pull/7254)]
- * ui: various visual bugs in the KV interface were addressed [[GH-7307](https://github.com/hashicorp/vault/pull/7307)]
- * ui: fixed incorrect URL to access help in LDAP auth [[GH-7299](https://github.com/hashicorp/vault/pull/7299)]
-
-## 1.2.1 (August 6th, 2019)

IMPROVEMENTS:

+* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id
+website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)]
+* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)]
+* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)]

BUG FIXES:

- * agent: Fix a panic on creds pulling in some error conditions in `aws` and `alicloud` auth methods [[GH-7238](https://github.com/hashicorp/vault/pull/7238)]
- * auth/approle: Fix error reading role-id on a role created pre-1.2 [[GH-7231](https://github.com/hashicorp/vault/pull/7231)]
- * auth/token: Fix sudo check in non-root namespaces on create [[GH-7224](https://github.com/hashicorp/vault/pull/7224)]
- * core: Fix health checks with perfstandbyok=true returning the wrong status code [[GH-7240](https://github.com/hashicorp/vault/pull/7240)]
- * ui: The web CLI will now parse input as a shell string, with special characters escaped [[GH-7206](https://github.com/hashicorp/vault/pull/7206)]
- * ui: The UI will now redirect to a page after authentication [[GH-7088](https://github.com/hashicorp/vault/pull/7088)]
- * ui (Enterprise): The list of namespaces is now cleared when logging out [[GH-7186](https://github.com/hashicorp/vault/pull/7186)]
+* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
+* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error.
+* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)]
+* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects.
+* kmip (enterprise): Fix a problem forwarding some requests to the active node.
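A sketch of the optional GitHub token environment variable from the 1.12.5 improvements above; the token value and organization name are illustrative:

```shell
# Let Vault make authenticated GitHub API calls when it resolves the
# organization ID, avoiding anonymous rate limits.
export VAULT_AUTH_CONFIG_GITHUB_TOKEN="ghp_exampletoken123"
vault write auth/github/config organization=my-org
```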
+* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] + +## 1.12.4 +### March 01, 2023 -## 1.2.0 (July 30th, 2019) +SECURITY: +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] CHANGES: - * Token store roles use new, common token fields for the values - that overlap with other auth backends. `period`, `explicit_max_ttl`, and - `bound_cidrs` will continue to work, with priority being given to the - `token_` prefixed versions of those parameters. They will also be returned - when doing a read on the role if they were used to provide values initially; - however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no - longer be returned. (`explicit_max_ttl` was already not returned if empty.) - * Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now - stricter about what characters it will accept in path names. Whereas before - it would filter out unprintable characters (and this could be turned off), - control characters and other invalid characters are now rejected within Go's - HTTP library before the request is passed to Vault, and this cannot be - disabled. To continue using these (e.g. for already-written paths), they - must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes - `%00`, and so on). - * The user-configured regions on the AWSKMS seal stanza will now be preferred - over regions set in the enclosing environment. This is a _breaking_ change. - * All values in audit logs now are omitted if they are empty. This helps - reduce the size of audit log entries by not reproducing keys in each entry - that commonly don't contain any value, which can help in cases where audit - log entries are above the maximum UDP packet size and others. - * Both PeriodicFunc and WALRollback functions will be called if both are - provided. Previously WALRollback would only be called if PeriodicFunc was - not set. See [[GH-6717](https://github.com/hashicorp/vault/pull/6717)] for - details. - * Vault now uses Go's official dependency management system, Go Modules, to - manage dependencies. As a result to both reduce transitive dependencies for - API library users and plugin authors, and to work around various conflicts, - we have moved various helpers around, mostly under an `sdk/` submodule. 
A couple of functions have also moved from plugin helper code to the `api/` submodule. If you are a plugin author, take a look at some of our official plugins and the paths they are importing for guidance.
- * AppRole uses new, common token fields for values that overlap with other auth backends. `period` and `policies` will continue to work, with priority being given to the `token_` prefixed versions of those parameters. They will also be returned when doing a read on the role if they were used to provide values initially.
- * In AppRole, `"default"` is no longer automatically added to the `policies` parameter. This was a no-op since it would always be added anyways by Vault's core; however, this can now be explicitly disabled with the new `token_no_default_policy` field.
- * In AppRole, `bound_cidr_list` is no longer returned when reading a role
- * rollback: Rollback will no longer display log messages when it runs; it will only display messages on error.
- * Database plugins will now default to 4 `max_open_connections` rather than 2.

-FEATURES:

- * **Integrated Storage**: Vault 1.2 includes a _tech preview_ of a new way to manage storage directly within a Vault cluster. This new integrated storage solution is based on the Raft protocol which is also used to back HashiCorp Consul and HashiCorp Nomad.
- * **Combined DB credential rotation**: Alternative mode for the Combined DB Secret Engine to automatically rotate existing database account credentials and set Vault as the source of truth for credentials.
- * **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant ID tokens. These customizable tokens allow encapsulating a signed, verifiable snapshot of identity information and metadata. They can be used by other applications, even those without Vault authorization, as a way of establishing identity based on a Vault entity.
- * **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud Foundry certificates for Vault authentication.
- * **ElasticSearch database plugin**: New ElasticSearch database plugin issues unique, short-lived ElasticSearch credentials.
- * **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP Users and Groups have been added.
- * **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as an HA storage backend.
- * **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP Server, seamlessly brokering cryptographic operations for traditional infrastructure.
- * Common Token Fields: Auth methods now use common fields for controlling token behavior, making it easier to understand configuration across methods.
- * **Vault API explorer**: The Vault UI now includes an embedded API explorer where you can browse the endpoints available to you and make requests. To try it out, open the Web CLI and type `api`.
+* core: Bump Go version to 1.19.6.
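Illustrating the database-plugin connection default called out in the 1.2.0 changes above: the new default of 4 can still be overridden per connection. The connection name, URL, and credentials below are illustrative:

```shell
# Configure a database connection with an explicit connection-pool size.
vault write database/config/my-postgres \
    plugin_name=postgresql-database-plugin \
    connection_url="postgresql://{{username}}:{{password}}@127.0.0.1:5432/appdb" \
    allowed_roles="*" \
    username="vault" password="example-password" \
    max_open_connections=8
```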
IMPROVEMENTS:

- * agent: Allow EC2 nonce to be passed in [[GH-6953](https://github.com/hashicorp/vault/pull/6953)]
- * agent: Add optional `namespace` parameter, which sets the default namespace for the auto-auth functionality [[GH-6988](https://github.com/hashicorp/vault/pull/6988)]
- * agent: Add cert auto-auth method [[GH-6652](https://github.com/hashicorp/vault/pull/6652)]
- * api: Add support for passing data to delete operations via `DeleteWithData` [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
- * audit/file: Dramatically speed up file operations by changing locking/marshaling order [[GH-7024](https://github.com/hashicorp/vault/pull/7024)]
- * auth/jwt: A JWKS endpoint may now be configured for signature verification [[GH-43](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/43)]
- * auth/jwt: A new `verbose_oidc_logging` role parameter has been added to help troubleshoot OIDC configuration [[GH-57](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/57)]
- * auth/jwt: `bound_claims` will now match received claims that are lists if any element of the list is one of the expected values [[GH-50](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/50)]
- * auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew leeway [[GH-53](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/53)]
- * auth/kubernetes: Allow service names/namespaces to be configured as globs [[GH-58](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/58)]
- * auth/token: Allow the token backend to support the identity system via token roles [[GH-6267](https://github.com/hashicorp/vault/pull/6267)]
- * auth/token: Add a large set of token configuration options to token store roles [[GH-6662](https://github.com/hashicorp/vault/pull/6662)]
- * cli: `path-help` now allows `-format=json` to be specified, which will output OpenAPI [[GH-7006](https://github.com/hashicorp/vault/pull/7006)]
- * cli: Add support for passing parameters to `vault delete` operations [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
- * cli: Add a log-format CLI flag that can specify either "standard" or "json" for the log format for the `vault server` command. [[GH-6840](https://github.com/hashicorp/vault/pull/6840)]
- * cli: Add `-dev-no-store-token` to allow dev servers to not store the generated token at the tokenhelper location [[GH-7104](https://github.com/hashicorp/vault/pull/7104)]
- * identity: Allow a group alias' canonical ID to be modified
- * namespaces: Namespaces can now be created and deleted from performance replication secondaries
- * plugins: Change the default for `max_open_connections` for DB plugins to 4 [[GH-7093](https://github.com/hashicorp/vault/pull/7093)]
- * replication: Client TLS authentication is now supported when enabling or updating a replication secondary
- * secrets/database: Cassandra operations will now cancel on client timeout [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
- * secrets/kv: Add optional `delete_version_after` parameter, which takes a duration and can be set on the mount and/or the metadata for a specific key [[GH-7005](https://github.com/hashicorp/vault/pull/7005)]
- * storage/postgres: LIST now performs better on large datasets [[GH-6546](https://github.com/hashicorp/vault/pull/6546)]
- * storage/s3: A new `path` parameter allows selecting the path within a bucket for Vault data [[GH-7157](https://github.com/hashicorp/vault/pull/7157)]
- * ui: KV v1 and v2 will now gracefully degrade allowing a write without read workflow in the UI [[GH-6570](https://github.com/hashicorp/vault/pull/6570)]
- * ui: Many visual improvements with the addition of Toolbars [[GH-6626](https://github.com/hashicorp/vault/pull/6626)], the restyling of the Confirm Action component [[GH-6741](https://github.com/hashicorp/vault/pull/6741)], and using a new set of glyphs for our Icon component [[GH-6736](https://github.com/hashicorp/vault/pull/6736)]
- * ui: Lazy loading parts of the application so that the total initial payload is smaller [[GH-6718](https://github.com/hashicorp/vault/pull/6718)]
- * ui: Tabbing to auto-complete in filters will first complete a common prefix if there is one [[GH-6759](https://github.com/hashicorp/vault/pull/6759)]
- * ui: Removing jQuery from the application makes the initial JS payload smaller [[GH-6768](https://github.com/hashicorp/vault/pull/6768)]
+* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)]
+* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)]

BUG FIXES:

- * audit: Log requests and responses due to invalid wrapping token provided [[GH-6541](https://github.com/hashicorp/vault/pull/6541)]
- * audit: Fix bug preventing request counter queries from working with auditing enabled [[GH-6767](https://github.com/hashicorp/vault/pull/6767)]
- * auth/aws: AWS Roles are now upgraded and saved to the latest version just after the AWS credential plugin is mounted. [[GH-7025](https://github.com/hashicorp/vault/pull/7025)]
- * auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)]
- * auth/aws: Fix an error complaining about a read-only view that could occur during updating of a role when on a performance replication secondary [[GH-6926](https://github.com/hashicorp/vault/pull/6926)]
- * auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id for OIDC logins [[GH-54](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/54)]
- * auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server response is empty [[GH-55](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/55)]
- * auth/jwt: Fix issue where OIDC logins might intermittently fail when using performance standbys [[GH-61](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/61)]
- * identity: Fix a case where modifying aliases of an entity could end up moving the entity into the wrong namespace
- * namespaces: Fix a behavior (currently only known to be benign) where we wouldn't delete policies through the official functions before wiping the namespaces on deletion
- * secrets/database: Escape username/password before using in connection URL [[GH-7089](https://github.com/hashicorp/vault/pull/7089)]
- * secrets/pki: Forward revocation requests to active node when on a performance standby [[GH-7173](https://github.com/hashicorp/vault/pull/7173)]
- * ui: Fix timestamp on some transit keys [[GH-6827](https://github.com/hashicorp/vault/pull/6827)]
- * ui: Show Entities and Groups in Side Navigation [[GH-7138](https://github.com/hashicorp/vault/pull/7138)]
- * ui: Ensure dropdown updates selected item on HTTP Request Metrics page
-
-## 1.1.4/1.1.5 (July 25th/30th, 2019)
-
-NOTE:
-
-Although 1.1.4 was tagged, we realized very soon after the tag was publicly pushed that an intended fix was accidentally left out. As a result, 1.1.4 was not officially announced and 1.1.5 should be used as the release after 1.1.3.
-
-IMPROVEMENTS:
-
- * identity: Allow a group alias' canonical ID to be modified
- * namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)]
- * namespaces: Namespaces can now be created and deleted from performance replication secondaries
-
-BUG FIXES:
-
- * api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)]
- * auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)]
- * auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)]
- * auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)]
- * core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)]
- * core: Fix an issue that may cause key upgrades to not be cleaned up properly [[GH-6949](https://github.com/hashicorp/vault/pull/6949)]
- * core: Don't shutdown if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)]
- * core: Fix panic caused by handling requests while vault is inactive
- * identity: Fix reading entity and groups that have spaces in their names [[GH-7055](https://github.com/hashicorp/vault/pull/7055)]
- * identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)]
- * mfa: Fix a nil pointer panic that could occur if invalid Duo credentials were supplied
- * replication: Forward step-down on perf standbys to match HA behavior
- * replication: Fix various read only storage errors on performance standbys
- * replication: Stop forwarding before stopping replication to eliminate some possible bad states
- * secrets/database: Allow cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
- * storage/consul: Fix a regression causing vault to not connect to consul over unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)]
- * ui: Fix saving of TTL and string array fields generated by Open API [[GH-7094](https://github.com/hashicorp/vault/pull/7094)]
-
-## 1.1.3 (June 5th, 2019)
+* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
+* core (enterprise): Fix panic when using invalid accessor for control-group request
+* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time.
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)]
+* license (enterprise): Fix bug where license would update even if the license didn't change.
+* replication (enterprise): Fix bug where reloading external plugin on a secondary would break replication.
+* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update.
[[GH-18207](https://github.com/hashicorp/vault/pull/18207)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* server/config: Use file.Stat when checking file permissions while VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled (see the usage sketch below) [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - * identity: Allow a group alias' canonical ID to be modified - * namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)] - * namespaces: Namespaces can now be created and deleted from performance - replication secondaries +## 1.12.3 +### February 6, 2023 -BUG FIXES: +CHANGES: - * api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)] - * auth/aws: Fix a case where a panic could stem from a malformed assumed-role - ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] - * auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from - a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)] - * auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] - * core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)] - * core: Fix an issue that may cause key upgrades to not be cleaned up properly - [[GH-6949](https://github.com/hashicorp/vault/pull/6949)] - * core: Don't shutdown if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)] - * core: Fix panic caused by handling requests while vault is inactive - * identity: Fix reading entity and groups that have spaces in their names - [[GH-7055](https://github.com/hashicorp/vault/pull/7055)] - * identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)] - * mfa: Fix a nil pointer panic that could occur if invalid Duo credentials - were supplied - * replication: Forward step-down on perf standbys to match HA behavior - * replication: Fix various read only storage errors on performance standbys - * replication: Stop forwarding before stopping replication to eliminate some - possible bad states - * secrets/database: Allow cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)] - * storage/consul: Fix a regression causing vault to not connect to consul over - unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)] - * ui: Fix saving of TTL and string array fields generated by Open API [[GH-7094](https://github.com/hashicorp/vault/pull/7094)] - -## 1.1.3 (June 5th, 2019) +* core: Bump Go version to 1.19.4.
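+
+A minimal, illustrative sketch of the file permission check referenced in the `server/config` fix above; the config path is a placeholder:
+
+```shell
+# Opt in to Vault's file permission checking (placeholder config path).
+export VAULT_ENABLE_FILE_PERMISSIONS_CHECK=true
+vault server -config=/etc/vault.d/vault.hcl
+```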
IMPROVEMENTS: - * agent: Now supports proxying request query parameters [[GH-6772](https://github.com/hashicorp/vault/pull/6772)] - * core: Mount table output now includes a UUID indicating the storage path [[GH-6633](https://github.com/hashicorp/vault/pull/6633)] - * core: HTTP server timeout values are now configurable [[GH-6666](https://github.com/hashicorp/vault/pull/6666)] - * replication: Improve performance of the reindex operation on secondary clusters - when mount filters are in use - * replication: Replication status API now returns the state and progress of a reindex - -BUG FIXES: - - * api: Return the Entity ID in the secret output [[GH-6819](https://github.com/hashicorp/vault/pull/6819)] - * auth/jwt: Consider bound claims when considering if there is at least one - bound constraint [[GH-49](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/49)] - * auth/okta: Fix handling of group names containing slashes [[GH-6665](https://github.com/hashicorp/vault/pull/6665)] - * cli: Add deprecated stored-shares flag back to the init command [[GH-6677](https://github.com/hashicorp/vault/pull/6677)] - * cli: Fix a panic when the KV command would return no data [[GH-6675](https://github.com/hashicorp/vault/pull/6675)] - * cli: Fix issue causing CLI list operations to not return proper format when - there is an empty response [[GH-6776](https://github.com/hashicorp/vault/pull/6776)] - * core: Correctly honor non-HMAC request keys when auditing requests [[GH-6653](https://github.com/hashicorp/vault/pull/6653)] - * core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of - endpoints [[GH-6654](https://github.com/hashicorp/vault/pull/6654)] - * core: Fix issue where some OpenAPI parameters were incorrectly listed as - being sent as a header [[GH-6679](https://github.com/hashicorp/vault/pull/6679)] - * core: Fix issue that would allow duplicate mount names to be used [[GH-6771](https://github.com/hashicorp/vault/pull/6771)] - * namespaces: Fix behavior when using `root` instead of `root/` as the - namespace header value - * pki: fix a panic when a client submits a null value [[GH-5679](https://github.com/hashicorp/vault/pull/5679)] - * replication: Properly update mount entry cache on a secondary to apply all - new values after a tune - * replication: Properly close connection on bootstrap error - * replication: Fix an issue causing startup problems if a namespace policy - wasn't replicated properly - * replication: Fix longer than necessary WAL replay during an initial reindex - * replication: Fix error during mount filter invalidation on DR secondary clusters - * secrets/ad: Make time buffer configurable [AD-35] - * secrets/gcp: Check for nil config when getting credentials [[GH-35](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/35)] - * secrets/gcp: Fix error checking in some cases where the returned value could - be 403 instead of 404 [[GH-37](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/37)] - * secrets/gcpkms: Disable key rotation when deleting a key [[GH-10](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/10)] - * storage/consul: recognize `https://` address even if schema not specified - [[GH-6602](https://github.com/hashicorp/vault/pull/6602)] - * storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) - could cause constant switching of the active node [[GH-6637](https://github.com/hashicorp/vault/pull/6637)] - * storage/dynamodb: Eliminate a high-CPU condition that could occur if an - error was received 
from the DynamoDB API [[GH-6640](https://github.com/hashicorp/vault/pull/6640)] - * storage/gcs: Correctly use configured chunk size values [[GH-6655](https://github.com/hashicorp/vault/pull/6655)] - * storage/mssql: Use the correct database when pre-created schemas exist - [[GH-6356](https://github.com/hashicorp/vault/pull/6356)] - * ui: Fix issue with select arrows on drop down menus [[GH-6627](https://github.com/hashicorp/vault/pull/6627)] - * ui: Fix an issue where sensitive input values weren't being saved to the - server [[GH-6586](https://github.com/hashicorp/vault/pull/6586)] - * ui: Fix web cli parsing when using quoted values [[GH-6755](https://github.com/hashicorp/vault/pull/6755)] - * ui: Fix a namespace workflow mapping identities from external namespaces by - allowing arbitrary input in search-select component [[GH-6728](https://github.com/hashicorp/vault/pull/6728)] - -## 1.1.2 (April 18th, 2019) - -This is a bug fix release containing the two items below. It is otherwise -unchanged from 1.1.1. +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
[[GH-18787](https://github.com/hashicorp/vault/pull/18787)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * auth/okta: Fix a potential dropped error [[GH-6592](https://github.com/hashicorp/vault/pull/6592)] - * secrets/kv: Fix a regression on upgrade where a KVv2 mount could fail to be - mounted on unseal if it had previously been mounted but not written to - [[GH-31](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/31)] - -## 1.1.1 (April 11th, 2019) - -SECURITY: - - * Given: (a) performance replication is enabled; (b) performance standbys are - in use on the performance replication secondary cluster; and (c) mount - filters are in use, if a mount that was previously available to a secondary - is updated to be filtered out, although the data would be removed from the - secondary cluster, the in-memory cache of the data would not be purged on - the performance standby nodes. As a result, the previously-available data - could still be read from memory if it was ever read from disk, and if this - included mount configuration data this could result in token or lease - issuance. The issue is fixed in this release; in prior releases either an - active node changeover (such as a step-down) or a restart of the standby - nodes is sufficient to cause the performance standby nodes to clear their - cache. A CVE is in the process of being issued; the number is - CVE-2019-11075. - * Roles in the JWT Auth backend using the OIDC login flow (i.e. role_type of - “oidc”) were not enforcing bound_cidrs restrictions, if any were configured - for the role. This issue did not affect roles of type “jwt”. +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. 
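+
+A minimal sketch of the `token_bound_cidrs` usage covered by the AppRole fix above; the role name, policy, and CIDR are placeholders:
+
+```shell
+# Bind issued tokens to a single host via a /32 block (illustrative values).
+vault write auth/approle/role/example-role \
+    token_policies="default" \
+    token_bound_cidrs="10.0.0.5/32"
+```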
+* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] +* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] +* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +## 1.12.2 +### November 30, 2022 CHANGES: - * auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [[GH-38](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/38)] - * core/acl: New ordering defines which policy wins when there are multiple - inexact matches and at least one path contains `+`. `+*` is now illegal in - policy paths. The previous behavior simply selected any matching - segment-wildcard path that matched.
[[GH-6532](https://github.com/hashicorp/vault/pull/6532)] - * replication: Due to technical limitations, mounting and unmounting was not - previously possible from a performance secondary. These have been resolved, - and these operations may now be run from a performance secondary. +* core: Bump Go version to 1.19.3. +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] IMPROVEMENTS: - * agent: Allow AppRole auto-auth without a secret-id [[GH-6324](https://github.com/hashicorp/vault/pull/6324)] - * auth/gcp: Cache clients to improve performance and reduce open file usage - * auth/jwt: Bound claims validation will now allow matching the received - claims against a list of expected values [[GH-41](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/41)] - * secret/gcp: Cache clients to improve performance and reduce open file usage - * replication: Mounting/unmounting/remounting/mount-tuning is now supported - from a performance secondary cluster - * ui: Support for authentication via the RADIUS auth method [[GH-6488](https://github.com/hashicorp/vault/pull/6488)] - * ui: Navigating away from secret list view will clear any page-specific - filter that was applied [[GH-6511](https://github.com/hashicorp/vault/pull/6511)] - * ui: Improved the display when OIDC auth errors [[GH-6553](https://github.com/hashicorp/vault/pull/6553)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* storage/raft: Add `retry_join_as_non_voter` config option.
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] BUG FIXES: - * agent: Allow auto-auth to be used with caching without having to define any - sinks [[GH-6468](https://github.com/hashicorp/vault/pull/6468)] - * agent: Disallow some nonsensical config file combinations [[GH-6471](https://github.com/hashicorp/vault/pull/6471)] - * auth/ldap: Fix CN check not working if CN was not all in uppercase [[GH-6518](https://github.com/hashicorp/vault/pull/6518)] - * auth/jwt: The CLI helper for OIDC logins will now open the browser to the correct - URL when running on Windows [[GH-37](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/37)] - * auth/jwt: Fix OIDC login issue where configured TLS certs weren't being used [[GH-40](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/40)] - * auth/jwt: Fix an issue where the `oidc_scopes` parameter was not being included in - the response to a role read request [[GH-35](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/35)] - * core: Fix seal migration case when migrating to Shamir and a seal block - wasn't explicitly specified [[GH-6455](https://github.com/hashicorp/vault/pull/6455)] - * core: Fix unwrapping when using namespaced wrapping tokens [[GH-6536](https://github.com/hashicorp/vault/pull/6536)] - * core: Fix incorrect representation of required properties in OpenAPI output - [[GH-6490](https://github.com/hashicorp/vault/pull/6490)] - * core: Fix deadlock that could happen when using the UI [[GH-6560](https://github.com/hashicorp/vault/pull/6560)] - * identity: Fix updating groups removing existing members [[GH-6527](https://github.com/hashicorp/vault/pull/6527)] - * identity: Properly invalidate group alias in performance secondary [[GH-6564](https://github.com/hashicorp/vault/pull/6564)] - * identity: Use namespace context when loading entities and groups to ensure - merging of duplicate entries works properly [[GH-6563](https://github.com/hashicorp/vault/pull/6563)] - * replication: Fix performance standby election failure [[GH-6561](https://github.com/hashicorp/vault/pull/6561)] - * replication: Fix mount filter invalidation on performance standby nodes - * replication: Fix license reloading on performance standby nodes - * replication: Fix handling of control groups on performance standby nodes - * replication: Fix some forwarding scenarios with request bodies using - performance standby nodes [[GH-6538](https://github.com/hashicorp/vault/pull/6538)] - * secret/gcp: Fix roleset binding when using JSON [[GH-27](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/27)] - * secret/pki: Use `uri_sans` param when not using CSR parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)] - * storage/dynamodb: Fix a race condition possible in HA configurations that could - leave the cluster without a leader [[GH-6512](https://github.com/hashicorp/vault/pull/6512)] - * ui: Fix an issue where in production builds OpenAPI model generation was - failing, causing any form using it to render labels with missing fields [[GH-6474](https://github.com/hashicorp/vault/pull/6474)] - * ui: Fix issue nav-hiding when moving between namespaces [[GH-6473](https://github.com/hashicorp/vault/pull/6473)] - * ui: Secrets will always show in the nav regardless of access to cubbyhole [[GH-6477](https://github.com/hashicorp/vault/pull/6477)] - * ui: fix SSH OTP generation [[GH-6540](https://github.com/hashicorp/vault/pull/6540)] - * ui: add polyfill to load UI in IE11 [[GH-6567](https://github.com/hashicorp/vault/pull/6567)] - *
ui: Fix issue where some elements would fail to work properly if using ACLs - with segment-wildcard paths (`/+/` segments) [[GH-6525](https://github.com/hashicorp/vault/pull/6525)] - -## 1.1.0 (March 18th, 2019) - -CHANGES: - - * auth/jwt: The `groups_claim_delimiter_pattern` field has been removed. If the - groups claim is not at the top level, it can now be specified as a - [JSONPointer](https://tools.ietf.org/html/rfc6901). - * auth/jwt: Roles now have a "role type" parameter with a default type of - "oidc". To configure new JWT roles, a role type of "jwt" must be explicitly - specified. - * cli: CLI commands deprecated in 0.9.2 are now removed. Please see the CLI - help/warning output in previous versions of Vault for updated commands. - * core: Vault no longer automatically mounts a K/V backend at the "secret/" - path when initializing Vault - * core: Vault's cluster port will now be open at all times on HA standby nodes - * plugins: Vault no longer supports running netRPC plugins. These were - deprecated in favor of gRPC based plugins and any plugin built since 0.9.4 - defaults to gRPC. Older plugins may need to be recompiled against the latest - Vault dependencies. - -FEATURES: +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] +* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. 
[[GH-17693](https://github.com/hashicorp/vault/pull/17693)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - * **Vault Agent Caching**: Vault Agent can now be configured to act as a - caching proxy to Vault. Clients can send requests to Vault Agent and the - request will be proxied to the Vault server and cached locally in Agent. - Currently Agent will cache generated leases and tokens and keep them - renewed. The proxy can also use the Auto Auth feature so clients do not need - to authenticate to Vault, but rather can make requests to Agent and have - Agent fully manage token lifecycle. - * **OIDC Redirect Flow Support**: The JWT auth backend now supports OIDC - roles. These allow authentication via an OIDC-compliant provider via the - user's browser. The login may be initiated from the Vault UI or through - the `vault login` command. - * **ACL Path Wildcard**: ACL paths can now use the `+` character to enable - wild card matching for a single directory in the path definition. - * **Transit Auto Unseal**: Vault can now be configured to use the Transit - Secret Engine in another Vault cluster as an auto unseal provider. +## 1.12.1 +### November 2, 2022 IMPROVEMENTS: - * auth/jwt: A default role can be set. It will be used during JWT/OIDC logins if - a role is not specified. - * auth/jwt: Arbitrary claims data can now be copied into token & alias metadata. - * auth/jwt: An arbitrary set of bound claims can now be configured for a role. - * auth/jwt: The name "oidc" has been added as an alias for the jwt backend. Either - name may be specified in the `auth enable` command. - * command/server: A warning will be printed when 'tls_cipher_suites' includes a - blacklisted cipher suite or all cipher suites are blacklisted by the HTTP/2 - specification [[GH-6300](https://github.com/hashicorp/vault/pull/6300)] - * core/metrics: Prometheus pull support using a new sys/metrics endpoint. [[GH-5308](https://github.com/hashicorp/vault/pull/5308)] - * core: On non-windows platforms a SIGUSR2 will make the server log a dump of - all running goroutines' stack traces for debugging purposes [[GH-6240](https://github.com/hashicorp/vault/pull/6240)] - * replication: The initial replication indexing process on newly initialized or upgraded - clusters now runs asynchronously - * sentinel: Add token namespace id and path, available in rules as - token.namespace.id and token.namespace.path - * ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms. - This means, it will not be necessary to add fields on the go and JS sides in the future. - [[GH-6209](https://github.com/hashicorp/vault/pull/6209)] +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* plugins: Add plugin version information to key plugin lifecycle log lines. 
[[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] BUG FIXES: - * auth/jwt: Apply `bound_claims` validation across all login paths - * auth/jwt: Update `bound_audiences` validation during non-OIDC logins to accept - any matched audience, as documented and handled in OIDC logins [[GH-30](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/30)] - * auth/token: Fix issue where empty values for token role update call were - ignored [[GH-6314](https://github.com/hashicorp/vault/pull/6314)] - * core: The `operator migrate` command will no longer hang on empty key names - [[GH-6371](https://github.com/hashicorp/vault/pull/6371)] - * identity: Fix a panic at login when external group has a nil alias [[GH-6230](https://github.com/hashicorp/vault/pull/6230)] - * namespaces: Clear out identity store items upon namespace deletion - * replication/perfstandby: Fixed a bug causing performance standbys to wait - longer than necessary after forwarding a write to the active node - * replication/mountfilter: Fix a deadlock that could occur when mount filters - were updated [[GH-6426](https://github.com/hashicorp/vault/pull/6426)] - * secret/kv: Fix issue where a v1→v2 upgrade could run on a performance - standby when using a local mount - * secret/ssh: Fix for a bug where attempting to delete the last ssh role - in the zeroaddress configuration could fail [[GH-6390](https://github.com/hashicorp/vault/pull/6390)] - * secret/totp: Uppercase provided keys so they don't fail base32 validation - [[GH-6400](https://github.com/hashicorp/vault/pull/6400)] - * secret/transit: Multiple HMAC, Sign or Verify operations can now be - performed with one API call using the new `batch_input` parameter [[GH-5875](https://github.com/hashicorp/vault/pull/5875)] - * sys: `sys/internal/ui/mounts` will no longer return secret or auth mounts - that have been filtered. Similarly, `sys/internal/ui/mount/:path` will - return an error response if a filtered mount path is requested. [[GH-6412](https://github.com/hashicorp/vault/pull/6412)] - * ui: Fix for a bug where you couldn't access the data tab after clicking on - wrap details on the unwrap page [[GH-6404](https://github.com/hashicorp/vault/pull/6404)] - * ui: Fix an issue where the policies tab was erroneously hidden [[GH-6301](https://github.com/hashicorp/vault/pull/6301)] - * ui: Fix encoding issues with kv interfaces [[GH-6294](https://github.com/hashicorp/vault/pull/6294)] - -## 1.0.3.1 (March 14th, 2019) (Enterprise Only) +* cli: Remove empty table heading for `vault secrets list -detailed` output.
[[GH-17577](https://github.com/hashicorp/vault/pull/17577)] +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. +* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] + +## 1.12.0 +### October 13, 2022 SECURITY: - * A regression was fixed in replication mount filter code introduced in Vault - 1.0 that caused the underlying filtered data to be replicated to - secondaries. This data was not accessible to users via Vault's API but via a - combination of privileged configuration file changes/Vault commands it could - be read. Upgrading to this version or 1.1 will fix this issue and cause the - replicated data to be deleted from filtered secondaries. More information - was sent to customer contacts on file. - -## 1.0.3 (February 12th, 2019) +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] CHANGES: - * New AWS authentication plugin mounts will default to using the generated - role ID as the Identity alias name. This applies to both EC2 and IAM auth. - Existing mounts that explicitly set this value will not be affected but - mounts that specified no preference will switch over on upgrade. - * The default policy now allows a token to look up its associated identity - entity either by name or by id [[GH-6105](https://github.com/hashicorp/vault/pull/6105)] - * The Vault UI's navigation and onboarding wizard now only displays items that - are permitted in a users' policy [[GH-5980](https://github.com/hashicorp/vault/pull/5980), [GH-6094](https://github.com/hashicorp/vault/pull/6094)] - * An issue was fixed that caused recovery keys to not work on secondary - clusters when using a different unseal mechanism/key than the primary. This - would be hit if the cluster was rekeyed or initialized after 1.0. 
We recommend - rekeying the recovery keys on the primary cluster if you meet the above - requirements. +* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] +* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] +* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] +* core: Bump Go version to 1.19.2. +* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] +* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] +* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. +* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] +* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `plugin list` now accepts a `-detailed` flag, which displays deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] +* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] +* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins.
[[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] +* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] FEATURES: - * **cURL Command Output**: CLI commands can now use the `-output-curl-string` - flag to print out an equivalent cURL command. - * **Response Headers From Plugins**: Plugins can now send back headers that - will be included in the response to a client. The set of allowed headers can - be managed by the operator. +* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys +* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] +* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] +* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a standalone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] +* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)] +* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] +* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations +* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature +* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] +* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified.
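+
+A minimal sketch of the plugin versioning workflow introduced above, assuming a plugin binary already present in the configured plugin directory; the plugin name and SHA-256 value are placeholders:
+
+```shell
+# Register a semantically versioned plugin, then inspect it (illustrative values).
+vault plugin register -sha256="$PLUGIN_SHA256" -version=v1.0.0 secret my-secrets-plugin
+vault plugin list -detailed secret
+```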
IMPROVEMENTS: - * auth/aws: AWS EC2 authentication can optionally create entity aliases by - role ID [[GH-6133](https://github.com/hashicorp/vault/pull/6133)] - * auth/jwt: The supported set of signing algorithms is now configurable [JWT - plugin [GH-16](https://github.com/hashicorp/vault/pull/16)] - * core: When starting from an uninitialized state, HA nodes will now attempt - to auto-unseal using a configured auto-unseal mechanism after the active - node initializes Vault [[GH-6039](https://github.com/hashicorp/vault/pull/6039)] - * secret/database: Add socket keepalive option for Cassandra [[GH-6201](https://github.com/hashicorp/vault/pull/6201)] - * secret/ssh: Add signed key constraints, allowing enforcement of key types - and minimum key sizes [[GH-6030](https://github.com/hashicorp/vault/pull/6030)] - * secret/transit: ECDSA signatures can now be marshaled in JWS-compatible - fashion [[GH-6077](https://github.com/hashicorp/vault/pull/6077)] - * storage/etcd: Support SRV service names [[GH-6087](https://github.com/hashicorp/vault/pull/6087)] - * storage/aws: Support specifying a KMS key ID for server-side encryption - [[GH-5996](https://github.com/hashicorp/vault/pull/5996)] +* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api +* activity (enterprise): Added new clients unit tests to test accuracy of estimates +* agent/auto-auth: Add `exit_on_err` which, when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] +* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] +* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] +* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] +* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] +* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] +* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] +* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)] +* auth/approle: SecretIDs can now be generated with a per-request specified TTL and num_uses. +When either the ttl or num_uses field is not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)] +* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] +* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)] +* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] +* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert.
[[GH-17136](https://github.com/hashicorp/vault/pull/17136)] +* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)] +* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] +* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)] +* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] +* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] +* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)] +* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)] +* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] +* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] +* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] +* command/server: add `-dev-tls` and `-dev-tls-cert-dir` flags to create a Vault dev server with generated certificates and a private key (see the sketch below). [[GH-16421](https://github.com/hashicorp/vault/pull/16421)] +* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)] +* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command +* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported.
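+
+A minimal sketch of the `-dev-tls` flags noted above; dev mode only, and the certificate directory is a placeholder:
+
+```shell
+# Start a dev server with generated TLS certificates (never use in production).
+vault server -dev -dev-tls -dev-tls-cert-dir=/tmp/vault-dev-tls
+```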
+* core (enterprise): Add custom metadata support for namespaces +* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] +* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] +* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] +* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas +* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role +* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] +* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] +* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] +* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] +* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] +* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)] +* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] +* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] +* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] +* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)] +* plugins: Add Deprecation Status method to builtinregistry. 
[[GH-16846](https://github.com/hashicorp/vault/pull/16846)] +* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)] +* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)] +* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer. +* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] +* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] +* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] +* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)] +* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] +* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)] +* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] +* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)] +* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] +* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)] +* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)] +* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)] +* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)] +* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] +* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] +* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] +* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] +* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] +* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. 
[[GH-16900](https://github.com/hashicorp/vault/pull/16900)] +* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] +* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)] +* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] +* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] +* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] +* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)] +* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)] +* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] +* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] +* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] +* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. [[GH-16549](https://github.com/hashicorp/vault/pull/16549)] +* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] +* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] +* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. 
[[GH-15852](https://github.com/hashicorp/vault/pull/15852)] +* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] +* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] +* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] +* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)] +* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] +* website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)] +* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)] +* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] +* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] +* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] BUG FIXES: - * core: Fix a rare case where a standby whose connection is entirely torn down - to the active node, then reconnects to the same active node, may not - successfully resume operation [[GH-6167](https://github.com/hashicorp/vault/pull/6167)] - * cors: Don't duplicate headers when they're written [[GH-6207](https://github.com/hashicorp/vault/pull/6207)] - * identity: Persist merged entities only on the primary [[GH-6075](https://github.com/hashicorp/vault/pull/6075)] - * replication: Fix a potential race when a token is created and then used with - a performance standby very quickly, before an associated entity has been - replicated. If the entity is not found in this scenario, the request will - forward to the active node. - * replication: Fix issue where recovery keys would not work on secondary - clusters if using a different unseal mechanism than the primary. - * replication: Fix a "failed to register lease" error when using performance - standbys - * storage/postgresql: The `Get` method will now return an Entry object with - the `Key` member correctly populated with the full path that was requested - instead of just the last path element [[GH-6044](https://github.com/hashicorp/vault/pull/6044)] - -## 1.0.2 (January 15th, 2019) - -SECURITY: +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)] +* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] +* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. 
[[GH-16794](https://github.com/hashicorp/vault/pull/16794)]
+* api: Fixed issue with the internal/ui/mounts and internal/ui/mounts/(?P<path>.+) endpoints where they were not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
+* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)]
+* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
+* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)]
+* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
+* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
+* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)]
+* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)]
+* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts.
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
+* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails
+* core/managed-keys (enterprise): fix panic when `cache_disable` is set to true
+* core/quotas (enterprise): Fixed issue with improper counting of leases if the lease count quota was created after the leases
+* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)]
+* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)]
+* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
+* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
+* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)]
+* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries.
[[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] +* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* replication (enterprise): Fix data race in saveCheckpoint. +* replication (enterprise): Fix possible data race during merkle diff/sync +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] +* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] +* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] - * When creating a child token from a parent with `bound_cidrs`, the list of - CIDRs would not be propagated to the child token, allowing the child token - to be used from any address. 
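As an aside on the `kv get` leading-slash fix above ([[GH-16443](https://github.com/hashicorp/vault/pull/16443)]), the fix applies to the CLI's path handling; a minimal sketch of the equivalent programmatic read with the Go client (`github.com/hashicorp/vault/api`) follows. The mount `secret` and the path `app/config` are illustrative assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// After the fix above, `vault kv get secret/app/config` and
	// `vault kv get /secret/app/config` resolve to the same secret;
	// this is the equivalent read through the KV v2 helper.
	secret, err := client.KVv2("secret").Get(context.Background(), "app/config")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data)
}
```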
+## 1.11.12
+### June 21, 2023

CHANGES:

- * secret/aws: Role now returns `credential_type` instead of `credential_types` to match role input. If a legacy role can supply more than one credential type, the types will be concatenated with a `,`.
- * physical/dynamodb, autoseal/aws: Instead of Vault performing environment variable handling, and overriding static (config file) values if found, we use the default AWS SDK env handling behavior, which also looks for deprecated values. If you were previously providing both config values and environment values, please ensure the config values are unset if you want to use environment values.
- * Namespaces (Enterprise): Providing "root" as the header value for `X-Vault-Namespace` will perform the request on the root namespace. This is equivalent to providing an empty value. Creating a namespace called "root" in the root namespace is disallowed.
+* core: Bump Go version to 1.19.10.
+* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license termination time is before the build date of the binary.

FEATURES:

- * **InfluxDB Database Plugin**: Use Vault to dynamically create and manage InfluxDB users
-
-IMPROVEMENTS:
-
- * auth/aws: AWS EC2 authentication can optionally create entity aliases by image ID [[GH-5846](https://github.com/hashicorp/vault/pull/5846)]
- * autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal [[GH-5999](https://github.com/hashicorp/vault/pull/5999)]
- * physical/foundationdb: TLS support added. [[GH-5800](https://github.com/hashicorp/vault/pull/5800)]
-
-BUG FIXES:
-
- * api: Fix a couple of places where we were using the `LIST` HTTP verb (necessary to get the right method into the wrapping lookup function) and not then modifying it to a `GET`; although this is officially the verb Vault uses for listing and it's fully legal to use custom verbs, since many WAFs and API gateways choke on anything outside of RFC-standardized verbs we fall back to `GET` [[GH-6026](https://github.com/hashicorp/vault/pull/6026)]
- * autoseal/aws: Fix reading session tokens when AWS access key/secret key are also provided [[GH-5965](https://github.com/hashicorp/vault/pull/5965)]
- * command/operator/rekey: Fix help output showing `-delete-backup` when it should show `-backup-delete` [[GH-5981](https://github.com/hashicorp/vault/pull/5981)]
- * core: Fix bound_cidrs not being propagated to child tokens
- * replication: Correctly forward identity entity creation that originates from performance standby nodes (Enterprise)
- * secret/aws: Make input `credential_type` match the output type (string, not array) [[GH-5972](https://github.com/hashicorp/vault/pull/5972)]
- * secret/cubbyhole: Properly clean up cubbyhole after token revocation [[GH-6006](https://github.com/hashicorp/vault/pull/6006)]
- * secret/pki: Fix reading certificates on Windows with the file storage backend [[GH-6013](https://github.com/hashicorp/vault/pull/6013)]
- * ui (enterprise): properly display perf-standby count on the license page [[GH-5971](https://github.com/hashicorp/vault/pull/5971)]
- * ui: fix disappearing nested secrets and go to the nearest parent when deleting a secret [[GH-5976](https://github.com/hashicorp/vault/pull/5976)]
- * ui: fix error where deleting an item via the context menu would fail if the item name contained dots [[GH-6018](https://github.com/hashicorp/vault/pull/6018)]
- * ui: allow saving of kv secret after an
errored save attempt [[GH-6022](https://github.com/hashicorp/vault/pull/6022)] - * ui: fix display of kv-v1 secret containing a key named "keys" [[GH-6023](https://github.com/hashicorp/vault/pull/6023)] - -## 1.0.1 (December 14th, 2018) - -SECURITY: - - * Update version of Go to 1.11.3 to fix Go bug - https://github.com/golang/go/issues/29233 which corresponds to - CVE-2018-16875 - * Database user revocation: If a client has configured custom revocation - statements for a role with a value of `""`, that statement would be executed - verbatim, resulting in a lack of actual revocation but success for the - operation. Vault will now strip empty statements from any provided; as a - result if an empty statement is provided, it will behave as if no statement - is provided, falling back to the default revocation statement. - -CHANGES: - - * secret/database: On role read, empty statements will be returned as empty - slices instead of potentially being returned as JSON null values. This makes - it more in line with other parts of Vault and makes it easier for statically - typed languages to interpret the values. - -IMPROVEMENTS: - - * cli: Strip iTerm extra characters from password manager input [[GH-5837](https://github.com/hashicorp/vault/pull/5837)] - * command/server: Setting default kv engine to v1 in -dev mode can now be - specified via -dev-kv-v1 [[GH-5919](https://github.com/hashicorp/vault/pull/5919)] - * core: Add operationId field to OpenAPI output [[GH-5876](https://github.com/hashicorp/vault/pull/5876)] - * ui: Added ability to search for Group and Policy IDs when creating Groups - and Entities instead of typing them in manually +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: + +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] +* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] +* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] +* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] BUG FIXES: - * auth/azure: Cache azure authorizer [15] - * auth/gcp: Remove explicit project for service account in GCE authorizer [[GH-58](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/58)] - * cli: Show correct stored keys/threshold for autoseals [[GH-5910](https://github.com/hashicorp/vault/pull/5910)] - * cli: Fix backwards compatibility fallback when listing plugins [[GH-5913](https://github.com/hashicorp/vault/pull/5913)] - * core: Fix upgrades when the seal config had been created on early versions - of vault [[GH-5956](https://github.com/hashicorp/vault/pull/5956)] - * namespaces: Correctly reload the proper mount when tuning or reloading the - mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] - * secret/azure: Cache azure authorizer [19] - * secret/database: Strip empty statements on user input [[GH-5955](https://github.com/hashicorp/vault/pull/5955)] - * secret/gcpkms: Add path for retrieving the public key [[GH-5](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/5)] - * secret/pki: Fix panic that could occur during tidy operation when malformed - data was found [[GH-5931](https://github.com/hashicorp/vault/pull/5931)] - * secret/pki: Strip empty line in ca_chain output [[GH-5779](https://github.com/hashicorp/vault/pull/5779)] - * ui: Fixed a bug where the web CLI was not usable via the `fullscreen` - command - [[GH-5909](https://github.com/hashicorp/vault/pull/5909)] - * ui: Fix a bug where you couldn't write a jwt auth method config [[GH-5936](https://github.com/hashicorp/vault/pull/5936)] - -## 0.11.6 (December 14th, 2018) - -This release contains the three security fixes from 1.0.0 and 1.0.1 and the -following bug fixes from 1.0.0/1.0.1: - - * namespaces: Correctly reload the proper mount when tuning or reloading the - mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] - * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] - * replication/perfstandby: Fix redirect on approle update 
[[GH-5820](https://github.com/hashicorp/vault/pull/5820)] - * secrets/kv: Fix issue where storage version would get incorrectly downgraded - [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] - -It is otherwise identical to 0.11.5. - -## 1.0.0 (December 3rd, 2018) +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs + +## 1.11.11 +### June 08, 2023 SECURITY: - * When debugging a customer incident we discovered that in the case of - malformed data from an autoseal mechanism, Vault's master key could be - logged in Vault's server log. For this to happen, the data would need to be - modified by the autoseal mechanism after being submitted to it by Vault but - prior to encryption, or after decryption, prior to it being returned to - Vault. To put it another way, it requires the data that Vault submits for - encryption to not match the data returned after decryption. It is not - sufficient for the autoseal mechanism to return an error, and it cannot be - triggered by an outside attacker changing the on-disk ciphertext as all - autoseal mechanisms use authenticated encryption. We do not believe that - this is generally a cause for concern; since it involves the autoseal - mechanism returning bad data to Vault but with no error, in a working Vault - configuration this code path should never be hit, and if hitting this issue - Vault will not be unsealing properly anyways so it will be obvious what is - happening and an immediate rekey of the master key can be performed after - service is restored. 
We have filed for a CVE (CVE-2018-19786) and a CVSS V3 - score of 5.2 has been assigned. +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] CHANGES: - * Tokens are now prefixed by a designation to indicate what type of token they - are. Service tokens start with `s.` and batch tokens start with `b.`. - Existing tokens will still work (they are all of service type and will be - considered as such). Prefixing allows us to be more efficient when consuming - a token, which keeps the critical path of requests faster. - * Paths within `auth/token` that allow specifying a token or accessor in the - URL have been removed. These have been deprecated since March 2016 and - undocumented, but were retained for backwards compatibility. They shouldn't - be used due to the possibility of those paths being logged, so at this point - they are simply being removed. - * Vault will no longer accept updates when the storage key has invalid UTF-8 - character encoding [[GH-5819](https://github.com/hashicorp/vault/pull/5819)] - * Mount/Auth tuning the `options` map on backends will now upsert any provided - values, and keep any of the existing values in place if not provided. The - options map itself cannot be unset once it's set, but the keypairs within the - map can be unset if an empty value is provided, with the exception of the - `version` keypair which is handled differently for KVv2 purposes. - * Agent no longer automatically reauthenticates when new credentials are - detected. It's not strictly necessary and in some cases was causing - reauthentication much more often than intended. - * HSM Regenerate Key Support Removed: Vault no longer supports destroying and - regenerating encryption keys on an HSM; it only supports creating them. - Although this has never been a source of a customer incident, it is simply a - code path that is too trivial to activate, especially by mistyping - `regenerate_key` instead of `generate_key`. - * Barrier Config Upgrade (Enterprise): When upgrading from Vault 0.8.x, the - seal type in the barrier config storage entry will be upgraded from - "hsm-auto" to "awskms" or "pkcs11" upon unseal if using AWSKMS or HSM seals. - If performing seal migration, the barrier config should first be upgraded - prior to starting migration. - * Go API client uses pooled HTTP client: The Go API client now uses a - connection-pooling HTTP client by default. For CLI operations this makes no - difference but it should provide significant performance benefits for those - writing custom clients using the Go API library. As before, this can be - changed to any custom HTTP client by the caller. - * Builtin Secret Engines and Auth Methods are integrated deeper into the - plugin system. The plugin catalog can now override builtin plugins with - custom versions of the same name. Additionally the plugin system now - requires a plugin `type` field when configuring plugins, this can be "auth", - "database", or "secret". - -FEATURES: - - * **Auto-Unseal in Open Source**: Cloud-based auto-unseal has been migrated - from Enterprise to Open Source. We've created a migrator to allow migrating - between Shamir seals and auto unseal methods. 
- * **Batch Tokens**: Batch tokens trade off some features of service tokens for no storage overhead, and in most cases can be used across performance replication clusters.
- * **Replication Speed Improvements**: We've worked hard to speed up a lot of operations when using Vault Enterprise Replication.
- * **GCP KMS Secrets Engine**: This new secrets engine provides a Transit-like pattern to keys stored within GCP Cloud KMS.
- * **AppRole support in Vault Agent Auto-Auth**: You can now use AppRole credentials when having Agent automatically authenticate to Vault
- * **OpenAPI Support**: Descriptions of mounted backends can be served directly from Vault
- * **Kubernetes Projected Service Account Tokens**: Projected Service Account Tokens are now supported in Kubernetes auth
- * **Response Wrapping in UI**: Added ability to wrap secrets and easily copy the wrap token or secret JSON in the UI
-
-IMPROVEMENTS:
-
- * agent: Support for configuring the location of the kubernetes service account [[GH-5725](https://github.com/hashicorp/vault/pull/5725)]
- * auth/token: New tokens are indexed in storage by HMAC-SHA256 instead of SHA1
- * secret/totp: Allow @ character to be part of key name [[GH-5652](https://github.com/hashicorp/vault/pull/5652)]
- * secret/consul: Add support for new policy-based tokens added in Consul 1.4 [[GH-5586](https://github.com/hashicorp/vault/pull/5586)]
- * ui: Improve the token auto-renew warning, and automatically begin renewal when a user becomes active again [[GH-5662](https://github.com/hashicorp/vault/pull/5662)]
- * ui: The unbundled UI page now has some styling [[GH-5665](https://github.com/hashicorp/vault/pull/5665)]
- * ui: Improved banner and popup design [[GH-5672](https://github.com/hashicorp/vault/pull/5672)]
- * ui: Added token type to auth method mount config [[GH-5723](https://github.com/hashicorp/vault/pull/5723)]
- * ui: Display additional wrap info when unwrapping. [[GH-5664](https://github.com/hashicorp/vault/pull/5664)]
- * ui: Empty states have updated styling and link to relevant actions and documentation [[GH-5758](https://github.com/hashicorp/vault/pull/5758)]
- * ui: Allow editing of KV V2 data when a token doesn't have capabilities to read secret metadata [[GH-5879](https://github.com/hashicorp/vault/pull/5879)]
-
-BUG FIXES:
-
- * agent: Fix auth when multiple redirects occur [[GH-5814](https://github.com/hashicorp/vault/pull/5814)]
- * cli: Restore the `-policy-override` flag [[GH-5826](https://github.com/hashicorp/vault/pull/5826)]
- * core: Fix rekey progress reset which did not happen under certain circumstances.
[[GH-5743](https://github.com/hashicorp/vault/pull/5743)] - * core: Migration from autounseal to shamir will clean up old keys [[GH-5671](https://github.com/hashicorp/vault/pull/5671)] - * identity: Update group memberships when entity is deleted [[GH-5786](https://github.com/hashicorp/vault/pull/5786)] - * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] - * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] - * secrets/azure: Fix valid roles being rejected for duplicate ids despite - having distinct scopes - [[GH-16](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16)] - * storage/gcs: Send md5 of values to GCS to avoid potential corruption - [[GH-5804](https://github.com/hashicorp/vault/pull/5804)] - * secrets/kv: Fix issue where storage version would get incorrectly downgraded - [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] - * secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths - for all other operations for backwards compatibility - [[GH-19](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/19)] - * ui: Allow for secret creation in kv v2 when cas_required=true [[GH-5823](https://github.com/hashicorp/vault/pull/5823)] - * ui: Fix dr secondary operation token generation via the ui [[GH-5818](https://github.com/hashicorp/vault/pull/5818)] - * ui: Fix the PKI context menu so that items load [[GH-5824](https://github.com/hashicorp/vault/pull/5824)] - * ui: Update DR Secondary Token generation command [[GH-5857](https://github.com/hashicorp/vault/pull/5857)] - * ui: Fix pagination bug where controls would be rendered once for each - item when viewing policies [[GH-5866](https://github.com/hashicorp/vault/pull/5866)] - * ui: Fix bug where `sys/leases/revoke` required 'sudo' capability to show - the revoke button in the UI [[GH-5647](https://github.com/hashicorp/vault/pull/5647)] - * ui: Fix issue where certain pages wouldn't render in a namespace [[GH-5692](https://github.com/hashicorp/vault/pull/5692)] - -## 0.11.5 (November 13th, 2018) - -BUG FIXES: - - * agent: Fix issue when specifying two file sinks [[GH-5610](https://github.com/hashicorp/vault/pull/5610)] - * auth/userpass: Fix minor timing issue that could leak the presence of a - username [[GH-5614](https://github.com/hashicorp/vault/pull/5614)] - * autounseal/alicloud: Fix issue interacting with the API (Enterprise) - * autounseal/azure: Fix key version tracking (Enterprise) - * cli: Fix panic that could occur if parameters were not provided [[GH-5603](https://github.com/hashicorp/vault/pull/5603)] - * core: Fix buggy behavior if trying to remount into a namespace - * identity: Fix duplication of entity alias entity during alias transfer - between entities [[GH-5733](https://github.com/hashicorp/vault/pull/5733)] - * namespaces: Fix tuning of auth mounts in a namespace - * ui: Fix bug where editing secrets as JSON doesn't save properly [[GH-5660](https://github.com/hashicorp/vault/pull/5660)] - * ui: Fix issue where IE 11 didn't render the UI and also had a broken form - when trying to use tool/hash [[GH-5714](https://github.com/hashicorp/vault/pull/5714)] - -## 0.11.4 (October 23rd, 2018) - -CHANGES: - - * core: HA lock file is no longer copied during `operator migrate` [[GH-5503](https://github.com/hashicorp/vault/pull/5503)]. - We've categorized this as a change, but generally this can be considered - just a bug fix, and no action is needed. 
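A note on the userpass timing fix above ([[GH-5614](https://github.com/hashicorp/vault/pull/5614)]): the standard mitigation for this class of bug is a constant-time comparison, so the time taken does not reveal how early two values diverge (or whether a user exists at all). A minimal Go sketch of the general technique follows; it illustrates the idea and is not Vault's actual implementation.

```go
package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"fmt"
)

// equalConstantTime hashes both inputs to fixed-length digests and
// compares the digests in constant time, so the comparison cost does
// not depend on where the inputs first differ.
func equalConstantTime(a, b string) bool {
	ha := sha256.Sum256([]byte(a))
	hb := sha256.Sum256([]byte(b))
	return subtle.ConstantTimeCompare(ha[:], hb[:]) == 1
}

func main() {
	fmt.Println(equalConstantTime("hunter2", "hunter2")) // true
	fmt.Println(equalConstantTime("hunter2", "hunter3")) // false
}
```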
- -FEATURES: - - * **Transit Key Trimming**: Keys in transit secret engine can now be trimmed to - remove older unused key versions - * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy - individual secret versions in the UI - * **Azure Existing Service Principal Support**: Credentials can now be generated - against an existing service principal - -IMPROVEMENTS: - - * core: Add last WAL in leader/health output for easier debugging [[GH-5523](https://github.com/hashicorp/vault/pull/5523)] - * identity: Identity names will now be handled case insensitively by default. - This includes names of entities, aliases and groups [[GH-5404](https://github.com/hashicorp/vault/pull/5404)] - * secrets/aws: Added role-option max_sts_ttl to cap TTL for AWS STS - credentials [[GH-5500](https://github.com/hashicorp/vault/pull/5500)] - * secret/database: Allow Cassandra user to be non-superuser so long as it has - role creation permissions [[GH-5402](https://github.com/hashicorp/vault/pull/5402)] - * secret/radius: Allow setting the NAS Identifier value in the generated - packet [[GH-5465](https://github.com/hashicorp/vault/pull/5465)] - * secret/ssh: Allow usage of JSON arrays when setting zero addresses [[GH-5528](https://github.com/hashicorp/vault/pull/5528)] - * secret/transit: Allow trimming unused keys [[GH-5388](https://github.com/hashicorp/vault/pull/5388)] - * ui: Support KVv2 [[GH-5547](https://github.com/hashicorp/vault/pull/5547)], [[GH-5563](https://github.com/hashicorp/vault/pull/5563)] - * ui: Allow viewing and updating Vault license via the UI - * ui: Onboarding will now display your progress through the chosen tutorials - * ui: Dynamic secret backends obfuscate sensitive data by default and - visibility is toggleable - -BUG FIXES: - - * agent: Fix potential hang during agent shutdown [[GH-5026](https://github.com/hashicorp/vault/pull/5026)] - * auth/ldap: Fix listing of users/groups that contain slashes [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] - * core: Fix memory leak during some expiration calls [[GH-5505](https://github.com/hashicorp/vault/pull/5505)] - * core: Fix generate-root operations requiring empty `otp` to be provided - instead of an empty body [[GH-5495](https://github.com/hashicorp/vault/pull/5495)] - * identity: Remove lookup check during alias removal from entity [[GH-5524](https://github.com/hashicorp/vault/pull/5524)] - * secret/pki: Fix TTL/MaxTTL check when using `sign-verbatim` [[GH-5549](https://github.com/hashicorp/vault/pull/5549)] - * secret/pki: Fix regression in 0.11.2+ causing the NotBefore value of - generated certificates to be set to the Unix epoch if the role value was not - set, instead of using the default of 30 seconds [[GH-5481](https://github.com/hashicorp/vault/pull/5481)] - * storage/mysql: Use `varbinary` instead of `varchar` when creating HA tables - [[GH-5529](https://github.com/hashicorp/vault/pull/5529)] - -## 0.11.3 (October 8th, 2018) - -SECURITY: - - * Revocation: A regression in 0.11.2 (OSS) and 0.11.0 (Enterprise) caused - lease IDs containing periods (`.`) to not be revoked properly. Upon startup - when revocation is tried again these should now revoke successfully. +* core: Bump Go version to 1.19.9. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. 
[[GH-20826](https://github.com/hashicorp/vault/pull/20826)]

IMPROVEMENTS:

- * auth/ldap: Listing of users and groups returns absolute paths [[GH-5537](https://github.com/hashicorp/vault/pull/5537)]
- * secret/pki: OID SANs can now specify `*` to allow any value [[GH-5459](https://github.com/hashicorp/vault/pull/5459)]
+* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when `VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)]
+* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)]
+* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)]

BUG FIXES:

- * auth/ldap: Fix panic if specific values were given to be escaped [[GH-5471](https://github.com/hashicorp/vault/pull/5471)]
- * cli/auth: Fix panic if `vault auth` was given no parameters [[GH-5473](https://github.com/hashicorp/vault/pull/5473)]
- * secret/database/mongodb: Fix panic that could occur at high load [[GH-5463](https://github.com/hashicorp/vault/pull/5463)]
- * secret/pki: Fix CA generation not allowing OID SANs [[GH-5459](https://github.com/hashicorp/vault/pull/5459)]
+* api: Properly handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)]
+* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)]
+* cli: CLI should take days as a unit of time for TTL-like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)]
+* core (enterprise): Fix log shipper buffer size overflow issue for 32-bit architectures.
+* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero.
+* core (enterprise): Remove MFA Enforcement configuration for a namespace when deleting the namespace
+* core: prevent panic on login after a namespace that had MFA enforcement is deleted [[GH-20375](https://github.com/hashicorp/vault/pull/20375)]
+* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary.
+* replication (enterprise): fix bug where secondary gRPC connections would time out when connecting to a primary host that no longer exists.
+* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation
-
-## 0.11.2 (October 2nd, 2018)

+## 1.11.10
+### April 26, 2023

CHANGES:

- * `sys/seal-status` now includes an `initialized` boolean in the output. If Vault is not initialized, it will return a `200` with this value set to `false` instead of a `400`.
- * `passthrough_request_headers` will now deny certain headers from being provided to backends based on a global denylist.
- * Token Format: Tokens are now represented as a base62 value; tokens in namespaces will have the namespace identifier appended. (This appeared in Enterprise in 0.11.0, but is only in OSS in 0.11.2.)
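As an illustration of the token formats described here and in the 1.0.0 CHANGES notes above (service tokens prefixed `s.`, batch tokens prefixed `b.`, pre-1.0 tokens unprefixed), a minimal Go sketch of a prefix-based classifier follows; it is not part of Vault, and the sample values are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

// tokenKind classifies a Vault token by the prefix scheme described in
// the changelog entries above: "s." for service tokens, "b." for batch
// tokens, and no prefix for legacy (pre-1.0) tokens, which are all of
// service type.
func tokenKind(token string) string {
	switch {
	case strings.HasPrefix(token, "s."):
		return "service"
	case strings.HasPrefix(token, "b."):
		return "batch"
	default:
		return "legacy service (unprefixed)"
	}
}

func main() {
	// Placeholder values, not real tokens.
	for _, t := range []string{"s.EXAMPLE", "b.EXAMPLE", "1a2b3c4d"} {
		fmt.Printf("%-10s -> %s\n", t, tokenKind(t))
	}
}
```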
-
-FEATURES:
-
- * **AWS Secret Engine Root Credential Rotation**: The credential used by the AWS secret engine can now be rotated, to ensure that only Vault knows the credentials it is using [[GH-5140](https://github.com/hashicorp/vault/pull/5140)]
- * **Storage Backend Migrator**: A new `operator migrate` command allows offline migration of data between two storage backends
- * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used as a supported seal for Auto Unseal and Seal Wrapping
-
-BUG FIXES:
-
- * auth/okta: Fix reading the deprecated `token` parameter if a token was previously set in the configuration [[GH-5409](https://github.com/hashicorp/vault/pull/5409)]
- * core: Re-add deprecated capabilities information for now [[GH-5360](https://github.com/hashicorp/vault/pull/5360)]
- * core: Fix handling of cyclic token relationships [[GH-4803](https://github.com/hashicorp/vault/pull/4803)]
- * storage/mysql: Fix locking on MariaDB [[GH-5343](https://github.com/hashicorp/vault/pull/5343)]
- * replication: Fix DR API when using a token [[GH-5398](https://github.com/hashicorp/vault/pull/5398)]
- * identity: Ensure old group alias is removed when a new one is written [[GH-5350](https://github.com/hashicorp/vault/pull/5350)]
- * storage/alicloud: Don't call uname on package init [[GH-5358](https://github.com/hashicorp/vault/pull/5358)]
- * secrets/jwt: Fix issue where request context would be canceled too early
- * ui: fix requiring update capability for AWS IAM creds generation [[GH-5294](https://github.com/hashicorp/vault/pull/5294)]
- * ui: fix calculation of token expiry [[GH-5435](https://github.com/hashicorp/vault/pull/5435)]
-
-IMPROVEMENTS:
-
- * auth/aws: The identity alias name can now be configured to be either the IAM unique ID of the IAM Principal, or the ARN of the caller identity [[GH-5247](https://github.com/hashicorp/vault/pull/5247)]
- * auth/cert: Add allowed_organizational_units support [[GH-5252](https://github.com/hashicorp/vault/pull/5252)]
- * cli: Format TTLs for non-secret responses [[GH-5367](https://github.com/hashicorp/vault/pull/5367)]
- * identity: Support operating on entities and groups by their names [[GH-5355](https://github.com/hashicorp/vault/pull/5355)]
- * plugins: Add `env` parameter when registering plugins to the catalog to allow operators to include environment variables during plugin execution.
[[GH-5359](https://github.com/hashicorp/vault/pull/5359)] - * secrets/aws: WAL Rollback improvements [[GH-5202](https://github.com/hashicorp/vault/pull/5202)] - * secrets/aws: Allow specifying STS role-default TTLs [[GH-5138](https://github.com/hashicorp/vault/pull/5138)] - * secrets/pki: Add configuration support for setting NotBefore [[GH-5325](https://github.com/hashicorp/vault/pull/5325)] - * core: Support for passing the Vault token via an Authorization Bearer header [[GH-5397](https://github.com/hashicorp/vault/pull/5397)] - * replication: Reindex process now runs in the background and does not block other - vault operations - * storage/zookeeper: Enable TLS based communication with Zookeeper [[GH-4856](https://github.com/hashicorp/vault/pull/4856)] - * ui: you can now init a cluster with a seal config [[GH-5428](https://github.com/hashicorp/vault/pull/5428)] - * ui: added the option to force promote replication clusters [[GH-5438](https://github.com/hashicorp/vault/pull/5438)] - * replication: Allow promotion of a secondary when data is syncing with a "force" flag - -## 0.11.1.1 (September 17th, 2018) (Enterprise Only) - -BUG FIXES: - - * agent: Fix auth handler-based wrapping of output tokens [[GH-5316](https://github.com/hashicorp/vault/pull/5316)] - * core: Properly store the replication checkpoint file if it's larger than the - storage engine's per-item limit - * core: Improve WAL deletion rate - * core: Fix token creation on performance standby nodes - * core: Fix unwrapping inside a namespace - * core: Always forward tidy operations from performance standby nodes - -IMPROVEMENTS: - - * auth/aws: add support for key/value pairs or JSON values for - `iam_request_headers` with IAM auth method [[GH-5320](https://github.com/hashicorp/vault/pull/5320)] - * auth/aws, secret/aws: Throttling errors from the AWS API will now be - reported as 502 errors by Vault, along with the original error [[GH-5270](https://github.com/hashicorp/vault/pull/5270)] - * replication: Start fetching during a sync from where it previously errored - -## 0.11.1 (September 6th, 2018) - -SECURITY: - - * Random Byte Reading in Barrier: Prior to this release, Vault was not - properly checking the error code when reading random bytes for the IV for - AES operations in its cryptographic barrier. Specifically, this means that - such an IV could potentially be zero multiple times, causing nonce re-use - and weakening the security of the key. On most platforms this should never - happen because reading from kernel random sources is non-blocking and always - successful, but there may be platform-specific behavior that has not been - accounted for. (Vault has tests to check exactly this, and the tests have - never seen nonce re-use.) - -FEATURES: - - * AliCloud Agent Support: Vault Agent can now authenticate against the - AliCloud auth method. - * UI: Enable AliCloud auth method and Azure secrets engine via the UI. +* core: Bump Go version to 1.19.8. IMPROVEMENTS: - * core: Logging level for most logs (not including secrets/auth plugins) can - now be changed on-the-fly via `SIGHUP`, reading the desired value from - Vault's config file [[GH-5280](https://github.com/hashicorp/vault/pull/5280)] +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. 
[[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] BUG FIXES: - * core: Ensure we use a background context when stepping down [[GH-5290](https://github.com/hashicorp/vault/pull/5290)] - * core: Properly check error return from random byte reading [[GH-5277](https://github.com/hashicorp/vault/pull/5277)] - * core: Re-add `sys/` top-route injection for now [[GH-5241](https://github.com/hashicorp/vault/pull/5241)] - * core: Policies stored in minified JSON would return an error [[GH-5229](https://github.com/hashicorp/vault/pull/5229)] - * core: Evaluate templated policies in capabilities check [[GH-5250](https://github.com/hashicorp/vault/pull/5250)] - * identity: Update MemDB with identity group alias while loading groups [[GH-5289](https://github.com/hashicorp/vault/pull/5289)] - * secrets/database: Fix nil pointer when revoking some leases [[GH-5262](https://github.com/hashicorp/vault/pull/5262)] - * secrets/pki: Fix sign-verbatim losing extra Subject attributes [[GH-5245](https://github.com/hashicorp/vault/pull/5245)] - * secrets/pki: Remove certificates from store when tidying revoked - certificates and simplify API [[GH-5231](https://github.com/hashicorp/vault/pull/5231)] - * ui: JSON editor will not coerce input to an object, and will now show an - error about Vault expecting an object [[GH-5271](https://github.com/hashicorp/vault/pull/5271)] - * ui: authentication form will now default to any methods that have been tuned - to show up for unauthenticated users [[GH-5281](https://github.com/hashicorp/vault/pull/5281)] - - -## 0.11.0 (August 28th, 2018) - -DEPRECATIONS/CHANGES: - - * Request Timeouts: A default request timeout of 90s is now enforced. This - setting can be overwritten in the config file. If you anticipate requests - taking longer than 90s this setting should be updated before upgrading. - * (NOTE: will be re-added into 0.11.1 as it broke more than anticipated. There - will be some further guidelines around when this will be removed again.) - * `sys/` Top Level Injection: For the last two years for backwards - compatibility data for various `sys/` routes has been injected into both the - Secret's Data map and into the top level of the JSON response object. - However, this has some subtle issues that pop up from time to time and is - becoming increasingly complicated to maintain, so it's finally being - removed. - * Path Fallback for List Operations: For a very long time Vault has - automatically adjusted `list` operations to always end in a `/`, as list - operations operates on prefixes, so all list operations by definition end - with `/`. This was done server-side so affects all clients. However, this - has also led to a lot of confusion for users writing policies that assume - that the path that they use in the CLI is the path used internally. 
Starting in 0.11, ACL policies gain a new fallback rule for listing: they will use a matching path ending in `/` if available, but if not found, they will look for the same path without a trailing `/`. This allows putting `list` capabilities in the same path block as most other capabilities for that path, while not providing any extra access if `list` wasn't actually provided there.
- * Performance Standbys On By Default: If your flavor/license of Vault Enterprise supports Performance Standbys, they are on by default. You can disable this behavior per-node with the `disable_performance_standby` configuration flag.
- * AWS Secret Engine Roles: The AWS Secret Engine roles are now explicit about the type of AWS credential they are generating; this reduces ambiguity that existed previously and enables new features for specific credential types. Writing role data and generating credentials remain backwards compatible; however, the data returned when reading a role's configuration has changed in backwards-incompatible ways. Anything that depended on reading role data from the AWS secret engine will break until it is updated to work with the new format.
- * Token Format (Enterprise): Tokens are now represented as a base62 value; tokens in namespaces will have the namespace identifier appended.
-
-FEATURES:
-
- * **Namespaces (Enterprise)**: A set of features within Vault Enterprise that allows Vault environments to support *Secure Multi-tenancy* within a single Vault Enterprise infrastructure. Through namespaces, Vault administrators can support tenant isolation for teams and individuals as well as empower those individuals to self-manage their own tenant environment.
- * **Performance Standbys (Enterprise)**: Standby nodes can now service requests that do not modify storage. This provides near-horizontal scaling of a cluster in some workloads, and is the intra-cluster analogue of the existing Performance Replication feature, which replicates to distinct clusters in other datacenters, geos, etc.
- * **AliCloud OSS Storage**: AliCloud OSS can now be used for Vault storage.
- * **AliCloud Auth Plugin**: AliCloud's identity services can now be used to grant access to Vault. See the [plugin repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for more information.
- * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that allows generating credentials to allow access to Azure. See the [plugin repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for more information.
- * **HA Support for MySQL Storage**: MySQL storage now supports HA.
- * **ACL Templating**: ACL policies can now be templated using identity Entity, Groups, and Metadata.
- * **UI Onboarding wizards**: The Vault UI can provide contextual help and guidance, linking out to relevant documentation or guides on vaultproject.io for various workflows in Vault.
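To make the 0.11 list-path fallback rule above concrete, here is a minimal sketch that writes such a policy with the Go API client (`github.com/hashicorp/vault/api`); the policy name `app-read` and the KV v2 metadata path are illustrative assumptions.

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With the 0.11 fallback rule, a LIST on "secret/metadata/app/" can
	// match this block even though it lacks a trailing slash, so "list"
	// no longer needs its own "secret/metadata/app/" path block.
	policy := `
path "secret/metadata/app" {
  capabilities = ["read", "list"]
}
`
	if err := client.Sys().PutPolicy("app-read", policy); err != nil {
		log.Fatal(err)
	}
}
```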
- -IMPROVEMENTS: - - * agent: Add `exit_after_auth` to be able to use the Agent for a single - authentication [[GH-5013](https://github.com/hashicorp/vault/pull/5013)] - * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs - [[GH-5034](https://github.com/hashicorp/vault/pull/5034)] - * cli: Add support for passing parameters to `vault read` operations [[GH-5093](https://github.com/hashicorp/vault/pull/5093)] - * secrets/aws: Make credential types more explicit [[GH-4360](https://github.com/hashicorp/vault/pull/4360)] - * secrets/nomad: Support for longer token names [[GH-5117](https://github.com/hashicorp/vault/pull/5117)] - * secrets/pki: Allow disabling CRL generation [[GH-5134](https://github.com/hashicorp/vault/pull/5134)] - * storage/azure: Add support for different Azure environments [[GH-4997](https://github.com/hashicorp/vault/pull/4997)] - * storage/file: Sort keys in list responses [[GH-5141](https://github.com/hashicorp/vault/pull/5141)] - * storage/mysql: Support special characters in database and table names. - -BUG FIXES: +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)]
+* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens
+* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)]
+* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)]
+* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)]
+* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)]

- * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set (IOW, error in this case)
- * core: Prevent Go's HTTP library from interspersing logs in a different format and/or interleaved [[GH-5135](https://github.com/hashicorp/vault/pull/5135)]
- * identity: Properly populate `mount_path` and `mount_type` on group lookup [[GH-5074](https://github.com/hashicorp/vault/pull/5074)]
- * identity: Fix persisting alias metadata [[GH-5188](https://github.com/hashicorp/vault/pull/5188)]
- * identity: Fix carryover issue from previously fixed race condition that could cause Vault not to start up due to two entities referencing the same alias. These entities are now merged. [[GH-5000](https://github.com/hashicorp/vault/pull/5000)]
- * replication: Fix issue causing some pages not to flush to storage
- * secrets/database: Fix inability to update custom SQL statements on database roles. [[GH-5080](https://github.com/hashicorp/vault/pull/5080)]
- * secrets/pki: Disallow putting the CA's serial on its CRL. While technically legal, doing so inherently means the CRL can't be trusted anyways, so it's not useful and easy to footgun. [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
- * storage/gcp,spanner: Fix data races [[GH-5081](https://github.com/hashicorp/vault/pull/5081)]
-
-## 0.10.4 (July 25th, 2018)

+## 1.11.9
+### March 29, 2023

SECURITY:

- * Control Groups: The Identity entity associated with a request was not being properly persisted. As a result, the same authorizer could provide more than one authorization.
-
-DEPRECATIONS/CHANGES:
-
- * Revocations of dynamic secrets leases are now queued/asynchronous rather than synchronous. This allows Vault to take responsibility for revocation even if the initial attempt fails. The previous synchronous behavior can be attained via the `-sync` CLI flag or `sync` API parameter. When in synchronous mode, if the operation results in failure it is up to the user to retry.
- * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a source of confusion to users as to why Vault would "hang" before returning a 5xx error. The Go API client still defaults to two retries.
- * Identity Entity Alias metadata: You can no longer manually set metadata on entity aliases. All alias data (except the canonical entity ID it refers to) is intended to be managed by the plugin providing the alias information, so allowing it to be set manually didn't make sense.
-
-FEATURES:
-
- * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either validates signatures locally or uses OIDC Discovery to fetch the current set of keys for signature validation. Various claims can be specified for validation (in addition to the cryptographic signature) and a user and optional groups claim can be used to provide Identity information.
- * **FoundationDB Storage**: You can now use FoundationDB for storing Vault - data. - * **UI Control Group Workflow (enterprise)**: The UI will now detect control - group responses and provide a workflow to view the status of the request - and to authorize requests. - * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically - authenticate for you across a variety of authentication methods, provide - tokens to clients, and keep the tokens renewed, reauthenticating as - necessary. +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] IMPROVEMENTS: - * auth/azure: Add support for virtual machine scale sets - * auth/gcp: Support multiple bindings for region, zone, and instance group - * cli: Add subcommands for interacting with the plugin catalog [[GH-4911](https://github.com/hashicorp/vault/pull/4911)] - * cli: Add a `-description` flag to secrets and auth tune subcommands to allow - updating an existing secret engine's or auth method's description. This - change also allows the description to be unset by providing an empty string.
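To illustrate the `-description` tune flag noted above, a minimal sketch (the `aws/` mount is illustrative):

```
# Update an existing mount's description in place
$ vault secrets tune -description="AWS credentials for CI" aws/

# An empty string unsets the description again
$ vault secrets tune -description="" aws/
```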
- * core: Add config flag to disable non-printable character check [[GH-4917](https://github.com/hashicorp/vault/pull/4917)] - * core: A `max_request_size` parameter can now be set per-listener to adjust - the maximum allowed size per request [[GH-4824](https://github.com/hashicorp/vault/pull/4824)] - * core: Add control group request endpoint to default policy [[GH-4904](https://github.com/hashicorp/vault/pull/4904)] - * identity: Identity metadata is now passed through to plugins [[GH-4967](https://github.com/hashicorp/vault/pull/4967)] - * replication: Add additional safety checks and logging when replication is - in a bad state - * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault - kv` [[GH-4895](https://github.com/hashicorp/vault/pull/4895)] - * secrets/pki: Add the ability to tidy revoked but unexpired certificates - [[GH-4916](https://github.com/hashicorp/vault/pull/4916)] - * secrets/ssh: Allow Vault to work with single-argument SSH flags [[GH-4825](https://github.com/hashicorp/vault/pull/4825)] - * secrets/ssh: SSH executable path can now be configured in the CLI [[GH-4937](https://github.com/hashicorp/vault/pull/4937)] - * storage/swift: Add additional configuration options [[GH-4901](https://github.com/hashicorp/vault/pull/4901)] - * ui: Choose which auth methods to show to unauthenticated users via - `listing_visibility` in the auth method edit forms [[GH-4854](https://github.com/hashicorp/vault/pull/4854)] - * ui: Authenticate users automatically by passing a wrapped token to the UI via - the new `wrapped_token` query parameter [[GH-4854](https://github.com/hashicorp/vault/pull/4854)] +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced.
[[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] BUG FIXES: - * api: Fix response body being cleared too early [[GH-4987](https://github.com/hashicorp/vault/pull/4987)] - * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove - secret accessors [[GH-4981](https://github.com/hashicorp/vault/pull/4981)] - * auth/aws: Fix updating `max_retries` [[GH-4980](https://github.com/hashicorp/vault/pull/4980)] - * auth/kubernetes: Trim trailing whitespace when sending JWT - * cli: Fix parsing of environment variables for integer flags [[GH-4925](https://github.com/hashicorp/vault/pull/4925)] - * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is - sealed [[GH-4874](https://github.com/hashicorp/vault/pull/4874)] - * core: Fix issue releasing the leader lock in some circumstances [[GH-4915](https://github.com/hashicorp/vault/pull/4915)] - * core: Fix a panic that could happen if the server was shut down while still - starting up - * core: Fix deadlock that would occur if a leadership loss occurs at the same - time as a seal operation [[GH-4932](https://github.com/hashicorp/vault/pull/4932)] - * core: Fix issue with auth mounts failing to renew tokens due to policies - changing [[GH-4960](https://github.com/hashicorp/vault/pull/4960)] - * auth/radius: Fix issue where some radius logins were being canceled too early - [[GH-4941](https://github.com/hashicorp/vault/pull/4941)] - * core: Fix accidental seal of vault if we lose leadership during startup - [[GH-4924](https://github.com/hashicorp/vault/pull/4924)] - * core: Fix standby not being able to forward requests larger than 4MB - [[GH-4844](https://github.com/hashicorp/vault/pull/4844)] - * core: Avoid panic while processing group memberships [[GH-4841](https://github.com/hashicorp/vault/pull/4841)] - * identity: Fix a race condition creating aliases [[GH-4965](https://github.com/hashicorp/vault/pull/4965)] - * plugins: Fix being unable to send very large payloads to or from plugins - [[GH-4958](https://github.com/hashicorp/vault/pull/4958)] - * physical/azure: Long list responses would sometimes be truncated [[GH-4983](https://github.com/hashicorp/vault/pull/4983)] - * replication: Allow replication status requests to be processed while in - merkle sync - * replication: Ensure merkle reindex flushes all changes to storage immediately - * replication: Fix a case where a network interruption could cause a secondary - to be unable to reconnect to a primary - * secrets/pki: Fix permitted DNS domains performing improper validation - [[GH-4863](https://github.com/hashicorp/vault/pull/4863)] - * secrets/database: Fix panic during DB creds revocation [[GH-4846](https://github.com/hashicorp/vault/pull/4846)] - * ui: Fix usage of cubbyhole backend in the UI [[GH-4851](https://github.com/hashicorp/vault/pull/4851)] - * ui: Fix toggle state when a secret is JSON-formatted [[GH-4913](https://github.com/hashicorp/vault/pull/4913)] - * ui: Fix coercion of falsey values to empty string when editing secrets as - JSON [[GH-4977](https://github.com/hashicorp/vault/pull/4977)] - -## 0.10.3 (June 20th, 2018) - -DEPRECATIONS/CHANGES: - - * In the audit log and in client responses, policies are now split into three - parameters: policies that came only from tokens, policies that came only - from Identity, and the combined set.
Any previous location of policies via - the API now contains the full, combined set. - * When a token is tied to an Identity entity and the entity is deleted, the - token will no longer be usable, regardless of the validity of the token - itself. - * When authentication succeeds but no policies were defined for that specific - user, most auth methods would allow a token to be generated but a few would - reject the authentication, namely `ldap`, `okta`, and `radius`. Since the - `default` policy is added by Vault's core, this would incorrectly reject - valid authentications before they would in fact be granted policies. This - inconsistency has been addressed; valid authentications for these methods - now succeed even if no policy was specifically defined in that method for - that user. - -FEATURES: - - * Root Rotation for Active Directory: You can now command Vault to rotate the - configured root credentials used in the AD secrets engine, to ensure that - only Vault knows the credentials it's using. - * URI SANs in PKI: You can now configure URI Subject Alternate Names in the - `pki` backend. Roles can limit which SANs are allowed via globbing. - * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2 - path back to a previous non-deleted/non-destroyed version. The previous - version becomes the next/newest version for the path. - * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token - generated from AppRole will be bound. - -IMPROVEMENTS: - - * approle: Return 404 instead of 202 on invalid role names during POST - operations [[GH-4778](https://github.com/hashicorp/vault/pull/4778)] - * core: Add idle and initial header read/TLS handshake timeouts to connections - to ensure server resources are cleaned up [[GH-4760](https://github.com/hashicorp/vault/pull/4760)] - * core: Report policies in token, identity, and full sets [[GH-4747](https://github.com/hashicorp/vault/pull/4747)] - * secrets/databases: Add `create`/`update` distinction for connection - configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] - * secrets/databases: Add `create`/`update` distinction for role configurations - [[GH-3544](https://github.com/hashicorp/vault/pull/3544)] - * secrets/databases: Add best-effort revocation logic for use when a role has - been deleted [[GH-4782](https://github.com/hashicorp/vault/pull/4782)] - * secrets/kv: Add `kv rollback` [[GH-4774](https://github.com/hashicorp/vault/pull/4774)] - * secrets/pki: Add URI SANs support [[GH-4675](https://github.com/hashicorp/vault/pull/4675)] - * secrets/ssh: Allow standard SSH command arguments to be used, without - requiring username@hostname syntax [[GH-4710](https://github.com/hashicorp/vault/pull/4710)] - * storage/consul: Add context support so that requests are cancelable - [[GH-4739](https://github.com/hashicorp/vault/pull/4739)] - * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` - API [[GH-4827](https://github.com/hashicorp/vault/pull/4827)] - * ui: Secret values are obfuscated by default and visibility is toggleable [[GH-4422](https://github.com/hashicorp/vault/pull/4422)] - -BUG FIXES: +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)] +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. 
[[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - * auth/approle: Fix panic due to metadata being nil [[GH-4719](https://github.com/hashicorp/vault/pull/4719)] - * auth/aws: Fix delete path for tidy operations [[GH-4799](https://github.com/hashicorp/vault/pull/4799)] - * core: Optimizations to remove some speed regressions due to the - security-related changes in 0.10.2 - * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [[GH-4721](https://github.com/hashicorp/vault/pull/4721)] - * secrets/database: Fix default MySQL root rotation statement [[GH-4748](https://github.com/hashicorp/vault/pull/4748)] - * secrets/gcp: Fix renewal for GCP account keys - * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands - incorrectly operating on a root+mount path instead of being an error - [[GH-4726](https://github.com/hashicorp/vault/pull/4726)] - * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC - keys, fixing lookup on some Thales devices - * replication: Fix issue enabling replication when a non-auth mount and auth - mount have the same name - * auth/kubernetes: Fix issue verifying ECDSA signed JWTs - * ui: add missing edit mode for auth method configs [[GH-4770](https://github.com/hashicorp/vault/pull/4770)] - -## 0.10.2 (June 6th, 2018) +## 1.11.8 +### March 01, 2023 SECURITY: - * Tokens: A race condition was identified that could occur if a token's - lease expired while Vault was not running. In this case, when Vault came - back online, sometimes it would properly revoke the lease but other times it - would not, leading to a Vault token that no longer had an expiration and had - essentially unlimited lifetime. This race was per-token, not all-or-nothing - for all tokens that may have expired during Vault's downtime. We have fixed - the behavior and put extra checks in place to help prevent any similar - future issues. In addition, the logic we have put in place ensures that such - lease-less tokens can no longer be used (unless they are root tokens that - never had an expiration to begin with). - * Convergent Encryption: The version 2 algorithm used in `transit`'s - convergent encryption feature is susceptible to offline - plaintext-confirmation attacks. As a result, we are introducing a version 3 - algorithm that mitigates this. 
If you are currently using convergent - encryption, we recommend upgrading, rotating your encryption key (the new - key version will use the new algorithm), and rewrapping your data (the - `rewrap` endpoint can be used to allow a relatively non-privileged user to - perform the rewrapping while never divulging the plaintext). - * AppRole case-sensitive role name secret-id leaking: When using a mixed-case - role name via AppRole, deleting a secret-id via accessor or other operations - could end up leaving the secret-id behind and valid but without an accessor. - This has now been fixed, and we have put checks in place to prevent these - secret-ids from being used. - -DEPRECATIONS/CHANGES: - - * PKI duration return types: The PKI backend now returns durations (e.g. when - reading a role) as an integer number of seconds instead of a Go-style - string, in line with how the rest of Vault's API returns durations. +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] -FEATURES: +CHANGES: - * Active Directory Secrets Engine: A new `ad` secrets engine has been created - which allows Vault to rotate and provide credentials for configured AD - accounts. - * Rekey Verification: Rekey operations can now require verification. This - turns on a two-phase process where the existing key shares authorize - generating a new master key, and a threshold of the new, returned key shares - must be provided to verify that they have been successfully received in - order for the actual master key to be rotated. - * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods: - You can now limit authentication to specific CIDRs; these will also be - encoded in resultant tokens to limit their use. - * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete - commands in a CLI that can be accessed from the nav bar. Complex inputs such - as JSON files are not currently supported. This surfaces features otherwise - unsupported in Vault's UI. - * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault - can now be used as a supported seal for Auto Unseal and Seal Wrapping. +* core: Bump Go version to 1.19.6.
IMPROVEMENTS: - * api: Close renewer's doneCh when the renewer is stopped, so that programs - expecting a final value through doneCh behave correctly [[GH-4472](https://github.com/hashicorp/vault/pull/4472)] - * auth/cert: Break out `allowed_names` into component parts and add - `allowed_uri_sans` [[GH-4231](https://github.com/hashicorp/vault/pull/4231)] - * auth/ldap: Obfuscate error messages pre-bind for greater security [[GH-4700](https://github.com/hashicorp/vault/pull/4700)] - * cli: `vault login` now supports a `-no-print` flag to suppress printing - token information but still allow storing into the token helper [[GH-4454](https://github.com/hashicorp/vault/pull/4454)] - * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and - CKM_RSA_PKCS_OAEP mechanisms - * core/pkcs11 (enterprise): HSM slots can now be selected by token label - instead of just slot number - * core/token: Optimize token revocation by removing unnecessary list call - against the storage backend when calling revoke-orphan on tokens [[GH-4465](https://github.com/hashicorp/vault/pull/4465)] - * core/token: Refactor token revocation logic to not block on the call when - underlying leases are pending revocation by moving the expiration logic to - the expiration manager [[GH-4512](https://github.com/hashicorp/vault/pull/4512)] - * expiration: Allow revoke-prefix and revoke-force to work on single leases as - well as prefixes [[GH-4450](https://github.com/hashicorp/vault/pull/4450)] - * identity: Return parent group info when reading a group [[GH-4648](https://github.com/hashicorp/vault/pull/4648)] - * identity: Provide more contextual key information when listing entities, - groups, and aliases - * identity: Passthrough EntityID to backends [[GH-4663](https://github.com/hashicorp/vault/pull/4663)] - * identity: Adds ability to request entity information through system view - [[GH-4681](https://github.com/hashicorp/vault/pull/4681)] - * secret/pki: Add custom extended key usages [[GH-4667](https://github.com/hashicorp/vault/pull/4667)] - * secret/pki: Add custom PKIX serial numbers [[GH-4694](https://github.com/hashicorp/vault/pull/4694)] - * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode - [[GH-4673](https://github.com/hashicorp/vault/pull/4673)] - * storage/file: Attempt in some error conditions to do more cleanup [[GH-4684](https://github.com/hashicorp/vault/pull/4684)] - * ui: wrapping lookup now displays the path [[GH-4644](https://github.com/hashicorp/vault/pull/4644)] - * ui: Identity interface now has more inline actions to make editing and adding - aliases to an entity or group easier [[GH-4502](https://github.com/hashicorp/vault/pull/4502)] - * ui: Identity interface now lists groups by name [[GH-4655](https://github.com/hashicorp/vault/pull/4655)] - * ui: Permission denied errors still render the sidebar in the Access section - [[GH-4658](https://github.com/hashicorp/vault/pull/4658)] - * replication: Improve performance of index page flushes and WAL garbage - collecting - -BUG FIXES: - - * auth/approle: Make invalid role_id a 400 error instead of 500 [[GH-4470](https://github.com/hashicorp/vault/pull/4470)] - * auth/cert: Fix Identity alias using serial number instead of common name - [[GH-4475](https://github.com/hashicorp/vault/pull/4475)] - * cli: Fix panic running `vault token capabilities` with multiple paths - [[GH-4552](https://github.com/hashicorp/vault/pull/4552)] - * core: When using the `use_always` option with PROXY protocol support, do not - require `authorized_addrs` to be set 
[[GH-4065](https://github.com/hashicorp/vault/pull/4065)] - * core: Fix panic when certain combinations of policy paths and allowed/denied - parameters were used [[GH-4582](https://github.com/hashicorp/vault/pull/4582)] - * secret/gcp: Make `bound_region` able to use short names - * secret/kv: Fix response wrapping for KV v2 [[GH-4511](https://github.com/hashicorp/vault/pull/4511)] - * secret/kv: Fix address flag not being honored correctly [[GH-4617](https://github.com/hashicorp/vault/pull/4617)] - * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative, - clearing all certs [[GH-4641](https://github.com/hashicorp/vault/pull/4641)] - * secret/pki: Fix `key_type` not being allowed to be set to `any` [[GH-4595](https://github.com/hashicorp/vault/pull/4595)] - * secret/pki: Fix path length parameter being ignored when using - `use_csr_values` and signing an intermediate CA cert [[GH-4459](https://github.com/hashicorp/vault/pull/4459)] - * secret/ssh: Only append UserKnownHostsFile to args when configured with a - value [[GH-4674](https://github.com/hashicorp/vault/pull/4674)] - * storage/dynamodb: Fix listing when one child is left within a nested path - [[GH-4570](https://github.com/hashicorp/vault/pull/4570)] - * storage/gcs: Fix swallowing an error on connection close [[GH-4691](https://github.com/hashicorp/vault/pull/4691)] - * ui: Fix HMAC algorithm in transit [[GH-4604](https://github.com/hashicorp/vault/pull/4604)] - * ui: Fix unwrap of auth responses via the UI's unwrap tool [[GH-4611](https://github.com/hashicorp/vault/pull/4611)] - * ui (enterprise): Fix parsing of version string that blocked some users from seeing - enterprise-specific pages in the UI [[GH-4547](https://github.com/hashicorp/vault/pull/4547)] - * ui: Fix incorrect capabilities path check when viewing policies [[GH-4566](https://github.com/hashicorp/vault/pull/4566)] - * replication: Fix error while running plugins on a newly created replication - secondary - * replication: Fix issue with token store lookups after a secondary's mount table - is invalidated. - * replication: Improve startup time when a large merkle index is in use. - * replication: Fix panic when storage becomes unreachable during unseal. - -## 0.10.1/0.9.7 (April 25th, 2018) - -The following two items are in both 0.9.7 and 0.10.1. They only affect -Enterprise, and as such 0.9.7 is an Enterprise-only release: - -SECURITY: - - * EGPs: A regression affecting 0.9.6 and 0.10.0 causes EGPs to not be applied - correctly if an EGP is updated in a running Vault after initial write or - after it is loaded on unseal. This has been fixed. +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] BUG FIXES: - * Fixed an upgrade issue affecting performance secondaries when migrating from - a version that did not include Identity to one that did. - -All other content in this release is for 0.10.1 only. +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silently failing when replication setup happens at a bad time. 
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18208](https://github.com/hashicorp/vault/pull/18208)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] -DEPRECATIONS/CHANGES: +## 1.11.7 +### February 6, 2023 - * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against - v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server - and CLI versions is required. - * Mount information visibility: Users that have access to any path within a - mount can now see information about that mount, such as its type and - options, via some API calls. - * Identity and Local Mounts: Local mounts would allow creating Identity - entities but these would not be able to be used successfully (even locally) - in replicated scenarios. We have now disallowed entities and groups from - being created for local mounts in the first place. - -FEATURES: +CHANGES: - * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the - client IP seen by Vault. See the [TCP listener configuration - page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for - details. - * CIDR IP Binding for Tokens: Tokens now support being bound to specific - CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be - expanded to other authentication backends over time. - * `vault kv patch` command: A new `kv patch` helper command that allows - modifying only some values in existing data at a K/V path, but uses - check-and-set to ensure that this modification happens safely. - * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs - local to the cluster. This enables performance secondaries to generate and - consume secret IDs without contacting the primary. - * AES-GCM Support for PKCS#11 [BETA] (Enterprise): For supporting HSMs, - AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently - only been fully tested on AWS CloudHSM. - * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal - mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, - and migration between key and encryption types, such as from AES-CBC to - AES-GCM, can be performed at the same time (where supported). +* core: Bump Go version to 1.19.4. IMPROVEMENTS: - * auth/approle: Support for cluster local secret IDs. 
This enables secondaries - to generate secret IDs without contacting the primary [[GH-4427](https://github.com/hashicorp/vault/pull/4427)] - * auth/token: Add to the token lookup response, the policies inherited due to - identity associations [[GH-4366](https://github.com/hashicorp/vault/pull/4366)] - * auth/token: Add CIDR binding to token roles [[GH-815](https://github.com/hashicorp/vault/pull/815)] - * cli: Add `vault kv patch` [[GH-4432](https://github.com/hashicorp/vault/pull/4432)] - * core: Add X-Forwarded-For support [[GH-4380](https://github.com/hashicorp/vault/pull/4380)] - * core: Add token CIDR-binding support [[GH-815](https://github.com/hashicorp/vault/pull/815)] - * identity: Add the ability to disable an entity. Disabling an entity does not - revoke associated tokens, but while the entity is disabled they cannot be - used. [[GH-4353](https://github.com/hashicorp/vault/pull/4353)] - * physical/consul: Allow tuning of session TTL and lock wait time [[GH-4352](https://github.com/hashicorp/vault/pull/4352)] - * replication: Dynamically adjust WAL cleanup over a period of time based on - the rate of writes committed - * secret/ssh: Update dynamic key install script to use shell locking to avoid - concurrent modifications [[GH-4358](https://github.com/hashicorp/vault/pull/4358)] - * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of - engines will show you the ones you implicitly have access to (because you have - access to secrets in those engines) [[GH-4439](https://github.com/hashicorp/vault/pull/4439)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
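A minimal sketch of the `vault kv patch` helper referenced in the 0.10.1 improvements above (GH-4432), assuming a KV v2 mount at `secret/`; names and values are illustrative:

```
# Seed a secret with two fields
$ vault kv put secret/creds user=app password=first

# Patch one field; fields not mentioned are preserved, with
# check-and-set applied under the hood to keep the write safe
$ vault kv patch secret/creds password=second
```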
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts - [[GH-4430](https://github.com/hashicorp/vault/pull/4430)] - * identity: Persist entity memberships in external identity groups across - mounts [[GH-4365](https://github.com/hashicorp/vault/pull/4365)] - * identity: Fix error preventing authentication using local mounts on - performance secondary replication clusters [[GH-4407](https://github.com/hashicorp/vault/pull/4407)] - * replication: Fix issue causing secondaries to not connect properly to a - pre-0.10 primary until the primary was upgraded - * secret/gcp: Fix panic on rollback when a roleset wasn't created properly - [[GH-4344](https://github.com/hashicorp/vault/pull/4344)] - * secret/gcp: Fix panic on renewal - * ui: Fix IE11 form submissions in a few parts of the application [[GH-4378](https://github.com/hashicorp/vault/pull/4378)] - * ui: Fix IE file saving on policy pages and init screens [[GH-4376](https://github.com/hashicorp/vault/pull/4376)] - * ui: Fixed an issue where the AWS secret backend would show the wrong menu - [[GH-4371](https://github.com/hashicorp/vault/pull/4371)] - * ui: Fixed an issue where policies with commas would not render in the - interface properly [[GH-4398](https://github.com/hashicorp/vault/pull/4398)] - * ui: Corrected the saving of mount tune ttls for auth methods [[GH-4431](https://github.com/hashicorp/vault/pull/4431)] - * ui: Credentials generation no longer checks capabilities before making - api calls. This should fix needing "update" capabilities to read IAM - credentials in the AWS secrets engine [[GH-4446](https://github.com/hashicorp/vault/pull/4446)] - -## 0.10.0 (April 10th, 2018) - -SECURITY: - - * Log sanitization for Combined Database Secret Engine: In certain failure - scenarios with incorrectly formatted connection urls, the raw connection - errors were being returned to the user with the configured database - credentials. Errors are now sanitized before being returned to the user. - -DEPRECATIONS/CHANGES: - - * Database plugin compatibility: The database plugin interface was enhanced to - support some additional functionality related to root credential rotation - and supporting templated URL strings. The changes were made in a - backwards-compatible way and all builtin plugins were updated with the new - features. Custom plugins not built into Vault will need to be upgraded to - support templated URL strings and root rotation. Additionally, the - Initialize method was deprecated in favor of a new Init method that supports - configuration modifications that occur in the plugin back to the primary - data store. - * Removal of returned secret information: For a long time Vault has returned - configuration given to various secret engines and auth methods with secret - values (such as secret API keys or passwords) still intact, and with a - warning to the user on write that anyone with read access could see the - secret. This was mostly done to make it easy for tools like Terraform to - judge whether state had drifted. 
However, it also feels quite un-Vault-y to - do this and we've never felt very comfortable doing so. In 0.10 we have gone - through and removed this behavior from the various backends; fields which - contained secret values are simply no longer returned on read. We are - working with the Terraform team to make changes to their provider to - accommodate this as best as possible, and users of other tools may have to - make adjustments, but in the end we felt that the ends did not justify the - means and we needed to prioritize security over operational convenience. - * LDAP auth method case sensitivity: We now treat usernames and groups - configured locally for policy assignment in a case insensitive fashion by - default. Existing configurations will continue to work as they do now; - however, the next time a configuration is written `case_sensitive_names` - will need to be explicitly set to `true`. - * TTL handling within core: All lease TTL handling has been centralized within - the core of Vault to ensure consistency across all backends. Since this was - previously delegated to individual backends, there may be some slight - differences in TTLs generated from some backends. - * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` - by default at initialization time (it will still be available in `dev` - mode). - -FEATURES: +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. 
[[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some - features are only available with a supporting version of Vault, but the code - base is entirely open. - * Versioned K/V: The `kv` backend has been completely revamped, featuring - flexible versioning of values, check-and-set protections, and more. A new - `vault kv` subcommand allows friendly interactions with it. Existing mounts - of the `kv` backend can be upgraded to the new versioned mode (downgrades - are not currently supported). The old "passthrough" mode is still the - default for new mounts; versioning can be turned on by setting the - `-version=2` flag for the `vault secrets enable` command. - * Database Root Credential Rotation: Database configurations can now rotate - their own configured admin/root credentials, allowing configured credentials - for a database connection to be rotated immediately after sending them into - Vault, invalidating the old credentials and ensuring only Vault knows the - actual valid values. - * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that - allows authenticating Azure machines to Vault using Azure's Managed Service - Identity credentials. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more - information. - * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows - generating secrets to allow access to GCP. See the [plugin - repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more - information. - * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit - logs can be turned off for specific keys in the request input map and - response `data` map on a per-mount basis. - * Passthrough Request Headers: Request headers can now be selectively passed - through to backends on a per-mount basis. This is useful in various cases - when plugins are interacting with external services. - * HA for Google Cloud Storage: The GCS storage type now supports HA. - * UI support for identity: Add and edit entities, groups, and their associated - aliases. - * UI auth method support: Enable, disable, and configure all of the built-in - authentication methods. - * UI (Enterprise): View and edit Sentinel policies. 
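A brief sketch of the Versioned K/V feature listed above, together with the `kv rollback` helper from 0.10.3 (mount path and values illustrative):

```
# Mount a kv engine in versioned mode
$ vault secrets enable -version=2 -path=app kv

# Each write creates a new version; -cas enforces check-and-set
$ vault kv put app/db password=first
$ vault kv put -cas=1 app/db password=second

# Roll a path back; version 1 becomes the newest version
$ vault kv rollback -version=1 app/db
```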
+## 1.11.6 +### November 30, 2022 IMPROVEMENTS: - * core: Centralize TTL generation for leases in core [[GH-4230](https://github.com/hashicorp/vault/pull/4230)] - * identity: API to update group-alias by ID [[GH-4237](https://github.com/hashicorp/vault/pull/4237)] - * secret/cassandra: Update Cassandra storage delete function to not use batch - operations [[GH-4054](https://github.com/hashicorp/vault/pull/4054)] - * storage/mysql: Allow setting max idle connections and connection lifetime - [[GH-4211](https://github.com/hashicorp/vault/pull/4211)] - * storage/gcs: Add HA support [[GH-4226](https://github.com/hashicorp/vault/pull/4226)] - * ui: Add Nomad to the list of available secret engines - * ui: Adds ability to set static headers to be returned by the UI +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] BUG FIXES: - * api: Fix retries not working [[GH-4322](https://github.com/hashicorp/vault/pull/4322)] - * auth/gcp: Invalidate clients on config change - * auth/token: Revoke-orphan and tidy operations now correctly clean up the - parent prefix entry in the underlying storage backend. These operations also - mark corresponding child tokens as orphans by removing the parent/secondary - index from the entries. [[GH-4193](https://github.com/hashicorp/vault/pull/4193)] - * command: Re-add `-mfa` flag and migrate to OSS binary [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] - * core: Fix issue occurring from mounting two auth backends with the same path - with one mount having `auth/` in front [[GH-4206](https://github.com/hashicorp/vault/pull/4206)] - * mfa: Invalidation of MFA configurations (Enterprise) - * replication: Fix a panic on some non-64-bit platforms - * replication: Fix invalidation of policies on performance secondaries - * secret/pki: When tidying if a value is unexpectedly nil, delete it and move - on [[GH-4214](https://github.com/hashicorp/vault/pull/4214)] - * storage/s3: Fix panic if S3 returns no Content-Length header [[GH-4222](https://github.com/hashicorp/vault/pull/4222)] - * ui: Fixed an issue where the UI was checking incorrect paths when operating - on transit keys. Capabilities are now checked when attempting to encrypt / - decrypt, etc. - * ui: Fixed IE 11 layout issues and JS errors that would stop the application - from running. - * ui: Fixed the link that gets rendered when a user doesn't have permissions - to view the root of a secret engine. The link now sends them back to the list - of secret engines. - * replication: Fix issue with DR secondaries when using mount specified local - paths. - * cli: Fix an issue where generating a dr operation token would not output the - token [[GH-4328](https://github.com/hashicorp/vault/pull/4328)] - -## 0.9.6 (March 20th, 2018) - -DEPRECATIONS/CHANGES: - - * The AWS authentication backend now allows binds for inputs as either a - comma-delimited string or a string array. However, to keep consistency with - input and output, when reading a role the binds will now be returned as - string arrays rather than strings. - * In order to prefix-match IAM role and instance profile ARNs in AWS auth - backend, you now must explicitly opt-in by adding a `*` to the end of the - ARN. Existing configurations will be upgraded automatically, but when - writing a new role configuration the updated behavior will be used. 
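A sketch of the explicit wildcard opt-in described in the bullet above; the account ID, role names, and policies are illustrative:

```
# Exact-match binding: only this principal may log in
$ vault write auth/aws/role/deploy \
    auth_type=iam \
    bound_iam_principal_arn="arn:aws:iam::123456789012:role/deploy" \
    policies="deploy"

# Prefix matching now requires an explicit trailing wildcard
$ vault write auth/aws/role/deploy-any \
    auth_type=iam \
    bound_iam_principal_arn="arn:aws:iam::123456789012:role/deploy-*" \
    policies="deploy"
```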
- -FEATURES: - - * Replication Activation Enhancements: When activating a replication - secondary, a public key can now be fetched first from the target cluster. - This public key can be provided to the primary when requesting the - activation token. If provided, the public key will be used to perform a - Diffie-Hellman key exchange resulting in a shared key that encrypts the - contents of the activation token. The purpose is to protect against - accidental disclosure of the contents of the token if unwrapped by the wrong - party, given that the contents of the token are highly sensitive. If - accidentally unwrapped, the contents of the token are not usable by the - unwrapping party. It is important to note that just as a malicious operator - could unwrap the contents of the token, a malicious operator can pretend to - be a secondary and complete the Diffie-Hellman exchange on their own; this - feature provides defense in depth but still requires due diligence around - replication activation, including multiple eyes on the commands/tokens and - proper auditing. - -IMPROVEMENTS: - - * api: Update renewer grace period logic. It is no longer static, but rather - dynamically calculates one based on the current lease duration after each - renew. [[GH-4090](https://github.com/hashicorp/vault/pull/4090)] - * auth/approle: Allow array input for bound_cidr_list [[GH-4078](https://github.com/hashicorp/vault/pull/4078)] - * auth/aws: Allow using lists in role bind parameters [[GH-3907](https://github.com/hashicorp/vault/pull/3907)] - * auth/aws: Allow binding by EC2 instance IDs [[GH-3816](https://github.com/hashicorp/vault/pull/3816)] - * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs - [[GH-4071](https://github.com/hashicorp/vault/pull/4071)] - * auth/ldap: Set a very large size limit on queries [[GH-4169](https://github.com/hashicorp/vault/pull/4169)] - * core: Log info notifications of revoked leases for all leases/reasons, not - just expirations [[GH-4164](https://github.com/hashicorp/vault/pull/4164)] - * physical/couchdb: Removed limit on the listing of items [[GH-4149](https://github.com/hashicorp/vault/pull/4149)] - * secret/pki: Support certificate policies [[GH-4125](https://github.com/hashicorp/vault/pull/4125)] - * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to - improve compatibility with some ADFS scenarios [[GH-3883](https://github.com/hashicorp/vault/pull/3883)] - * secret/transit: Allow selecting signature algorithm as well as hash - algorithm when signing/verifying [[GH-4018](https://github.com/hashicorp/vault/pull/4018)] - * server: Make sure `tls_disable_client_cert` is actually a true value rather - than just set [[GH-4049](https://github.com/hashicorp/vault/pull/4049)] - * storage/dynamodb: Allow specifying max retries for dynamo client [[GH-4115](https://github.com/hashicorp/vault/pull/4115)] - * storage/gcs: Allow specifying chunk size for transfers, which can reduce - memory utilization [[GH-4060](https://github.com/hashicorp/vault/pull/4060)] - * sys/capabilities: Add the ability to use multiple paths for capability - checking [[GH-3663](https://github.com/hashicorp/vault/pull/3663)] - -BUG FIXES: +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. 
[[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also - set [[GH-4107](https://github.com/hashicorp/vault/pull/4107)] - * auth/okta: Fix honoring configured `max_ttl` value [[GH-4110](https://github.com/hashicorp/vault/pull/4110)] - * auth/token: If a periodic token being issued has a period greater than the - max_lease_ttl configured on the token store mount, truncate it. This matches - renewal behavior; before it was inconsistent between issuance and renewal. - [[GH-4112](https://github.com/hashicorp/vault/pull/4112)] - * cli: Improve error messages around `vault auth help` when there is no CLI - helper for a particular method [[GH-4056](https://github.com/hashicorp/vault/pull/4056)] - * cli: Fix autocomplete installation when using Fish as the shell [[GH-4094](https://github.com/hashicorp/vault/pull/4094)] - * secret/database: Properly honor mount-tuned max TTL [[GH-4051](https://github.com/hashicorp/vault/pull/4051)] - * secret/ssh: Return `key_bits` value when reading a role [[GH-4098](https://github.com/hashicorp/vault/pull/4098)] - * sys: When writing policies on a performance replication secondary, properly - forward requests to the primary [[GH-4129](https://github.com/hashicorp/vault/pull/4129)] - -## 0.9.5 (February 26th, 2018) +## 1.11.5 +### November 2, 2022 IMPROVEMENTS: - * auth: Allow sending default_lease_ttl and max_lease_ttl values when enabling - auth methods. [[GH-4019](https://github.com/hashicorp/vault/pull/4019)] - * secret/database: Add list functionality to `database/config` endpoint - [[GH-4026](https://github.com/hashicorp/vault/pull/4026)] - * physical/consul: Allow setting a specific service address [[GH-3971](https://github.com/hashicorp/vault/pull/3971)] - * replication: When bootstrapping a new secondary, if the initial cluster - connection fails, Vault will attempt to roll back state so that - bootstrapping can be tried again, rather than having to recreate the - downstream cluster. This will still require fetching a new secondary - activation token. 
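For the GH-4019 entry above (sending lease TTLs when enabling an auth method), a minimal sketch; the method and values are illustrative and assume the CLI's standard `-default-lease-ttl`/`-max-lease-ttl` flags:

```
# Set lease TTLs at enable time instead of tuning afterwards
$ vault auth enable \
    -default-lease-ttl=30m \
    -max-lease-ttl=12h \
    ldap
```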
+* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] BUG FIXES: - * auth/aws: Update libraries to fix regression verifying PKCS#7 identity - documents [[GH-4014](https://github.com/hashicorp/vault/pull/4014)] - * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names - in their DNS SANs to be used for Vault's TLS connections [[GH-4028](https://github.com/hashicorp/vault/pull/4028)] - * replication: Fix issue with a performance secondary/DR primary node losing - its DR primary status when performing an update-primary operation - * replication: Fix issue where performance secondaries could be unable to - automatically connect to a performance primary after that performance - primary has been promoted to a DR primary from a DR secondary - * ui: Fix behavior when a value contains a `.` - -## 0.9.4 (February 20th, 2018) - -SECURITY: - - * Role Tags used with the EC2 style of AWS auth were being improperly parsed; - as a result they were not being used to properly restrict values. - Implementations following our suggestion of using these as defense-in-depth - rather than the only source of restriction should not have significant - impact. - -FEATURES: - - * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt - with ChaCha20-Poly1305 in `transit`. Key derivation and convergent - encryption is also supported. - * **Okta Push support in Okta Auth Backend**: If a user account has MFA - required within Okta, an Okta Push MFA flow can be used to successfully - finish authentication. - * **PKI Improvements**: Custom OID subject alternate names can now be set, - subject to allow restrictions that support globbing. Additionally, Country, - Locality, Province, Street Address, and Postal Code can now be set in - certificate subjects. - * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage - * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for - Vault storage - -IMPROVEMENTS: - - * auth/centrify: Add CLI helper - * audit: Always log failure metrics, even if zero, to ensure the values appear - on dashboards [[GH-3937](https://github.com/hashicorp/vault/pull/3937)] - * cli: Disable color when output is not a TTY [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * cli: Add `-format` flag to all subcommands [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * cli: Do not display deprecation warnings when the format is not table - [[GH-3897](https://github.com/hashicorp/vault/pull/3897)] - * core: If over a predefined lease count (256k), log a warning not more than - once a minute. Too many leases can be problematic for many of the storage - backends and often this number of leases is indicative of a need for - workflow improvements. 
[[GH-3957](https://github.com/hashicorp/vault/pull/3957)] - * secret/nomad: Have generated ACL tokens cap out at 64 characters [[GH-4009](https://github.com/hashicorp/vault/pull/4009)] - * secret/pki: Country, Locality, Province, Street Address, and Postal Code can - now be set on certificates [[GH-3992](https://github.com/hashicorp/vault/pull/3992)] - * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in - issued certs; allowed values can be set per role and support globbing - [[GH-3889](https://github.com/hashicorp/vault/pull/3889)] - * secret/pki: Add a flag to make the common name optional on certs [[GH-3940](https://github.com/hashicorp/vault/pull/3940)] - * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally, - properly handle IDNA transformations for these DNS names [[GH-3953](https://github.com/hashicorp/vault/pull/3953)] - * secret/ssh: Add `valid-principles` flag to CLI for CA mode [[GH-3922](https://github.com/hashicorp/vault/pull/3922)] - * storage/manta: Add Manta storage [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] - * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine. +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -BUG FIXES: - * api/renewer: Honor increment value in renew auth calls [[GH-3904](https://github.com/hashicorp/vault/pull/3904)] - * auth/approle: Fix inability to use limited-use-count secret IDs on - replication performance secondaries - * auth/approle: Cleanup of secret ID accessors during tidy and removal of - dangling accessor entries [[GH-3924](https://github.com/hashicorp/vault/pull/3924)] - * auth/aws-ec2: Avoid masking of role tag response [[GH-3941](https://github.com/hashicorp/vault/pull/3941)] - * auth/cert: Verify DNS SANs in the authenticating certificate [[GH-3982](https://github.com/hashicorp/vault/pull/3982)] - * auth/okta: Return configured durations as seconds, not nanoseconds [[GH-3871](https://github.com/hashicorp/vault/pull/3871)] - * auth/okta: Get all okta groups for a user vs. 
default 200 limit [[GH-4034](https://github.com/hashicorp/vault/pull/4034)] - * auth/token: Token creation via the CLI no longer forces periodic token - creation. Passing an explicit zero value for the period no longer creates - periodic tokens. [[GH-3880](https://github.com/hashicorp/vault/pull/3880)] - * command: Fix interpreted formatting directives when printing raw fields - [[GH-4005](https://github.com/hashicorp/vault/pull/4005)] - * command: Correctly format output when using -field and -format flags at the - same time [[GH-3987](https://github.com/hashicorp/vault/pull/3987)] - * command/rekey: Re-add lost `stored-shares` parameter [[GH-3974](https://github.com/hashicorp/vault/pull/3974)] - * command/ssh: Create and reuse the api client [[GH-3909](https://github.com/hashicorp/vault/pull/3909)] - * command/status: Fix panic when status returns 500 from leadership lookup - [[GH-3998](https://github.com/hashicorp/vault/pull/3998)] - * identity: Fix race when creating entities [[GH-3932](https://github.com/hashicorp/vault/pull/3932)] - * plugin/gRPC: Fixed an issue with list requests and raw responses coming from - plugins using gRPC transport [[GH-3881](https://github.com/hashicorp/vault/pull/3881)] - * plugin/gRPC: Fix panic when special paths are not set [[GH-3946](https://github.com/hashicorp/vault/pull/3946)] - * secret/pki: Verify a name is a valid hostname before adding to DNS SANs - [[GH-3918](https://github.com/hashicorp/vault/pull/3918)] - * secret/transit: Fix auditing when reading a key after it has been backed up - or restored [[GH-3919](https://github.com/hashicorp/vault/pull/3919)] - * secret/transit: Fix storage/memory consistency when persistence fails - [[GH-3959](https://github.com/hashicorp/vault/pull/3959)] - * storage/consul: Validate that service names are RFC 1123 compliant [[GH-3960](https://github.com/hashicorp/vault/pull/3960)] - * storage/etcd3: Fix memory ballooning with standby instances [[GH-3798](https://github.com/hashicorp/vault/pull/3798)] - * storage/etcd3: Fix large lists (like token loading at startup) not being - handled [[GH-3772](https://github.com/hashicorp/vault/pull/3772)] - * storage/postgresql: Fix compatibility with versions using custom string - version tags [[GH-3949](https://github.com/hashicorp/vault/pull/3949)] - * storage/zookeeper: Update vendoring to fix freezing issues [[GH-3896](https://github.com/hashicorp/vault/pull/3896)] - * ui (Enterprise): Decoding the replication token should no longer error and - prevent enabling of a secondary replication cluster via the ui. - * plugin/gRPC: Add connection info to the request object [[GH-3997](https://github.com/hashicorp/vault/pull/3997)] - -## 0.9.3 (January 28th, 2018) - -A regression from a feature merge disabled the Nomad secrets backend in 0.9.2. -This release re-enables the Nomad secrets backend; it is otherwise identical to -0.9.2. - -## 0.9.2 (January 26th, 2018) +## 1.11.4 +### September 30, 2022 SECURITY: - * Okta Auth Backend: While the Okta auth backend was successfully verifying - usernames and passwords, it was not checking the returned state of the - account, so accounts that had been marked locked out could still be used to - log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed. - * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by - the AppRole, AWS, and Cert auth backends would expire when the max TTL for - the backend/mount/system was hit instead of their stated behavior of living - as long as they are renewed. 
This is now fixed; existing tokens do not have - to be reissued as this was purely a regression in the renewal logic. - * Seal Wrapping: During certain replication states, values written and marked for - seal wrapping may not be wrapped on the secondaries. This has been fixed, - and existing values will be wrapped on next read or write. This does not - affect the barrier keys. - -DEPRECATIONS/CHANGES: - - * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool - returned by `sys/health` could be misleading since it would be `false` both - when a cluster was not a DR secondary and when the node is a standby in - the cluster and has not yet fully received state from the active node. This - could cause health checks on LBs to decide that the node was acceptable for - traffic even though DR secondaries cannot handle normal Vault traffic. (In - other words, the bool could only convey "yes" or "no" but not "not sure - yet".) This has been replaced by `replication_dr_mode` and - `replication_perf_mode` which are string values that convey the current - state of the node; a value of `disabled` indicates that replication is - disabled or the state is still being discovered. As a result, an LB check - can positively verify that the node is both not `disabled` and is not a DR - secondary, and avoid sending traffic to it if either is true. - * PKI Secret Backend Roles parameter types: For `ou` and `organization` - in role definitions in the PKI secret backend, input can now be a - comma-separated string or an array of strings. Reading a role will - now return arrays for these parameters. - * Plugin API Changes: The plugin API has been updated to utilize golang's - context.Context package. Many function signatures now accept a context - object as the first parameter. Existing plugins will need to pull in the - latest Vault code and update their function signatures to begin using - context and the new gRPC transport. - -FEATURES: - - * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport, - allowing them to be written in other languages. - * **Brand New CLI**: Vault has a brand new CLI interface that is significantly - streamlined, supports autocomplete, and is almost entirely backwards - compatible. - * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends, - create and browse roles and certificates, and issue and sign certificates via - the listed roles. - -IMPROVEMENTS: - - * auth/aws: Handle IAM headers produced by clients that formulate numbers as - ints rather than strings [[GH-3763](https://github.com/hashicorp/vault/pull/3763)] - * auth/okta: Support JSON lists when specifying groups and policies [[GH-3801](https://github.com/hashicorp/vault/pull/3801)] - * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues, - including HA scenarios for some Gemalto HSMs.
- (Enterprise) - * cli: Output password prompts to stderr to make it easier to pipe an output - token to another command [[GH-3782](https://github.com/hashicorp/vault/pull/3782)] - * core: Report replication status in `sys/health` [[GH-3810](https://github.com/hashicorp/vault/pull/3810)] - * physical/s3: Allow using paths with S3 for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] - * physical/s3: Add ability to disable SSL for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)] - * plugins: Args for plugins can now be specified separately from the command, - allowing the same output format and input format for plugin information - [[GH-3778](https://github.com/hashicorp/vault/pull/3778)] - * secret/pki: `ou` and `organization` can now be specified as a - comma-separated string or an array of strings [[GH-3804](https://github.com/hashicorp/vault/pull/3804)] - * plugins: Plugins will fall back to using netrpc as the communication protocol - on older versions of Vault [[GH-3833](https://github.com/hashicorp/vault/pull/3833)] - -BUG FIXES: - - * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by - these backends could not have their TTL renewed beyond the system/mount max - TTL value [[GH-3803](https://github.com/hashicorp/vault/pull/3803)] - * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an - existing role update [[GH-3843](https://github.com/hashicorp/vault/pull/3843)] - * core/sealwrap: Speed improvements and bug fixes (Enterprise) - * identity: Delete group alias when an external group is deleted [[GH-3773](https://github.com/hashicorp/vault/pull/3773)] - * legacymfa/duo: Fix intermittent panic when Duo could not be reached - [[GH-2030](https://github.com/hashicorp/vault/pull/2030)] - * secret/database: Fix a location where a lock could potentially not be - released, leading to deadlock [[GH-3774](https://github.com/hashicorp/vault/pull/3774)] - * secret/(all databases) Fix behavior where if a max TTL was specified but no - default TTL was specified the system/mount default TTL would be used but not - be capped by the local max TTL [[GH-3814](https://github.com/hashicorp/vault/pull/3814)] - * secret/database: Fix an issue where plugins were not closed properly if they - failed to initialize [[GH-3768](https://github.com/hashicorp/vault/pull/3768)] - * ui: mounting a secret backend will now properly set `max_lease_ttl` and - `default_lease_ttl` when specified - previously both fields set - `default_lease_ttl`. - -## 0.9.1 (December 21st, 2017) - -DEPRECATIONS/CHANGES: - - * AppRole Case Sensitivity: In prior versions of Vault, `list` operations - against AppRole roles would require preserving case in the role name, even - though most other operations within AppRole are case-insensitive with - respect to the role name. This has been fixed; existing roles will behave as - they have in the past, but new roles will act case-insensitively in these - cases. - * Token Auth Backend Roles parameter types: For `allowed_policies` and - `disallowed_policies` in role definitions in the token auth backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * Transit key exporting: You can now mark a key in the `transit` backend as - `exportable` at any time, rather than just at creation time; however, once - this value is set, it still cannot be unset. 
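As an illustration of the `exportable` change above, a minimal sketch using the Go API client (`github.com/hashicorp/vault/api`); the mount path `transit` and key name `my-key` are hypothetical:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment,
	// and that a transit mount at "transit/" holds a key named "my-key".
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mark the existing key as exportable after creation; as noted in the
	// entry above, once this value is set it cannot be unset.
	if _, err := client.Logical().Write("transit/keys/my-key/config", map[string]interface{}{
		"exportable": true,
	}); err != nil {
		log.Fatal(err)
	}
}
```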
- * PKI Secret Backend Roles parameter types: For `allowed_domains` and - `key_usage` in role definitions in the PKI secret backend, input - can now be a comma-separated string or an array of strings. Reading a role - will now return arrays for these parameters. - * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic - key method in the SSH backend, the default is now to use 2048-bit keys if no - specific key bit size is specified. - * Consul Secret Backend lease handling: The `consul` secret backend can now - accept both strings and integer numbers of seconds for its lease value. The - value returned on a role read will be an integer number of seconds instead - of a human-friendly string. - * Unprintable characters not allowed in API paths: Unprintable characters are - no longer allowed in names in the API (paths and path parameters), with an - extra restriction on whitespace characters. Allowed characters are those - that are considered printable by Unicode plus spaces. - -FEATURES: - - * **Transit Backup/Restore**: The `transit` backend now supports a backup - operation that can export a given key, including all key versions and - configuration, as well as a restore operation allowing import into another - Vault. - * **gRPC Database Plugins**: Database plugins now use gRPC for transport, - allowing them to be written in other languages. - * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked - using Vault. - * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now - match against custom certificate extensions via exact or glob matching, and - additionally supports max_ttl and periodic token toggles. +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked if the CRL had not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] IMPROVEMENTS: - * auth/cert: Support custom certificate constraints [[GH-3634](https://github.com/hashicorp/vault/pull/3634)] - * auth/cert: Support setting `max_ttl` and `period` [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] - * audit/file: Setting a file mode of `0000` will now prevent Vault from - automatically `chmod`ing the log file [[GH-3649](https://github.com/hashicorp/vault/pull/3649)] - * auth/github: The legacy MFA system can now be used with the GitHub auth - backend [[GH-3696](https://github.com/hashicorp/vault/pull/3696)] - * auth/okta: The legacy MFA system can now be used with the Okta auth backend - [[GH-3653](https://github.com/hashicorp/vault/pull/3653)] - * auth/token: `allowed_policies` and `disallowed_policies` can now be specified - as a comma-separated string or an array of strings [[GH-3641](https://github.com/hashicorp/vault/pull/3641)] - * command/server: The log level can now be specified with `VAULT_LOG_LEVEL` - [[GH-3721](https://github.com/hashicorp/vault/pull/3721)] - * core: Period values from auth backends will now be checked and applied to the - TTL value directly by core on login and renewal requests [[GH-3677](https://github.com/hashicorp/vault/pull/3677)] - * database/mongodb: Add optional `write_concern` parameter, which can be set - during database configuration.
This establishes a session-wide [write - concern](https://docs.mongodb.com/manual/reference/write-concern/) for the - lifecycle of the mount [[GH-3646](https://github.com/hashicorp/vault/pull/3646)] - * http: Request path containing non-printable characters will return 400 - Bad - Request [[GH-3697](https://github.com/hashicorp/vault/pull/3697)] - * mfa/okta: Filter a given email address as a login filter, allowing operation - when login email and account email are different - * plugins: Make Vault more resilient when unsealing when plugins are - unavailable [[GH-3686](https://github.com/hashicorp/vault/pull/3686)] - * secret/pki: `allowed_domains` and `key_usage` can now be specified - as a comma-separated string or an array of strings [[GH-3642](https://github.com/hashicorp/vault/pull/3642)] - * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [[GH-3593](https://github.com/hashicorp/vault/pull/3593)] - * secret/consul: The Consul secret backend now uses the value of `lease` set - on the role, if set, when renewing a secret. [[GH-3796](https://github.com/hashicorp/vault/pull/3796)] - * storage/mysql: Don't attempt database creation if it exists, which can help - under certain permissions constraints [[GH-3716](https://github.com/hashicorp/vault/pull/3716)] +* agent/auto-auth: Add `exit_on_err`, which, when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] +* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] BUG FIXES: - * api/status (enterprise): Fix status reporting when using an auto seal - * auth/approle: Fix case-sensitive/insensitive comparison issue [[GH-3665](https://github.com/hashicorp/vault/pull/3665)] - * auth/cert: Return `allowed_names` on role read [[GH-3654](https://github.com/hashicorp/vault/pull/3654)] - * auth/ldap: Fix incorrect control information being sent [[GH-3402](https://github.com/hashicorp/vault/pull/3402)] [[GH-3496](https://github.com/hashicorp/vault/pull/3496)] - [[GH-3625](https://github.com/hashicorp/vault/pull/3625)] [[GH-3656](https://github.com/hashicorp/vault/pull/3656)] - * core: Fix seal status reporting when using an autoseal - * core: Add creation path to wrap info for a control group token - * core: Fix potential panic that could occur using plugins when a node - transitioned from active to standby [[GH-3638](https://github.com/hashicorp/vault/pull/3638)] - * core: Fix memory ballooning when a connection would connect to the cluster - port and then go away -- redux!
[[GH-3680](https://github.com/hashicorp/vault/pull/3680)] - * core: Replace recursive token revocation logic with depth-first logic, which - can avoid hitting stack depth limits in extreme cases [[GH-2348](https://github.com/hashicorp/vault/pull/2348)] - * core: When doing a read on configured audited-headers, properly handle case - insensitivity [[GH-3701](https://github.com/hashicorp/vault/pull/3701)] - * core/pkcs11 (enterprise): Fix panic when PKCS#11 library is not readable - * database/mysql: Allow the creation statement to use commands that are not yet - supported by the prepare statement protocol [[GH-3619](https://github.com/hashicorp/vault/pull/3619)] - * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19] - -## 0.9.0.1 (November 21st, 2017) (Enterprise Only) - -IMPROVEMENTS: +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] - * auth/gcp: Support seal wrapping of configuration parameters - * auth/kubernetes: Support seal wrapping of configuration parameters +## 1.11.3 +### August 31, 2022 -BUG FIXES: +SECURITY: - * Fix an upgrade issue with some physical backends when migrating from legacy - HSM stored key support to the new Seal Wrap mechanism (Enterprise) - * mfa: Add the 'mfa' flag that was removed by mistake [[GH-4223](https://github.com/hashicorp/vault/pull/4223)] - -## 0.9.0 (November 14th, 2017) - -DEPRECATIONS/CHANGES: - - * HSM config parameter requirements: When using Vault with an HSM, a new - parameter is required: `hmac_key_label`. This performs a similar function to - `key_label` but for the HMAC key Vault will use. Vault will generate a - suitable key if this value is specified and `generate_key` is set to true. - * API HTTP client behavior: When calling `NewClient` the API no longer - modifies the provided client/transport. In particular this means it will no - longer enable redirection limiting and HTTP/2 support on custom clients. It - is suggested that if you want to make changes to an HTTP client that you use - one created by `DefaultConfig` as a starting point.
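A minimal sketch of the suggested `DefaultConfig` pattern with the Go API client; the timeout tweak is just an illustrative example, not a recommended value:

```go
package main

import (
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Start from DefaultConfig so the API's default transport settings
	// stay in place, then adjust what you need before calling NewClient,
	// since NewClient no longer modifies a provided client/transport.
	config := vault.DefaultConfig()
	config.HttpClient.Timeout = 30 * time.Second // example tweak

	client, err := vault.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual
}
```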
- * AWS EC2 client nonce behavior: The client nonce generated by the backend - that gets returned along with the authentication response will be audited in - plaintext. If this is undesired, the clients can choose to supply a custom - nonce to the login endpoint. The custom nonce set by the client will, from - now on, not be returned with the authentication response, and hence not be - audit logged. - * AWS Auth role options: The API will now error when trying to create or - update a role with the mutually-exclusive options - `disallow_reauthentication` and `allow_instance_migration`. - * SSH CA role read changes: When reading back a role from the `ssh` backend, - the TTL/max TTL values will now be an integer number of seconds rather than - a string. This better matches the API elsewhere in Vault. - * SSH role list changes: When listing roles from the `ssh` backend via the API, - the response data will additionally return a `key_info` map that will contain - a map of each key with a corresponding object containing the `key_type`. - * More granularity in audit logs: Audit request and response entries are still - in RFC3339 format but now have a granularity of nanoseconds. - * High availability related values have been moved out of the `storage` and - `ha_storage` stanzas, and into the top-level configuration. `redirect_addr` - has been renamed to `api_addr`. The stanzas still support accepting - HA-related values to maintain backward compatibility, but top-level values - will take precedence. - * A new `seal` stanza has been added to the configuration file, which is - optional and enables configuration of the seal type to use for additional - data protection, such as using HSM or Cloud KMS solutions to encrypt and - decrypt data. +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] -FEATURES: +CHANGES: - * **RSA Support for Transit Backend**: Transit backend can now generate RSA - keys which can be used for encryption and signing. [[GH-3489](https://github.com/hashicorp/vault/pull/3489)] - * **Identity System**: Now in open source and with significant enhancements, - Identity is an integrated system for understanding users across tokens and - enabling easier management of users directly and via groups. - * **External Groups in Identity**: Vault can now automatically assign users - and systems to groups in Identity based on their membership in external - groups. - * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take - advantage of FIPS 140-2-certified HSMs to ensure that Critical Security - Parameters are protected in a compliant fashion. Vault's implementation has - received a statement of compliance from Leidos. - * **Control Groups (Enterprise)**: Require multiple members of an Identity - group to authorize a requested action before it is allowed to run. - * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS - and GCP CKMS.
- * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel - to create extremely flexible access control policies -- even on - unauthenticated endpoints. - * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing - functionality, the `rekey` operation is now supported; it uses recovery keys - to authorize the master key rekey. - * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using - Disaster Recovery replication, a token can be created that can be used to - authorize actions such as promotion and updating primary information, rather - than using recovery keys. - * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using - auto-unsealing, a request to unseal Vault can be triggered by a threshold of - recovery keys, rather than requiring the Vault process to be restarted. - * **UI Redesign (Enterprise)**: An all-new experience for the Vault Enterprise - UI. The look and feel has been completely redesigned to give users a better - experience and make managing secrets fast and easy. - * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend, - create and browse roles, and use them to sign keys or generate one-time - passwords. - * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS - backend via the Vault Enterprise UI. In addition, you can create roles, - browse them, and generate IAM credentials from them in the UI. +* core: Bump Go version to 1.17.13. IMPROVEMENTS: - * api: Add ability to set custom headers on each call [[GH-3394](https://github.com/hashicorp/vault/pull/3394)] - * command/server: Add config option to disable requesting client certificates - [[GH-3373](https://github.com/hashicorp/vault/pull/3373)] - * auth/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] - * core: Disallow mounting underneath an existing path, not just over [[GH-2919](https://github.com/hashicorp/vault/pull/2919)] - * physical/file: Use `700` as permissions when creating directories. The files - themselves were `600` and are all encrypted, but this doesn't hurt.
- * secret/aws: Add ability to use custom IAM/STS endpoints [[GH-3416](https://github.com/hashicorp/vault/pull/3416)] - * secret/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)] - * secret/cassandra: Work around Cassandra ignoring consistency levels for a - user listing query [[GH-3469](https://github.com/hashicorp/vault/pull/3469)] - * secret/pki: Private keys can now be marshalled as PKCS#8 [[GH-3518](https://github.com/hashicorp/vault/pull/3518)] - * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON - arrays [[GH-3409](https://github.com/hashicorp/vault/pull/3409)] - * secret/ssh: Role TTL/max TTL can now be specified as either a string or an - integer [[GH-3507](https://github.com/hashicorp/vault/pull/3507)] - * secret/transit: Sign and verify operations now support a `none` hash - algorithm to allow signing/verifying pre-hashed data [[GH-3448](https://github.com/hashicorp/vault/pull/3448)] - * secret/database: Add the ability to glob allowed roles in the Database Backend [[GH-3387](https://github.com/hashicorp/vault/pull/3387)] - * ui (enterprise): Support for RSA keys in the transit backend - * ui (enterprise): Support for DR Operation Token generation, promoting, and - updating primary on DR Secondary clusters +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. 
[[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] BUG FIXES: - * api: Fix panic when setting a custom HTTP client but with a nil transport - [[GH-3435](https://github.com/hashicorp/vault/pull/3435)] [[GH-3437](https://github.com/hashicorp/vault/pull/3437)] - * api: Fix authing to the `cert` backend when the CA for the client cert is - not known to the server's listener [[GH-2946](https://github.com/hashicorp/vault/pull/2946)] - * auth/approle: Create role ID index during read if a role is missing one [[GH-3561](https://github.com/hashicorp/vault/pull/3561)] - * auth/aws: Don't allow mutually exclusive options [[GH-3291](https://github.com/hashicorp/vault/pull/3291)] - * auth/radius: Fix logging in in some situations [[GH-3461](https://github.com/hashicorp/vault/pull/3461)] - * core: Fix memleak when a connection would connect to the cluster port and - then go away [[GH-3513](https://github.com/hashicorp/vault/pull/3513)] - * core: Fix panic if a single-use token is used to step-down or seal [[GH-3497](https://github.com/hashicorp/vault/pull/3497)] - * core: Set rather than add headers to prevent some duplicated headers in - responses when requests were forwarded to the active node [[GH-3485](https://github.com/hashicorp/vault/pull/3485)] - * physical/etcd3: Fix some listing issues due to how etcd3 does prefix - matching [[GH-3406](https://github.com/hashicorp/vault/pull/3406)] - * physical/etcd3: Fix case where standbys can lose their etcd client lease - [[GH-3031](https://github.com/hashicorp/vault/pull/3031)] - * physical/file: Fix listing when underscores are the first component of a - path [[GH-3476](https://github.com/hashicorp/vault/pull/3476)] - * plugins: Allow response errors to be returned from backend plugins [[GH-3412](https://github.com/hashicorp/vault/pull/3412)] - * secret/transit: Fix panic if the length of the input ciphertext was less - than the expected nonce length [[GH-3521](https://github.com/hashicorp/vault/pull/3521)] - * ui (enterprise): Reinstate support for generic secret backends - this was - erroneously removed in a previous release - -## 0.8.3 (September 19th, 2017) - -CHANGES: +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] +* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails +* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. 
[[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - * Policy input/output standardization: For all built-in authentication - backends, policies can now be specified as a comma-delimited string or an - array if using JSON as API input; on read, policies will be returned as an - array; and the `default` policy will not be forcefully added to policies - saved in configurations. Please note that the `default` policy will continue - to be added to generated tokens, however, rather than backends adding - `default` to the given set of input policies (in some cases, and not in - others), the stored set will reflect the user-specified set. - * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the - endpoint would not modify the Issuer in the generated certificate, leaving - the output self-issued. Although theoretically valid, in practice crypto - stacks were unhappy validating paths containing such certs. As a result, - `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer - DN of the generated certificate. - * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely - useful in break-glass or support scenarios, it is also extremely dangerous. - As of now, a configuration file option `raw_storage_endpoint` must be set in - order to enable this API endpoint. Once set, the available functionality has - been enhanced slightly; it now supports listing and decrypting most of - Vault's core data structures, except for the encryption keyring itself. - * `generic` is now `kv`: To better reflect its actual use, the `generic` - backend is now `kv`. Using `generic` will still work for backwards - compatibility. +SECURITY: -FEATURES: +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. 
This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault - using machine credentials. - * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts - can now authenticate to Vault using JWT tokens. +## 1.11.2 +### August 2, 2022 IMPROVEMENTS: - * configuration: Provide a config option to store Vault server's process ID - (PID) in a file [[GH-3321](https://github.com/hashicorp/vault/pull/3321)] - * mfa (Enterprise): Add the ability to use identity metadata in username format - * mfa/okta (Enterprise): Add support for configuring base_url for API calls - * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value - longer than the signing CA certificate's NotAfter value. [[GH-3325](https://github.com/hashicorp/vault/pull/3325)] - * sys/raw: Raw storage access is now disabled by default [[GH-3329](https://github.com/hashicorp/vault/pull/3329)] - -BUG FIXES: - - * auth/okta: Fix regression that removed the ability to set base_url [[GH-3313](https://github.com/hashicorp/vault/pull/3313)] - * core: Fix panic while loading leases at startup on ARM processors - [[GH-3314](https://github.com/hashicorp/vault/pull/3314)] - * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key - [[GH-3325](https://github.com/hashicorp/vault/pull/3325)] - -## 0.8.2.1 (September 11th, 2017) (Enterprise Only) +* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] BUG FIXES: - * Fix an issue upgrading to 0.8.2 for Enterprise customers. +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -## 0.8.2 (September 5th, 2017) +## 1.11.1 +### July 21, 2022 SECURITY: -* In prior versions of Vault, if authenticating via AWS IAM and requesting a - periodic token, the period was not properly respected. This could lead to - tokens expiring unexpectedly, or a token lifetime being longer than expected. - Upon token renewal with Vault 0.8.2 the period will be properly enforced. - -DEPRECATIONS/CHANGES: +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] -* `vault ssh` users should supply `-mode` and `-role` to reduce the number of - API calls. A future version of Vault will mark these optional values as - required.
Failure to supply `-mode` or `-role` will result in a warning. -* Vault plugins will first briefly run a restricted version of the plugin to - fetch metadata, and then lazy-load the plugin on first request to prevent - crash/deadlock of Vault during the unseal process. Plugins will need to be - built with the latest changes in order for them to run properly. - -FEATURES: +CHANGES: -* **Lazy Lease Loading**: On startup, Vault will now load leases from storage - in a lazy fashion (token checks and revocation/renewal requests still force - an immediate load). For larger installations this can significantly reduce - downtime when switching active nodes or bringing Vault up from cold start. -* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA - backend for authenticating to machines. It also supports remote host key - verification through the SSH CA backend, if enabled. -* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports - signing self-issued CA certs. This is useful when switching root CAs. +* core: Bump Go version to 1.17.12. IMPROVEMENTS: - * audit/file: Allow specifying `stdout` as the `file_path` to log to standard - output [[GH-3235](https://github.com/hashicorp/vault/pull/3235)] - * auth/aws: Allow wildcards in `bound_iam_principal_arn` [[GH-3213](https://github.com/hashicorp/vault/pull/3213)] - * auth/okta: Compare groups case-insensitively since Okta is only - case-preserving [[GH-3240](https://github.com/hashicorp/vault/pull/3240)] - * auth/okta: Standardize Okta configuration APIs across backends [[GH-3245](https://github.com/hashicorp/vault/pull/3245)] - * cli: Add subcommand autocompletion that can be enabled with - `vault -autocomplete-install` [[GH-3223](https://github.com/hashicorp/vault/pull/3223)] - * cli: Add ability to handle wrapped responses when using `vault auth`. What - is output depends on the other given flags; see the help output for that - command for more information. [[GH-3263](https://github.com/hashicorp/vault/pull/3263)] - * core: TLS cipher suites used for cluster behavior can now be set via - `cluster_cipher_suites` in configuration [[GH-3228](https://github.com/hashicorp/vault/pull/3228)] - * core: The `plugin_name` can now either be specified directly as part of the - parameter or within the `config` object when mounting a secret or auth backend - via `sys/mounts/:path` or `sys/auth/:path` respectively [[GH-3202](https://github.com/hashicorp/vault/pull/3202)] - * core: It is now possible to update the `description` of a mount when - mount-tuning, although this must be done through the HTTP layer [[GH-3285](https://github.com/hashicorp/vault/pull/3285)] - * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and - retrying the operation [[GH-3269](https://github.com/hashicorp/vault/pull/3269)] - * secret/pki: TTLs can now be specified as a string or an integer number of - seconds [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] - * secret/pki: Self-issued certs can now be signed via - `pki/root/sign-self-issued` [[GH-3274](https://github.com/hashicorp/vault/pull/3274)] - * storage/gcp: Use application default credentials if they exist [[GH-3248](https://github.com/hashicorp/vault/pull/3248)] +* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. 
[[GH-15986](https://github.com/hashicorp/vault/pull/15986)] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] BUG FIXES: - * auth/aws: Properly use role-set period values for IAM-derived token renewals - [[GH-3220](https://github.com/hashicorp/vault/pull/3220)] - * auth/okta: Fix updating organization/ttl/max_ttl after initial setting - [[GH-3236](https://github.com/hashicorp/vault/pull/3236)] - * core: Fix PROXY when underlying connection is TLS [[GH-3195](https://github.com/hashicorp/vault/pull/3195)] - * core: Policy-related commands would sometimes fail to act case-insensitively - [[GH-3210](https://github.com/hashicorp/vault/pull/3210)] - * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address - [[GH-3268](https://github.com/hashicorp/vault/pull/3268)] - * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. - [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] - * plugins: Skip mounting plugin-based secret and credential mounts when setting - up mounts if the plugin is no longer present in the catalog. [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] - -## 0.8.1 (August 16th, 2017) - -DEPRECATIONS/CHANGES: - - * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already - exists will now return a `204` instead of overwriting an existing root. If - you want to recreate the root, first run a delete operation on `pki/root` - (requires `sudo` capability), then generate it again. - -FEATURES: - - * **Oracle Secret Backend**: There is now an external plugin to support leased - credentials for Oracle databases (distributed separately). - * **GCP IAM Auth Backend**: There is now an authentication backend that allows - using GCP IAM credentials to retrieve Vault tokens. This is available as - both a plugin and built-in to Vault. - * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can - now be used for MFA with the new path-based MFA introduced in Vault - Enterprise 0.8. - * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports - specifying permitted DNS domains for CA certificates, allowing you to - narrowly scope the set of domains for which a CA can issue or sign child - certificates. - * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to - reload using the `sys/plugins/reload/backend` endpoint and providing either - the plugin name or the mounts to reload. - * **Self-Reloading Plugins**: The plugin system will now attempt to reload a - crashed or stopped plugin, once per request. 
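For the plugin reload endpoint described in the feature list above, a minimal sketch using the Go API client; the plugin name `my-secrets-plugin` is hypothetical, and per that entry a `mounts` list may be supplied instead of `plugin`:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Trigger a reload of a plugin backend by its catalog name.
	if _, err := client.Logical().Write("sys/plugins/reload/backend", map[string]interface{}{
		"plugin": "my-secrets-plugin",
	}); err != nil {
		log.Fatal(err)
	}
}
```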
- -IMPROVEMENTS: - - * auth/approle: Allow array input for policies in addition to comma-delimited - strings [[GH-3163](https://github.com/hashicorp/vault/pull/3163)] - * plugins: Send logs through Vault's logger rather than stdout [[GH-3142](https://github.com/hashicorp/vault/pull/3142)] - * secret/pki: Add `pki/root` delete operation [[GH-3165](https://github.com/hashicorp/vault/pull/3165)] - * secret/pki: Don't overwrite an existing root cert/key when calling generate - [[GH-3165](https://github.com/hashicorp/vault/pull/3165)] +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* kmip (enterprise): Return SecretData as supported Object Type. +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] -BUG FIXES: +SECURITY: - * aws: Don't prefer a nil HTTP client over an existing one [[GH-3159](https://github.com/hashicorp/vault/pull/3159)] - * core: If there is an error when checking for create/update existence, return - 500 instead of 400 [[GH-3162](https://github.com/hashicorp/vault/pull/3162)] - * secret/database: Avoid creating usernames that are too long for legacy MySQL - [[GH-3138](https://github.com/hashicorp/vault/pull/3138)] +* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] -## 0.8.0 (August 9th, 2017) +## 1.11.0 +### June 20, 2022 -SECURITY: +CHANGES: - * We've added a note to the docs about the way the GitHub auth backend works - as it may not be readily apparent that GitHub personal access tokens, which - are used by the backend, can be used for unauthorized access if they are - stolen from third party services and access to Vault is public. - -DEPRECATIONS/CHANGES: - - * Database Plugin Backends: Passwords generated for these backends now - enforce stricter password requirements, as opposed to the previous behavior - of returning a randomized UUID. 
Passwords are of length 20, and have the characters `A1a-` - prepended to ensure stricter requirements. No regressions are - expected from this change. (For database backends that were previously - substituting underscores for hyphens in passwords, this will remain the - case.) - * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`, - `sys/revoke-force` have been deprecated and relocated under `sys/leases`. - Additionally, the deprecated path `sys/revoke-force` now requires the `sudo` - capability. - * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint - is now unauthenticated. This allows introspection of the wrapping info by - clients that only have the wrapping token without then invalidating the - token. Validation functions/checks are still performed on the token. +* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] +* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] +* auth: Remove support for legacy MFA +(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] +* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] +* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] +* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` +endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). +* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. +* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] +* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] +* secrets/pki: A new aliased API path (/pki/issuer/:issuer_ref/sign-self-issued) +provides the same functionality as the existing API (/pki/root/sign-self-issued) +but does not require sudo capabilities; the latter still requires sudo in an +effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] +* secrets/pki: Err on unknown role during sign-verbatim.
[[GH-15543](https://github.com/hashicorp/vault/pull/15543)] +* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead +of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] +* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) +and signing APIs will now include the root CA certificate if the mount is +aware of it. [[GH-15155](https://github.com/hashicorp/vault/pull/15155)] +* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers +and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] +* secrets/pki: existing Generate Root (pki/root/generate/:type), +Set Signed Intermediate (/pki/intermediate/set-signed) APIs will +add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] +* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain +response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] +* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] FEATURES: - * **Cassandra Storage**: Cassandra can now be used for Vault storage - * **CockroachDB Storage**: CockroachDB can now be used for Vault storage - * **CouchDB Storage**: CouchDB can now be used for Vault storage - * **SAP HANA Database Plugin**: The `databases` backend can now manage users - for SAP HANA databases - * **Plugin Backends**: Vault now supports running secret and auth backends as - plugins. Plugins can be mounted like normal backends and can be developed - independently from Vault. - * **PROXY Protocol Support** Vault listeners can now be configured to honor - PROXY protocol v1 information to allow passing real client IPs into Vault. A - list of authorized addresses (IPs or subnets) can be defined and - accept/reject behavior controlled. - * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI - now supports lookup and listing of leases and the associated actions from the - `sys/leases` endpoints in the API. These are located in the new top level - navigation item "Leases". - * **Filtered Mounts for Performance Mode Replication**: Whitelists or - blacklists of mounts can be defined per-secondary to control which mounts - are actually replicated to that secondary. This can allow targeted - replication of specific sets of data to specific geolocations/datacenters. - * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new - replication mode, Disaster Recovery (DR), that performs full real-time - replication (including tokens and leases) to DR secondaries. DR secondaries - cannot handle client requests, but can be promoted to primary as needed for - failover. - * **Manage New Replication Features in the Vault Enterprise UI**: Support for - Replication features in Vault Enterprise UI has expanded to include new DR - Replication mode and management of Filtered Mounts in Performance Replication - mode. - * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows - correlation of users across tokens. At present this is only used for MFA, - but will be the foundation of many other features going forward. - * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise - Only)**: A brand new MFA system built on top of Identity allows MFA - (currently Duo Push, Okta Push, and TOTP) for any authenticated path within - Vault. 
MFA methods can be configured centrally, and TOTP keys live within - the user's Identity information to allow using the same key across tokens. - Specific MFA method(s) required for any given path within Vault can be - specified in normal ACL path statements. +* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. +* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] +* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] +* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows +import, generation and configuration of any number of keys and/or issuers +within a PKI mount, providing operators the ability to rotate certificates +in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] +* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. [[GH-14899](https://github.com/hashicorp/vault/pull/14899)] +* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] +* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] +* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] +* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. 
[[GH-15054](https://github.com/hashicorp/vault/pull/15054)] IMPROVEMENTS: - * api: Add client method for a secret renewer background process [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] - * api: Add `RenewTokenAsSelf` [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] - * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env - var or with a new API function [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] - * api/cli: Client will now attempt to look up SRV records for the given Vault - hostname [[GH-3035](https://github.com/hashicorp/vault/pull/3035)] - * audit/socket: Enhance reconnection logic and don't require the connection to - be established at unseal time [[GH-2934](https://github.com/hashicorp/vault/pull/2934)] - * audit/file: Opportunistically try re-opening the file on error [[GH-2999](https://github.com/hashicorp/vault/pull/2999)] - * auth/approle: Add role name to token metadata [[GH-2985](https://github.com/hashicorp/vault/pull/2985)] - * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [[GH-2915](https://github.com/hashicorp/vault/pull/2915)] - * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env - var [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] - * command/auth: Add `-token-only` flag to `vault auth` that returns only the - token on stdout and does not store it via the token helper [[GH-2855](https://github.com/hashicorp/vault/pull/2855)] - * core: CORS allowed origins can now be configured [[GH-2021](https://github.com/hashicorp/vault/pull/2021)] - * core: Add metrics counters for audit log failures [[GH-2863](https://github.com/hashicorp/vault/pull/2863)] - * cors: Allow setting allowed headers via the API instead of always using - wildcard [[GH-3023](https://github.com/hashicorp/vault/pull/3023)] - * secret/ssh: Allow specifying the key ID format using template values for CA - type [[GH-2888](https://github.com/hashicorp/vault/pull/2888)] - * server: Add `tls_client_ca_file` option for specifying a CA file to use for - client certificate verification when `tls_require_and_verify_client_cert` is - enabled [[GH-3034](https://github.com/hashicorp/vault/pull/3034)] - * storage/cockroachdb: Add CockroachDB storage backend [[GH-2713](https://github.com/hashicorp/vault/pull/2713)] - * storage/couchdb: Add CouchDB storage backend [[GH-2880](https://github.com/hashicorp/vault/pull/2880)] - * storage/mssql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] - * storage/postgresql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] - * storage/postgresql: Improve listing speed [[GH-2945](https://github.com/hashicorp/vault/pull/2945)] - * storage/s3: More efficient paging when an object has a lot of subobjects - [[GH-2780](https://github.com/hashicorp/vault/pull/2780)] - * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [[GH-3084](https://github.com/hashicorp/vault/pull/3084)] - * sys/wrapping: Wrapped tokens now store the original request path of the data - [[GH-3100](https://github.com/hashicorp/vault/pull/3100)] - * telemetry: Add support for DogStatsD [[GH-2490](https://github.com/hashicorp/vault/pull/2490)] +* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] +* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. 
[[GH-15204](https://github.com/hashicorp/vault/pull/15204)] +* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] +* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] +* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] +* api: Added MFALogin() for handling MFA flow when using login helpers. [[GH-14900](https://github.com/hashicorp/vault/pull/14900)] +* api: If the parameters supplied over the API payload are ignored due to not +being what the endpoints were expecting, or if the parameters supplied get +replaced by the values in the endpoint's path itself, warnings will be added to +the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] +* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] +* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] +* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] +* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] +* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] +* audit: Add a policy_results block into the audit log that contains the set of +policies that granted this request access. [[GH-15457](https://github.com/hashicorp/vault/pull/15457)] +* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] +* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] +* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] +* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. 
[[GH-15593](https://github.com/hashicorp/vault/pull/15593)] +* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] +* auth/okta: Add support for performing [the number +challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) +during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] +* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] +* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] +* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] +* core (enterprise): Include `termination_time` in `sys/license/status` response +* core (enterprise): Include termination time in `license inspect` command output +* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] +* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] +* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] +* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. +* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] +* core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)] +* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. 
[[GH-14957](https://github.com/hashicorp/vault/pull/14957)] +* core: Upgrade golang.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] +* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. +* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] +* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] +* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] +* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] +* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] +* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. [[GH-14178](https://github.com/hashicorp/vault/pull/14178)] +* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] +* secrets/pki: Allow operators to control the issuing certificate behavior when +the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] +* secrets/pki: Always return CRL and URL configurations, even when using the default values. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] +* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] +* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] +* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] +* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)] +* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] +* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] +* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections.
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] +* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] +* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] +* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] +* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] +* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +DEPRECATIONS: + +* docs: Document removal of X.509 certificates with signatures that use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] +* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] +* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] BUG FIXES: - * api/health: Don't treat standby `429` codes as an error [[GH-2850](https://github.com/hashicorp/vault/pull/2850)] - * api/leases: Fix lease lookup returning lease properties at the top level - * audit: Fix panic when audit logging a read operation on an asymmetric - `transit` key [[GH-2958](https://github.com/hashicorp/vault/pull/2958)] - * auth/approle: Fix panic when secret and cidr list not provided in role - [[GH-3075](https://github.com/hashicorp/vault/pull/3075)] - * auth/aws: Look up proper account ID on token renew [[GH-3012](https://github.com/hashicorp/vault/pull/3012)] - * auth/aws: Store IAM header in all cases when it changes [[GH-3004](https://github.com/hashicorp/vault/pull/3004)] - * auth/ldap: Verify given certificate is PEM encoded instead of failing - silently [[GH-3016](https://github.com/hashicorp/vault/pull/3016)] - * auth/token: Don't allow using the same token ID twice when manually - specifying [[GH-2916](https://github.com/hashicorp/vault/pull/2916)] - * cli: Fix issue with parsing keys that start with special characters [[GH-2998](https://github.com/hashicorp/vault/pull/2998)] - * core: Relocated `sys/leases/renew` returns same payload as original - `sys/leases` endpoint [[GH-2891](https://github.com/hashicorp/vault/pull/2891)] - * secret/ssh: Fix panic when signing with incorrect key type [[GH-3072](https://github.com/hashicorp/vault/pull/3072)] - * secret/totp: Ensure codes can only be used once. This makes some automated - workflows harder but complies with the RFC.
[[GH-2908](https://github.com/hashicorp/vault/pull/2908)] - * secret/transit: Fix locking when creating a key with unsupported options - [[GH-2974](https://github.com/hashicorp/vault/pull/2974)] - -## 0.7.3 (June 7th, 2017) +* Fixed panic when adding or modifying a Duo MFA Method in Enterprise +* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] +* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] +* auth/ldap: Fixed the logic for setting the entity alias when `username_as_alias` is set. +The previous behavior made a request to the LDAP server to get `user_attr` before +discarding it and using the username instead, which made it impossible for a user to +connect if this attribute was missing or had multiple values, even though it was not +used anyway. The username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] +* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core (enterprise): Fix overcounting of lease count quota usage at startup.
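As a rough illustration of the new `vault/api` helpers listed under IMPROVEMENTS above (the KV methods, `WithNamespace`, and the context-aware wrappers), here is a minimal Go sketch; the KV v2 mount `secret`, the secret path `my-app`, and the namespace `team-a` are illustrative assumptions, not part of the release:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Client configuration is read from VAULT_ADDR / VAULT_TOKEN.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// KV helper: write then read a secret (assumes a KV v2 engine mounted at "secret").
	if _, err := client.KVv2("secret").Put(ctx, "my-app", map[string]interface{}{
		"api_key": "abc123", // illustrative payload
	}); err != nil {
		log.Fatal(err)
	}
	secret, err := client.KVv2("secret").Get(ctx, "my-app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["api_key"])

	// WithNamespace: clone the client, scoped to a namespace (Vault Enterprise).
	nsClient := client.WithNamespace("team-a") // "team-a" is hypothetical

	// Context-aware wrapper: the same read, but cancellable via ctx.
	if _, err := nsClient.Logical().ReadWithContext(ctx, "secret/data/my-app"); err != nil {
		log.Fatal(err)
	}
}
```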
+* core (enterprise): Fix some races in merkle index flushing code found in testing +* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fix systemd reload notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: fix excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fix excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* core: rename the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjust the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: fix a potential memory leak from `time.After()` used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] +* mfa/okta: disable client-side rate limiting that was causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. [[GH-15579](https://github.com/hashicorp/vault/pull/15579)] +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases.
[[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] +* secrets/pki: CRLs on performance secondary clusters are now automatically +rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] +* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] +* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] +* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.10.11 +### March 01, 2023 SECURITY: - * Cert auth backend now checks validity of individual certificates: In - previous versions of Vault, validity (e.g. expiration) of individual leaf - certificates added for authentication was not checked. This was done to make - it easier for administrators to control lifecycles of individual - certificates added to the backend, e.g. the authentication material being - checked was access to that specific certificate's private key rather than - all private keys signed by a CA. However, this behavior is often unexpected - and as a result can lead to insecure deployments, so we are now validating - these certificates as well. - * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2 - caused the HMACing of any App-ID information stored in paths (including - actual app-IDs and user-IDs) to be unsalted and written as-is from the API. - In 0.7.3 any such paths will be automatically changed to salted versions on - access (e.g. login or read); however, if you created new app-IDs or user-IDs - in 0.7.1/0.7.2, you may want to consider whether any users with access to - Vault's underlying data store may have intercepted these values, and - revoke/roll them. - -DEPRECATIONS/CHANGES: - - * Step-Down is Forwarded: When a step-down is issued against a non-active node - in an HA cluster, it will now forward the request to the active node. +* auth/approle: When using the Vault or Vault Enterprise approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint could destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11, and above.
[[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] -FEATURES: +CHANGES: - * **ed25519 Signing/Verification in Transit with Key Derivation**: The - `transit` backend now supports generating - [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification - functionality. These keys support derivation, allowing you to modify the - actual encryption key used by supplying a `context` value. - * **Key Version Specification for Encryption in Transit**: You can now specify - the version of a key you wish to use to generate a signature, ciphertext, or - HMAC. This can be controlled by the `min_encryption_version` key - configuration property. - * **Replication Primary Discovery (Enterprise)**: Replication primaries will - now advertise the addresses of their local HA cluster members to replication - secondaries. This helps recovery if the primary active node goes down and - neither service discovery nor load balancers are in use to steer clients. +* core: Bump Go version to 1.19.6. IMPROVEMENTS: - * api/health: Add Sys().Health() [[GH-2805](https://github.com/hashicorp/vault/pull/2805)] - * audit: Add auth information to requests that error out [[GH-2754](https://github.com/hashicorp/vault/pull/2754)] - * command/auth: Add `-no-store` option that prevents the auth command from - storing the returned token into the configured token helper [[GH-2809](https://github.com/hashicorp/vault/pull/2809)] - * core/forwarding: Request forwarding now heartbeats to prevent unused - connections from being terminated by firewalls or proxies - * plugins/databases: Add MongoDB as an internal database plugin [[GH-2698](https://github.com/hashicorp/vault/pull/2698)] - * storage/dynamodb: Add a method for checking the existence of children, - speeding up deletion operations in the DynamoDB storage backend [[GH-2722](https://github.com/hashicorp/vault/pull/2722)] - * storage/mysql: Add max_parallel parameter to MySQL backend [[GH-2760](https://github.com/hashicorp/vault/pull/2760)] - * secret/databases: Support listing connections [[GH-2823](https://github.com/hashicorp/vault/pull/2823)] - * secret/databases: Support custom renewal statements in Postgres database - plugin [[GH-2788](https://github.com/hashicorp/vault/pull/2788)] - * secret/databases: Use the role name as part of generated credentials - [[GH-2812](https://github.com/hashicorp/vault/pull/2812)] - * ui (Enterprise): Transit key and secret browsing UI handle large lists better - * ui (Enterprise): root tokens are no longer persisted - * ui (Enterprise): support for mounting Database and TOTP secret backends - -BUG FIXES: - - * auth/app-id: Fix regression causing loading of salts to be skipped - * auth/aws: Improve EC2 describe instances performance [[GH-2766](https://github.com/hashicorp/vault/pull/2766)] - * auth/aws: Fix lookup of some instance profile ARNs [[GH-2802](https://github.com/hashicorp/vault/pull/2802)] - * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various - points (e.g.
renewal time) more robust [[GH-2814](https://github.com/hashicorp/vault/pull/2814)] - * auth/aws: Properly honor configured period when using IAM authentication - [[GH-2825](https://github.com/hashicorp/vault/pull/2825)] - * auth/aws: Check that a bound IAM principal is not empty (in the current - state of the role) before requiring it match the previously authenticated - client [[GH-2781](https://github.com/hashicorp/vault/pull/2781)] - * auth/cert: Fix panic on renewal [[GH-2749](https://github.com/hashicorp/vault/pull/2749)] - * auth/cert: Certificate verification for non-CA certs [[GH-2761](https://github.com/hashicorp/vault/pull/2761)] - * core/acl: Prevent race condition when compiling ACLs in some scenarios - [[GH-2826](https://github.com/hashicorp/vault/pull/2826)] - * secret/database: Increase wrapping token TTL; in a loaded scenario it could - be too short - * secret/generic: Allow integers to be set as the value of `ttl` field as the - documentation claims is supported [[GH-2699](https://github.com/hashicorp/vault/pull/2699)] - * secret/ssh: Added host key callback to ssh client config [[GH-2752](https://github.com/hashicorp/vault/pull/2752)] - * storage/s3: Avoid a panic when some bad data is returned [[GH-2785](https://github.com/hashicorp/vault/pull/2785)] - * storage/dynamodb: Fix list functions working improperly on Windows [[GH-2789](https://github.com/hashicorp/vault/pull/2789)] - * storage/file: Don't leak file descriptors in some error cases - * storage/swift: Fix pre-v3 project/tenant name reading [[GH-2803](https://github.com/hashicorp/vault/pull/2803)] - -## 0.7.2 (May 8th, 2017) +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] BUG FIXES: - * audit: Fix auditing entries containing certain kinds of time values - [[GH-2689](https://github.com/hashicorp/vault/pull/2689)] - -## 0.7.1 (May 5th, 2017) +* auth/approle: Add nil check for the secret ID entry when deleting via secret ID accessor, preventing cross-role secret ID deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18209](https://github.com/hashicorp/vault/pull/18209)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -DEPRECATIONS/CHANGES: +## 1.10.10 +### February 6, 2023 - * LDAP Auth Backend: Group membership queries will now run as the `binddn` - user when `binddn`/`bindpass` are configured, rather than as the - authenticating user as was the case previously. - -FEATURES: +CHANGES: - * **AWS IAM Authentication**: IAM principals can get Vault tokens - automatically, opening AWS-based authentication to users, ECS containers, - Lambda instances, and more.
Signed client identity information retrieved - using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS - service before issuing a Vault token. This backend is unified with the - `aws-ec2` authentication backend under the name `aws`, and allows additional - EC2-related restrictions to be applied during the IAM authentication; the - previous EC2 behavior is also still available. [[GH-2441](https://github.com/hashicorp/vault/pull/2441)] - * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your - Vault physical data store [[GH-2546](https://github.com/hashicorp/vault/pull/2546)] - * **Lease Listing and Lookup**: You can now introspect a lease to get its - creation and expiration properties via `sys/leases/lookup`; with `sudo` - capability you can also list leases for lookup, renewal, or revocation via - that endpoint. Various lease functions (renew, revoke, revoke-prefix, - revoke-force) have also been relocated to `sys/leases/`, but they also work - at the old paths for compatibility. Reading (but not listing) leases via - `sys/leases/lookup` is now a part of the current `default` policy. [[GH-2650](https://github.com/hashicorp/vault/pull/2650)] - * **TOTP Secret Backend**: You can now store multi-factor authentication keys - in Vault and use the API to retrieve time-based one-time use passwords on - demand. The backend can also be used to generate a new key and validate - passwords generated by that key. [[GH-2492](https://github.com/hashicorp/vault/pull/2492)] - * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend - combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra - backends. It also provides a plugin interface for extendability through - custom databases. [[GH-2200](https://github.com/hashicorp/vault/pull/2200)] +* core: Bump Go version to 1.19.4. IMPROVEMENTS: - * auth/cert: Support for constraints on subject Common Name and DNS/email - Subject Alternate Names in certificates [[GH-2595](https://github.com/hashicorp/vault/pull/2595)] - * auth/ldap: Use the binding credentials to search group membership rather - than the user credentials [[GH-2534](https://github.com/hashicorp/vault/pull/2534)] - * cli/revoke: Add `-self` option to allow revoking the currently active token - [[GH-2596](https://github.com/hashicorp/vault/pull/2596)] - * core: Randomize x coordinate in Shamir shares [[GH-2621](https://github.com/hashicorp/vault/pull/2621)] - * replication: Fix a bug when enabling `approle` on a primary before - secondaries were connected - * replication: Add heartbeating to ensure firewalls don't kill connections to - primaries - * secret/pki: Add `no_store` option that allows certificates to be issued - without being stored. This removes the ability to look up and/or add to a - CRL but helps with scaling to very large numbers of certificates. 
[[GH-2565](https://github.com/hashicorp/vault/pull/2565)] - * secret/pki: If used with a role parameter, the `sign-verbatim/` - endpoint honors the values of `generate_lease`, `no_store`, `ttl` and - `max_ttl` from the given role [[GH-2593](https://github.com/hashicorp/vault/pull/2593)] - * secret/pki: Add role parameter `allow_glob_domains` that enables defining - names in `allowed_domains` containing `*` glob patterns [[GH-2517](https://github.com/hashicorp/vault/pull/2517)] - * secret/pki: Update certificate storage to not use characters that are not - supported on some filesystems [[GH-2575](https://github.com/hashicorp/vault/pull/2575)] - * storage/etcd3: Add `discovery_srv` option to query for SRV records to find - servers [[GH-2521](https://github.com/hashicorp/vault/pull/2521)] - * storage/s3: Support `max_parallel` option to limit concurrent outstanding - requests [[GH-2466](https://github.com/hashicorp/vault/pull/2466)] - * storage/s3: Use pooled transport for http client [[GH-2481](https://github.com/hashicorp/vault/pull/2481)] - * storage/swift: Allow domain values for V3 authentication [[GH-2554](https://github.com/hashicorp/vault/pull/2554)] - * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more - cleanup cases [[GH-2452](https://github.com/hashicorp/vault/pull/2452)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters (see the Go sketch after the bug fixes below) [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepend "passcode=" to user input when it is not already present for Duo TOTP MFA method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] BUG FIXES: - * api: Respect a configured path in Vault's address [[GH-2588](https://github.com/hashicorp/vault/pull/2588)] - * auth/aws-ec2: New bounds added as criteria to allow role creation [[GH-2600](https://github.com/hashicorp/vault/pull/2600)] - * auth/ldap: Don't lowercase groups attached to users [[GH-2613](https://github.com/hashicorp/vault/pull/2613)] - * cli: Don't panic if `vault write` is used with the `force` flag but no path - [[GH-2674](https://github.com/hashicorp/vault/pull/2674)] - * core: Help operations should request forward since standbys may not have - appropriate info [[GH-2677](https://github.com/hashicorp/vault/pull/2677)] - * replication: Fix enabling secondaries when certain mounts already existed on - the primary - * secret/mssql: Update mssql driver to support queries with colons [[GH-2610](https://github.com/hashicorp/vault/pull/2610)] - * secret/pki: Don't lowercase O/OU values in certs [[GH-2555](https://github.com/hashicorp/vault/pull/2555)] - * secret/pki: Don't attempt to validate IP SANs if none are provided [[GH-2574](https://github.com/hashicorp/vault/pull/2574)] - * secret/ssh: Don't automatically lowercase principals in issued SSH certs - [[GH-2591](https://github.com/hashicorp/vault/pull/2591)] - * storage/consul: Properly handle state events rather than timing out - [[GH-2548](https://github.com/hashicorp/vault/pull/2548)] - * storage/etcd3: Ensure locks are released if client is improperly shut down -
[[GH-2526](https://github.com/hashicorp/vault/pull/2526)] - -## 0.7.0 (March 21st, 2017) - -SECURITY: - - * Common name not being validated when `exclude_cn_from_sans` option used in - `pki` backend: When using a role in the `pki` backend that specified the - `exclude_cn_from_sans` option, the common name would not then be properly - validated against the role's constraints. This has been fixed. We recommend - that any users of this feature upgrade to 0.7 as soon as feasible. - -DEPRECATIONS/CHANGES: - - * List Operations Always Use Trailing Slash: Any list operation, whether via - the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to - have a trailing slash. This makes policy writing more predictable, as it - means requests will no longer succeed or fail based on which client or - which HTTP verb is used. However, it also means that policies - allowing `list` capability must be carefully checked to ensure that they - contain a trailing slash; some policies may need to be split into multiple - stanzas to accommodate. - * PKI Defaults to Unleased Certificates: When issuing certificates from the - PKI backend, by default, no leases will be issued. If you want to manually - revoke a certificate, its serial number can be used with the `pki/revoke` - endpoint. Issuing leases is still possible by enabling the `generate_lease` - toggle in PKI role entries (this will default to `true` for upgrades, to - keep existing behavior), which will allow using lease IDs to revoke - certificates. For installations issuing large numbers of certificates (tens - to hundreds of thousands, or millions), this will significantly improve - Vault startup time since leases associated with these certificates will not - have to be loaded; however note that it also means that revocation of a - token used to issue certificates will no longer add these certificates to a - CRL. If this behavior is desired or needed, consider keeping leases enabled - and ensuring lifetimes are reasonable, and issue long-lived certificates via - a different role with leases disabled. - -FEATURES: - - * **Replication (Enterprise)**: Vault Enterprise now has support for creating - a multi-datacenter replication set between clusters. The current replication - offering is based on an asynchronous primary/secondary (1:N) model that - replicates static data while keeping dynamic data (leases, tokens) - cluster-local, focusing on horizontal scaling for high-throughput and - high-fanout deployments. - * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault - Enterprise UI now supports looking up and rotating response wrapping tokens, - as well as creating tokens with arbitrary values inside. It also now - supports replication functionality, enabling the configuration of a - replication set in the UI. - * **Expanded Access Control Policies**: Access control policies can now - specify allowed and denied parameters -- and, optionally, their values -- to - control what a client can and cannot submit during an API call. Policies can - also specify minimum/maximum response wrapping TTLs to both enforce the use - of response wrapping and control the duration of resultant wrapping tokens. - See the [policies concepts - page](https://www.vaultproject.io/docs/concepts/policies.html) for more - information. - * **SSH Backend As Certificate Authority**: The SSH backend can now be - configured to sign host and user certificates. Each mount of the backend - acts as an independent signing authority.
The CA key pair can be configured - for each mount and the public key is accessible via an unauthenticated API - call; additionally, the backend can generate a public/private key pair for - you. We recommend using separate mounts for signing host and user - certificates. - -IMPROVEMENTS: +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* licensing (enterprise): update autoloaded license cache after reload +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. 
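A minimal sketch of configuring the new MySQL `tls_server_name`/`tls_skip_verify` parameters mentioned under the 1.10.10 IMPROVEMENTS above; the mount path `database`, connection name `my-mysql`, and all connection details are illustrative assumptions:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Configure a MySQL connection for the database secrets engine.
	_, err = client.Logical().Write("database/config/my-mysql", map[string]interface{}{
		"plugin_name":     "mysql-database-plugin",
		"connection_url":  "{{username}}:{{password}}@tcp(db.internal:3306)/",
		"allowed_roles":   "readonly",
		"username":        "vault-admin",      // rotated by Vault after setup
		"password":        "initial-password", // illustrative only
		"tls_server_name": "db.internal",      // hostname to verify on the server cert
		"tls_skip_verify": false,              // leave verification on outside of testing
	})
	if err != nil {
		log.Fatal(err)
	}
}
```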
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - * api/request: Passing username and password information in API request - [GH-2469] - * audit: Logging the token's use count with authentication response and - logging the remaining uses of the client token with request [GH-2437] - * auth/approle: Support for restricting the number of uses on the tokens - issued [GH-2435] - * auth/aws-ec2: AWS EC2 auth backend now supports constraints for VPC ID, - Subnet ID and Region [GH-2407] - * auth/ldap: Use the value of the `LOGNAME` or `USER` env vars for the - username if not explicitly set on the command line when authenticating - [GH-2154] - * audit: Support adding a configurable prefix (such as `@cee`) before each - line [GH-2359] - * core: Canonicalize list operations to use a trailing slash [GH-2390] - * core: Add option to disable caching on a per-mount level [GH-2455] - * core: Add ability to require valid client certs in listener config [GH-2457] - * physical/dynamodb: Implement a session timeout to avoid having to use - recovery mode in the case of an unclean shutdown, which makes HA much safer - [GH-2141] - * secret/pki: O (Organization) values can now be set to role-defined values - for issued/signed certificates [GH-2369] - * secret/pki: Certificates issued/signed from PKI backend do not generate - leases by default [GH-2403] - * secret/pki: When using DER format, still return the private key type - [GH-2405] - * secret/pki: Add an intermediate to the CA chain even if it lacks an - authority key ID [GH-2465] - * secret/pki: Add role option to use CSR SANs [GH-2489] - * secret/ssh: SSH backend as CA to sign user and host certificates [GH-2208] - * secret/ssh: Support reading of SSH CA public key from `config/ca` endpoint - and also return it when CA key pair is generated [GH-2483] +## 1.10.9 +### November 30, 2022 BUG FIXES: - * audit: When auditing headers use case-insensitive comparisons [GH-2362] - * auth/aws-ec2: Return role period in seconds and not nanoseconds [GH-2374] - * auth/okta: Fix panic if user had no local groups and/or policies set - [GH-2367] - * command/server: Fix parsing of redirect address when port is not mentioned - [GH-2354] - * physical/postgresql: Fix listing returning incorrect results if there were - multiple levels of children [GH-2393] - -## 0.6.5 (February 7th, 2017) - -FEATURES: - - * **Okta Authentication**: A new Okta authentication backend allows you to use - Okta usernames and passwords to authenticate to Vault. If provided with an - appropriate Okta API token, group membership can be queried to assign - policies; users and groups can be defined locally as well. - * **RADIUS Authentication**: A new RADIUS authentication backend allows using - a RADIUS server to authenticate to Vault. Policies can be configured for - specific users or for any authenticated user. - * **Exportable Transit Keys**: Keys in `transit` can now be marked as - `exportable` at creation time. This allows a properly ACL'd user to retrieve - the associated signing key, encryption key, or HMAC key. The `exportable` - value is returned on a key policy read and cannot be changed, so if a key is - marked `exportable` it will always be exportable, and if it is not it will - never be exportable. - * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations - in the transit backend now support processing multiple input items in one - call, returning the output of each item in the response. 
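A minimal sketch of the batch `encrypt` operation described in the entry above, assuming an authenticated client and an existing transit key named `my-key` (illustrative); transit expects base64-encoded plaintext:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	b64 := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }
	// One request, multiple plaintexts.
	secret, err := client.Logical().Write("transit/encrypt/my-key", map[string]interface{}{
		"batch_input": []map[string]interface{}{
			{"plaintext": b64("first item")},
			{"plaintext": b64("second item")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries one result per input item, in order.
	fmt.Println(secret.Data["batch_results"])
}
```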
- * **Configurable Audited HTTP Headers**: You can now specify headers that you - want to have included in each audit entry, along with whether each header - should be HMAC'd or kept plaintext. This can be useful for adding additional - client or network metadata to the audit logs (see the Go sketch below). - * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit - backend, allowing creation, viewing and editing of named keys as well as using - those keys to perform supported transit operations directly in the UI. - * **Socket Audit Backend**: A new socket audit backend allows audit logs to be sent - through TCP, UDP, or UNIX Sockets. - -IMPROVEMENTS: - - * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148] - * auth/aws-ec2: Support issuing periodic tokens [GH-2324] - * auth/github: Support listing teams and users [GH-2261] - * auth/ldap: Support adding policies to local users directly, in addition to - local groups [GH-2152] - * command/server: Add ability to select and prefer server cipher suites - [GH-2293] - * core: Add a nonce to unseal operations as a check (useful mostly for - support, not as a security principle) [GH-2276] - * duo: Added ability to supply extra context to Duo pushes [GH-2118] - * physical/consul: Add option for setting consistency mode on Consul gets - [GH-2282] - * physical/etcd: Full v3 API support; code will autodetect which API version - to use. The v3 code path is significantly less complicated and may be much - more stable. [GH-2168] - * secret/pki: Allow specifying OU entries in generated certificate subjects - [GH-2251] - * secret mount ui (Enterprise): the secret mount list now shows all mounted - backends even if the UI cannot browse them. Additional backends can now be - mounted from the UI as well.
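A minimal sketch of configuring an audited HTTP header as described above, via the `sys/config/auditing/request-headers` endpoint; the choice of `X-Forwarded-For` is illustrative:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Record X-Forwarded-For in audit entries, HMAC'd rather than plaintext.
	_, err = client.Logical().Write(
		"sys/config/auditing/request-headers/X-Forwarded-For",
		map[string]interface{}{"hmac": true},
	)
	if err != nil {
		log.Fatal(err)
	}
}
```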
+## 1.10.8 +### November 2, 2022 BUG FIXES: - * auth/token: Fix regression in 0.6.4 where using token store roles as a - blacklist (with only `disallowed_policies` set) would not work in most - circumstances [GH-2286] - * physical/s3: Page responses in client so list doesn't truncate [GH-2224] - * secret/cassandra: Stop a connection leak that could occur on active node - failover [GH-2313] - * secret/pki: When using `sign-verbatim`, don't require a role and use the - CSR's common name [GH-2243] +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -## 0.6.4 (December 16, 2016) +## 1.10.7 +### September 30, 2022 SECURITY: -Further details about these security issues can be found in the 0.6.4 upgrade -guide. - - * `default` Policy Privilege Escalation: If a parent token did not have the - `default` policy attached to its token, it could still create children with - the `default` policy. This is no longer allowed (unless the parent has - `sudo` capability for the creation path). In most cases this is low severity - since the access grants in the `default` policy are meant to be access - grants that are acceptable for all tokens to have. - * Leases Not Expired When Limited Use Token Runs Out of Uses: When using - limited-use tokens to create leased secrets, if the limited-use token was - revoked due to running out of uses (rather than due to TTL expiration or - explicit revocation) it would fail to revoke the leased secrets. These - secrets would still be revoked when their TTL expired, limiting the severity - of this issue. An endpoint has been added (`auth/token/tidy`) that can - perform housekeeping tasks on the token store; one of its tasks can detect - this situation and revoke the associated leases. - -FEATURES: - - * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing, - creating, and editing policies. - -IMPROVEMENTS: - - * http: Vault now sets a `no-store` cache control header to make it more - secure in setups that are not end-to-end encrypted [GH-2183] - -BUG FIXES: - - * auth/ldap: Don't panic if dialing returns an error and starttls is enabled; - instead, return the error [GH-2188] - * ui (Enterprise): Submitting an unseal key now properly resets the - form so a browser refresh isn't required to continue. 
- -## 0.6.3 (December 6, 2016) - -DEPRECATIONS/CHANGES: - - * Request size limitation: A maximum request size of 32MB is imposed to - prevent a denial of service attack with arbitrarily large requests [GH-2108] - * LDAP denies passwordless binds by default: In new LDAP mounts, or when - existing LDAP mounts are rewritten, passwordless binds will be denied by - default. The new `deny_null_bind` parameter can be set to `false` to allow - these. [GH-2103] - * Any audit backend activated satisfies conditions: Previously, when a new - Vault node was taking over service in an HA cluster, all audit backends were - required to be loaded successfully to take over active duty. This behavior - now matches the behavior of the audit logging system itself: at least one - audit backend must successfully be loaded. The server log contains an error - when this occurs. This helps keep a Vault HA cluster working when there is a - misconfiguration on a standby node. [GH-2083] - -FEATURES: - - * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI - that offers access to a number of features, including init/unsealing/sealing, - authentication via userpass or LDAP, and K/V reading/writing. The capability - set of the UI will be expanding rapidly in further releases. To enable it, - set `ui = true` in the top level of Vault's configuration file and point a - web browser at your Vault address. - * **Google Cloud Storage Physical Backend**: You can now use GCS for storing - Vault data [GH-2099] - -IMPROVEMENTS: - - * auth/github: Policies can now be assigned to users as well as to teams - [GH-2079] - * cli: Set the number of retries on 500 down to 0 by default (no retrying). It - can be very confusing to users when there is a pause while the retries - happen if they haven't explicitly set it. With request forwarding the need - for this is lessened anyways. [GH-2093] - * core: Response wrapping is now allowed to be specified by backend responses - (requires backends gaining support) [GH-2088] - * physical/consul: When announcing service, use the scheme of the Vault server - rather than the Consul client [GH-2146] - * secret/consul: Added listing functionality to roles [GH-2065] - * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to - enable customization of user revocation SQL statements [GH-2033] - * secret/transit: Add listing of keys [GH-1987] - -BUG FIXES: - - * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with - Vault 0.6.1 and older [GH-2014] - * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077] - * auth/approle: Creating the index for the role_id properly [GH-2004] - * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the - instance-profile ARN [GH-2035] - * auth/ldap: Avoid leaking connections on login [GH-2130] - * command/path-help: Use the actual error generated by Vault rather than - always using 500 when there is a path help error [GH-2153] - * command/ssh: Use temporary file for identity and ensure its deletion before - the command returns [GH-2016] - * cli: Fix error printing values with `-field` if the values contained - formatting directives [GH-2109] - * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120] - * core: Fix bug where a failure to come up as active node (e.g. 
if an audit - backend failed) could lead to deadlock [GH-2083] - * physical/mysql: Fix potential crash during setup due to a query failure - [GH-2105] - * secret/consul: Fix panic on user error [GH-2145] - -## 0.6.2 (October 5, 2016) - -DEPRECATIONS/CHANGES: - - * Convergent Encryption v2: New keys in `transit` using convergent mode will - use a new nonce derivation mechanism rather than require the user to supply - a nonce. While not explicitly increasing security, it minimizes the - likelihood that a user will use the mode improperly and impact the security - of their keys. Keys in convergent mode that were created in v0.6.1 will - continue to work with the same mechanism (user-supplied nonce). - * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the - `etcd` storage backend now requires that `ha_enabled` be explicitly - specified in the configuration file. The backend currently has known broken - HA behavior, so this flag discourages use by default without explicitly - enabling it. If you are using this functionality, when upgrading, you should - set `ha_enabled` to `"true"` *before* starting the new versions of Vault. - * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault - the default was 30 days, but moving it to 32 days allows some operations - (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron - job. - * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are - no longer part of request URLs. The GET and DELETE operations are now moved - to new endpoints (`/lookup` and `/destroy`) which consume the input from - the body and not the URL. - * AppRole requires at least one constraint: previously it was sufficient to - turn off all AppRole authentication constraints (secret ID, CIDR block) and - use the role ID only. It is now required that at least one additional - constraint is enabled. Existing roles are unaffected, but any new roles or - updated roles will require this. - * Reading wrapped responses from `cubbyhole/response` is deprecated. The - `sys/wrapping/unwrap` endpoint should be used instead as it provides - additional security, auditing, and other benefits (see the Go sketch at the - end of this section). The ability to read - directly will be removed in a future release. - * Request Forwarding is now on by default: in 0.6.1 this required toggling on, - but is now enabled by default. This can be disabled via the - `"disable_clustering"` parameter in Vault's - [config](https://www.vaultproject.io/docs/config/index.html), or per-request - with the `X-Vault-No-Request-Forwarding` header. - * In prior versions a bug caused the `bound_iam_role_arn` value in the - `aws-ec2` authentication backend to actually use the instance profile ARN. - This has been corrected, but as a result there is a behavior change. To - match using the instance profile ARN, a new parameter - `bound_iam_instance_profile_arn` has been added. Existing roles will - automatically transfer the value over to the correct parameter, but the next - time the role is updated, the new meanings will take effect. - -FEATURES: - - * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an - approle can now specify a list of CIDR blocks from which the requests to - generate secret IDs should originate.
If an approle already has CIDR - restrictions specified, the CIDR restrictions on the secret ID should be a - subset of those specified on the role [GH-1910] - * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root - token created at initialization time can now be PGP encrypted [GH-1883] - * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows, - when a CA cert is being supplied as a signed root or intermediate, a trust - chain of arbitrary length. The chain is returned as a parameter at - certificate issue/sign time and is retrievable independently as well. - [GH-1694] - * **Response Wrapping Enhancements**: There are new endpoints to look up - response wrapped token parameters; wrap arbitrary values; rotate wrapping - tokens; and unwrap with enhanced validation. In addition, list operations - can now be response-wrapped. [GH-1927] - * **Transit Features**: The `transit` backend now supports generating random - bytes and SHA sums; HMACs; and signing and verification functionality using - EC keys (P-256 curve) - -IMPROVEMENTS: - - * api: Return error when an invalid (as opposed to incorrect) unseal key is - submitted, rather than ignoring it [GH-1782] - * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834] - * api: Rekey operation now redirects from standbys to master [GH-1862] - * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and - re-open the log file, making it easier to rotate audit logs [GH-1953] - * auth/aws-ec2: EC2 instances can get authenticated by presenting the identity - document and its SHA256 RSA digest [GH-1961] - * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a - prefix match instead of exact match [GH-1943] - * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to - refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn` - to refer to IAM role ARN instead of the instance profile ARN [GH-1913] - * auth/aws-ec2: Backend generates the nonce by default and clients can - explicitly disable reauthentication by setting empty nonce [GH-1889] - * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806] - * command/format: The `format` flag on select CLI commands takes `yml` as an - alias for `yaml` [GH-1899] - * core: Allow the size of the read cache to be set via the config file, and - change the default value to 1MB (from 32KB) [GH-1784] - * core: Allow single and two-character path parameters for most places - [GH-1811] - * core: Allow list operations to be response-wrapped [GH-1814] - * core: Provide better protection against timing attacks in Shamir code - [GH-1877] - * core: Unmounting/disabling backends no longer returns an error if the mount - didn't exist. This is line with elsewhere in Vault's API where `DELETE` is - an idempotent operation. 
[GH-1903] - * credential/approle: At least one constraint is required to be enabled while - creating and updating a role [GH-1882] - * secret/cassandra: Added consistency level for use with roles [GH-1931] - * secret/mysql: SQL for revoking user can be configured on the role [GH-1914] - * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new - keys [GH-1812] - * secret/transit: Empty plaintext values are now allowed [GH-1874] +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] BUG FIXES: - * audit: Fix panic being caused by some values logging as underlying Go types - instead of formatted strings [GH-1912] - * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920] - * auth/approle: Not letting secret IDs and secret ID accessors to get logged - in plaintext in audit logs [GH-1947] - * auth/aws-ec2: Allow authentication if the underlying host is in a bad state - but the instance is running [GH-1884] - * auth/token: Fixed metadata getting missed out from token lookup response by - gracefully handling token entry upgrade [GH-1924] - * cli: Don't error on newline in token file [GH-1774] - * core: Pass back content-type header for forwarded requests [GH-1791] - * core: Fix panic if the same key was given twice to `generate-root` [GH-1827] - * core: Fix potential deadlock on unmount/remount [GH-1793] - * physical/file: Remove empty directories from the `file` storage backend [GH-1821] - * physical/zookeeper: Remove empty directories from the `zookeeper` storage - backend and add a fix to the `file` storage backend's logic [GH-1964] - * secret/aws: Added update operation to `aws/sts` path to consider `ttl` - parameter [39b75c6] - * secret/aws: Mark STS secrets as non-renewable [GH-1804] - * secret/cassandra: Properly store session for re-use [GH-1802] - * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781] - -## 0.6.1 (August 22, 2016) - -DEPRECATIONS/CHANGES: - - * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to - connect to the HA cluster. We recommend following our [general upgrade - instructions](https://www.vaultproject.io/docs/install/upgrade.html) in - addition to 0.6.1-specific upgrade instructions to ensure that this is not - an issue. - * Status codes for sealed/uninitialized Vaults have changed to `503`/`501` - respectively. See the [version-specific upgrade - guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for - more details. - * Root tokens (tokens with the `root` policy) can no longer be created except - by another root token or the `generate-root` endpoint. - * Issued certificates from the `pki` backend against new roles created or - modified after upgrading will contain a set of default key usages. - * The `dynamodb` physical data store no longer supports HA by default. It has - some non-ideal behavior around failover that was causing confusion. See the - [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled) - for information on enabling HA mode. It is very important that this - configuration is added _before upgrading_. 
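As a rough sketch of the `ha_enabled` changes called out above (the `etcd` note in 0.6.2 and the `dynamodb` note in 0.6.1), the flag is set as a string inside the corresponding `backend` stanza of Vault's configuration file before the upgraded binary is started. This is illustrative only; the table name and config path are placeholders:

```
# Sketch only: explicitly enable HA coordination in the storage stanza
# before starting the new Vault version. Values are illustrative.
$ cat >> /etc/vault.hcl <<'EOF'
backend "dynamodb" {
  ha_enabled = "true"
  table      = "vault-data"
}
EOF
$ vault server -config=/etc/vault.hcl
```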
- * The `ldap` backend no longer searches for `memberOf` groups as part of its - normal flow. Instead, the desired group filter must be specified. This fixes - some errors and increases speed for directories with different structures, - but if this behavior has been relied upon, ensure that you see the upgrade - notes _before upgrading_. - * `app-id` is now deprecated with the addition of the new AppRole backend. - There are no plans to remove it, but we encourage using AppRole whenever - possible, as it offers enhanced functionality and can accommodate many more - types of authentication paradigms. - -FEATURES: - - * **AppRole Authentication Backend**: The `approle` backend is a - machine-oriented authentication backend that provides a similar concept to - App-ID while adding many missing features, including a pull model that - allows for the backend to generate authentication credentials rather than - requiring operators or other systems to push credentials in. It should be - useful in many more situations than App-ID. The inclusion of this backend - deprecates App-ID. [GH-1426] - * **Request Forwarding**: Vault servers can now forward requests to each other - rather than redirecting clients. This feature is off by default in 0.6.1 but - will be on by default in the next release. See the [HA concepts - page](https://www.vaultproject.io/docs/concepts/ha.html) for information on - enabling and configuring it. [GH-443] - * **Convergent Encryption in `Transit`**: The `transit` backend now supports a - convergent encryption mode where the same plaintext will produce the same - ciphertext. Although very useful in some situations, this has potential - security implications, which are mostly mitigated by requiring the use of - key derivation when convergent encryption is enabled. See [the `transit` - backend - documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) - for more details. [GH-1537] - * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates - to define group filters, providing the capability to support some - directories that could not easily be supported before (especially specific - Active Directory setups with nested groups). [GH-1388] - * **Key Usage Control in `PKI`**: Issued certificates from roles created or - modified after upgrading contain a set of default key usages for increased - compatibility with OpenVPN and some other software. This set can be changed - when writing a role definition. Existing roles are unaffected. [GH-1552] - * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` - error code will now retry after a backoff. The maximum total number of - retries (including disabling this functionality) can be set with an - environment variable. See the [environment variable - documentation](https://www.vaultproject.io/docs/commands/environment.html) - for more details. [GH-1594] - * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` - will perform service discovery using Consul. When only one node is discovered, - it will be initialized and when more than one node is discovered, they will - be output for easy selection. See `vault init --help` for more details. [GH-1642] - * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database - credentials based on configured roles. Sponsored by - [CommerceHub](http://www.commercehub.com/). [GH-1414] - * **Circonus Metrics Integration**: Vault can now send metrics to - [Circonus](http://www.circonus.com/). 
See the [configuration - documentation](https://www.vaultproject.io/docs/config/index.html) for - details. [GH-1646] - -IMPROVEMENTS: - - * audit: Added a unique identifier to each request which will also be found in - the request portion of the response. [GH-1650] - * auth/aws-ec2: Added a new constraint `bound_account_id` to the role - [GH-1523] - * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role - [GH-1522] - * auth/aws-ec2: Added `ttl` field for the role [GH-1703] - * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` - have the minimum TLS version set to 1.2 by default. This is configurable. - * auth/token: Added endpoint to list accessors [GH-1676] - * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] - * auth/token: `root` or `sudo` tokens can now create periodic tokens via - `auth/token/create`; additionally, the same token can now be periodic and - have an explicit max TTL [GH-1725] - * build: Add support for building on Solaris/Illumos [GH-1726] - * cli: Output formatting in the presence of warnings in the response object - [GH-1533] - * cli: `vault auth` command supports a `-path` option to take in the path at - which the auth backend is enabled, thereby allowing authenticating against - different paths using the command options [GH-1532] - * cli: `vault auth -methods` will now display the config settings of the mount - [GH-1531] - * cli: `vault read/write/unwrap -field` now allows selecting token response - fields [GH-1567] - * cli: `vault write -field` now allows selecting wrapped response fields - [GH-1567] - * command/status: Version information and cluster details added to the output - of `vault status` command [GH-1671] - * core: Response wrapping is now enabled for login endpoints [GH-1588] - * core: The duration of leadership is now exported via events through - telemetry [GH-1625] - * core: `sys/capabilities-self` is now accessible as part of the `default` - policy [GH-1695] - * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] - * core: Unseal keys will now be returned in both hex and base64 forms, and - either can be used [GH-1734] - * core: Responses from most `/sys` endpoints now return normal `api.Secret` - structs in addition to the values they carried before. 
This means that - response wrapping can now be used with most authenticated `/sys` operations - [GH-1699] - * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576] - * physical/consul: Allowing additional tags to be added to Consul service - registration via `service_tags` option [GH-1643] - * secret/aws: Listing of roles is supported now [GH-1546] - * secret/cassandra: Add `connect_timeout` value for Cassandra connection - configuration [GH-1581] - * secret/mssql,mysql,postgresql: Reading of connection settings is supported - in all the sql backends [GH-1515] - * secret/mysql: Added optional maximum idle connections value to MySQL - connection configuration [GH-1635] - * secret/mysql: Use a combination of the role name and token display name in - generated user names and allow the length to be controlled [GH-1604] - * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed - in via one of four ways: a semicolon-delimited string, a base64-delimited - string, a serialized JSON string array, or a base64-encoded serialized JSON - string array [GH-1686] - * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and return - the role name as part of the response of the `verify` API - * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680] - * sys/health: Added version information to the response of health status - endpoint [GH-1647] - * sys/health: Cluster information is returned as part of health status when - Vault is unsealed [GH-1671] - * sys/mounts: MountTable data is compressed before serializing to accommodate - thousands of mounts [GH-1693] - * website: The [token - concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has - been completely rewritten [GH-1725] - -BUG FIXES: +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
+* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - * auth/aws-ec2: Added a nil check for stored whitelist identity object - during renewal [GH-1542] - * auth/cert: Fix panic if no client certificate is supplied [GH-1637] - * auth/token: Don't report that a non-expiring root token is renewable, as - attempting to renew it results in an error [GH-1692] - * cli: Don't retry a command when a redirection is received [GH-1724] - * core: Fix regression causing status codes to be `400` in most non-5xx error - cases [GH-1553] - * core: Fix panic that could occur during a leadership transition [GH-1627] - * physical/postgres: Remove use of prepared statements as this causes - connection multiplexing software to break [GH-1548] - * physical/consul: Multiple Vault nodes on the same machine leading to check ID - collisions were resulting in incorrect health check responses [GH-1628] - * physical/consul: Fix deregistration of health checks on exit [GH-1678] - * secret/postgresql: Check for existence of role before attempting deletion - [GH-1575] - * secret/postgresql: Handle revoking roles that have privileges on sequences - [GH-1573] - * secret/postgresql(,mysql,mssql): Fix incorrect use of database over - transaction object which could lead to connection exhaustion [GH-1572] - * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634] - * secret/pki: Fix adding email addresses as SANs [GH-1688] - * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727] - * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715] - -## 0.6.0 (June 14th, 2016) +## 1.10.6 +### August 31, 2022 SECURITY: - * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via - lease IDs, which incorporate path information) and - `auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using - the tokens' paths and, since 0.5.2, role information), in implementation - they both behaved exactly the same way since a single component in Vault is - responsible for managing lifetimes of both, and the type of the tracked - lifetime was not being checked. The end result was that either endpoint - could revoke both secret leases and tokens. We consider this a very minor - security issue as there are a number of mitigating factors: both endpoints - require `sudo` capability in addition to write capability, preventing - blanket ACL path globs from providing access; both work by using the prefix - to revoke as a part of the endpoint path, allowing them to be properly - ACL'd; and both are intended for emergency scenarios and users should - already not generally have access to either one. In order to prevent - confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and - `sys/revoke-prefix` will be meant for both leases and tokens instead. - -DEPRECATIONS/CHANGES: - - * `auth/token/revoke-prefix` has been removed. See the security notice for - details. [GH-1280] - * Vault will now automatically register itself as the `vault` service when - using the `consul` backend and will perform its own health checks. See - the Consul backend documentation for information on how to disable - auto-registration and service checks. 
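To illustrate the auto-registration described above: once Vault registers the `vault` service, the active and standby nodes can be discovered through Consul's DNS interface. A minimal sketch, assuming a local Consul agent is answering DNS queries (the addresses are illustrative):

```
# Resolve the current active node and the standbys via Consul DNS.
$ dig +short active.vault.service.consul
10.0.1.21
$ dig +short standby.vault.service.consul
10.0.1.22
10.0.1.23
```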
- * List operations that do not find any keys now return a `404` status code - rather than an empty response object [GH-1365] - * CA certificates issued from the `pki` backend no longer have associated - leases, and any CA certs already issued will ignore revocation requests from - the lease manager. This is to prevent CA certificates from being revoked - when the token used to issue the certificate expires; it was not obvious - to users that they needed to ensure that the token lifetime was at - least as long as a potentially very long-lived CA cert. +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] -FEATURES: +CHANGES: - * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS - EC2 instances allowing automated retrieval of Vault tokens. Unlike most - Vault authentication backends, this backend does not require first deploying - or provisioning security-sensitive credentials (tokens, username/password, - client certificates, etc). Instead, it treats AWS as a Trusted Third Party - and uses the cryptographically signed dynamic metadata information that - uniquely represents each EC2 instance. [Vault - Enterprise](https://www.hashicorp.com/vault.html) customers have access to a - turnkey client that speaks the backend API and makes access to a Vault token - easy. - * **Response Wrapping**: Nearly any response within Vault can now be wrapped - inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole - Authentication - Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) - mechanism to its logical conclusion. Retrieving the original response is as - simple as a single API command or the new `vault unwrap` command. This makes - secret distribution easier and more secure, including secure introduction. - * **Azure Physical Backend**: You can now use Azure blob object storage as - your Vault physical data store [GH-1266] - * **Swift Physical Backend**: You can now use Swift blob object storage as - your Vault physical data store [GH-1425] - * **Consul Backend Health Checks**: The Consul backend will automatically - register a `vault` service and perform its own health checking. By default - the active node can be found at `active.vault.service.consul` and all - standby nodes are `standby.vault.service.consul`. Sealed vaults are marked - critical and are not listed by default in Consul's service discovery. See - the documentation for details. [GH-1349] - * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on - tokens that do not honor changes in the system- or mount-set values. This is - useful, for instance, when the max TTL of the system or the `auth/token` - mount must be set high to accommodate certain needs but you want more - granular restrictions on tokens being issued directly from the Token - authentication backend at `auth/token`.
[GH-1399] - * **Non-Renewable Tokens**: When creating tokens directly through the token - authentication backend, you can now specify in both token store roles and - the API whether or not a token should be renewable, defaulting to `true`. - * **RabbitMQ Secret Backend**: Vault can now generate credentials for - RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] +* core: Bump Go version to 1.17.13. IMPROVEMENTS: - * audit: Add the DisplayName value to the copy of the Request object embedded - in the associated Response, to match the original Request object [GH-1387] - * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] - * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. - A particular exception are any current MFA paths. A few paths in `token` and - `sys` also require `root` or `sudo`. [GH-1478] - * command/auth: Restore the previous authenticated token if the `auth` command - fails to authenticate the provided token [GH-1233] - * command/write: `-format` and `-field` can now be used with the `write` - command [GH-1228] - * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] - * core: Don't keep lease timers around when tokens are revoked [GH-1277] - * core: If using the `disable_cache` option, caches for the policy store and - the `transit` backend are now disabled as well [GH-1346] - * credential/cert: Renewal requests are rejected if the set of policies has - changed since the token was issued [GH-477] - * credential/cert: Check CRLs for specific non-CA certs configured in the - backend [GH-1404] - * credential/ldap: If `groupdn` is not configured, skip searching LDAP and - only return policies for local groups, plus a warning [GH-1283] - * credential/ldap: `vault list` support for users and groups [GH-1270] - * credential/ldap: Support for the `memberOf` attribute for group membership - searching [GH-1245] - * credential/userpass: Add list support for users [GH-911] - * credential/userpass: Remove user configuration paths from requiring sudo, in - favor of normal ACL mechanisms [GH-1312] - * credential/token: Sanitize policies and add `default` policies in appropriate - places [GH-1235] - * credential/token: Setting the renewable status of a token is now possible - via `vault token-create` and the API. The default is true, but tokens can be - specified as non-renewable. [GH-1499] - * secret/aws: Use chain credentials to allow environment/EC2 instance/shared - providers [GH-307] - * secret/aws: Support for STS AssumeRole functionality [GH-1318] - * secret/consul: Reading consul access configuration supported. The response - will contain non-sensitive information only [GH-1445] - * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to - DNS or Email Subject Alternate Names [GH-1220] - * secret/pki: Added list support for certificates [GH-1466] - * sys/capabilities: Enforce ACL checks for requests that query the capabilities - of a token on a given path [GH-1221] - * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] BUG FIXES: - * command/read: Fix panic when using `-field` with a non-string value [GH-1308] - * command/token-lookup: Fix TTL showing as 0 depending on how a token was - created. This only affected the value shown at lookup, not the token - behavior itself. 
[GH-1306] - * command/various: Tell the JSON decoder to not convert all numbers to floats; - fixes various places where numbers were showing up in scientific - notation - * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags - over their respective env vars [GH-1480] - * command/ssh: Provided option to disable host key checking. The automated - variant of `vault ssh` command uses `sshpass` which was failing to handle - host key checking presented by the `ssh` binary. [GH-1473] - * core: Properly persist mount-tuned TTLs for auth backends [GH-1371] - * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372] - * credential/github: Make organization comparison case-insensitive during - login [GH-1359] - * credential/github: Fix panic when renewing a token created with some earlier - versions of Vault [GH-1510] - * credential/github: The token used to log in via `vault auth` can now be - specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511] - * credential/ldap: Fix problem where certain error conditions when configuring - or opening LDAP connections would cause a panic instead of returning a useful - error message [GH-1262] - * credential/token: Fall back to normal parent-token semantics if - `allowed_policies` is empty for a role. Using `allowed_policies` of - `default` resulted in the same behavior anyways. [GH-1276] - * credential/token: Fix issues renewing tokens when using the "suffix" - capability of token roles [GH-1331] - * credential/token: Fix lookup via POST showing the request token instead of - the desired token [GH-1354] - * credential/various: Fix renewal conditions when `default` policy is not - contained in the backend config [GH-1256] - * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353] - * secret/consul: Use non-pooled Consul API client to avoid leaving files open - [GH-1428] - * secret/pki: Don't check whether a certificate is destined to be a CA - certificate if sign-verbatim endpoint is used [GH-1250] - -## 0.5.3 (May 27th, 2016) +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters.
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] SECURITY: - * Consul ACL Token Revocation: An issue was reported to us indicating that - generated Consul ACL tokens were not being properly revoked. Upon - investigation, we found that this behavior was reproducible in a specific - scenario: when a generated lease for a Consul ACL token had been renewed - prior to revocation. In this case, the generated token was not being - properly persisted internally through the renewal function, leading to an - error during revocation due to the missing token. Unfortunately, this was - coded as a user error rather than an internal error, and the revocation - logic was expecting internal errors if revocation failed. As a result, the - revocation logic believed the revocation to have succeeded when it in fact - failed, causing the lease to be dropped while the token was still valid - within Consul. In this release, the Consul backend properly persists the - token through renewals, and the revocation logic has been changed to - consider any error type to have been a failure to revoke, causing the lease - to persist and attempt to be revoked later. - -We have written an example shell script that searches through Consul's ACL -tokens and looks for those generated by Vault, which can be used as a template -for a revocation script as deemed necessary for any particular security -response. The script is available at -https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 - -Please note that any outstanding leases for Consul tokens produced prior to -0.5.3 that have been renewed will continue to exhibit this behavior. As a -result, we recommend either revoking all tokens produced by the backend and -issuing new ones, or if needed, a more advanced variant of the provided example -could use the timestamp embedded in each generated token's name to decide which -tokens are too old and should be deleted. This could then be run periodically -up until the maximum lease time for any outstanding pre-0.5.3 tokens has -expired. - -This is a security-only release. There are no other code changes since 0.5.2. -The binaries have one additional change: they are built against Go 1.6.1 rather -than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming -language itself. - -## 0.5.2 (March 16th, 2016) - -FEATURES: - - * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based - on configured roles [GH-998] - * **Token Accessors**: Vault now provides an accessor with each issued token. - This accessor is an identifier that can be used for a limited set of - actions, notably for token revocation. This value can be logged in - plaintext to audit logs, and in combination with the plaintext metadata - logged to audit logs, provides a searchable and straightforward way to - revoke particular users' or services' tokens in many cases. To enable - plaintext audit logging of these accessors, set `hmac_accessor=false` when - enabling an audit backend. 
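As a quick sketch of how these accessors can be used with the accessor endpoints added in this release (see the IMPROVEMENTS list below), assuming an accessor value pulled from an audit log entry (the value here is made up):

```
# Look up a token's properties, then revoke it, using only its accessor.
$ vault write auth/token/lookup-accessor accessor=2c84f488-2133-4ced-87b0-570f93a76830
$ vault write auth/token/revoke-accessor accessor=2c84f488-2133-4ced-87b0-570f93a76830
```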
- * **Token Credential Backend Roles**: Roles can now be created in the `token` - credential backend that allow modifying token behavior in ways that are not - otherwise exposed or easily delegated. This allows creating tokens with a - fixed set (or subset) of policies (rather than a subset of the calling - token's), periodic tokens with a fixed TTL but no expiration, specified - prefixes, and orphans. - * **Listener Certificate Reloading**: Vault's configured listeners now reload - their TLS certificate and private key when the Vault process receives a - SIGHUP. - -IMPROVEMENTS: - - * auth/token: Endpoints optionally accept tokens from the HTTP body rather - than just from the URLs [GH-1211] - * auth/token,sys/capabilities: Added new endpoints - `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and - `sys/capabilities-accessor`, which enables performing the respective actions - with just the accessor of the tokens, without having access to the actual - token [GH-1188] - * core: Ignore leading `/` in policy paths [GH-1170] - * core: Ignore leading `/` in mount paths [GH-1172] - * command/policy-write: Provided HCL is now validated for format violations - and provides helpful information around where the violation occurred - [GH-1200] - * command/server: The initial root token ID when running in `-dev` mode can - now be specified via `-dev-root-token-id` or the environment variable - `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] - * command/server: The listen address when running in `-dev` mode can now be - specified via `-dev-listen-address` or the environment variable - `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] - * command/server: The configured listeners now reload their TLS - certificates/keys when Vault is SIGHUP'd [GH-1196] - * command/step-down: New `vault step-down` command and API endpoint to force - the targeted node to give up active status, but without sealing. The node - will wait ten seconds before attempting to grab the lock again. [GH-1146] - * command/token-renew: Allow no token to be passed in; use `renew-self` in - this case. Change the behavior for any token being passed in to use `renew`. - [GH-1150] - * credential/app-id: Allow `app-id` parameter to be given in the login path; - this causes the `app-id` to be part of the token path, making it easier to - use with `revoke-prefix` [GH-424] - * credential/cert: Non-CA certificates can be used for authentication. They - must be matched exactly (issuer and serial number) for authentication, and - the certificate must carry the client authentication or 'any' extended usage - attributes. [GH-1153] - * credential/cert: Subject and Authority key IDs are output in metadata; this - allows more flexible searching/revocation in the audit logs [GH-1183] - * credential/cert: Support listing configured certs [GH-1212] - * credential/userpass: Add support for `create`/`update` capability - distinction in user path, and add user-specific endpoints to allow changing - the password and policies [GH-1216] - * credential/token: Add roles [GH-1155] - * secret/mssql: Add MSSQL backend [GH-998] - * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` - endpoint [GH-1180] - * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some - other formats [GH-1187] - * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint. 
- [GH-1154] - * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to - fetch the capabilities of a token on a given path [GH-1171] - * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors - when revoking a lease, necessary in some emergency/failure scenarios - [GH-1168] - * sys: The return codes from `sys/health` can now be user-specified via query - parameters [GH-1199] - -BUG FIXES: - - * logical/cassandra: Apply hyphen/underscore replacement to the entire - generated username, not just the UUID, in order to handle token display name - hyphens [GH-1140] - * physical/etcd: Output actual error when cluster sync fails [GH-1141] - * vault/expiration: Error responses from the backends are no longer skipped - during renewals [GH-1176] - -## 0.5.1 (February 25th, 2016) - -DEPRECATIONS/CHANGES: - - * RSA keys less than 2048 bits are no longer supported in the PKI backend. - 1024-bit keys are considered unsafe and are disallowed in the Internet PKI. - The `pki` backend has enforced SHA256 hashes in signatures from the - beginning, and software that can handle these hashes should be able to - handle larger key sizes. [GH-1095] - * The PKI backend now does not automatically delete expired certificates, - including from the CRL. Doing so could lead to a situation where a time - mismatch between the Vault server and clients could result in a certificate - that would not be considered expired by a client being removed from the CRL. - The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129] - * The `cert` backend now performs a variant of channel binding at renewal time - for increased security. In order to not overly burden clients, a notion of - identity is used. This functionality can be disabled. See the 0.5.1 upgrade - guide for more specific information [GH-1127] - -FEATURES: - - * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of - the audit contract do not allow us to make the results public.) [GH-220] - -IMPROVEMENTS: - - * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control - the SNI header during TLS connections [GH-1131] - * api/health: Add the server's time in UTC to health responses [GH-1117] - * command/rekey and command/generate-root: These now return the status at - attempt initialization time, rather than requiring a separate fetch for the - nonce [GH-1054] - * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/` - paths; use normal ACL behavior instead [GH-468] - * credential/github: The validity of the token used for login will be checked - at renewal time [GH-1047] - * credential/github: The `config` endpoint no longer requires a root token; - normal ACL path matching applies - * deps: Use the standardized Go 1.6 vendoring system - * secret/aws: Inform users of AWS-imposed policy restrictions around STS - tokens if they attempt to use an invalid policy [GH-1113] - * secret/mysql: The MySQL backend now allows disabling verification of the - `connection_url` [GH-1096] - * secret/pki: Submitted CSRs are now verified to have the correct key type and - minimum number of bits according to the role. The exception is intermediate - CA signing and the `sign-verbatim` path [GH-1104] - * secret/pki: New `tidy` endpoint to allow expunging expired certificates. - [GH-1129] - * secret/postgresql: The PostgreSQL backend now allows disabling verification - of the `connection_url` [GH-1096] - * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of - 204 [GH-1086] - * credential/app-id: App ID backend will check the validity of app-id and user-id - at renewal time [GH-1039] - * credential/cert: TLS Certificates backend, during renewal, will now match the - client identity with the client identity used during login [GH-1127] - -BUG FIXES: - - * credential/ldap: Properly escape values being provided to search filters - [GH-1100] - * secret/aws: Cap the length of usernames for both IAM and STS types - [GH-1102] - * secret/pki: If a cert is not found during lookup of a serial number, - respond with a 400 rather than a 500 [GH-1085] - * secret/postgresql: Add extra revocation statements to better handle more - permission scenarios [GH-1053] - * secret/postgresql: Make connection_url work properly [GH-1112] +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] -## 0.5.0 (February 10, 2016) +## 1.10.5 +### July 21, 2022 SECURITY: - * Previous versions of Vault could allow a malicious user to hijack the rekey - operation by canceling an operation in progress and starting a new one. The - practical application of this is very small. If the user was an unseal key - owner, they could attempt to do this in order to either receive unencrypted - reseal keys or to replace the PGP keys used for encryption with ones under - their control. However, since this would invalidate any rekey progress, they - would need other unseal key holders to resubmit, which would be rather - suspicious during this manual operation if they were not also the original - initiator of the rekey attempt. If the user was not an unseal key holder, - there is no benefit to be gained; the only outcome that could be attempted - would be a denial of service against a legitimate rekey operation by sending - cancel requests over and over. Thanks to Josh Snyder for the report! - -DEPRECATIONS/CHANGES: - - * `s3` physical backend: Environment variables are now preferred over - configuration values. This makes it behave similar to the rest of Vault, - which, in increasing order of preference, uses values from the configuration - file, environment variables, and CLI flags. [GH-871] - * `etcd` physical backend: `sync` functionality is now supported and turned on - by default. This can be disabled. [GH-921] - * `transit`: If a client attempts to encrypt a value with a key that does not - yet exist, what happens now depends on the capabilities set in the client's - ACL policies. If the client has `create` (or `create` and `update`) - capability, the key will upsert as in the past. If the client has `update` - capability, they will receive an error. [GH-1012] - * `token-renew` CLI command: If the token given for renewal is the same as the - client token, the `renew-self` endpoint will be used in the API. Given that - the `default` policy (by default) allows all clients access to the - `renew-self` endpoint, this makes it much more likely that the intended - operation will be successful. [GH-894] - * Token `lookup`: the `ttl` value in the response now reflects the actual - remaining TTL rather than the original TTL specified when the token was - created; this value is now located in `creation_ttl` [GH-986] - * Vault no longer uses grace periods on leases or token TTLs. Uncertainty - about the length of the grace period for any given backend could cause confusion - and uncertainty. [GH-1002] - * `rekey`: Rekey now requires a nonce to be supplied with key shares. This - nonce is generated at the start of a rekey attempt and is unique for that - attempt. - * `status`: The exit code for the `status` CLI command is now `2` for an - uninitialized Vault instead of `1`. `1` is returned for errors. This better - matches the rest of the CLI. +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] -FEATURES: +CHANGES: - * **Split Data/High Availability Physical Backends**: You can now configure - two separate physical backends: one to be used for High Availability - coordination and another to be used for encrypted data storage. See the - [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-395] - * **Fine-Grained Access Control**: Policies can now use the `capabilities` set - to specify fine-grained control over operations allowed on a path, including - separation of `sudo` privileges from other privileges. These can be mixed - and matched in any way desired. The `policy` value is kept for backwards - compatibility. See the [updated policy - documentation](https://vaultproject.io/docs/concepts/policies.html) for - details. [GH-914] - * **List Support**: Listing is now supported via the API and the new `vault - list` command. This currently supports listing keys in the `generic` and - `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS - section below). Different parts of the API and backends will need to - implement list capabilities in ways that make sense to particular endpoints, - so further support will appear over time. [GH-617] - * **Root Token Generation via Unseal Keys**: You can now use the - `generate-root` CLI command to generate new orphaned, non-expiring root - tokens in case the original is lost or revoked (accidentally or - purposefully). This requires a quorum of unseal key holders. The output - value is protected via any PGP key of the initiator's choosing or a one-time - pad known only to the initiator (a suitable pad can be generated via the - `-genotp` flag to the command). [GH-915] - * **Unseal Key Archiving**: You can now optionally have Vault store your - unseal keys in your chosen physical store for disaster recovery purposes. - This option is only available when the keys are encrypted with PGP.
[GH-907] - * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase - users when passing in PGP keys to the `init`, `rekey`, and `generate-root` - CLI commands. Public keys for these users will be fetched automatically. - [GH-901] - * **DynamoDB HA Physical Backend**: There is now a new, community-supported - HA-enabled physical backend using Amazon DynamoDB. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-878] - * **PostgreSQL Physical Backend**: There is now a new, community-supported - physical backend using PostgreSQL. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-945] - * **STS Support in AWS Secret Backend**: You can now use the AWS secret - backend to fetch STS tokens rather than IAM users. [GH-927] - * **Speedups in the transit backend**: The `transit` backend has gained a - cache, and now loads only the working set of keys (e.g. from the - `min_decryption_version` to the current key version) into its working set. - This provides large speedups and potential memory savings when the `rotate` - feature of the backend is used heavily. +* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] +* core: Bump Go version to 1.17.12. IMPROVEMENTS: - * cli: Output secrets sorted by key name [GH-830] - * cli: Support YAML as an output format [GH-832] - * cli: Show an error if the output format is incorrect, rather than falling - back to an empty table [GH-849] - * cli: Allow setting the `advertise_addr` for HA via the - `VAULT_ADVERTISE_ADDR` environment variable [GH-581] - * cli/generate-root: Add generate-root and associated functionality [GH-915] - * cli/init: Add `-check` flag that returns whether Vault is initialized - [GH-949] - * cli/server: Use internal functions for the token-helper rather than shelling - out, which fixes some problems with using a static binary in Docker or paths - with multiple spaces when launching in `-dev` mode [GH-850] - * cli/token-lookup: Add token-lookup command [GH-892] - * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for - `-pgp-keys` [GH-940] - * conf: Use normal bool values rather than empty/non-empty for the - `tls_disable` option [GH-802] - * credential/ldap: Add support for binding, both anonymously (to discover a - user DN) and via a username and password [GH-975] - * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] - * credential/token: Change `ttl` to reflect the current remaining TTL; the - original value is in `creation_ttl` [GH-1007] - * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] - * logical/aws: You can now get STS tokens instead of IAM users [GH-927] - * logical/cassandra: Add `protocol_version` parameter to set the CQL proto - version [GH-1005] - * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] - * logical/mysql: Add list support for roles path [GH-984] - * logical/pki: Fix up key usages being specified for CAs [GH-989] - * logical/pki: Add list support for roles path [GH-985] - * logical/pki: Allow `pem_bundle` to be specified as the format, which - provides a concatenated PEM bundle of returned values [GH-1008] - * logical/pki: Add 30 seconds of slack to the validity start period to - accommodate some clock skew in machines [GH-1036] - * logical/postgres: Add `max_idle_connections` parameter [GH-950] - * logical/postgres: Add list 
support for roles path - * logical/ssh: Add list support for roles path [GH-983] - * logical/transit: Keys are archived and only keys between the latest version - and `min_decryption_version` are loaded into the working set. This can - provide a very large speed increase when rotating keys very often. [GH-977] - * logical/transit: Keys are now cached, which should provide a large speedup - in most cases [GH-979] - * physical/cache: Use 2Q cache instead of straight LRU [GH-908] - * physical/etcd: Support basic auth [GH-859] - * physical/etcd: Support sync functionality and enable by default [GH-921] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] BUG FIXES: - * api: Correct the HTTP verb used in the LookupSelf method [GH-887] - * api: Fix the output of `Sys().MountConfig(...)` to return proper values - [GH-1017] - * command/read: Fix panic when an empty argument was given [GH-923] - * command/ssh: Fix panic when username lookup fails [GH-886] - * core: When running in standalone mode, don't advertise that we are active - until post-unseal setup completes [GH-872] - * core: Update go-cleanhttp dependency to ensure idle connections aren't - leaked [GH-867] - * core: Don't allow tokens to have duplicate policies [GH-897] - * core: Fix regression in `sys/renew` that caused information stored in the - Secret part of the response to be lost [GH-912] - * physical: Use square brackets when setting an IPv6-based advertise address - as the auto-detected advertise address [GH-883] - * physical/s3: Use an initialized client when using IAM roles to fix a - regression introduced against newer versions of the AWS Go SDK [GH-836] - * secret/pki: Fix a condition where unmounting could fail if the CA - certificate was not properly loaded [GH-946] - * secret/ssh: Fix a problem where SSH connections were not always closed - properly [GH-942] - -MISC: - - * Clarified our stance on support for community-derived physical backends. - See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - * Add `vault-java` to libraries [GH-851] - * Various minor documentation fixes and improvements [GH-839] [GH-854] - [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] - [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] - -BUILD NOTE: - - * The HashiCorp-provided binary release of Vault 0.5.0 is built against a - patched version of Go 1.5.3 containing two specific bug fixes affecting TLS - certificate handling. These fixes are in the Go 1.6 tree and were - cherry-picked on top of stock Go 1.5.3. If you want to examine the way in - which the releases were built, please look at our [cross-compilation - Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). - -## 0.4.1 (January 13, 2016) - -SECURITY: +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - * Build against Go 1.5.3 to mitigate a security vulnerability introduced in - Go 1.5. For more information, please see - https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 - -This is a security-only release; other than the version number and building -against Go 1.5.3, there are no changes from 0.4.0. - -## 0.4.0 (December 10, 2015) - -DEPRECATIONS/CHANGES: - - * Policy Name Casing: Policy names are now normalized to lower-case on write, - helping prevent accidental case mismatches. For backwards compatibility, - policy names are not currently normalized when reading or deleting. [GH-676] - * Default etcd port number: the default connection string for the `etcd` - physical store uses port 2379 instead of port 4001, which is the port used - by the supported version 2.x of etcd. [GH-753] - * As noted below in the FEATURES section, if your Vault installation contains - a policy called `default`, new tokens created will inherit this policy - automatically. - * In the PKI backend there have been a few minor breaking changes: - * The token display name is no longer a valid option for providing a base - domain for issuance. Since this name is prepended with the name of the - authentication backend that issued it, it provided a faulty use-case at best - and a confusing experience at worst. We hope to figure out a better - per-token value in a future release. - * The `allowed_base_domain` parameter has been changed to `allowed_domains`, - which accepts a comma-separated list of domains. This allows issuing - certificates with DNS subjects across multiple domains. If you had a - configured `allowed_base_domain` parameter, it will be migrated - automatically when the role is read (either via a normal read, or via - issuing a certificate). +## 1.10.4 +### June 10, 2022 -FEATURES: +CHANGES: - * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate - and sign root CA certificates and intermediate CA CSRs. It can also now sign - submitted client CSRs, as well as a significant number of other - enhancements. See the updated documentation for the full API. [GH-666] - * **CRL Checking for Certificate Authentication**: The `cert` backend now - supports pushing CRLs into the mount and using the contained serial numbers - for revocation checking. See the documentation for the `cert` backend for - more info. 
[GH-330] - * **Default Policy**: Vault now ensures that a policy named `default` is added - to every token. This policy cannot be deleted, but it can be modified - (including to an empty policy). There are three endpoints allowed in the - default `default` policy, related to token self-management: `lookup-self`, - which allows a token to retrieve its own information, and `revoke-self` and - `renew-self`, which are self-explanatory. If your existing Vault - installation contains a policy called `default`, it will not be overridden, - but it will be added to each new token created. You can override this - behavior when using manual token creation (i.e. not via an authentication - backend) by setting the "no_default_policy" flag to true. [GH-732] +* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] IMPROVEMENTS: - * api: API client now uses a 60 second timeout instead of indefinite [GH-681] - * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth - tokens [GH-739] - * api: Standardize environment variable reading logic inside the API; the CLI - now uses this but can still override via command-line parameters [GH-618] - * audit: HMAC-SHA256'd client tokens are now stored with each request entry. - Previously they were only displayed at creation time; this allows much - better traceability of client actions. [GH-713] - * audit: There is now a `sys/audit-hash` endpoint that can be used to generate - an HMAC-SHA256'd value from provided data using the given audit backend's - salt [GH-784] - * core: The physical storage read cache can now be disabled via - "disable_cache" [GH-674] - * core: The unsealing process can now be reset midway through (this feature - was documented before, but not enabled) [GH-695] - * core: Tokens can now renew themselves [GH-455] - * core: Base64-encoded PGP keys can be used with the CLI for `init` and - `rekey` operations [GH-653] - * core: Print version on startup [GH-765] - * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system - instead of requiring a root token [GH-769] - * credential/token: Display whether or not a token is an orphan in the output - of a lookup call [GH-766] - * logical: Allow `.` in path-based variables in many more locations [GH-244] - * logical: Responses now contain a "warnings" key containing a list of - warnings returned from the server. These are conditions that did not require - failing an operation, but of which the client should be aware. [GH-676] - * physical/(consul,etcd): Consul and etcd now use a connection pool to limit - the number of outstanding operations, improving behavior when a lot of - operations must happen at once [GH-677] [GH-780] - * physical/consul: The `datacenter` parameter was removed; It could not be - effective unless the Vault node (or the Consul node it was connecting to) - was in the datacenter specified, in which case it wasn't needed [GH-816] - * physical/etcd: Support TLS-encrypted connections and use a connection pool - to limit the number of outstanding operations [GH-780] - * physical/s3: The S3 endpoint can now be configured, allowing using - S3-API-compatible storage solutions [GH-750] - * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` - environment variable [GH-758] - * secret/consul: Management tokens can now be created [GH-714] - -BUG FIXES: - - * api: API client now checks for a 301 response for redirects. 
Vault doesn't - generate these, but in certain conditions Go's internal HTTP handler can - generate them, leading to client errors. - * cli: `token-create` now supports the `ttl` parameter in addition to the - deprecated `lease` parameter. [GH-688] - * core: Return data from `generic` backends on the last use of a limited-use - token [GH-615] - * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] - * core: Stale leader entries will now be reaped [GH-679] - * core: Using `mount-tune` on the auth/token path did not take effect. - [GH-688] - * core: Fix a potential race condition when (un)sealing the vault with metrics - enabled [GH-694] - * core: Fix an error that could happen in some failure scenarios where Vault - could fail to revert to a clean state [GH-733] - * core: Ensure secondary indexes are removed when a lease is expired [GH-749] - * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] - * everywhere: Don't use http.DefaultClient, as it shares state implicitly and - is a source of hard-to-track-down bugs [GH-700] - * credential/token: Allow creating orphan tokens via an API path [GH-748] - * secret/generic: Validate given duration at write time, not just read time; - if stored durations are not parseable, return a warning and the default - duration rather than an error [GH-718] - * secret/generic: Return 400 instead of 500 when `generic` backend is written - to with no data fields [GH-825] - * secret/postgresql: Revoke permissions before dropping a user or revocation - may fail [GH-699] - -MISC: - - * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] - [GH-710] [GH-715] [GH-831] - -## 0.3.1 (October 6, 2015) - -SECURITY: - - * core: In certain failure scenarios, the full values of requests and - responses would be logged [GH-665] - -FEATURES: - - * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends - now allow setting the number of maximum open connections to the database, - which was previously capped to 2. [GH-661] - * **Renewable Tokens for GitHub**: The `github` backend now supports - specifying a TTL, enabling renewable tokens. [GH-664] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] BUG FIXES: - * dist: linux-amd64 distribution was dynamically linked [GH-656] - * credential/github: Fix acceptance tests [GH-651] - -MISC: - - * Various minor documentation fixes and improvements [GH-649] [GH-650] - [GH-654] [GH-663] - -## 0.3.0 (September 28, 2015) - -DEPRECATIONS/CHANGES: - -Note: deprecations and breaking changes in upcoming releases are announced -ahead of time on the "vault-tool" mailing list. - - * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is - via the X-Vault-Token header. Cookie authentication was hard to properly - test, could result in browsers/tools/applications saving tokens in plaintext - on disk, and other issues. [GH-564] - * **Terminology/Field Names**: Vault is transitioning from overloading the - term "lease" to mean both "a set of metadata" and "the amount of time the - metadata is valid". The latter is now being referred to as TTL (or - "lease_duration" for backwards-compatibility); some parts of Vault have - already switched to using "ttl" and others will follow in upcoming releases. - In particular, the "token", "generic", and "pki" backends accept both "ttl" - and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] - * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, - values written by Vault 0.3+ will not be able to be read by prior versions - of Vault. There are no expected upgrade issues, however, as with all - critical infrastructure it is recommended to back up Vault's physical - storage before upgrading. - -FEATURES: - - * **SSH Backend**: Vault can now be used to delegate SSH access to machines, - via a (recommended) One-Time Password approach or by issuing dynamic keys. - [GH-385] - * **Cubbyhole Backend**: This backend works similarly to the "generic" backend - but provides a per-token workspace. This enables some additional - authentication workflows (especially for containers) and can be useful to - applications to e.g. store local credentials while being restarted or - upgraded, rather than persisting to disk. [GH-612] - * **Transit Backend Improvements**: The transit backend now allows key - rotation and datakey generation. For rotation, data encrypted with previous - versions of the keys can still be decrypted, down to a (configurable) - minimum previous version; there is a rewrap function for manual upgrades of - ciphertext to newer versions. Additionally, the backend now allows - generating and returning high-entropy keys of a configurable bitsize - suitable for AES and other functions; this is returned wrapped by a named - key, or optionally both wrapped and plaintext for immediate use. [GH-626] - * **Global and Per-Mount Default/Max TTL Support**: You can now set the - default and maximum Time To Live for leases both globally and per-mount. - Per-mount settings override global settings. Not all backends honor these - settings yet, but the maximum is a hard limit enforced outside the backend. - See the documentation for "/sys/mounts/" for details on configuring - per-mount TTLs. [GH-469] - * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's - master key, PGP/GPG public keys can now be provided. The output keys will be - encrypted with the given keys, in order. 
[GH-570] - * **Duo Multifactor Authentication Support**: Backends that support MFA can - now use Duo as the mechanism. [GH-464] - * **Performance Improvements**: Users of the "generic" backend will see a - significant performance improvement as the backend no longer creates leases, - although it does return TTLs (global/mount default, or set per-item) as - before. [GH-631] - * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the - audit contract do not allow us to make the results public.) [GH-220] +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
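(The `username_as_alias` fix above changes only which value becomes the entity alias at login; no extra LDAP search is made for aliasing. For context, a minimal, hypothetical sketch of configuring the LDAP auth method with that flag through the Go client, `github.com/hashicorp/vault/api`; the LDAP URL, DN, and attribute values are placeholders, not taken from this changelog.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig/NewClient read VAULT_ADDR and VAULT_TOKEN from the env.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Enable the LDAP auth method (path and settings are illustrative).
	if err := client.Sys().EnableAuthWithOptions("ldap", &api.EnableAuthOptions{
		Type: "ldap",
	}); err != nil {
		log.Fatal(err)
	}

	// With username_as_alias set, the login username itself becomes the
	// entity alias, so no user_attr lookup is needed for aliasing.
	_, err = client.Logical().Write("auth/ldap/config", map[string]interface{}{
		"url":               "ldaps://ldap.example.com",
		"userdn":            "ou=Users,dc=example,dc=com",
		"userattr":          "uid",
		"username_as_alias": true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("LDAP auth configured")
}
```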
+* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] -IMPROVEMENTS: +## 1.10.3 +### May 11, 2022 - * audit: Log entries now contain a time field [GH-495] - * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] - * backends: Add ability for a cleanup function to be called on backend unmount - [GH-608] - * config: Allow specifying minimum acceptable TLS version [GH-447] - * core: If trying to mount in a location that is already mounted, be more - helpful about the error [GH-510] - * core: Be more explicit on failure if the issue is invalid JSON [GH-553] - * core: Tokens can now revoke themselves [GH-620] - * credential/app-id: Give a more specific error when sending a duplicate POST - to sys/auth/app-id [GH-392] - * credential/github: Support custom API endpoints (e.g. for Github Enterprise) - [GH-572] - * credential/ldap: Add per-user policies and option to login with - userPrincipalName [GH-420] - * credential/token: Allow root tokens to specify the ID of a token being - created from CLI [GH-502] - * credential/userpass: Enable renewals for login tokens [GH-623] - * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] - * scripts: Use godep for build scripts to use same environment as tests - [GH-404] - * secret/mysql: Allow reading configuration data [GH-529] - * secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to - allow for non-hostname values (e.g. for client certificates) [GH-555] - * storage/consul: Allow specifying certificates used to talk to Consul - [GH-384] - * storage/mysql: Allow SSL encrypted connections [GH-439] - * storage/s3: Allow using temporary security credentials [GH-433] - * telemetry: Put telemetry object in configuration to allow more flexibility - [GH-419] - * testing: Disable mlock for testing of logical backends so as not to require - root [GH-479] +SECURITY: +* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. 
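(The 0.3.0 explicit-globbing change above means a path matches its children only when the policy spells out the trailing `*`. A minimal sketch of writing such a policy with the Go client; the policy name, path, and capabilities are illustrative, and the `capabilities` syntax shown is the modern form rather than the 0.3-era `policy =` form.)

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Explicit globbing: the trailing "*" must be spelled out in the
	// path; a bare prefix no longer matches its children implicitly.
	rules := `
path "secret/app/*" {
  capabilities = ["read", "list"]
}
`
	if err := client.Sys().PutPolicy("app-readonly", rules); err != nil {
		log.Fatal(err)
	}
}
```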
BUG FIXES: - * audit/file: Do not enable auditing if file permissions are invalid [GH-550] - * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] - * cli: Fixed missing setup of client TLS certificates if no custom CA was - provided - * cli/read: Do not include a carriage return when using raw field output - [GH-624] - * core: Bad input data could lead to a panic for that session, rather than - returning an error [GH-503] - * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] - * core: Do not return a Secret if there are no uses left on a token (since it - will be unable to be used) [GH-615] - * core: Code paths that called lookup-self would decrement num_uses and - potentially immediately revoke a token [GH-552] - * core: Some /sys/ paths would not properly redirect from a standby to the - leader [GH-499] [GH-551] - * credential/aws: Translate spaces in a token's display name to avoid making - IAM unhappy [GH-567] - * credential/github: Integration failed if more than ten organizations or - teams [GH-489] - * credential/token: Tokens with sudo access to "auth/token/create" can now use - root-only options [GH-629] - * secret/cassandra: Work around backwards-incompatible change made in - Cassandra 2.2 preventing Vault from properly setting/revoking leases - [GH-549] - * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues - [GH-522] - * secret/postgres: Explicitly set timezone in connections [GH-597] - * storage/etcd: Renew semaphore periodically to prevent leadership flapping - [GH-606] - * storage/zk: Fix collisions in storage that could lead to data unavailability - [GH-411] - -MISC: - - * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] - [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] - [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] - * Less "armon" in stack traces [GH-453] - * Sourcegraph integration [GH-456] - -## 0.2.0 (July 13, 2015) - -FEATURES: - - * **Key Rotation Support**: The `rotate` command can be used to rotate the - master encryption key used to write data to the storage (physical) backend. - [GH-277] - * **Rekey Support**: Rekey can be used to rotate the master key and change the - configuration of the unseal keys (number of shares, threshold required). - [GH-277] - * **New secret backend: `pki`**: Enable Vault to be a certificate authority - and generate signed TLS certificates. [GH-310] - * **New secret backend: `cassandra`**: Generate dynamic credentials for - Cassandra [GH-363] - * **New storage backend: `etcd`**: store physical data in etcd [GH-259] - [GH-297] - * **New storage backend: `s3`**: store physical data in S3. Does not support - HA. [GH-242] - * **New storage backend: `MySQL`**: store physical data in MySQL. Does not - support HA. [GH-324] - * `transit` secret backend supports derived keys for per-transaction unique - keys [GH-399] - -IMPROVEMENTS: +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... 
}} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] - * cli/auth: Enable `cert` method [GH-380] - * cli/auth: read input from stdin [GH-250] - * cli/read: Ability to read a single field from a secret [GH-257] - * cli/write: Adding a force flag when no input required - * core: allow time duration format in place of seconds for some inputs - * core: audit log provides more useful information [GH-360] - * core: graceful shutdown for faster HA failover - * core: **change policy format** to use explicit globbing [GH-400] Any - existing policy in Vault is automatically upgraded to avoid issues. All - policy files must be updated for future writes. Adding the explicit glob - character `*` to the path specification is all that is required. - * core: policy merging to give deny highest precedence [GH-400] - * credential/app-id: Protect against timing attack on app-id - * credential/cert: Record the common name in the metadata [GH-342] - * credential/ldap: Allow TLS verification to be disabled [GH-372] - * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] - * credential/userpass: Protect against timing attack on password - * credential/userpass: Use bcrypt for password matching - * http: response codes improved to reflect error [GH-366] - * http: the `sys/health` endpoint supports `?standbyok` to return 200 on - standby [GH-389] - * secret/app-id: Support deleting AppID and UserIDs [GH-200] - * secret/consul: Fine grained lease control [GH-261] - * secret/transit: Decouple raw key from key management endpoint [GH-355] - * secret/transit: Upsert named key when encrypt is used [GH-355] - * storage/zk: Support for HA configuration [GH-252] - * storage/zk: Changing node representation. **Backwards incompatible**. 
- [GH-416]
+## 1.10.2
+### April 29, 2022
BUG FIXES:
- * audit/file: fix removing TLS connection state
- * audit/syslog: fix removing TLS connection state
- * command/*: commands accepting `k=v` allow blank values
- * core: Allow building on FreeBSD [GH-365]
- * core: Fixed various panics when audit logging enabled
- * core: Lease renewal does not create redundant lease
- * core: fixed leases with negative duration [GH-354]
- * core: token renewal does not create child token
- * core: fixing panic when lease increment is null [GH-408]
- * credential/app-id: Salt the paths in storage backend to avoid information leak
- * credential/cert: Fixing client certificate not being requested
- * credential/cert: Fixing panic when no certificate match found [GH-361]
- * http: Accept PUT as POST for sys/auth
- * http: Accept PUT as POST for sys/mounts [GH-349]
- * http: Return 503 when sealed [GH-225]
- * secret/postgres: Username length is now capped to avoid exceeding the database limit
- * server: Do not panic if backend not configured [GH-222]
- * server: Explicitly check value of tls_disable [GH-201]
- * storage/zk: Fixed issues with version conflicts [GH-190]
-
-MISC:
-
- * cli/path-help: renamed from `help` to avoid confusion
-
-## 0.1.2 (May 11, 2015)
+* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)]
+* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)]
+
+## 1.10.1
+### April 22, 2022
-FEATURES:
+CHANGES:
- * **New physical backend: `zookeeper`**: store physical data in Zookeeper. HA not supported yet.
- * **New credential backend: `ldap`**: authenticate using LDAP credentials.
+* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
+* core: Bump Go version to 1.17.9. [[GH-15044](https://github.com/hashicorp/vault/pull/15044)]
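(The relative-path entry above changes only the status code the server returns. A sketch of how a caller might observe it, assuming the standard Go client and its `api.ResponseError` type; the secret path is illustrative.)

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A path containing ".." fails the relative-path check; as of this
	// change the server answers 400 (client error) instead of 500.
	_, err = client.Logical().Read("secret/data/../data/app")
	var respErr *api.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println("status:", respErr.StatusCode) // expected: 400
	}
}
```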
IMPROVEMENTS:
- * core: Auth backends can store internal data about auth creds
- * audit: display name for auth is shown in logs [GH-176]
- * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
- * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
- * command/server: environment variables are copy-pastable
- * credential/app-id: hash of app and user ID are in metadata [GH-176]
- * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
- * logical/*: Generate help output even if no synopsis specified
+* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)]
+* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)]
+* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)]
+* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)]
+* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer
BUG FIXES:
- * core: login endpoints should never return secrets
- * core: Internal data should never be returned from core endpoints
- * core: defer barrier initialization to as late as possible to avoid error cases during init that corrupt data (no data loss)
- * core: guard against invalid init config earlier
- * audit/file: create file if it doesn't exist [GH-148]
- * command/*: ignore directories when traversing CA paths [GH-181]
- * credential/*: all policy mapping keys are case insensitive [GH-163]
- * physical/consul: Fixing path for locking so HA works in every case
+* Fixed panic when adding or modifying a Duo MFA Method in Enterprise
+* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)]
+* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
+* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)]
+* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
+* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)]
+* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)]
+* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)]
+* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)]
+* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number
+* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)]
+* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)]
+* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)]
+* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)]
+* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)]
+* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)]
+* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)]
+* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)]
+* core: Fix memory leak from `time.After()` used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)]
+* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)]
+* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)]
+* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)]
+* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)]
+* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)]
+* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)]
+* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
+* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)]
+* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
+* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
-## 0.1.1 (May 2, 2015)
+## 1.10.0
+### March 23, 2022
-SECURITY CHANGES:
+CHANGES:
- * physical/file: create the storage with 0600 permissions [GH-102]
- * token/disk: write the token to disk with 0600 perms
+* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x.
+* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)]
+* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)]
+* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)]
+* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)]
+* storage/etcd: Remove support for v2.
[[GH-14193](https://github.com/hashicorp/vault/pull/14193)] +* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] -IMPROVEMENTS: +FEATURES: - * core: Very verbose error if mlock fails [GH-59] - * command/*: On error with TLS oversized record, show more human-friendly - error message. [GH-123] - * command/read: `lease_renewable` is now outputted along with the secret to - show whether it is renewable or not - * command/server: Add configuration option to disable mlock - * command/server: Disable mlock for dev mode so it works on more systems +* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] +* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. [[GH-14025](https://github.com/hashicorp/vault/pull/14025)] +* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. +* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] +* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] +* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] +* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] +* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] +* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] +* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. -BUG FIXES: +IMPROVEMENTS: - * core: if token helper isn't absolute, prepend with path to Vault - executable, not "vault" (which requires PATH) [GH-60] - * core: Any "mapping" routes allow hyphens in keys [GH-119] - * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] - * command/auth: Using an invalid token won't crash [GH-75] - * credential/app-id: app and user IDs can have hyphens in keys [GH-119] - * helper/password: import proper DLL for Windows to ask password [GH-83] +* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. [[GH-14214](https://github.com/hashicorp/vault/pull/14214)] +* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] +* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. 
[[GH-13515](https://github.com/hashicorp/vault/pull/13515)] +* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] +* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] +* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] +* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] +* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] +* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] +* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] +* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] +* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] +* auth/ldap: Add a response warning and server log whenever the config is accessed +if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] +* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will +not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] +* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] +* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] +* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status +* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] +* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] +* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] +* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] +* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] +* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] +* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] +* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] +* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections +has also been added to the PROXY protocol v1. [[GH-13540](https://github.com/hashicorp/vault/pull/13540)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces +* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] +* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] +* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] +* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] +* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)]
+* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)]
+* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)]
+* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)]
+* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)]
+* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)]
+* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)]
+* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)]
+* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)]
+* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)]
+* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)]
+* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)]
+* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)]
+* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)]
+* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)]
+* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)]
+* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)]
+* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)]
+* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)]
+* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)]
+* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)]
+* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in JSON. [[GH-13537](https://github.com/hashicorp/vault/pull/13537)]
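(The subkeys entry above adds a read-only view of a KVv2 secret's shape. A minimal sketch, assuming a KVv2 engine mounted at `secret/` and a secret at `app/config`, both illustrative; `depth` bounds how far the structure is walked.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// KVv2 subkeys: returns the keys of a secret with all leaf values
	// replaced by null, so the shape can be inspected without the data.
	secret, err := client.Logical().ReadWithData(
		"secret/subkeys/app/config",
		map[string][]string{"depth": {"2"}},
	)
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no secret found")
	}
	fmt.Printf("structure: %#v\n", secret.Data["subkeys"])
}
```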
+* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)]
+* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)]
+* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)]
+* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)]
+* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)]
+* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)]
+* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)]
+* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)]
+* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)]
+* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)]
+* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)]
+* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)]
-## 0.1.0 (April 28, 2015)
+BUG FIXES:
- * Initial release
+* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)]
+* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)]
+* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token
+* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)]
+* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)]
+* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)]
+* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)]
+* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)]
+* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)]
+* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)]
+* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)]
+* core (enterprise): Fix a data race in logshipper.
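(The `SetReadYourWrites` fix above concerns repeated calls clearing the client's consistency state. A short sketch of what enabling it looks like with the Go client; the secret path is illustrative.)

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to read-your-writes: the client tracks X-Vault-Index state
	// returned by the cluster and replays it on subsequent requests.
	// The fix above ensures calling this repeatedly no longer clears
	// that state.
	client.SetReadYourWrites(true)
	client.SetReadYourWrites(true) // safe: state is preserved

	if _, err := client.Logical().Read("secret/data/app"); err != nil {
		log.Fatal(err)
	}
}
```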
+* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions
+* core/api: Fix overwriting of request headers when using JSONMergePatch. [[GH-14222](https://github.com/hashicorp/vault/pull/14222)]
+* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)]
+* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)]
+* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)]
+* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)]
+* core: Fix warnings logged on perf standbys regarding stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)]
+* core: `-output-curl-string` now properly sets cURL options for client and CA certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)]
+* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)]
+* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)]
+* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)]
+* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
+* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
+* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)]
+* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)]
+* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)]
+* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)]
+* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)]
+* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)]
+* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)]
+* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)]
+* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)]
+* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)]
+* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation.
+* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build.
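(The `JSONMergePatch` entry above refers to the Go client helper behind `vault kv patch`. A minimal sketch of patching a single KVv2 field with it, assuming a KVv2 mount at `secret/` and a field name that are illustrative.)

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// JSONMergePatch sends an HTTP PATCH with an RFC 7386 merge-patch
	// body; for KVv2 this updates one field without rewriting the secret.
	_, err = client.Logical().JSONMergePatch(context.Background(),
		"secret/data/app/config",
		map[string]interface{}{
			"data": map[string]interface{}{
				"log_level": "debug",
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```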
+* metrics/autosnapshots (enterprise) : Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* replication (enterprise): When using encrypted secondary tokens, only clear the +private key after a successful connection to the primary cluster +* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] +* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] +* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] +* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. 
[[GH-12872](https://github.com/hashicorp/vault/pull/12872)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] +* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] +* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] +* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue 
with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] +* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: trigger token renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] diff --git a/CODEOWNERS b/CODEOWNERS index f1eff372ab53..2332eea289ce 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -5,62 +5,83 @@ # More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners # Select Auth engines are owned by Ecosystem -/builtin/credential/aws/ @hashicorp/vault-ecosystem -/builtin/credential/github/ @hashicorp/vault-ecosystem -/builtin/credential/ldap/ @hashicorp/vault-ecosystem -/builtin/credential/okta/ @hashicorp/vault-ecosystem +/builtin/credential/aws/ @hashicorp/vault-ecosystem-applications +/builtin/credential/github/ @hashicorp/vault-ecosystem-applications +/builtin/credential/ldap/ @hashicorp/vault-ecosystem-applications +/builtin/credential/okta/ @hashicorp/vault-ecosystem-applications # Secrets engines (pki, ssh, totp and transit omitted) -/builtin/logical/aws/ @hashicorp/vault-ecosystem -/builtin/logical/cassandra/ @hashicorp/vault-ecosystem -/builtin/logical/consul/ @hashicorp/vault-ecosystem -/builtin/logical/database/ @hashicorp/vault-ecosystem -/builtin/logical/mongodb/ @hashicorp/vault-ecosystem -/builtin/logical/mssql/ @hashicorp/vault-ecosystem -/builtin/logical/mysql/ @hashicorp/vault-ecosystem -/builtin/logical/nomad/ @hashicorp/vault-ecosystem -/builtin/logical/postgresql/ @hashicorp/vault-ecosystem -/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem +/builtin/logical/aws/ @hashicorp/vault-ecosystem-applications +/builtin/logical/cassandra/ @hashicorp/vault-ecosystem-applications +/builtin/logical/consul/ @hashicorp/vault-ecosystem-applications +/builtin/logical/database/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mongodb/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mssql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mysql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/nomad/ @hashicorp/vault-ecosystem-applications +/builtin/logical/postgresql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem-applications + +# Identity Integrations (OIDC, tokens) +/vault/identity_store_oidc* 
@hashicorp/vault-ecosystem-applications /plugins/ @hashicorp/vault-ecosystem /vault/plugin_catalog.go @hashicorp/vault-ecosystem -/website/content/ @tjperry07 -/website/content/docs/plugin-portal.mdx @acahn @tjperry07 +/website/content/ @hashicorp/vault-education-approvers +/website/content/docs/plugin-portal.mdx @acahn @hashicorp/vault-education-approvers # Plugin docs -/website/content/docs/plugins/ @fairclothjm @tjperry07 -/website/content/docs/upgrading/plugins.mdx @fairclothjm @tjperry07 +/website/content/docs/plugins/ @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers +/website/content/docs/upgrading/plugins.mdx @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers # UI code related to Vault's JWT/OIDC auth method and OIDC provider. # Changes to these files often require coordination with backend code, # so stewards of the backend code are added below for notification. -/ui/app/components/auth-jwt.js @austingebauer -/ui/app/routes/vault/cluster/oidc-*.js @austingebauer +/ui/app/components/auth-jwt.js @hashicorp/vault-ecosystem-applications +/ui/app/routes/vault/cluster/oidc-*.js @hashicorp/vault-ecosystem-applications # Release config; service account is required for automation tooling. -/.release/ @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team -/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team +/.release/ @hashicorp/github-secure-vault-core @hashicorp/quality-team +/.github/workflows/build.yml @hashicorp/github-secure-vault-core @hashicorp/quality-team # Quality engineering /.github/ @hashicorp/quality-team /enos/ @hashicorp/quality-team # Cryptosec -/builtin/logical/pki/ @hashicorp/vault-crypto -/builtin/credential/cert/ @hashicorp/vault-crypto -/builtin/logical/ssh/ @hashicorp/vault-crypto -/builtin/logical/transit/ @hashicorp/vault-crypto -/helper/random/ @hashicorp/vault-crypto -/sdk/helper/certutil/ @hashicorp/vault-crypto -/sdk/helper/cryptoutil/ @hashicorp/vault-crypto -/sdk/helper/kdf/ @hashicorp/vault-crypto -/sdk/helper/keysutil/ @hashicorp/vault-crypto -/sdk/helper/ocsp/ @hashicorp/vault-crypto -/sdk/helper/salt/ @hashicorp/vault-crypto -/sdk/helper/tlsutil/ @hashicorp/vault-crypto -/shamir/ @hashicorp/vault-crypto -/vault/barrier* @hashicorp/vault-crypto -/vault/managed_key* @hashicorp/vault-crypto -/vault/seal* @hashicorp/vault-crypto -/vault/seal/ @hashicorp/vault-crypto +/builtin/logical/pki/ @hashicorp/vault-crypto +/builtin/logical/pkiext/ @hashicorp/vault-crypto +/website/content/docs/secrets/pki/ @hashicorp/vault-crypto +/website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto +/builtin/credential/cert/ @hashicorp/vault-crypto +/website/content/docs/auth/cert.mdx @hashicorp/vault-crypto +/website/content/api-docs/auth/cert.mdx @hashicorp/vault-crypto +/builtin/logical/ssh/ @hashicorp/vault-crypto +/website/content/docs/secrets/ssh/ @hashicorp/vault-crypto +/website/content/api-docs/secret/ssh.mdx @hashicorp/vault-crypto +/builtin/logical/transit/ @hashicorp/vault-crypto +/website/content/docs/secrets/transit/ @hashicorp/vault-crypto +/website/content/api-docs/secret/transit.mdx @hashicorp/vault-crypto +/helper/random/ @hashicorp/vault-crypto +/sdk/helper/certutil/ @hashicorp/vault-crypto +/sdk/helper/cryptoutil/ @hashicorp/vault-crypto +/sdk/helper/kdf/ @hashicorp/vault-crypto +/sdk/helper/keysutil/ @hashicorp/vault-crypto +/sdk/helper/ocsp/ @hashicorp/vault-crypto +/sdk/helper/salt/ @hashicorp/vault-crypto 
+/sdk/helper/tlsutil/ @hashicorp/vault-crypto
+/shamir/ @hashicorp/vault-crypto
+/vault/barrier* @hashicorp/vault-crypto
+/vault/managed_key* @hashicorp/vault-crypto
+/vault/seal* @hashicorp/vault-crypto
+/vault/seal/ @hashicorp/vault-crypto
+/website/content/docs/configuration/seal/ @hashicorp/vault-crypto
+/website/content/docs/enterprise/sealwrap.mdx @hashicorp/vault-crypto
+/website/content/api-docs/system/sealwrap-rewrap.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/transform/ @hashicorp/vault-crypto
+/website/content/api-docs/secret/transform.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/kmip-profiles.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto
+/website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto
+/website/content/docs/enterprise/fips/ @hashicorp/vault-crypto
diff --git a/Dockerfile b/Dockerfile
index 9041da20ff9d..62860b7efa6c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,8 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
 ## DOCKERHUB DOCKERFILE ##
-FROM alpine:3.15 as default
+FROM alpine:3.18 as default
 
 ARG BIN_NAME
 # NAME and PRODUCT_VERSION are the name of the software in releases.hashicorp.com
@@ -21,7 +24,8 @@ LABEL name="Vault" \
       summary="Vault is a tool for securely accessing secrets." \
       description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
 
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as per Legal requirement
+COPY LICENSE /licenses/LICENSE.txt
 
 # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
 ENV NAME=$NAME
@@ -59,7 +63,7 @@ EXPOSE 8200
 # The entry point script uses dumb-init as the top-level process to reap any
 # zombie processes created by Vault sub-processes.
 #
-# For production derivatives of this container, you shoud add the IPC_LOCK
+# For production derivatives of this container, you should add the IPC_LOCK
 # capability so that Vault can mlock memory.
 COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
 ENTRYPOINT ["docker-entrypoint.sh"]
@@ -71,7 +75,7 @@ CMD ["server", "-dev"]
 
 ## UBI DOCKERFILE ##
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7 as ubi
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9 as ubi
 
 ARG BIN_NAME
 # PRODUCT_VERSION is the version built dist/$TARGETOS/$TARGETARCH/$BIN_NAME,
@@ -92,7 +96,8 @@ LABEL name="Vault" \
       summary="Vault is a tool for securely accessing secrets." \
      description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
 
-COPY LICENSE /licenses/mozilla.txt
+# Copy the license file as per Legal requirement
+COPY LICENSE /licenses/LICENSE.txt
 
 # Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
 ENV NAME=$NAME
@@ -142,7 +147,7 @@ EXPOSE 8200
 # The entry point script uses dumb-init as the top-level process to reap any
 # zombie processes created by Vault sub-processes.
 #
-# For production derivatives of this container, you shoud add the IPC_LOCK
+# For production derivatives of this container, you should add the IPC_LOCK
 # capability so that Vault can mlock memory.
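As the Dockerfile comment above notes, production derivatives of this container should be granted the IPC_LOCK capability so that Vault can mlock memory. A minimal sketch of what that looks like at container run time (the image tag and dev mode below are illustrative assumptions, not taken from this diff):

```bash
# Grant IPC_LOCK so Vault can mlock memory inside the container.
# Image tag and server mode are illustrative; adjust for your deployment.
docker run --cap-add=IPC_LOCK hashicorp/vault:latest server -dev
```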
COPY .release/docker/ubi-docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh ENTRYPOINT ["docker-entrypoint.sh"] diff --git a/LICENSE b/LICENSE index f4f97ee5853a..fbeca00ad74c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,365 +1,92 @@ -Copyright (c) 2015 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - +License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +Parameters + +Licensor: HashiCorp, Inc. +Licensed Work: Vault Version 1.15.0 or later. The Licensed Work is (c) 2024 + HashiCorp, Inc. +Additional Use Grant: You may make production use of the Licensed Work, provided + Your use does not include offering the Licensed Work to third + parties on a hosted or embedded basis in order to compete with + HashiCorp's paid version(s) of the Licensed Work. For purposes + of this license: + + A "competitive offering" is a Product that is offered to third + parties on a paid basis, including through paid support + arrangements, that significantly overlaps with the capabilities + of HashiCorp's paid version(s) of the Licensed Work. If Your + Product is not a competitive offering when You first make it + generally available, it will not become a competitive offering + later due to HashiCorp releasing a new version of the Licensed + Work with additional capabilities. In addition, Products that + are not provided on a paid basis are not competitive. + + "Product" means software that is offered to end users to manage + in their own environments or offered as a service on a hosted + basis. 
+ + "Embedded" means including the source code or executable code + from the Licensed Work in a competitive offering. "Embedded" + also means packaging the competitive offering in such a way + that the Licensed Work must be accessed or downloaded for the + competitive offering to operate. + + Hosting or using the Licensed Work(s) for internal purposes + within an organization is not considered a competitive + offering. HashiCorp considers your organization to include all + of your affiliates under common control. + + For binding interpretive guidance on using HashiCorp products + under the Business Source License, please visit our FAQ. + (https://www.hashicorp.com/license-faq) +Change Date: Four years from the date the Licensed Work is published. +Change License: MPL 2.0 + +For information about alternative licensing arrangements for the Licensed Work, +please contact licensing@hashicorp.com. + +Notice + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. diff --git a/META.d/_summary.yaml b/META.d/_summary.yaml new file mode 100644 index 000000000000..c1da2a666c77 --- /dev/null +++ b/META.d/_summary.yaml @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1
+
+---
+schema: 1.1
+partition: secure
+category: product
+summary:
+  owner: team-vault
+  description: The repositories holding the Vault OSS & ENT codebase
+  visibility: internal
diff --git a/Makefile b/Makefile
index 56798035a84a..d16a56c1bf75 100644
--- a/Makefile
+++ b/Makefile
@@ -2,21 +2,19 @@
 # Be sure to place this BEFORE `include` directives, if any.
 THIS_FILE := $(lastword $(MAKEFILE_LIST))
 
-TEST?=$$($(GO_CMD) list ./... | grep -v /vendor/ | grep -v /integ)
+MAIN_PACKAGES=$$($(GO_CMD) list ./... | grep -v vendor/ )
+SDK_PACKAGES=$$(cd $(CURDIR)/sdk && $(GO_CMD) list ./... | grep -v vendor/ )
+API_PACKAGES=$$(cd $(CURDIR)/api && $(GO_CMD) list ./... | grep -v vendor/ )
+ALL_PACKAGES=$(MAIN_PACKAGES) $(SDK_PACKAGES) $(API_PACKAGES)
+TEST=$$(echo $(ALL_PACKAGES) | grep -v integ/ )
 TEST_TIMEOUT?=45m
 EXTENDED_TEST_TIMEOUT=60m
 INTEG_TEST_TIMEOUT=120m
 
 VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
-EXTERNAL_TOOLS_CI=\
-	golang.org/x/tools/cmd/goimports
-EXTERNAL_TOOLS=\
-	github.com/client9/misspell/cmd/misspell
 GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor)
 SED?=$(shell command -v gsed || command -v sed)
-
 GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version)
-PROTOC_VERSION_MIN=3.21.9
 GO_CMD?=go
 CGO_ENABLED?=0
 ifneq ($(FDB_ENABLED), )
@@ -32,10 +30,13 @@ bin: prep
 
 # dev creates binaries for testing Vault locally. These are put
 # into ./bin/ as well as $GOPATH/bin
+dev: BUILD_TAGS+=testonly
 dev: prep
 	@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+dev-ui: BUILD_TAGS+=testonly
 dev-ui: assetcheck prep
 	@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+dev-dynamic: BUILD_TAGS+=testonly
 dev-dynamic: prep
 	@CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
@@ -51,13 +52,16 @@ dev-dynamic-mem: dev-dynamic
 
 # Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin.
 # The resulting image is tagged "vault:dev".
+docker-dev: BUILD_TAGS+=testonly
 docker-dev: prep
 	docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev .
+docker-dev-ui: BUILD_TAGS+=testonly
 docker-dev-ui: prep
 	docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui .
 
 # test runs the unit tests and vets the code
+test: BUILD_TAGS+=testonly
 test: prep
 	@CGO_ENABLED=$(CGO_ENABLED) \
 	VAULT_ADDR= \
@@ -66,12 +70,14 @@ test: prep
 	VAULT_ACC= \
 	$(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20
 
+testcompile: BUILD_TAGS+=testonly
 testcompile: prep
 	@for pkg in $(TEST) ; do \
 		$(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
 	done
 
 # testacc runs acceptance tests
+testacc: BUILD_TAGS+=testonly
 testacc: prep
 	@if [ "$(TEST)" = "./..." ]; then \
 		echo "ERROR: Set TEST to a specific package"; \
@@ -80,6 +86,7 @@ testacc: prep
 	VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT)
 
 # testrace runs the race checker
+testrace: BUILD_TAGS+=testonly
 testrace: prep
 	@CGO_ENABLED=1 \
 	VAULT_ADDR= \
@@ -102,35 +109,72 @@ vet:
 		echo "and fix them if necessary before submitting the code for review."; \
 	fi
 
+# deprecations runs the staticcheck tool to look for deprecations. It checks the entire codebase
+# for deprecated functions, variables, constants, or fields.
+deprecations: bootstrap prep
+	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh ""
+
+# ci-deprecations runs the staticcheck tool to look for deprecations. All output gets piped to
+# revgrep, which will only return an error if changes that are not on main add a deprecated
+# function, variable, constant, or field.
+ci-deprecations: prep check-tools-external
+	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh main
+
+# vet-codechecker runs our custom linters on the test functions. All output gets
+# piped to revgrep, which will only return an error if a new piece of code violates
+# the check.
+vet-codechecker: check-tools-internal
+	@echo "==> Running go vet with ./tools/codechecker..."
+	@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep
+
+# ci-vet-codechecker runs our custom linters on the test functions. All output gets
+# piped to revgrep, which will only return an error if a new piece of code that is
+# not on main violates the check.
+ci-vet-codechecker: tools-internal check-tools-external
+	@echo "==> Running go vet with ./tools/codechecker..."
+	@$(GO_CMD) vet -vettool=$$(which codechecker) -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep origin/main
+
 # lint runs vet plus a number of other checkers; it is more comprehensive, but louder
-lint:
+lint: check-tools-external
 	@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
 		| xargs golangci-lint run; if [ $$? -eq 1 ]; then \
 		echo ""; \
 		echo "Lint found suspicious constructs. Please check the reported constructs"; \
 		echo "and fix them if necessary before submitting the code for review."; \
 	fi
+
 # For CI jobs, runs lint against the changed packages in the commit
-ci-lint:
+ci-lint: check-tools-external
 	@golangci-lint run --deadline 10m --new-from-rev=HEAD~
 
+# Lint protobuf files
+protolint: prep check-tools-external
+	@echo "==> Linting protobufs..."
+	@buf lint
+
 # prep runs `go generate` to build the dynamically generated
 # source files.
-prep: fmtcheck
-	@sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'"
-	@$(GO_CMD) generate $($(GO_CMD) list ./... | grep -v /vendor/)
+#
+# n.b.: prep used to depend on fmtcheck, but since fmtcheck is
+# now run as a pre-commit hook (and there's little value in
+# making every build run the formatter), we've removed that
+# dependency.
+prep: check-go-version clean
+	@echo "==> Running go generate..."
+	@GOARCH= GOOS= $(GO_CMD) generate $(MAIN_PACKAGES)
+	@GOARCH= GOOS= cd api && $(GO_CMD) generate $(API_PACKAGES)
+	@GOARCH= GOOS= cd sdk && $(GO_CMD) generate $(SDK_PACKAGES)
+
+# Git doesn't allow us to store shared hooks in .git. Instead, we make sure they're up-to-date
+# whenever a make target is invoked.
+.PHONY: hooks
+hooks:
 	@if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi
 
-# bootstrap the build by downloading additional tools needed to build
-ci-bootstrap:
-	@for tool in $(EXTERNAL_TOOLS_CI) ; do \
-		echo "Installing/Updating $$tool" ; \
-		GO111MODULE=off $(GO_CMD) get -u $$tool; \
-	done
+-include hooks # Make sure they're always up-to-date
 
-# bootstrap the build by downloading additional tools that may be used by devs
-bootstrap: ci-bootstrap
-	go generate -tags tools tools/tools.go
+# bootstrap the build by generating any necessary code and downloading additional tools that may
+# be used by devs.
+bootstrap: prep tools
 
 # Note: if you have plugins in GOPATH you can update all of them via something like:
 # for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done
@@ -141,76 +185,64 @@ static-assets-dir:
 	@mkdir -p ./http/web_ui
 
 install-ui-dependencies:
-	@echo "--> Installing JavaScript assets"
-	@cd ui && yarn --ignore-optional
+	@echo "==> Installing JavaScript assets"
+	@cd ui && yarn
 
 test-ember: install-ui-dependencies
-	@echo "--> Running ember tests"
+	@echo "==> Running ember tests"
 	@cd ui && yarn run test:oss
 
 test-ember-enos: install-ui-dependencies
-	@echo "--> Running ember tests with a real backend"
+	@echo "==> Running ember tests with a real backend"
 	@cd ui && yarn run test:enos
 
-check-vault-in-path:
-	@VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \
-	[ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \
-	printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)"
-
 ember-dist: install-ui-dependencies
 	@cd ui && npm rebuild node-sass
-	@echo "--> Building Ember application"
+	@echo "==> Building Ember application"
 	@cd ui && yarn run build
 	@rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache
 
 ember-dist-dev: install-ui-dependencies
 	@cd ui && npm rebuild node-sass
-	@echo "--> Building Ember application"
+	@echo "==> Building Ember application"
 	@cd ui && yarn run build:dev
 
 static-dist: ember-dist
 static-dist-dev: ember-dist-dev
 
-proto: bootstrap
-	@sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'"
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/activity_log.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/forwarding/types.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/logical/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative physical/raft/types.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/mfa/types.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/types.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/v5/proto/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto
-	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/proto/*/*.proto
+proto: check-tools-external
+	@echo "==> Generating Go code from protobufs..."
+	buf generate
 
 # No additional sed expressions should be added to this list. Going forward
 # we should just use the variable names chosen by protobuf. These are left
 # here for backwards compatibility, namely for SDK compilation.
-	$(SED) -i -e 's/Id/ID/' vault/request_forwarding_service.pb.go
-	$(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go
+	$(SED) -i -e 's/Id/ID/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' vault/request_forwarding_service.pb.go
+	$(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go
 
 # This will inject the sentinel struct tags as decorated in the proto files.
 	protoc-go-inject-tag -input=./helper/identity/types.pb.go
 	protoc-go-inject-tag -input=./helper/identity/mfa/types.pb.go
 
-fmtcheck:
-	@true
-#@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
-
 fmt:
 	find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w
 
+fmtcheck: check-go-fmt
+
+.PHONY: go-mod-download
+go-mod-download:
+	@$(CURDIR)/scripts/go-helper.sh mod-download
+
+.PHONY: go-mod-tidy
+go-mod-tidy:
+	@$(CURDIR)/scripts/go-helper.sh mod-tidy
+
+protofmt:
+	buf format -w
+
 semgrep:
 	semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep .
 
-semgrep-ci:
-	semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
-
 assetcheck:
 	@echo "==> Checking compiled UI assets..."
 	@sh -c "'$(CURDIR)/scripts/assetcheck.sh'"
@@ -219,6 +251,60 @@ spellcheck:
 	@echo "==> Spell checking website..."
 	@misspell -error -source=text website/source
 
+.PHONY: check-go-fmt
+check-go-fmt:
+	@$(CURDIR)/scripts/go-helper.sh check-fmt
+
+.PHONY: check-go-version
+check-go-version:
+	@$(CURDIR)/scripts/go-helper.sh check-version $(GO_VERSION_MIN)
+
+.PHONY: check-proto-fmt
+check-proto-fmt:
+	buf format -d --error-format github-actions --exit-code
+
+.PHONY: check-proto-delta
+check-proto-delta: prep
+	@echo "==> Checking for a delta in proto generated Go files..."
+	@echo "==> Deleting all *.pb.go files..."
+	find . -type f -name '*.pb.go' -delete -print0
+	@$(MAKE) -f $(THIS_FILE) proto
+	@if ! git diff --exit-code; then echo "Go protobuf bindings need to be regenerated. Run 'make proto' to fix them." && exit 1; fi
+
+.PHONY: check-semgrep
+check-semgrep: check-tools-external
+	@echo "==> Checking semgrep..."
+	@semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
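The buf-based `proto` target above replaces the long list of per-file protoc invocations with a single `buf generate` plus sed post-processing. A hedged sketch of the local workflow these targets suggest, using only target names defined in this Makefile (the ordering is an assumption, not a documented sequence):

```bash
# Regenerate protobuf bindings and verify the result.
make tools-external      # install external tooling (buf and friends)
make proto               # buf generate + sed post-processing
make check-proto-fmt     # buf format in check mode
make check-proto-delta   # regenerate and fail if generated files drifted
```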
+
+.PHONY: check-tools
+check-tools:
+	@$(CURDIR)/tools/tools.sh check
+
+.PHONY: check-tools-external
+check-tools-external:
+	@$(CURDIR)/tools/tools.sh check-external
+
+.PHONY: check-tools-internal
+check-tools-internal:
+	@$(CURDIR)/tools/tools.sh check-internal
+
+check-vault-in-path:
+	@VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \
+	[ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \
+	printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)"
+
+.PHONY: tools
+tools:
+	@$(CURDIR)/tools/tools.sh install
+
+.PHONY: tools-external
+tools-external:
+	@$(CURDIR)/tools/tools.sh install-external
+
+.PHONY: tools-internal
+tools-internal:
+	@$(CURDIR)/tools/tools.sh install-internal
+
 mysql-database-plugin:
 	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
@@ -243,17 +329,6 @@ hana-database-plugin:
 mongodb-database-plugin:
 	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
 
-.PHONY: ci-config
-ci-config:
-	@$(MAKE) -C .circleci ci-config
-.PHONY: ci-verify
-ci-verify:
-	@$(MAKE) -C .circleci ci-verify
-
-.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci
-
-.NOTPARALLEL: ember-dist ember-dist-dev
-
 # These ci targets are used for building and testing in GitHub Actions
 # workflows and for Enos scenarios.
 .PHONY: ci-build
@@ -268,10 +343,6 @@ ci-build-ui:
 ci-bundle:
 	@$(CURDIR)/scripts/ci-helper.sh bundle
 
-.PHONY: ci-filter-matrix
-ci-filter-matrix:
-	@$(CURDIR)/scripts/ci-helper.sh matrix-filter-file
-
 .PHONY: ci-get-artifact-basename
 ci-get-artifact-basename:
 	@$(CURDIR)/scripts/ci-helper.sh artifact-basename
@@ -280,46 +351,46 @@ ci-get-date:
 	@$(CURDIR)/scripts/ci-helper.sh date
 
-.PHONY: ci-get-matrix-group-id
-ci-get-matrix-group-id:
-	@$(CURDIR)/scripts/ci-helper.sh matrix-group-id
-
 .PHONY: ci-get-revision
 ci-get-revision:
 	@$(CURDIR)/scripts/ci-helper.sh revision
 
-.PHONY: ci-get-version
-ci-get-version:
-	@$(CURDIR)/scripts/ci-helper.sh version
+.PHONY: ci-get-version-package
+ci-get-version-package:
+	@$(CURDIR)/scripts/ci-helper.sh version-package
 
-.PHONY: ci-get-version-base
-ci-get-version-base:
-	@$(CURDIR)/scripts/ci-helper.sh version-base
+.PHONY: ci-install-external-tools
+ci-install-external-tools:
+	@$(CURDIR)/scripts/ci-helper.sh install-external-tools
 
-.PHONY: ci-get-version-major
-ci-get-version-major:
-	@$(CURDIR)/scripts/ci-helper.sh version-major
+.PHONY: ci-prepare-ent-legal
+ci-prepare-ent-legal:
+	@$(CURDIR)/scripts/ci-helper.sh prepare-ent-legal
 
-.PHONY: ci-get-version-meta
-ci-get-version-meta:
-	@$(CURDIR)/scripts/ci-helper.sh version-meta
+.PHONY: ci-prepare-ce-legal
+ci-prepare-ce-legal:
+	@$(CURDIR)/scripts/ci-helper.sh prepare-ce-legal
 
-.PHONY: ci-get-version-minor
-ci-get-version-minor:
-	@$(CURDIR)/scripts/ci-helper.sh version-minor
+.PHONY: ci-update-external-tool-modules
+ci-update-external-tool-modules:
+	@$(CURDIR)/scripts/ci-helper.sh update-external-tool-modules
 
-.PHONY: ci-get-version-package
-ci-get-version-package:
-	@$(CURDIR)/scripts/ci-helper.sh version-package
+.PHONY: ci-copywriteheaders
+ci-copywriteheaders:
+	copywrite headers --plan
+	# Special case for MPL headers in /api, /sdk, and /shamir
+	cd api && $(CURDIR)/scripts/copywrite-exceptions.sh
+	cd sdk && $(CURDIR)/scripts/copywrite-exceptions.sh
+	cd shamir && $(CURDIR)/scripts/copywrite-exceptions.sh
 
-.PHONY: ci-get-version-patch
-ci-get-version-patch:
-	@$(CURDIR)/scripts/ci-helper.sh version-patch
+.PHONY: all bin default prep test vet bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-codechecker ci-vet-codechecker clean dev
+
+.NOTPARALLEL: ember-dist ember-dist-dev
 
-.PHONY: ci-get-version-pre
-ci-get-version-pre:
-	@$(CURDIR)/scripts/ci-helper.sh version-pre
+.PHONY: all-packages
+all-packages:
+	@echo $(ALL_PACKAGES) | tr ' ' '\n'
 
-.PHONY: ci-prepare-legal
-ci-prepare-legal:
-	@$(CURDIR)/scripts/ci-helper.sh prepare-legal
+.PHONY: clean
+clean:
+	@echo "==> Cleaning..."
diff --git a/README.md b/README.md
index 7de5fb620cd3..109570940e09 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Vault [![CircleCI](https://circleci.com/gh/hashicorp/vault.svg?style=svg)](https://circleci.com/gh/hashicorp/vault) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise)
+# Vault [![build](https://github.com/hashicorp/vault/actions/workflows/build.yml/badge.svg)](https://github.com/hashicorp/vault/actions/workflows/build.yml) [![ci](https://github.com/hashicorp/vault/actions/workflows/ci.yml/badge.svg)](https://github.com/hashicorp/vault/actions/workflows/ci.yml) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise)
 
 ----
 
@@ -9,9 +9,9 @@
 - Website: https://www.vaultproject.io
 - Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
 - Discussion forum: [Discuss](https://discuss.hashicorp.com/c/vault)
-- Documentation: [https://www.vaultproject.io/docs/](https://www.vaultproject.io/docs/)
-- Tutorials: [HashiCorp's Learn Platform](https://learn.hashicorp.com/vault)
-- Certification Exam: [Vault Associate](https://www.hashicorp.com/certification/#hashicorp-certified-vault-associate)
+- Documentation: [https://developer.hashicorp.com/vault/docs](https://developer.hashicorp.com/vault/docs)
+- Tutorials: [https://developer.hashicorp.com/vault/tutorials](https://developer.hashicorp.com/vault/tutorials)
+- Certification Exam: [https://developer.hashicorp.com/certifications/security-automation](https://developer.hashicorp.com/certifications/security-automation)
 
 Vault Logo
 
@@ -52,7 +52,7 @@ The key features of Vault are:
 Documentation, Getting Started, and Certification Exams
 -------------------------------
 
-Documentation is available on the [Vault website](https://www.vaultproject.io/docs/).
+Documentation is available on the [Vault website](https://developer.hashicorp.com/vault/docs).
 If you're new to Vault and want to get started with security automation, please
 check out our [Getting Started guides](https://learn.hashicorp.com/collections/vault/getting-started)
@@ -136,6 +136,8 @@ is not, and has never been, a supported way to use the Vault project. We aren't
 likely to fix bugs relating to failure to import `github.com/hashicorp/vault`
 into your project.
 
+See also the section "Docker-based Tests" below.
+
 ### Acceptance Tests
 
 Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
@@ -169,3 +171,118 @@ things such as access keys. The test itself should error early and tell you
 what to set, so it is not documented here.
 
 For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise).
+
+### Docker-based Tests
+
+We have created an experimental new testing mechanism inspired by NewTestCluster.
+An example of how to use it:
+
+```go
+import (
+	"testing"
+	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
+)
+
+func Test_Something_With_Docker(t *testing.T) {
+	opts := &docker.DockerClusterOptions{
+		ImageRepo: "hashicorp/vault", // or "hashicorp/vault-enterprise"
+		ImageTag:  "latest",
+	}
+	cluster := docker.NewTestDockerCluster(t, opts)
+	defer cluster.Cleanup()
+
+	client := cluster.Nodes()[0].APIClient()
+	_, err := client.Logical().Read("sys/storage/raft/configuration")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+```
+
+Or for Enterprise:
+
+```go
+import (
+	"testing"
+	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
+)
+
+func Test_Something_With_Docker(t *testing.T) {
+	opts := &docker.DockerClusterOptions{
+		ImageRepo:    "hashicorp/vault-enterprise",
+		ImageTag:     "latest",
+		VaultLicense: licenseString, // not a path, the actual license bytes
+	}
+	cluster := docker.NewTestDockerCluster(t, opts)
+	defer cluster.Cleanup()
+}
+```
+
+Here is a more realistic example of how we use it in practice. DefaultOptions uses
+`hashicorp/vault:latest` as the repo and tag, but it also looks at the environment
+variable VAULT_BINARY. If populated, it will copy the local file referenced by
+VAULT_BINARY into the container. This is useful when testing local changes.
+
+Instead of setting the VaultLicense option, you can set the VAULT_LICENSE_CI environment
+variable, which is better than committing a license to version control.
+
+Optionally you can set COMMIT_SHA, which will be appended to the image name we
+build as a debugging convenience.
+
+```go
+func Test_Custom_Build_With_Docker(t *testing.T) {
+	opts := docker.DefaultOptions(t)
+	cluster := docker.NewTestDockerCluster(t, opts)
+	defer cluster.Cleanup()
+}
+```
+
+There are a variety of helpers in the `github.com/hashicorp/vault/sdk/helper/testcluster`
+package, e.g. the tests below will create a pair of 3-node clusters and link them using
+PR or DR replication respectively, and fail if the replication state doesn't become healthy
+before the passed context expires.
+
+Again, as written, these depend on having a Vault Enterprise binary locally and the env
+var VAULT_BINARY set to point to it, as well as having VAULT_LICENSE_CI set.
+
+```go
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
+)
+
+func TestStandardPerfReplication_Docker(t *testing.T) {
+	opts := docker.DefaultOptions(t)
+	r, err := docker.NewReplicationSetDocker(t, opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Cleanup()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	err = r.StandardPerfReplication(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestStandardDRReplication_Docker(t *testing.T) {
+	opts := docker.DefaultOptions(t)
+	r, err := docker.NewReplicationSetDocker(t, opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer r.Cleanup()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+	err = r.StandardDRReplication(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+```
+
+Finally, here's an example of running an existing OSS docker test with a custom binary:
+
+```bash
+$ GOOS=linux make dev
+$ VAULT_BINARY=$(pwd)/bin/vault go test -run 'TestRaft_Configuration_Docker' ./vault/external_tests/raft/raft_binary
+ok      github.com/hashicorp/vault/vault/external_tests/raft/raft_binary       20.960s
+```
diff --git a/api/.copywrite.hcl b/api/.copywrite.hcl
new file mode 100644
index 000000000000..c4b09f33640c
--- /dev/null
+++ b/api/.copywrite.hcl
@@ -0,0 +1,8 @@
+schema_version = 1
+
+project {
+  license = "MPL-2.0"
+  copyright_year = 2024
+
+  header_ignore = []
+}
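The new `api/.copywrite.hcl` keeps the `api` module on MPL-2.0 headers even as the repository root moves to BUSL. A sketch of how such a config is typically exercised with the copywrite CLI, mirroring the `ci-copywriteheaders` Makefile target above (running `headers` without `--plan` to apply changes is an assumption about the CLI's default behavior):

```bash
# From the api/ module: report, then apply, the configured MPL-2.0 headers.
cd api
copywrite headers --plan   # dry run: list files missing the header
copywrite headers          # write headers in place
```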
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/api/README.md b/api/README.md index 7230ce779fe2..d21458c11449 100644 --- a/api/README.md +++ b/api/README.md @@ -4,6 +4,6 @@ Vault API This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. -For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs). +For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://developer.hashicorp.com/vault/docs/get-started/developer-qs). [![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/api/api_test.go b/api/api_test.go index e4ba3153203e..8bf69e0de97a 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/auth.go b/api/auth.go index fa92de4b3fd3..c1ef7a77989d 100644 --- a/api/auth.go +++ b/api/auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -63,7 +66,7 @@ func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[s return nil, fmt.Errorf("secret does not contain MFARequirements") } - s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload) + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload) if err != nil { return nil, err } diff --git a/api/auth/approle/approle.go b/api/auth/approle/approle.go index b8cf01228441..10d26b610f42 100644 --- a/api/auth/approle/approle.go +++ b/api/auth/approle/approle.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package approle import ( diff --git a/api/auth/approle/approle_test.go b/api/auth/approle/approle_test.go index f2628c695cc2..cdfb4e285e79 100644 --- a/api/auth/approle/approle_test.go +++ b/api/auth/approle/approle_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package approle import ( diff --git a/api/auth/approle/go.mod b/api/auth/approle/go.mod index 5270376f6e69..0c52b4943511 100644 --- a/api/auth/approle/go.mod +++ b/api/auth/approle/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/approle go 1.16 -require github.com/hashicorp/vault/api v1.8.1 +require github.com/hashicorp/vault/api v1.12.0 diff --git a/api/auth/approle/go.sum b/api/auth/approle/go.sum index c0a9666e8dd4..8645312630e5 100644 --- a/api/auth/approle/go.sum +++ b/api/auth/approle/go.sum @@ -1,160 +1,46 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod 
h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk 
v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -164,169 +50,88 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/aws/aws.go b/api/auth/aws/aws.go index 44a4f6db1f58..df873828e13c 100644 --- a/api/auth/aws/aws.go +++ b/api/auth/aws/aws.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package aws import ( @@ -53,7 +56,7 @@ const ( // passed as a parameter to the client.Auth().Login method. // // Supported options: WithRole, WithMountPath, WithIAMAuth, WithEC2Auth, -// WithPKCS7Signature, WithIdentitySignature, WithIAMServerIDHeader, WithNonce, WithRegion +// WithPKCS7Signature, WithIdentitySignature, WithRSA2048Signature, WithIAMServerIDHeader, WithNonce, WithRegion func NewAWSAuth(opts ...LoginOption) (*AWSAuth, error) { a := &AWSAuth{ mountPath: defaultMountPath, @@ -238,7 +241,7 @@ func WithIAMAuth() LoginOption { // If this option is not provided, will default to using the PKCS #7 signature. // The signature type used should match the type of the public AWS cert Vault // has been configured with to verify EC2 instance identity. 
-// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration +// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration func WithIdentitySignature() LoginOption { return func(a *AWSAuth) error { a.signatureType = identityType @@ -251,7 +254,7 @@ func WithIdentitySignature() LoginOption { // PKCS #7 is the default, but this method is provided for additional clarity. // The signature type used should match the type of the public AWS cert Vault // has been configured with to verify EC2 instance identity. -// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration +// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration func WithPKCS7Signature() LoginOption { return func(a *AWSAuth) error { a.signatureType = pkcs7Type @@ -259,6 +262,19 @@ func WithPKCS7Signature() LoginOption { } } +// WithRSA2048Signature will explicitly tell the client to send the RSA2048 +// signature to verify EC2 auth logins. Only used by EC2 auth type. +// If this option is not provided, will default to using the PKCS #7 signature. +// The signature type used should match the type of the public AWS cert Vault +// has been configured with to verify EC2 instance identity. +// https://developer.hashicorp.com/vault/api-docs/auth/aws#create-certificate-configuration +func WithRSA2048Signature() LoginOption { + return func(a *AWSAuth) error { + a.signatureType = rsa2048Type + return nil + } +} + func WithIAMServerIDHeader(headerValue string) LoginOption { return func(a *AWSAuth) error { a.iamServerIDHeaderValue = headerValue diff --git a/api/auth/aws/go.mod b/api/auth/aws/go.mod index 2174d38c4121..a4fe80a7bdc6 100644 --- a/api/auth/aws/go.mod +++ b/api/auth/aws/go.mod @@ -3,9 +3,9 @@ module github.com/hashicorp/vault/api/auth/aws go 1.16 require ( - github.com/aws/aws-sdk-go v1.30.27 + github.com/aws/aws-sdk-go v1.49.22 github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/vault/api v1.8.1 + github.com/hashicorp/vault/api v1.12.0 ) diff --git a/api/auth/aws/go.sum b/api/auth/aws/go.sum index 1af5e11246b0..96afd5f25d00 100644 --- a/api/auth/aws/go.sum +++ b/api/auth/aws/go.sum @@ -1,167 +1,64 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 
h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0=
 github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU=
+github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
 github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
-github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
-github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
 github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
 github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
-github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
 github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA=
 github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg=
-github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
-github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
 github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI=
-github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E=
-github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs=
-github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
-github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4=
+github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
 github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -172,177 +69,99 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
-github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/auth/azure/azure.go b/api/auth/azure/azure.go
index a09d15a14721..b68219570115 100644
--- a/api/auth/azure/azure.go
+++ b/api/auth/azure/azure.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package azure
 
 import (
diff --git a/api/auth/azure/go.mod b/api/auth/azure/go.mod
index d00993f8eb03..5fc1e6716e1f 100644
--- a/api/auth/azure/go.mod
+++ b/api/auth/azure/go.mod
@@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/azure
 
 go 1.16
 
-require github.com/hashicorp/vault/api v1.8.1
+require github.com/hashicorp/vault/api v1.12.0
diff --git a/api/auth/azure/go.sum b/api/auth/azure/go.sum
index c0a9666e8dd4..8645312630e5 100644
--- a/api/auth/azure/go.sum
+++ b/api/auth/azure/go.sum
@@ -1,160 +1,46 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
-github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
-github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
 github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
-github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk=
-github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
+github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw=
 github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
 github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM=
-github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM=
 github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ=
 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
-github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
 github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
 github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
-github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI=
-github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E=
-github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs=
-github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
-github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4=
+github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
@@ -164,169 +50,88 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
-github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E=
-google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/auth/gcp/gcp.go b/api/auth/gcp/gcp.go
index a5dd93646128..2d6ef842a4b4 100644
--- a/api/auth/gcp/gcp.go
+++ b/api/auth/gcp/gcp.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package gcp
 
 import (
diff --git a/api/auth/gcp/go.mod b/api/auth/gcp/go.mod
index 3181ec263c45..977ebec0a426 100644
--- a/api/auth/gcp/go.mod
+++ b/api/auth/gcp/go.mod
@@ -3,7 +3,9 @@ module github.com/hashicorp/vault/api/auth/gcp
 go 1.16
 
 require (
-	cloud.google.com/go v0.97.0
-	github.com/hashicorp/vault/api v1.8.1
-	google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0
+	cloud.google.com/go/compute/metadata v0.2.3
+	cloud.google.com/go/iam v0.13.0
+	github.com/hashicorp/vault/api v1.12.0
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+	google.golang.org/grpc v1.56.3 // indirect
 )
diff --git a/api/auth/gcp/go.sum b/api/auth/gcp/go.sum
index c9f85c4e892c..2a0772861148 100644
--- a/api/auth/gcp/go.sum
+++ b/api/auth/gcp/go.sum
@@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
 cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
@@ -15,6 +16,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
 cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
 cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
@@ -24,64 +26,628 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc
 cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
 cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go/accessapproval
v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod 
h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod 
h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= 
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod 
h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= 
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= 
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod 
h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod 
h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod 
h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= 
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod 
h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= 
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -90,30 +656,39 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod 
h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -143,13 +718,14 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -161,13 +737,16 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= 
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -177,6 +756,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -184,89 +764,81 @@ github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= 
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.0 h1:7765sW1XBt+qf4XKIYE4ebY9qc/yi9V2/egzGSUNMZU= -github.com/hashicorp/vault/api v1.8.0/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod 
h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -274,107 +846,124 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod 
h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize 
v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -398,10 +987,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -409,7 +1002,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -431,12 +1023,36 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net 
v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -451,8 +1067,22 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod 
h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -464,15 +1094,17 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -503,11 +1135,14 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -515,10 +1150,50 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 h1:6wSTsvPddg9gc/mVEEyk9oOAoxn+bT4Z9q1zx+4RwA4= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -526,15 +1201,30 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -547,6 +1237,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -575,20 +1266,38 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -616,8 +1325,37 @@ google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNe google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0 
h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -626,7 +1364,6 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -662,10 +1399,13 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -681,9 +1421,82 @@ google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 h1:5Tbluzus3QxoAJx4IefGt1W0HQZW4nuMrVk684jI74Q= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto 
v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto 
v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -709,8 +1522,23 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -724,22 +1552,21 @@ 
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -747,6 +1574,42 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod 
h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/api/auth/kubernetes/go.mod b/api/auth/kubernetes/go.mod index 6a084b9e85b7..62f1d3655867 100644 --- a/api/auth/kubernetes/go.mod +++ b/api/auth/kubernetes/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/kubernetes go 1.16 -require github.com/hashicorp/vault/api v1.8.1 +require github.com/hashicorp/vault/api v1.12.0 diff --git a/api/auth/kubernetes/go.sum b/api/auth/kubernetes/go.sum index c0a9666e8dd4..8645312630e5 100644 --- a/api/auth/kubernetes/go.sum +++ b/api/auth/kubernetes/go.sum @@ -1,160 +1,46 @@ -cloud.google.com/go 
v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= 
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -164,169 +50,88 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= 
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= 
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/kubernetes/kubernetes.go b/api/auth/kubernetes/kubernetes.go index c2fef86a5fd0..f0e38c17a2b4 100644 --- a/api/auth/kubernetes/kubernetes.go +++ b/api/auth/kubernetes/kubernetes.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package kubernetes import ( diff --git a/api/auth/ldap/go.mod b/api/auth/ldap/go.mod index f44f89356bd0..72326b0d416b 100644 --- a/api/auth/ldap/go.mod +++ b/api/auth/ldap/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/ldap go 1.16 -require github.com/hashicorp/vault/api v1.8.1 +require github.com/hashicorp/vault/api v1.12.0 diff --git a/api/auth/ldap/go.sum b/api/auth/ldap/go.sum index c0a9666e8dd4..8645312630e5 100644 --- a/api/auth/ldap/go.sum +++ b/api/auth/ldap/go.sum @@ -1,160 +1,46 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod 
h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= 
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk v0.6.0/go.mod 
h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -164,169 +50,88 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/ldap/ldap.go b/api/auth/ldap/ldap.go index 9f37abc664f7..fdf1a38dd0c1 100644 --- a/api/auth/ldap/ldap.go +++ b/api/auth/ldap/ldap.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package ldap import ( diff --git a/api/auth/ldap/ldap_test.go b/api/auth/ldap/ldap_test.go index 8633c4dfac11..abdccb035835 100644 --- a/api/auth/ldap/ldap_test.go +++ b/api/auth/ldap/ldap_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package ldap import ( diff --git a/api/auth/userpass/go.mod b/api/auth/userpass/go.mod index a3cf9ba5ea97..e764c5e31217 100644 --- a/api/auth/userpass/go.mod +++ b/api/auth/userpass/go.mod @@ -2,4 +2,4 @@ module github.com/hashicorp/vault/api/auth/userpass go 1.16 -require github.com/hashicorp/vault/api v1.8.1 +require github.com/hashicorp/vault/api v1.12.0 diff --git a/api/auth/userpass/go.sum b/api/auth/userpass/go.sum index c0a9666e8dd4..8645312630e5 100644 --- a/api/auth/userpass/go.sum +++ b/api/auth/userpass/go.sum @@ -1,160 +1,46 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod 
h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= -github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/vault/api v1.8.1 h1:bMieWIe6dAlqAAPReZO/8zYtXaWUg/21umwqGZpEjCI= -github.com/hashicorp/vault/api v1.8.1/go.mod h1:uJrw6D3y9Rv7hhmS17JQC50jbPDAZdjZoTtrCCxxs7E= -github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= -github.com/hashicorp/vault/sdk 
v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/hashicorp/vault/api v1.12.0 h1:meCpJSesvzQyao8FCOgk2fGdoADAnbDu2WPJN1lDLJ4= +github.com/hashicorp/vault/api v1.12.0/go.mod h1:si+lJCYO7oGkIoNPAN8j3azBLTn9SjMGS+jFaHd1Cck= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= @@ -164,169 +50,88 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/userpass/userpass.go b/api/auth/userpass/userpass.go index 124cd7a68f8e..3e8942953d3a 100644 --- a/api/auth/userpass/userpass.go +++ b/api/auth/userpass/userpass.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package userpass import ( diff --git a/api/auth/userpass/userpass_test.go b/api/auth/userpass/userpass_test.go index 0728117a1e8c..4fe68d8d4ef1 100644 --- a/api/auth/userpass/userpass_test.go +++ b/api/auth/userpass/userpass_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package userpass import ( diff --git a/api/auth_test.go b/api/auth_test.go index 46113d92f7be..ca69630cce5e 100644 --- a/api/auth_test.go +++ b/api/auth_test.go @@ -1,10 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( "context" "testing" - - "github.com/hashicorp/vault/sdk/logical" ) type mockAuthMethod struct { @@ -91,7 +92,7 @@ func TestAuth_MFALoginTwoPhase(t *testing.T) { m: &mockAuthMethod{ mockedSecret: &Secret{ Auth: &SecretAuth{ - MFARequirement: &logical.MFARequirement{ + MFARequirement: &MFARequirement{ MFARequestID: "a-req-id", MFAConstraints: nil, }, diff --git a/api/auth_token.go b/api/auth_token.go index 52be1e7852b9..1980be06ef5b 100644 --- a/api/auth_token.go +++ b/api/auth_token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/client.go b/api/client.go index c6843348e58e..52c991b1e2f6 100644 --- a/api/client.go +++ b/api/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -24,12 +27,9 @@ import ( "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" "golang.org/x/net/http2" "golang.org/x/time/rate" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/hashicorp/vault/sdk/logical" ) const ( @@ -56,7 +56,19 @@ const ( HeaderIndex = "X-Vault-Index" HeaderForward = "X-Vault-Forward" HeaderInconsistent = "X-Vault-Inconsistent" - TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + + + // NamespaceHeaderName is the header set to specify which namespace the + // request is intended for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" + + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection. + RequestHeaderName = "X-Vault-Request" + + TLSErrorString = "This error usually means that the server is running with TLS disabled\n" + "but the client is configured to use TLS. Please either enable TLS\n" + "on the server or run the client with -address set to an address\n" + "that uses the http protocol:\n\n" + @@ -70,6 +82,8 @@ const ( EnvVaultAgentAddress = "VAULT_AGENT_ADDR" EnvVaultInsecure = "VAULT_SKIP_VERIFY" + + DefaultAddress = "https://127.0.0.1:8200" ) // WrappingLookupFunc is a function that, given an HTTP verb and a path, @@ -114,7 +128,11 @@ type Config struct { // of three tries). MaxRetries int - // Timeout is for setting custom timeout parameter in the HttpClient + // Timeout, given a non-negative value, will apply the request timeout + // to each request function unless an earlier deadline is passed to the + // request function through context.Context. Note that this timeout is + // not applicable to Logical().ReadRaw* (raw response) functions. + // Defaults to 60 seconds. Timeout time.Duration // If there is an error when creating the configuration, this will be the @@ -169,6 +187,9 @@ type Config struct { // CloneToken from parent. CloneToken bool + // CloneTLSConfig from parent (tls.Config). + CloneTLSConfig bool + // ReadYourWrites ensures isolated read-after-write semantics by // providing discovered cluster replication states in each request. // The shared state is automatically propagated to all Client clones. @@ -187,6 +208,7 @@ type Config struct { // commands such as 'vault operator raft snapshot' as this redirects to the // primary node.
DisableRedirects bool + clientTLSConfig *tls.Config } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -228,7 +250,7 @@ type TLSConfig struct { // If an error is encountered, the Error field on the returned *Config will be populated with the specific error. func DefaultConfig() *Config { config := &Config{ - Address: "https://127.0.0.1:8200", + Address: DefaultAddress, HttpClient: cleanhttp.DefaultPooledClient(), Timeout: time.Second * 60, MinRetryWait: time.Millisecond * 1000, @@ -273,7 +295,14 @@ func (c *Config) configureTLS(t *TLSConfig) error { if c.HttpClient == nil { c.HttpClient = DefaultConfig().HttpClient } - clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + transport, ok := c.HttpClient.Transport.(*http.Transport) + if !ok { + return fmt.Errorf( + "unsupported HTTPClient transport type %T", c.HttpClient.Transport) + } + + clientTLSConfig := transport.TLSClientConfig var clientCert tls.Certificate foundClientCert := false @@ -321,10 +350,17 @@ func (c *Config) configureTLS(t *TLSConfig) error { if t.TLSServerName != "" { clientTLSConfig.ServerName = t.TLSServerName } + c.clientTLSConfig = clientTLSConfig return nil } +func (c *Config) TLSConfig() *tls.Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.clientTLSConfig.Clone() +} + // ConfigureTLS takes a set of TLS configurations and applies those to the // HTTP client. func (c *Config) ConfigureTLS(t *TLSConfig) error { @@ -494,6 +530,7 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { return nil, err } + previousAddress := c.Address c.Address = address if strings.HasPrefix(address, "unix://") { @@ -511,12 +548,12 @@ func (c *Config) ParseAddress(address string) (*url.URL, error) { // be pointing to the protocol used in the application layer and not to // the transport layer. Hence, setting the fields accordingly. u.Scheme = "http" - u.Host = socket + u.Host = "localhost" u.Path = "" } else { return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") } - } else if strings.HasPrefix(c.Address, "unix://") { + } else if strings.HasPrefix(previousAddress, "unix://") { // When the address being set does not begin with unix:// but the previous // address in the Config did, change the transport's DialContext back to // use the default configuration that cleanhttp uses. @@ -555,6 +592,7 @@ type Client struct { requestCallbacks []RequestCallback responseCallbacks []ResponseCallback replicationStateStore *replicationStateStore + hcpCookie *http.Cookie } // NewClient returns a new client for the given configuration. 
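Editor's aside, not part of the patch: the hunks above introduce a `Config.TLSConfig()` accessor, a `CloneTLSConfig` flag, and a reworded `Timeout` doc comment. A minimal sketch of how these might be used together, assuming the api package as patched here; the server name is a placeholder:

```go
package main

import (
	"fmt"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	cfg := vault.DefaultConfig() // Address falls back to DefaultAddress when VAULT_ADDR is unset

	// Per the new Timeout doc above: applied per request unless the caller
	// supplies an earlier deadline via context.Context.
	cfg.Timeout = 30 * time.Second

	// ConfigureTLS now records the resulting *tls.Config on the Config, and
	// the new TLSConfig() accessor hands back a defensive clone of it.
	if err := cfg.ConfigureTLS(&vault.TLSConfig{TLSServerName: "vault.example.com"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("server name:", cfg.TLSConfig().ServerName)

	client, err := vault.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// With the new CloneTLSConfig flag set, clones reuse the parent's stored
	// TLS configuration instead of dropping it.
	client.SetCloneTLSConfig(true)
	if _, err := client.Clone(); err != nil {
		log.Fatal(err)
	}
}
```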
@@ -617,7 +655,7 @@ func NewClient(c *Config) (*Client, error) { } // Add the VaultRequest SSRF protection header - client.headers[consts.RequestHeaderName] = []string{"true"} + client.headers[RequestHeaderName] = []string{"true"} if token := os.Getenv(EnvVaultToken); token != "" { client.token = token @@ -649,6 +687,7 @@ func (c *Client) CloneConfig() *Config { newConfig.CloneHeaders = c.config.CloneHeaders newConfig.CloneToken = c.config.CloneToken newConfig.ReadYourWrites = c.config.ReadYourWrites + newConfig.clientTLSConfig = c.config.clientTLSConfig // we specifically want a _copy_ of the client here, not a pointer to the original one newClient := *c.config.HttpClient @@ -934,7 +973,7 @@ func (c *Client) setNamespace(namespace string) { c.headers = make(http.Header) } - c.headers.Set(consts.NamespaceHeaderName, namespace) + c.headers.Set(NamespaceHeaderName, namespace) } // ClearNamespace removes the namespace header if set. @@ -942,7 +981,7 @@ func (c *Client) ClearNamespace() { c.modifyLock.Lock() defer c.modifyLock.Unlock() if c.headers != nil { - c.headers.Del(consts.NamespaceHeaderName) + c.headers.Del(NamespaceHeaderName) } } @@ -954,7 +993,7 @@ func (c *Client) Namespace() string { if c.headers == nil { return "" } - return c.headers.Get(consts.NamespaceHeaderName) + return c.headers.Get(NamespaceHeaderName) } // WithNamespace makes a shallow copy of Client, modifies it to use @@ -963,7 +1002,9 @@ func (c *Client) Namespace() string { func (c *Client) WithNamespace(namespace string) *Client { c2 := *c c2.modifyLock = sync.RWMutex{} - c2.headers = c.Headers() + c.modifyLock.RLock() + c2.headers = c.headersInternal() + c.modifyLock.RUnlock() if namespace == "" { c2.ClearNamespace() } else { @@ -988,6 +1029,33 @@ func (c *Client) SetToken(v string) { c.token = v } +// HCPCookie returns the HCP cookie being used by this client. It will +// return an empty string when no cookie is set. +func (c *Client) HCPCookie() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.hcpCookie == nil { + return "" + } + return c.hcpCookie.String() +} + +// SetHCPCookie sets the hcp cookie directly. This won't perform any auth +// verification, it simply sets the cookie properly for future requests. +func (c *Client) SetHCPCookie(v *http.Cookie) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if err := v.Valid(); err != nil { + return err + } + + c.hcpCookie = v + + return nil +} + // ClearToken deletes the token if it is set or does nothing otherwise. func (c *Client) ClearToken() { c.modifyLock.Lock() @@ -1000,7 +1068,12 @@ func (c *Client) Headers() http.Header { c.modifyLock.RLock() defer c.modifyLock.RUnlock() + return c.headersInternal() +} +// headersInternal gets the current set of headers used for requests. Must be called +// with the read modifyLock held. +func (c *Client) headersInternal() http.Header { if c.headers == nil { return nil } @@ -1118,6 +1191,26 @@ func (c *Client) ReadYourWrites() bool { return c.config.ReadYourWrites } +// SetCloneTLSConfig from parent. +func (c *Client) SetCloneTLSConfig(clone bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneTLSConfig = clone +} + +// CloneTLSConfig gets the configured CloneTLSConfig value.
+func (c *Client) CloneTLSConfig() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneTLSConfig +} + // Clone creates a new client with the same configuration. Note that the same // underlying http.Client is used; modifying the client from more than one // goroutine at once may not be safe, so modify the client as needed and then @@ -1128,24 +1221,28 @@ func (c *Client) ReadYourWrites() bool { // the api.Config struct, such as policy override and wrapping function // behavior, must currently then be set as desired on the new client. func (c *Client) Clone() (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() return c.clone(c.config.CloneHeaders) } // CloneWithHeaders creates a new client similar to Clone, with the difference -// being that the headers are always cloned +// being that the headers are always cloned func (c *Client) CloneWithHeaders() (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() return c.clone(true) } // clone creates a new client, with the headers being cloned based on the -// passed in cloneheaders boolean +// passed in cloneHeaders boolean. +// Must be called with the read lock and config read lock held. func (c *Client) clone(cloneHeaders bool) (*Client, error) { - c.modifyLock.RLock() - defer c.modifyLock.RUnlock() - config := c.config - config.modifyLock.RLock() - defer config.modifyLock.RUnlock() newConfig := &Config{ Address: config.Address, @@ -1164,13 +1261,18 @@ func (c *Client) clone(cloneHeaders bool) (*Client, error) { CloneToken: config.CloneToken, ReadYourWrites: config.ReadYourWrites, } + + if config.CloneTLSConfig { + newConfig.clientTLSConfig = config.clientTLSConfig + } + client, err := NewClient(newConfig) if err != nil { return nil, err } if cloneHeaders { - client.SetHeaders(c.Headers().Clone()) + client.SetHeaders(c.headersInternal().Clone()) } if config.CloneToken { @@ -1201,6 +1303,7 @@ func (c *Client) NewRequest(method, requestPath string) *Request { mfaCreds := c.mfaCreds wrappingLookupFunc := c.wrappingLookupFunc policyOverride := c.policyOverride + headers := c.headersInternal() c.modifyLock.RUnlock() host := addr.Host @@ -1227,6 +1330,8 @@ func (c *Client) NewRequest(method, requestPath string) *Request { Params: make(map[string][]string), } + req.HCPCookie = c.hcpCookie + var lookupPath string switch { case strings.HasPrefix(requestPath, "/v1/"): @@ -1245,7 +1350,7 @@ req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) } - req.Headers = c.Headers() + req.Headers = headers req.PolicyOverride = policyOverride return req @@ -1255,8 +1360,9 @@ // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: This method should not be used directly. Use higher level -// methods instead. +// Deprecated: RawRequest exists for historical compatibility and should not be +// used directly. Use client.Logical().ReadRaw(...) or higher level methods +// instead.
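Editor's aside, not part of the patch: the deprecation notices rewritten above point callers at the Logical API. A hedged migration sketch, assuming the api package as patched here; sys/health is just an illustrative read path:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Before (deprecated):
	//   resp, err := client.RawRequestWithContext(ctx, client.NewRequest("GET", "/v1/sys/health"))
	// After: the supported raw reader on Logical(). Note the "/v1/" prefix is
	// dropped; Logical paths are relative to the API root.
	resp, err := client.Logical().ReadRawWithContext(context.Background(), "sys/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```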
func (c *Client) RawRequest(r *Request) (*Response, error) { return c.RawRequestWithContext(context.Background(), r) } @@ -1265,8 +1371,9 @@ func (c *Client) RawRequest(r *Request) (*Response, error) { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. // -// Deprecated: This method should not be used directly. Use higher level -// methods instead. +// Deprecated: RawRequestWithContext exists for historical compatibility and +// should not be used directly. Use client.Logical().ReadRawWithContext(...) +// or higher level methods instead. func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { // Note: we purposefully do not call cancel manually. The reason is // when canceled, the request.Body will EOF when reading due to the way @@ -1288,7 +1395,7 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient - ns := c.headers.Get(consts.NamespaceHeaderName) + ns := c.headers.Get(NamespaceHeaderName) outputCurlString := c.config.OutputCurlString outputPolicy := c.config.OutputPolicy logger := c.config.Logger @@ -1301,9 +1408,9 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon // e.g. calls using (*Client).WithNamespace switch ns { case "": - r.Headers.Del(consts.NamespaceHeaderName) + r.Headers.Del(NamespaceHeaderName) default: - r.Headers.Set(consts.NamespaceHeaderName, ns) + r.Headers.Set(NamespaceHeaderName, ns) } for _, cb := range c.requestCallbacks { @@ -1349,6 +1456,7 @@ START: LastOutputPolicyError = &OutputPolicyError{ method: req.Method, path: strings.TrimPrefix(req.URL.Path, "/v1"), + params: req.URL.Query(), } return nil, LastOutputPolicyError } @@ -1456,8 +1564,8 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo } } // explicitly set the namespace header to current client - if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" { - r.Headers.Set(consts.NamespaceHeaderName, ns) + if ns := c.headers.Get(NamespaceHeaderName); ns != "" { + r.Headers.Set(NamespaceHeaderName, ns) } } @@ -1478,7 +1586,7 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo req.Host = r.URL.Host if len(r.ClientToken) != 0 { - req.Header.Set(consts.AuthHeaderName, r.ClientToken) + req.Header.Set(AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { @@ -1668,7 +1776,13 @@ func MergeReplicationStates(old []string, new string) []string { return strutil.RemoveDuplicates(ret, false) } -func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) { +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { cooked, err := base64.StdEncoding.DecodeString(raw) if err != nil { return nil, err @@ -1706,7 +1820,7 @@ func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error return nil, fmt.Errorf("invalid replicated index in state header: %w", err) } - return &logical.WALState{ + return &WALState{ ClusterID: pieces[1], LocalIndex: localIndex, ReplicatedIndex: replicatedIndex, diff --git a/api/client_test.go b/api/client_test.go index 844dcadd94fb..f89d53152786 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" "context" + "crypto/tls" "crypto/x509" "encoding/base64" "fmt" @@ -19,7 +23,6 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/helper/consts" ) func init() { @@ -83,7 +86,7 @@ func TestClientDefaultHttpClient_unixSocket(t *testing.T) { if client.addr.Scheme != "http" { t.Fatalf("bad: %s", client.addr.Scheme) } - if client.addr.Host != "/var/run/vault.sock" { + if client.addr.Host != "localhost" { t.Fatalf("bad: %s", client.addr.Host) } } @@ -101,14 +104,15 @@ func TestClientSetAddress(t *testing.T) { t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) } // Test switching to Unix Socket address from TCP address + client.config.HttpClient.Transport.(*http.Transport).DialContext = nil if err := client.SetAddress("unix:///var/run/vault.sock"); err != nil { t.Fatal(err) } if client.addr.Scheme != "http" { t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) } - if client.addr.Host != "/var/run/vault.sock" { - t.Fatalf("bad: expected: '/var/run/vault.sock' actual: %q", client.addr.Host) + if client.addr.Host != "localhost" { + t.Fatalf("bad: expected: 'localhost' actual: %q", client.addr.Host) } if client.addr.Path != "" { t.Fatalf("bad: expected '' actual: %q", client.addr.Path) @@ -117,6 +121,7 @@ func TestClientSetAddress(t *testing.T) { t.Fatal("bad: expected DialContext to not be nil") } // Test switching to TCP address from Unix Socket address + client.config.HttpClient.Transport.(*http.Transport).DialContext = nil if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { t.Fatal(err) } @@ -126,6 +131,9 @@ func TestClientSetAddress(t *testing.T) { if client.addr.Scheme != "http" { t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) } + if client.config.HttpClient.Transport.(*http.Transport).DialContext == nil { + t.Fatal("bad: expected DialContext to not be nil") + } } func TestClientToken(t *testing.T) { @@ -223,6 +231,7 @@ func TestClientDisableRedirects(t *testing.T) { for name, tc := range tests { test := tc + name := name t.Run(name, func(t *testing.T) { t.Parallel() numReqs := 0 @@ -442,7 +451,7 @@ func TestClientDeprecatedEnvSettings(t *testing.T) { func TestClientEnvNamespace(t *testing.T) { var seenNamespace string handler := func(w http.ResponseWriter, req *http.Request) { - seenNamespace = req.Header.Get(consts.NamespaceHeaderName) + seenNamespace = req.Header.Get(NamespaceHeaderName) } config, ln := testHTTPServer(t, http.HandlerFunc(handler)) defer ln.Close() @@ -588,6 +597,24 @@ func TestClone(t *testing.T) { }, token: "cloneToken", }, + { + name: "cloneTLSConfig-enabled", + config: &Config{ + CloneTLSConfig: true, + clientTLSConfig: &tls.Config{ + ServerName: "foo.bar.baz", + }, + }, + }, + { + name: "cloneTLSConfig-disabled", + config: &Config{ + CloneTLSConfig: false, + clientTLSConfig: &tls.Config{ + ServerName: "foo.bar.baz", + }, + }, + }, } for _, tt := range tests { @@ -696,10 +723,81 @@ func TestClone(t *testing.T) { t.Fatalf("expected replicationStateStore %v, actual %v", parent.replicationStateStore, clone.replicationStateStore) } + if tt.config.CloneTLSConfig { + if !reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + t.Fatalf("config.clientTLSConfig doesn't match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } else if tt.config.clientTLSConfig != nil { + if reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + 
t.Fatalf("config.clientTLSConfig should not match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } else { + if !reflect.DeepEqual(parent.config.TLSConfig(), clone.config.TLSConfig()) { + t.Fatalf("config.clientTLSConfig doesn't match: %v vs %v", + parent.config.TLSConfig(), clone.config.TLSConfig()) + } + } }) } } +// TestCloneWithHeadersNoDeadlock confirms that the cloning of the client doesn't cause +// a deadlock. +// Raised in https://github.com/hashicorp/vault/issues/22393 -- there was a +// potential deadlock caused by running the problematicFunc() function in +// multiple goroutines. +func TestCloneWithHeadersNoDeadlock(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + + wg := &sync.WaitGroup{} + + problematicFunc := func() { + client.SetCloneToken(true) + _, err := client.CloneWithHeaders() + if err != nil { + t.Fatal(err) + } + wg.Done() + } + + for i := 0; i < 1000; i++ { + wg.Add(1) + go problematicFunc() + } + wg.Wait() +} + +// TestCloneNoDeadlock is like TestCloneWithHeadersNoDeadlock but with +// Clone instead of CloneWithHeaders +func TestCloneNoDeadlock(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + + wg := &sync.WaitGroup{} + + problematicFunc := func() { + client.SetCloneToken(true) + _, err := client.Clone() + if err != nil { + t.Fatal(err) + } + wg.Done() + } + + for i := 0; i < 1000; i++ { + wg.Add(1) + go problematicFunc() + } + wg.Wait() +} + func TestSetHeadersRaceSafe(t *testing.T) { client, err1 := NewClient(nil) if err1 != nil { @@ -1267,7 +1365,7 @@ func TestClient_SetCloneToken(t *testing.T) { func TestClientWithNamespace(t *testing.T) { var ns string handler := func(w http.ResponseWriter, req *http.Request) { - ns = req.Header.Get(consts.NamespaceHeaderName) + ns = req.Header.Get(NamespaceHeaderName) } config, ln := testHTTPServer(t, http.HandlerFunc(handler)) defer ln.Close() @@ -1423,7 +1521,7 @@ func TestParseAddressWithUnixSocket(t *testing.T) { if u.Scheme != "http" { t.Fatal("Scheme not changed to http") } - if u.Host != "/var/run/vault.sock" { + if u.Host != "localhost" { t.Fatal("Host not changed to socket name") } if u.Path != "" { diff --git a/api/go.mod b/api/go.mod index aa7ea8c448b2..7f75b48d068f 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,11 +1,15 @@ module github.com/hashicorp/vault/api +// The Go version directive for the api package should normally only be updated when +// code in the api package requires a newer Go version to build. It should not +// automatically track the Go version used to build Vault itself. Many projects import +// the api module and we don't want to impose a newer version on them any more than we +// have to. 
go 1.19 -replace github.com/hashicorp/vault/sdk => ../sdk - require ( github.com/cenkalti/backoff/v3 v3.0.0 + github.com/go-jose/go-jose/v3 v3.0.3 github.com/go-test/deep v1.0.2 github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -14,44 +18,25 @@ require ( github.com/hashicorp/go-retryablehttp v0.6.6 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/vault/sdk v0.6.0 github.com/mitchellh/mapstructure v1.5.0 - golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 + github.com/stretchr/testify v1.8.4 + golang.org/x/net v0.25.0 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 - gopkg.in/square/go-jose.v2 v2.5.1 ) require ( - github.com/armon/go-metrics v0.3.9 // indirect - github.com/armon/go-radix v1.0.0 // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/frankban/quicktest v1.13.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-plugin v1.4.5 // indirect - github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.16.0 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/hashicorp/go-version v1.2.0 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect - github.com/oklog/run v1.0.0 // indirect - github.com/pierrec/lz4 v2.5.2+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - go.uber.org/atomic v1.9.0 // indirect - golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect - golang.org/x/text v0.3.8 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - google.golang.org/grpc v1.41.0 // indirect - google.golang.org/protobuf v1.26.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/api/go.sum b/api/go.sum index 4e02343ff0bd..452fc5c7e17d 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,108 +1,35 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template 
v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= -github.com/frankban/quicktest v1.13.0 
h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 
h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= -github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= @@ -110,198 +37,91 @@ github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-sockaddr v1.0.2 
h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/help.go b/api/help.go index 0988ebcd1fc9..c119f6c3c953 100644 --- a/api/help.go +++ b/api/help.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/kv.go b/api/kv.go index 37699df266f9..720393254690 100644 --- a/api/kv.go +++ b/api/kv.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import "errors" @@ -35,7 +38,7 @@ type KVSecret struct { // by default when a server is started in -dev mode. See the kvv2 struct. // // Learn more about the KV secrets engine here: -// https://www.vaultproject.io/docs/secrets/kv +// https://developer.hashicorp.com/vault/docs/secrets/kv func (c *Client) KVv1(mountPath string) *KVv1 { return &KVv1{c: c, mountPath: mountPath} } @@ -50,7 +53,7 @@ func (c *Client) KVv1(mountPath string) *KVv1 { // as these are the default settings when a server is started in -dev mode. 
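For orientation, the KVv1 and KVv2 helpers documented in this kv.go hunk are thin wrappers that scope a client to one KV mount. A short usage sketch against the KV v2 engine; the mount path, secret name, and data are illustrative, and VAULT_ADDR/VAULT_TOKEN are assumed to be set in the environment:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and friends from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// "secret" is the KV v2 mount created by default in -dev mode.
	kv := client.KVv2("secret")

	// Write a version of the (placeholder) secret.
	if _, err := kv.Put(ctx, "my-app", map[string]interface{}{
		"password": "hunter2",
	}); err != nil {
		log.Fatal(err)
	}

	// Read the latest version back.
	s, err := kv.Get(ctx, "my-app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.Data["password"])
}
```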
// // Learn more about the KV secrets engine here: -// https://www.vaultproject.io/docs/secrets/kv +// https://developer.hashicorp.com/vault/docs/secrets/kv func (c *Client) KVv2(mountPath string) *KVv2 { return &KVv2{c: c, mountPath: mountPath} } diff --git a/api/kv_test.go b/api/kv_test.go index f8b3d3917be4..36d769feaa6b 100644 --- a/api/kv_test.go +++ b/api/kv_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/kv_v1.go b/api/kv_v1.go index 22ba992384b7..a914e03576e8 100644 --- a/api/kv_v1.go +++ b/api/kv_v1.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/kv_v2.go b/api/kv_v2.go index 335c21001be2..72c29eaa4261 100644 --- a/api/kv_v2.go +++ b/api/kv_v2.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go index 5f3eadbffdd8..4bc1390b93af 100644 --- a/api/lifetime_watcher.go +++ b/api/lifetime_watcher.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "errors" "math/rand" + "strings" "sync" "time" @@ -28,6 +32,7 @@ var ( DefaultRenewerRenewBuffer = 5 ) +//go:generate enumer -type=RenewBehavior -trimprefix=RenewBehavior type RenewBehavior uint const ( @@ -147,6 +152,13 @@ func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, random := i.Rand if random == nil { + // NOTE: + // Rather than a cryptographically secure random number generator (RNG), + // the default behavior uses the math/rand package. The random number is + // used to introduce a slight jitter when calculating the grace period + // for a monitored secret. This is intended to stagger renewal + // requests to the Vault server, but in a semi-predictable way, so there + // is no need to use a cryptographically secure RNG. random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) } @@ -278,12 +290,18 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, switch { case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: // Can't or won't renew, just keep the same expiration so we exit - // when it's reauthentication time + // when it's re-authentication time remainingLeaseDuration = fallbackLeaseDuration default: // Renew the token renewal, err = renew(credString, r.increment) + if err != nil && strings.Contains(err.Error(), "permission denied") { + // We can't renew since the token doesn't have permission to. Fall back + // to the code path for non-renewable tokens. + nonRenewable = true + continue + }
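The renewal loop being reworked here is internal machinery; consumers of the api package drive it through channels. For scale, with a remaining lease of 60s and a grace of 15s, the sleep computed below works out to roughly 40s plus 5s, about 45s. A sketch of typical watcher usage, assuming secret is a leased secret the caller has already read:

```go
package example

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

// watchLease keeps a leased secret renewed until renewal fails or the lease
// reaches its maximum TTL, then returns the terminal error (possibly nil).
func watchLease(client *vault.Client, secret *vault.Secret) error {
	watcher, err := client.NewLifetimeWatcher(&vault.LifetimeWatcherInput{
		Secret: secret,
	})
	if err != nil {
		return err
	}

	// Start runs the renewal loop shown in the diff; it blocks, so run it
	// in its own goroutine and stop it when we are done.
	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// The watcher gave up: renewal errored or the lease is exhausted.
			return err
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed lease at %s", renewal.RenewedAt)
		}
	}
}
```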
- if remainingLeaseDuration > priorDuration { - r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) - } - priorDuration = remainingLeaseDuration - - // The sleep duration is set to 2/3 of the current lease duration plus - // 1/3 of the current grace period, which adds jitter. - sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) } + // remainingLeaseDuration becomes the priorDuration for the next loop + priorDuration = remainingLeaseDuration + // If we are within grace, return now; or, if the amount of time we // would sleep would land us in the grace period. This helps with short // tokens; for example, you don't want a current lease duration of 4 @@ -366,15 +377,32 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, return nil } + timer := time.NewTimer(sleepDuration) select { case <-r.stopCh: + timer.Stop() return nil - case <-time.After(sleepDuration): + case <-timer.C: continue } } } +// calculateSleepDuration calculates the amount of time the LifeTimeWatcher should sleep +// before re-entering its loop. +func (r *LifetimeWatcher) calculateSleepDuration(remainingLeaseDuration, priorDuration time.Duration) time.Duration { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if remainingLeaseDuration > priorDuration { + r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) + } + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + return time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) +} + // calculateGrace calculates the grace period based on the minimum of the // remaining lease duration and the token increment value; it also adds some // jitter to not have clients be in sync. diff --git a/api/logical.go b/api/logical.go index 1a720cbf2c45..068e9068f389 100644 --- a/api/logical.go +++ b/api/logical.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -11,7 +15,6 @@ import ( "strings" "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/sdk/helper/jsonutil" ) const ( @@ -69,20 +72,46 @@ func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data return c.ParseRawResponseAndCloseBody(resp, err) } +// ReadRaw attempts to read the value stored at the given Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use ReadRawWithContext +// instead and set the timeout through context.WithTimeout or context.WithDeadline. func (c *Logical) ReadRaw(path string) (*Response, error) { - return c.ReadRawWithData(path, nil) + return c.ReadRawWithDataWithContext(context.Background(), path, nil) +} + +// ReadRawWithContext attempts to read the value stored at the give Vault path +// (without '/v1/' prefix) and returns a raw *http.Response. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. 
+func (c *Logical) ReadRawWithContext(ctx context.Context, path string) (*Response, error) { + return c.ReadRawWithDataWithContext(ctx, path, nil) } +// ReadRawWithData attempts to read the value stored at the given Vault +// path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' map +// is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please use +// ReadRawWithDataWithContext instead and set the timeout through +// context.WithTimeout or context.WithDeadline. func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) { return c.ReadRawWithDataWithContext(context.Background(), path, data) } +// ReadRawWithDataWithContext attempts to read the value stored at the given +// Vault path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' +// map is added as query parameters to the request. +// +// Note: the raw-response functions do not respect the client-configured +// request timeout; if a timeout is desired, please set it through +// context.WithTimeout or context.WithDeadline. func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { - // See note in client.go, RawRequestWithContext for why we do not call - // Cancel here. The difference between these two methods are that the - // former takes a Request object directly, whereas this builds one - // up for the caller. - ctx, _ = c.c.withConfiguredTimeout(ctx) return c.readRawWithDataWithContext(ctx, path, data) } @@ -183,6 +212,17 @@ func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[st return c.write(ctx, path, r) } +func (c *Logical) WriteRaw(path string, data []byte) (*Response, error) { + return c.WriteRawWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteRawWithContext(ctx context.Context, path string, data []byte) (*Response, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.writeRaw(ctx, r) +} + func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) r.Headers.Set("Content-Type", "application/merge-patch+json") @@ -232,6 +272,14 @@ func (c *Logical) write(ctx context.Context, path string, request *Request) (*Se return ParseSecret(resp.Body) } +func (c *Logical) writeRaw(ctx context.Context, request *Request) (*Response, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + return resp, err +} + func (c *Logical) Delete(path string) (*Secret, error) { return c.DeleteWithContext(context.Background(), path) } @@ -364,7 +412,9 @@ func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) ( wrappedSecret := new(Secret) buf := bytes.NewBufferString(secret.Data["response"].(string)) - if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { + dec := json.NewDecoder(buf) + dec.UseNumber() + if err := dec.Decode(wrappedSecret); err != nil { return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) } diff --git a/api/output_policy.go b/api/output_policy.go index 85d1617e5e94..c3ec522891b5 100644 --- a/api/output_policy.go +++ b/api/output_policy.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
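As the new doc comments above stress, the raw-response helpers bypass the client-configured request timeout, so a caller who wants a bound must supply it via the context. A sketch of that pattern; sys/health is just an example target path:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The raw helpers ignore the client-level timeout, so enforce one here.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.Logical().ReadRawWithContext(ctx, "sys/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d: %s\n", resp.StatusCode, body)
}
```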
+// SPDX-License-Identifier: MPL-2.0 + package api import ( "fmt" "net/http" "net/url" + "strconv" "strings" ) @@ -16,6 +20,7 @@ var LastOutputPolicyError *OutputPolicyError type OutputPolicyError struct { method string path string + params url.Values finalHCLString string } @@ -44,8 +49,22 @@ func (d *OutputPolicyError) HCLString() (string, error) { // Builds a sample policy document from the request func (d *OutputPolicyError) buildSamplePolicy() (string, error) { + operation := d.method + // A list operation is often expressed via the ?list=true URL query parameter + // rather than as an http.Method, so check for that parameter and switch to + // the intended LIST operation when it is set. + if d.params.Has("list") { + isList, err := strconv.ParseBool(d.params.Get("list")) + if err != nil { + return "", fmt.Errorf("the value of the list url param is not a bool: %v", err) + } + + if isList { + operation = "LIST" + } + } + var capabilities []string - switch d.method { + switch operation { case http.MethodGet, "": capabilities = append(capabilities, "read") case http.MethodPost, http.MethodPut: @@ -59,17 +78,15 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) { capabilities = append(capabilities, "list") } - // sanitize, then trim the Vault address and v1 from the front of the path - path, err := url.PathUnescape(d.path) - if err != nil { - return "", fmt.Errorf("failed to unescape request URL characters: %v", err) - } - // determine whether to add sudo capability - if IsSudoPath(path) { + if IsSudoPath(d.path) { capabilities = append(capabilities, "sudo") } + return formatOutputPolicy(d.path, capabilities), nil +} + +func formatOutputPolicy(path string, capabilities []string) string { // the OpenAPI response has a / in front of each path, // but policies need the path without that leading slash path = strings.TrimLeft(path, "/") @@ -78,5 +95,5 @@ func (d *OutputPolicyError) buildSamplePolicy() (string, error) { return fmt.Sprintf( `path "%s" { capabilities = ["%s"] -}`, path, capStr), nil +}`, path, capStr) } diff --git a/api/output_policy_test.go b/api/output_policy_test.go new file mode 100644 index 000000000000..2092e2ba2a01 --- /dev/null +++ b/api/output_policy_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc.
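To see the new list=true handling end to end: in output-policy mode the client does not send the request at all; it records the policy the call would need and returns LastOutputPolicyError, as shown in the client.go hunk earlier. A sketch under the assumption that output-policy mode is toggled with SetOutputPolicy, which is what the CLI's -output-policy flag uses:

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// In output-policy mode no request is sent; the client records the
	// policy the call would require. (SetOutputPolicy is assumed here.)
	client.SetOutputPolicy(true)

	// list=true rides along as a query parameter, which buildSamplePolicy
	// now maps to the "list" capability. The path is illustrative.
	_, err = client.Logical().ReadRawWithData("secret/metadata", map[string][]string{
		"list": {"true"},
	})
	if err != nil && vault.LastOutputPolicyError != nil {
		hcl, herr := vault.LastOutputPolicyError.HCLString()
		if herr != nil {
			log.Fatal(herr)
		}
		// Prints the sample policy for the list call.
		fmt.Println(hcl)
	}
}
```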
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBuildSamplePolicy(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + req *OutputPolicyError + expected string + err error + }{ + { + "happy path", + &OutputPolicyError{ + method: http.MethodGet, + path: "/something", + }, + formatOutputPolicy("/something", []string{"read"}), + nil, + }, + { // included to clear up confusion around the removed 'sanitize' comment: the path is not sanitized here + "demonstrate that this function does not format fully", + &OutputPolicyError{ + method: http.MethodGet, + path: "http://vault.test/v1/something", + }, + formatOutputPolicy("http://vault.test/v1/something", []string{"read"}), + nil, + }, + { // test that list is properly returned + "list over read returned", + &OutputPolicyError{ + method: http.MethodGet, + path: "/something", + params: url.Values{ + "list": []string{"true"}, + }, + }, + formatOutputPolicy("/something", []string{"list"}), + nil, + }, + { + "valid protected path", + &OutputPolicyError{ + method: http.MethodGet, + path: "/sys/config/ui/headers/", + }, + formatOutputPolicy("/sys/config/ui/headers/", []string{"read", "sudo"}), + nil, + }, + { // ensure that a path whose trailing slash has been trimmed (as the formatting code does) is still recognized as a sudo path + "valid protected path no trailing /", + &OutputPolicyError{ + method: http.MethodGet, + path: "/sys/config/ui/headers", + }, + formatOutputPolicy("/sys/config/ui/headers", []string{"read", "sudo"}), + nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := tc.req.buildSamplePolicy() + if tc.err != err { + t.Fatalf("expected the error to be %v, instead got %v\n", tc.err, err) + } + + if tc.expected != result { + t.Fatalf("expected the policy string to be %v, instead got %v\n", tc.expected, result) + } + }) + } +} diff --git a/api/output_string.go b/api/output_string.go index 80c591f20b5c..d7777712d209 100644 --- a/api/output_string.go +++ b/api/output_string.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/plugin_helpers.go b/api/plugin_helpers.go index 0077ec769bcd..3705c7310a85 100644 --- a/api/plugin_helpers.go +++ b/api/plugin_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -9,13 +12,23 @@ import ( "flag" "net/url" "os" - "regexp" - squarejwt "gopkg.in/square/go-jose.v2/jwt" + "github.com/go-jose/go-jose/v3/jwt" "github.com/hashicorp/errwrap" ) +// This file contains helper code used when writing Vault auth method or secrets engine plugins. +// +// As such, it would be better located in the sdk module with the rest of the code that exists only to support plugins, +// rather than in api, but it is here for historical reasons. (The api module used to depend on the sdk module, and this code +// calls NewClient within the api package, so placing it in the sdk would have created a dependency cycle. This reason +// is now historical, as the dependency between sdk and api has since been reversed in direction.) +// Moving this code to the sdk would be appropriate if an api v2.0.0 release is ever planned. +// +// This helper code is used when a plugin is hosted by Vault 1.11 and earlier. Vault 1.12 and sdk v0.6.0 introduced +// version 5 of the backend plugin interface, which uses go-plugin's AutoMTLS feature instead of this code.
+ const ( // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override // setting a TLSProviderFunc for a plugin. @@ -30,50 +43,6 @@ const ( PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" ) -// sudoPaths is a map containing the paths that require a token's policy -// to have the "sudo" capability. The keys are the paths as strings, in -// the same format as they are returned by the OpenAPI spec. The values -// are the regular expressions that can be used to test whether a given -// path matches that path or not (useful specifically for the paths that -// contain templated fields.) -var sudoPaths = map[string]*regexp.Regexp{ - "/auth/{token_mount_path}/accessors/": regexp.MustCompile(`^/auth/.+/accessors/$`), - "/{pki_mount_path}/root": regexp.MustCompile(`^/.+/root$`), - "/{pki_mount_path}/root/sign-self-issued": regexp.MustCompile(`^/.+/root/sign-self-issued$`), - "/sys/audit": regexp.MustCompile(`^/sys/audit$`), - "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), - "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), - "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), - "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), - "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), - "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), - "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), - "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), - "/sys/leases": regexp.MustCompile(`^/sys/leases$`), - "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), - "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), - "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), - "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), - "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), - "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), - "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), - "/sys/raw": regexp.MustCompile(`^/sys/raw$`), - "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), - "/sys/remount": regexp.MustCompile(`^/sys/remount$`), - "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), - "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), - "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), - "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), - - // enterprise-only paths - "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), - "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), - "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), - "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), - "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), - "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), -} - // PluginAPIClientMeta is a helper that plugins can use to configure TLS connections // back to Vault. 
type PluginAPIClientMeta struct { @@ -82,6 +51,7 @@ type PluginAPIClientMeta struct { flagCAPath string flagClientCert string flagClientKey string + flagServerName string flagInsecure bool } @@ -93,6 +63,7 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { fs.StringVar(&f.flagCAPath, "ca-path", "", "") fs.StringVar(&f.flagClientCert, "client-cert", "", "") fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.StringVar(&f.flagServerName, "tls-server-name", "", "") fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") return fs @@ -101,13 +72,13 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { // GetTLSConfig will return a TLSConfig based off the values from the flags func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { // If we need custom TLS configuration, then set it - if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" { t := &TLSConfig{ CACert: f.flagCACert, CAPath: f.flagCAPath, ClientCert: f.flagClientCert, ClientKey: f.flagClientKey, - TLSServerName: "", + TLSServerName: f.flagServerName, Insecure: f.flagInsecure, } @@ -132,7 +103,7 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return func() (*tls.Config, error) { unwrapToken := os.Getenv(PluginUnwrapTokenEnv) - parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + parsedJWT, err := jwt.ParseSigned(unwrapToken) if err != nil { return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) } @@ -241,28 +212,3 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return tlsConfig, nil } } - -func SudoPaths() map[string]*regexp.Regexp { - return sudoPaths -} - -// Determine whether the given path requires the sudo capability -func IsSudoPath(path string) bool { - // Return early if the path is any of the non-templated sudo paths. - if _, ok := sudoPaths[path]; ok { - return true - } - - // Some sudo paths have templated fields in them. - // (e.g. /sys/revoke-prefix/{prefix}) - // The values in the sudoPaths map are actually regular expressions, - // so we can check if our path matches against them. 
- for _, sudoPathRegexp := range sudoPaths { - match := sudoPathRegexp.MatchString(path) - if match { - return true - } - } - - return false -} diff --git a/api/plugin_helpers_test.go b/api/plugin_helpers_test.go deleted file mode 100644 index 453720ea7a5a..000000000000 --- a/api/plugin_helpers_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package api - -import "testing" - -func TestIsSudoPath(t *testing.T) { - t.Parallel() - - testCases := []struct { - path string - expected bool - }{ - { - "/not/in/sudo/paths/list", - false, - }, - { - "/sys/raw/single-node-path", - true, - }, - { - "/sys/raw/multiple/nodes/path", - true, - }, - { - "/sys/raw/WEIRD(but_still_valid!)p4Th?🗿笑", - true, - }, - { - "/sys/auth/path/in/middle/tune", - true, - }, - { - "/sys/plugins/catalog/some-type", - true, - }, - { - "/sys/plugins/catalog/some/type/or/name/with/slashes", - false, - }, - { - "/sys/plugins/catalog/some-type/some-name", - true, - }, - { - "/sys/plugins/catalog/some-type/some/name/with/slashes", - false, - }, - } - - for _, tc := range testCases { - result := IsSudoPath(tc.path) - if result != tc.expected { - t.Fatalf("expected api.IsSudoPath to return %v for path %s but it returned %v", tc.expected, tc.path, result) - } - } -} diff --git a/api/plugin_runtime_types.go b/api/plugin_runtime_types.go new file mode 100644 index 000000000000..2514f1279db1 --- /dev/null +++ b/api/plugin_runtime_types.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_runtime_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginRuntimeTypes = _PluginRuntimeTypeValues + +//go:generate enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake +type PluginRuntimeType uint32 + +// This is a list of PluginRuntimeTypes used by Vault. +const ( + PluginRuntimeTypeUnsupported PluginRuntimeType = iota + PluginRuntimeTypeContainer +) + +// ParsePluginRuntimeType is a wrapper around PluginRuntimeTypeString kept for backwards compatibility. +func ParsePluginRuntimeType(PluginRuntimeType string) (PluginRuntimeType, error) { + t, err := PluginRuntimeTypeString(PluginRuntimeType) + if err != nil { + return PluginRuntimeTypeUnsupported, fmt.Errorf("%q is not a supported plugin runtime type", PluginRuntimeType) + } + return t, nil +} diff --git a/api/plugin_types.go b/api/plugin_types.go new file mode 100644 index 000000000000..c8f69ae404f2 --- /dev/null +++ b/api/plugin_types.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "fmt" +) + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. 
Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} + +// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a +// string or a uint32. All new serialization will be as a string, but we +// previously serialized as a uint32 so we need to support that for backwards +// compatibility. +func (p *PluginType) UnmarshalJSON(data []byte) error { + var asString string + err := json.Unmarshal(data, &asString) + if err == nil { + *p, err = ParsePluginType(asString) + return err + } + + var asUint32 uint32 + err = json.Unmarshal(data, &asUint32) + if err != nil { + return err + } + *p = PluginType(asUint32) + switch *p { + case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets: + return nil + default: + return fmt.Errorf("%d is not a supported plugin type", asUint32) + } +} + +// MarshalJSON implements json.Marshaler. +func (p PluginType) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} diff --git a/api/plugin_types_test.go b/api/plugin_types_test.go new file mode 100644 index 000000000000..0b6085379b43 --- /dev/null +++ b/api/plugin_types_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types_test.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "testing" +) + +type testType struct { + PluginType PluginType `json:"plugin_type"` +} + +func TestPluginTypeJSONRoundTrip(t *testing.T) { + for _, pluginType := range PluginTypes { + original := testType{ + PluginType: pluginType, + } + asBytes, err := json.Marshal(original) + if err != nil { + t.Fatal(err) + } + + var roundTripped testType + err = json.Unmarshal(asBytes, &roundTripped) + if err != nil { + t.Fatal(err) + } + + if original != roundTripped { + t.Fatalf("expected %v, got %v", original, roundTripped) + } + } +} + +func TestPluginTypeJSONUnmarshal(t *testing.T) { + // Failure/unsupported cases. + for name, tc := range map[string]string{ + "unsupported": `{"plugin_type":"unsupported"}`, + "random string": `{"plugin_type":"foo"}`, + "boolean": `{"plugin_type":true}`, + "empty": `{"plugin_type":""}`, + "negative": `{"plugin_type":-1}`, + "out of range": `{"plugin_type":10}`, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc), &result) + if err == nil { + t.Fatal("expected error") + } + }) + } + + // Valid cases. 
+ for name, tc := range map[string]struct { + json string + expected PluginType + }{ + "unknown": {`{"plugin_type":"unknown"}`, PluginTypeUnknown}, + "auth": {`{"plugin_type":"auth"}`, PluginTypeCredential}, + "secret": {`{"plugin_type":"secret"}`, PluginTypeSecrets}, + "database": {`{"plugin_type":"database"}`, PluginTypeDatabase}, + "absent": {`{}`, PluginTypeUnknown}, + "integer unknown": {`{"plugin_type":0}`, PluginTypeUnknown}, + "integer auth": {`{"plugin_type":1}`, PluginTypeCredential}, + "integer db": {`{"plugin_type":2}`, PluginTypeDatabase}, + "integer secret": {`{"plugin_type":3}`, PluginTypeSecrets}, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc.json), &result) + if err != nil { + t.Fatal(err) + } + if tc.expected != result.PluginType { + t.Fatalf("expected %v, got %v", tc.expected, result.PluginType) + } + }) + } +} + +func TestUnknownTypeExcludedWithOmitEmpty(t *testing.T) { + type testTypeOmitEmpty struct { + Type PluginType `json:"type,omitempty"` + } + bytes, err := json.Marshal(testTypeOmitEmpty{}) + if err != nil { + t.Fatal(err) + } + m := map[string]any{} + json.Unmarshal(bytes, &m) + if _, exists := m["type"]; exists { + t.Fatal("type should not be present") + } +} diff --git a/api/pluginruntimetype_enumer.go b/api/pluginruntimetype_enumer.go new file mode 100644 index 000000000000..663f440ff446 --- /dev/null +++ b/api/pluginruntimetype_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake"; DO NOT EDIT. + +package api + +import ( + "fmt" +) + +const _PluginRuntimeTypeName = "unsupportedcontainer" + +var _PluginRuntimeTypeIndex = [...]uint8{0, 11, 20} + +func (i PluginRuntimeType) String() string { + if i >= PluginRuntimeType(len(_PluginRuntimeTypeIndex)-1) { + return fmt.Sprintf("PluginRuntimeType(%d)", i) + } + return _PluginRuntimeTypeName[_PluginRuntimeTypeIndex[i]:_PluginRuntimeTypeIndex[i+1]] +} + +var _PluginRuntimeTypeValues = []PluginRuntimeType{0, 1} + +var _PluginRuntimeTypeNameToValueMap = map[string]PluginRuntimeType{ + _PluginRuntimeTypeName[0:11]: 0, + _PluginRuntimeTypeName[11:20]: 1, +} + +// PluginRuntimeTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PluginRuntimeTypeString(s string) (PluginRuntimeType, error) { + if val, ok := _PluginRuntimeTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to PluginRuntimeType values", s) +} + +// PluginRuntimeTypeValues returns all values of the enum +func PluginRuntimeTypeValues() []PluginRuntimeType { + return _PluginRuntimeTypeValues +} + +// IsAPluginRuntimeType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i PluginRuntimeType) IsAPluginRuntimeType() bool { + for _, v := range _PluginRuntimeTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/api/renewbehavior_enumer.go b/api/renewbehavior_enumer.go new file mode 100644 index 000000000000..9b272e3e0cec --- /dev/null +++ b/api/renewbehavior_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=RenewBehavior -trimprefix=RenewBehavior"; DO NOT EDIT. 
+ +package api + +import ( + "fmt" +) + +const _RenewBehaviorName = "IgnoreErrorsRenewDisabledErrorOnErrors" + +var _RenewBehaviorIndex = [...]uint8{0, 12, 25, 38} + +func (i RenewBehavior) String() string { + if i >= RenewBehavior(len(_RenewBehaviorIndex)-1) { + return fmt.Sprintf("RenewBehavior(%d)", i) + } + return _RenewBehaviorName[_RenewBehaviorIndex[i]:_RenewBehaviorIndex[i+1]] +} + +var _RenewBehaviorValues = []RenewBehavior{0, 1, 2} + +var _RenewBehaviorNameToValueMap = map[string]RenewBehavior{ + _RenewBehaviorName[0:12]: 0, + _RenewBehaviorName[12:25]: 1, + _RenewBehaviorName[25:38]: 2, +} + +// RenewBehaviorString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func RenewBehaviorString(s string) (RenewBehavior, error) { + if val, ok := _RenewBehaviorNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to RenewBehavior values", s) +} + +// RenewBehaviorValues returns all values of the enum +func RenewBehaviorValues() []RenewBehavior { + return _RenewBehaviorValues +} + +// IsARenewBehavior returns "true" if the value is listed in the enum definition. "false" otherwise +func (i RenewBehavior) IsARenewBehavior() bool { + for _, v := range _RenewBehaviorValues { + if i == v { + return true + } + } + return false +} diff --git a/api/renewer_test.go b/api/renewer_test.go index 3b28d8546d42..1c9a5d03e2d2 100644 --- a/api/renewer_test.go +++ b/api/renewer_test.go @@ -1,9 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "errors" "fmt" + "math/rand" + "reflect" "testing" + "testing/quick" "time" "github.com/go-test/deep" ) @@ -171,6 +177,20 @@ func TestLifetimeWatcher(t *testing.T) { expectError: nil, expectRenewal: true, }, + { + maxTestTime: time.Second, + name: "permission_denied_error", + leaseDurationSeconds: 60, + incrementSeconds: 10, + // This should cause the lifetime watcher to behave just + // like a non-renewable secret, i.e. wait until its lifetime + // then be done. + renew: func(_ string, _ int) (*Secret, error) { + return nil, fmt.Errorf("permission denied") + }, + expectError: nil, + expectRenewal: false, + }, } for _, tc := range cases { @@ -198,7 +218,9 @@ func TestLifetimeWatcher(t *testing.T) { for { select { case <-time.After(tc.maxTestTime): - t.Fatalf("renewal didn't happen") + if tc.expectRenewal || tc.expectError != nil { + t.Fatalf("expected error or renewal, and neither happened") + } case r := <-v.RenewCh(): if !tc.expectRenewal { t.Fatal("expected no renewals") @@ -233,3 +255,47 @@ func TestLifetimeWatcher(t *testing.T) { }) } } + +// TestCalcSleepPeriod uses property-based testing to evaluate the calculateSleepDuration +// function of LifetimeWatcher, and incidentally also tests calculateGrace, +// because calculateSleepDuration calls calculateGrace in certain cases. +// Both of these functions underpin vital functionality of the LifetimeWatcher +// and therefore should be tested rigorously.
+func TestCalcSleepPeriod(t *testing.T) { + c := quick.Config{ + MaxCount: 10000, + Values: func(values []reflect.Value, r *rand.Rand) { + leaseDuration := r.Int63() + priorDuration := r.Int63n(leaseDuration) + remainingLeaseDuration := r.Int63n(priorDuration) + increment := r.Int63n(remainingLeaseDuration) + + values[0] = reflect.ValueOf(r) + values[1] = reflect.ValueOf(time.Duration(leaseDuration)) + values[2] = reflect.ValueOf(time.Duration(priorDuration)) + values[3] = reflect.ValueOf(time.Duration(remainingLeaseDuration)) + values[4] = reflect.ValueOf(time.Duration(increment)) + }, + } + + // tests that "calculateSleepDuration" will always return a value less than + // the remaining lease duration given a random leaseDuration, priorDuration, remainingLeaseDuration, and increment. + // Inputs are generated so that: + // leaseDuration > priorDuration > remainingLeaseDuration + // and remainingLeaseDuration > increment + if err := quick.Check(func(r *rand.Rand, leaseDuration, priorDuration, remainingLeaseDuration, increment time.Duration) bool { + lw := LifetimeWatcher{ + grace: 0, + increment: int(increment.Seconds()), + random: r, + } + + lw.calculateGrace(remainingLeaseDuration, increment) + + // ensure that we sleep for less than the remaining lease. + return lw.calculateSleepDuration(remainingLeaseDuration, priorDuration) < remainingLeaseDuration + }, &c); err != nil { + t.Error(err) + } +} diff --git a/api/replication_status.go b/api/replication_status.go new file mode 100644 index 000000000000..9bc02d53935d --- /dev/null +++ b/api/replication_status.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +const ( + apiRepPerformanceStatusPath = "/v1/sys/replication/performance/status" + apiRepDRStatusPath = "/v1/sys/replication/dr/status" + apiRepStatusPath = "/v1/sys/replication/status" +) + +type ClusterInfo struct { + APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` + ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` + ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` + LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` + LastHeartBeatDurationMillis string `json:"last_heartbeat_duration_ms,omitempty" mapstructure:"last_heartbeat_duration_ms"` + ClockSkewMillis string `json:"clock_skew_ms,omitempty" mapstructure:"clock_skew_ms"` + NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` +} + +type ReplicationStatusGenericResponse struct { + LastDRWAL uint64 `json:"last_dr_wal,omitempty" mapstructure:"last_dr_wal"` + LastReindexEpoch string `json:"last_reindex_epoch,omitempty" mapstructure:"last_reindex_epoch"` + ClusterID string `json:"cluster_id,omitempty" mapstructure:"cluster_id"` + LastWAL uint64 `json:"last_wal,omitempty" mapstructure:"last_wal"` + MerkleRoot string `json:"merkle_root,omitempty" mapstructure:"merkle_root"` + Mode string `json:"mode,omitempty" mapstructure:"mode"` + PrimaryClusterAddr string `json:"primary_cluster_addr,omitempty" mapstructure:"primary_cluster_addr"` + LastPerformanceWAL uint64 `json:"last_performance_wal,omitempty" mapstructure:"last_performance_wal"` + State string `json:"state,omitempty" mapstructure:"state"` + LastRemoteWAL uint64 `json:"last_remote_wal,omitempty" mapstructure:"last_remote_wal"` + SecondaryID string 
`json:"secondary_id,omitempty" mapstructure:"secondary_id"` + SSCTGenerationCounter uint64 `json:"ssct_generation_counter,omitempty" mapstructure:"ssct_generation_counter"` + + KnownSecondaries []string `json:"known_secondaries,omitempty" mapstructure:"known_secondaries"` + KnownPrimaryClusterAddrs []string `json:"known_primary_cluster_addrs,omitempty" mapstructure:"known_primary_cluster_addrs"` + Primaries []ClusterInfo `json:"primaries,omitempty" mapstructure:"primaries"` + Secondaries []ClusterInfo `json:"secondaries,omitempty" mapstructure:"secondaries"` +} + +type ReplicationStatusResponse struct { + DR ReplicationStatusGenericResponse `json:"dr,omitempty" mapstructure:"dr"` + Performance ReplicationStatusGenericResponse `json:"performance,omitempty" mapstructure:"performance"` +} + +func (c *Sys) ReplicationStatus() (*ReplicationStatusResponse, error) { + return c.ReplicationStatusWithContext(context.Background(), apiRepStatusPath) +} + +func (c *Sys) ReplicationPerformanceStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { + s, err := c.ReplicationStatusWithContext(ctx, apiRepPerformanceStatusPath) + if err != nil { + return nil, err + } + + return &s.Performance, nil +} + +func (c *Sys) ReplicationDRStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { + s, err := c.ReplicationStatusWithContext(ctx, apiRepDRStatusPath) + if err != nil { + return nil, err + } + + return &s.DR, nil +} + +func (c *Sys) ReplicationStatusWithContext(ctx context.Context, path string) (*ReplicationStatusResponse, error) { + // default to replication/status + if path == "" { + path = apiRepStatusPath + } + + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + // First decode response into a map[string]interface{} + data := make(map[string]interface{}) + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return nil, err + } + + rawData, ok := data["data"] + if !ok { + return nil, fmt.Errorf("empty data in replication status response") + } + + s := &ReplicationStatusResponse{} + g := &ReplicationStatusGenericResponse{} + switch { + case path == apiRepPerformanceStatusPath: + err = mapstructure.Decode(rawData, g) + if err != nil { + return nil, err + } + s.Performance = *g + case path == apiRepDRStatusPath: + err = mapstructure.Decode(rawData, g) + if err != nil { + return nil, err + } + s.DR = *g + default: + err = mapstructure.Decode(rawData, s) + if err != nil { + return nil, err + } + return s, err + } + + return s, err +} diff --git a/api/request.go b/api/request.go index 1cbbc62f908b..a2d912c64dcf 100644 --- a/api/request.go +++ b/api/request.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -8,8 +11,6 @@ import ( "net/http" "net/url" - "github.com/hashicorp/vault/sdk/helper/consts" - retryablehttp "github.com/hashicorp/go-retryablehttp" ) @@ -38,6 +39,9 @@ type Request struct { // EGPs). If set, the override flag will take effect for all policies // evaluated during the request. PolicyOverride bool + + // HCPCookie is used to set a http cookie when client is connected to HCP + HCPCookie *http.Cookie } // SetJSONBody is used to set a request body that is a JSON-encoded value. 
@@ -127,7 +131,7 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { } if len(r.ClientToken) != 0 { - req.Header.Set(consts.AuthHeaderName, r.ClientToken) + req.Header.Set(AuthHeaderName, r.ClientToken) } if len(r.WrapTTL) != 0 { @@ -144,5 +148,9 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { req.Header.Set("X-Vault-Policy-Override", "true") } + if r.HCPCookie != nil { + req.AddCookie(r.HCPCookie) + } + return req, nil } diff --git a/api/request_test.go b/api/request_test.go index f2657e61c503..ac21b8019872 100644 --- a/api/request_test.go +++ b/api/request_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/response.go b/api/response.go index 9ce3d12aacca..2842c125514a 100644 --- a/api/response.go +++ b/api/response.go @@ -1,14 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( "bytes" + "encoding/json" "fmt" "io" "io/ioutil" "net/http" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/jsonutil" ) // Response is a raw response that wraps an HTTP response. @@ -20,7 +21,9 @@ type Response struct { // will consume the response body, but will not close it. Close must // still be called. func (r *Response) DecodeJSON(out interface{}) error { - return jsonutil.DecodeJSONFromReader(r.Body, out) + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(out) } // Error returns an error response if there is one. If there is an error, @@ -42,7 +45,7 @@ func (r *Response) Error() error { r.Body.Close() r.Body = ioutil.NopCloser(bodyBuf) - ns := r.Header.Get(consts.NamespaceHeaderName) + ns := r.Header.Get(NamespaceHeaderName) // Build up the error object respErr := &ResponseError{ @@ -56,7 +59,9 @@ func (r *Response) Error() error { // in a bytes.Reader here so that the JSON decoder doesn't move the // read pointer for the original buffer. var resp ErrorResponse - if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { + dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) + dec.UseNumber() + if err := dec.Decode(&resp); err != nil { // Store the fact that we couldn't decode the errors respErr.RawError = true respErr.Errors = []string{bodyBuf.String()} diff --git a/api/secret.go b/api/secret.go index 37e60892e4a6..d37bf3cf06b0 100644 --- a/api/secret.go +++ b/api/secret.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -11,8 +14,6 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/logical" ) // Secret is the structure returned for every secret within Vault. @@ -41,6 +42,10 @@ type Secret struct { // cubbyhole of the given token (which has a TTL of the given number of // seconds) WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` + + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + MountType string `json:"mount_type,omitempty"` } // TokenID returns the standardized token ID (token) for the given secret. 
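+// (Illustrative note on the MountType field added above: a secret read from a KV mount would +// typically report MountType "kv"; the exact values are determined by the server, not by this client.)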
@@ -149,8 +154,8 @@ TOKEN_DONE: // Identity policies { - _, ok := s.Data["identity_policies"] - if !ok { + v, ok := s.Data["identity_policies"] + if !ok || v == nil { goto DONE } @@ -283,6 +288,22 @@ type SecretWrapInfo struct { WrappedAccessor string `json:"wrapped_accessor"` } +type MFAMethodID struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` + UsesPasscode bool `json:"uses_passcode,omitempty"` + Name string `json:"name,omitempty"` +} + +type MFAConstraintAny struct { + Any []*MFAMethodID `json:"any,omitempty"` +} + +type MFARequirement struct { + MFARequestID string `json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"` +} + // SecretAuth is the structure containing auth information if we have it. type SecretAuth struct { ClientToken string `json:"client_token"` @@ -297,7 +318,7 @@ type SecretAuth struct { LeaseDuration int `json:"lease_duration"` Renewable bool `json:"renewable"` - MFARequirement *logical.MFARequirement `json:"mfa_requirement"` + MFARequirement *MFARequirement `json:"mfa_requirement"` } // ParseSecret is used to parse a secret value from JSON from an io.Reader. @@ -323,14 +344,18 @@ func ParseSecret(r io.Reader) (*Secret, error) { // First decode the JSON into a map[string]interface{} var secret Secret - if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { + dec := json.NewDecoder(&buf) + dec.UseNumber() + if err := dec.Decode(&secret); err != nil { return nil, err } // If the secret is null, add raw data to secret data if present if reflect.DeepEqual(secret, Secret{}) { data := make(map[string]interface{}) - if err := jsonutil.DecodeJSONFromReader(&teebuf, &data); err != nil { + dec := json.NewDecoder(&teebuf) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { return nil, err } errRaw, errPresent := data["errors"] diff --git a/api/secret_test.go b/api/secret_test.go new file mode 100644 index 000000000000..9fa20e1a9cf1 --- /dev/null +++ b/api/secret_test.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "testing" +) + +func TestTokenPolicies(t *testing.T) { + var s *Secret + + // Verify some of the short-circuit paths in the function + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s = &Secret{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = &SecretAuth{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{"test"} + + if policies, err := s.TokenPolicies(); policies == nil { + t.Error("policies was nil") + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = nil + s.Data = make(map[string]interface{}) + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + // Verify that s.Data["policies"] is properly processed + { + policyList := make([]string, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyList = append(policyList, "policy1", "policy2") + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + s.Auth = nil + policyList := make([]interface{}, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyItems := make([]interface{}, 2) + policyItems[0] = "policy1" + policyItems[1] = "policy2" + + policyList = append(policyList, policyItems...)
+ s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != 2 { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["policies"] = 7.0 + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Auth = nil + s.Data["policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + // Verify that logic that merges s.Data["policies"] and s.Data["identity_policies"] works + { + policyList := []string{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + policyList := []interface{}{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["identity_policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = []string{"policy1"} + s.Data["identity_policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if len(policies) != 1 { + t.Errorf("expecting policies length %d, got %d", 1, len(policies)) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } +} diff --git a/api/ssh.go b/api/ssh.go index b832e2748290..28510eecc23f 100644 --- a/api/ssh.go +++ b/api/ssh.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/ssh_agent.go b/api/ssh_agent.go index 03fe2bea53ed..e61503772fa3 100644 --- a/api/ssh_agent.go +++ b/api/ssh_agent.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -15,7 +18,6 @@ import ( rootcerts "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/vault/sdk/helper/hclutil" "github.com/mitchellh/mapstructure" ) @@ -169,7 +171,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { "tls_skip_verify", "tls_server_name", } - if err := hclutil.CheckHCLKeys(list, valid); err != nil { + if err := CheckHCLKeys(list, valid); err != nil { return nil, multierror.Prefix(err, "ssh_helper:") } @@ -185,6 +187,33 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { return &c, nil } +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} + // SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend // mounted at default path ("ssh"). func (c *Client) SSHHelper() *SSHHelper { diff --git a/api/ssh_agent_test.go b/api/ssh_agent_test.go index d233b09c476c..38117e42a706 100644 --- a/api/ssh_agent_test.go +++ b/api/ssh_agent_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sudo_paths.go b/api/sudo_paths.go new file mode 100644 index 000000000000..24beb4bb1f2a --- /dev/null +++ b/api/sudo_paths.go @@ -0,0 +1,87 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "regexp" +) + +// sudoPaths is a map containing the paths that require a token's policy +// to have the "sudo" capability. The keys are the paths as strings, in +// the same format as they are returned by the OpenAPI spec. The values +// are the regular expressions that can be used to test whether a given +// path matches that path or not (useful specifically for the paths that +// contain templated fields.) 
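+// For example, the templated entry "/sys/revoke-prefix/{prefix}" below is matched by concrete +// request paths such as "/sys/revoke-prefix/aws" via its regular expression `^/sys/revoke-prefix/.+$`.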
+var sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors": regexp.MustCompile(`^/auth/token/accessors/?$`), + "/auth/token/revoke-orphan": regexp.MustCompile(`^/auth/token/revoke-orphan$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + // This entry is slightly wrong: sys/leases/lookup does NOT require sudo, but sys/leases/lookup/ with a trailing + // slash DOES. However, the part of the Vault CLI that uses this logic doesn't pass operation-appropriate + // trailing slashes (it always strips them off), so we end up giving the wrong answer for one of these. + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup(?:/.+)?$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/plugins/runtimes/catalog": regexp.MustCompile(`^/sys/plugins/runtimes/catalog/?$`), + "/sys/plugins/runtimes/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/runtimes/catalog/[\w-]+/[^/]+$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw(?:/.+)?$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + "/sys/seal": regexp.MustCompile(`^/sys/seal$`), + "/sys/step-down": regexp.MustCompile(`^/sys/step-down$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), +} + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// IsSudoPath determines whether the given path requires the sudo capability.
+// Note that this uses hardcoded static path information, so will return incorrect results for paths in namespaces, +// or for secret engines mounted at non-default paths. +// Expects to receive a path with an initial slash, but no trailing slashes, as the Vault CLI (the only known and +// expected user of this function) sanitizes its paths that way. +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. + for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/api/sudo_paths_test.go b/api/sudo_paths_test.go new file mode 100644 index 000000000000..b23af7067fc9 --- /dev/null +++ b/api/sudo_paths_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import "testing" + +func TestIsSudoPath(t *testing.T) { + t.Parallel() + + testCases := []struct { + path string + expected bool + }{ + // Testing: Not a real endpoint + { + "/not/in/sudo/paths/list", + false, + }, + // Testing: sys/raw/{path} + { + "/sys/raw/single-node-path", + true, + }, + { + "/sys/raw/multiple/nodes/path", + true, + }, + { + "/sys/raw/WEIRD(but_still_valid!)p4Th?🗿笑", + true, + }, + // Testing: sys/auth/{path}/tune + { + "/sys/auth/path/in/middle/tune", + true, + }, + // Testing: sys/plugins/catalog/{type} and sys/plugins/catalog/{name} (regexes overlap) + { + "/sys/plugins/catalog/some-type", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some/type/or/name/with/slashes", + false, + }, + // Testing: sys/plugins/catalog/{type}/{name} + { + "/sys/plugins/catalog/some-type/some-name", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some-type/some/name/with/slashes", + false, + }, + // Testing: sys/plugins/runtimes/catalog/{type}/{name} + { + "/sys/plugins/runtimes/catalog/some-type/some-name", + true, + }, + // Testing: auth/token/accessors (an example of a sudo path that only accepts list operations) + // It is matched as sudo without the trailing slash... + { + "/auth/token/accessors", + true, + }, + // ...and also with it. + // (Although at the time of writing, the only caller of IsSudoPath always removes trailing slashes.) + { + "/auth/token/accessors/", + true, + }, + } + + for _, tc := range testCases { + result := IsSudoPath(tc.path) + if result != tc.expected { + t.Fatalf("expected api.IsSudoPath to return %v for path %s but it returned %v", tc.expected, tc.path, result) + } + } +} diff --git a/api/sys.go b/api/sys.go index 5fb111887c0d..81ebb3a2509f 100644 --- a/api/sys.go +++ b/api/sys.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api // Sys is used to perform system-related operations on Vault. diff --git a/api/sys_audit.go b/api/sys_audit.go index 82d9aab0b7a0..2244087aad58 100644 --- a/api/sys_audit.go +++ b/api/sys_audit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_auth.go b/api/sys_auth.go index 238bd5e468a0..67beb63db21c 100644 --- a/api/sys_auth.go +++ b/api/sys_auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -9,6 +12,41 @@ import ( "github.com/mitchellh/mapstructure" ) +func (c *Sys) GetAuth(path string) (*AuthMount, error) { + return c.GetAuthWithContext(context.Background(), path) +} + +func (c *Sys) GetAuthWithContext(ctx context.Context, path string) (*AuthMount, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + // use `sys/mounts/auth/:path` rather than `sys/auth/:path`, since the latter requires sudo; + // historically, `sys/auth` doesn't require sudo, so we don't require it for this read either + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/auth/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mount := AuthMount{} + err = mapstructure.Decode(secret.Data, &mount) + if err != nil { + return nil, err + } + + return &mount, nil +} + func (c *Sys) ListAuth() (map[string]*AuthMount, error) { return c.ListAuthWithContext(context.Background()) } diff --git a/api/sys_capabilities.go b/api/sys_capabilities.go index af306a07f312..d57b75711753 100644 --- a/api/sys_capabilities.go +++ b/api/sys_capabilities.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -75,3 +78,56 @@ func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ( return res, nil } + +func (c *Sys) CapabilitiesAccessor(accessor, path string) ([]string, error) { + return c.CapabilitiesAccessorWithContext(context.Background(), accessor, path) +} + +func (c *Sys) CapabilitiesAccessorWithContext(ctx context.Context, accessor, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "accessor": accessor, + "path": path, + } + + reqPath := "/v1/sys/capabilities-accessor" + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/api/sys_config_cors.go b/api/sys_config_cors.go index 1e2cda4f48cb..e80aa9d8b417 100644 --- a/api/sys_config_cors.go +++ b/api/sys_config_cors.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_generate_root.go b/api/sys_generate_root.go index 096cadb793d9..da4ad2f9b73b 100644 --- a/api/sys_generate_root.go +++ b/api/sys_generate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_hastatus.go b/api/sys_hastatus.go index d89d59651a92..58a73b89cbb7 100644 --- a/api/sys_hastatus.go +++ b/api/sys_hastatus.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -32,12 +35,14 @@ type HAStatusResponse struct { } type HANode struct { - Hostname string `json:"hostname"` - APIAddress string `json:"api_address"` - ClusterAddress string `json:"cluster_address"` - ActiveNode bool `json:"active_node"` - LastEcho *time.Time `json:"last_echo"` - Version string `json:"version"` - UpgradeVersion string `json:"upgrade_version,omitempty"` - RedundancyZone string `json:"redundancy_zone,omitempty"` + Hostname string `json:"hostname"` + APIAddress string `json:"api_address"` + ClusterAddress string `json:"cluster_address"` + ActiveNode bool `json:"active_node"` + LastEcho *time.Time `json:"last_echo"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` } diff --git a/api/sys_health.go b/api/sys_health.go index 953c1c21eaa3..0dc849885ff4 100644 --- a/api/sys_health.go +++ b/api/sys_health.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -46,4 +49,7 @@ type HealthResponse struct { ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` LastWAL uint64 `json:"last_wal,omitempty"` + Enterprise bool `json:"enterprise"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` } diff --git a/api/sys_init.go b/api/sys_init.go index 05dea86f6ab5..13fa94806976 100644 --- a/api/sys_init.go +++ b/api/sys_init.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_leader.go b/api/sys_leader.go index a74e206ebed4..868914d3b139 100644 --- a/api/sys_leader.go +++ b/api/sys_leader.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_leases.go b/api/sys_leases.go index c02402f5314c..c46f07e64b41 100644 --- a/api/sys_leases.go +++ b/api/sys_leases.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_mfa.go b/api/sys_mfa.go index a1ba1bd80f94..2be669584648 100644 --- a/api/sys_mfa.go +++ b/api/sys_mfa.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_monitor.go b/api/sys_monitor.go index 6813799f0141..15a8a13d175c 100644 --- a/api/sys_monitor.go +++ b/api/sys_monitor.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -5,8 +8,6 @@ import ( "context" "fmt" "net/http" - - "github.com/hashicorp/vault/sdk/helper/logging" ) // Monitor returns a channel that outputs strings containing the log messages @@ -20,7 +21,7 @@ func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (c r.Params.Add("log_level", logLevel) } - if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() { + if logFormat == "" { r.Params.Add("log_format", "standard") } else { r.Params.Add("log_format", logFormat) diff --git a/api/sys_mounts.go b/api/sys_mounts.go index f55133cec4c6..64529986af6a 100644 --- a/api/sys_mounts.go +++ b/api/sys_mounts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -10,6 +13,39 @@ import ( "github.com/mitchellh/mapstructure" ) +func (c *Sys) GetMount(path string) (*MountOutput, error) { + return c.GetMountWithContext(context.Background(), path) +} + +func (c *Sys) GetMountWithContext(ctx context.Context, path string) (*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mount := MountOutput{} + err = mapstructure.Decode(secret.Data, &mount) + if err != nil { + return nil, err + } + + return &mount, nil +} + func (c *Sys) ListMounts() (map[string]*MountOutput, error) { return c.ListMountsWithContext(context.Background()) } @@ -268,6 +304,9 @@ type MountConfigInput struct { AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` PluginVersion string `json:"plugin_version,omitempty"` UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } @@ -300,6 +339,9 @@ type MountConfigOutput struct { TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + DelegatedAuthAccessors []string `json:"delegated_auth_accessors,omitempty" mapstructure:"delegated_auth_accessors"` + IdentityTokenKey string `json:"identity_token_key,omitempty" mapstructure:"identity_token_key"` + // Deprecated: This field will always be blank for newer server responses. PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` } diff --git a/api/sys_mounts_test.go b/api/sys_mounts_test.go index d461a9d495cf..a810c6268a1c 100644 --- a/api/sys_mounts_test.go +++ b/api/sys_mounts_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_plugins.go b/api/sys_plugins.go index 989c78f1d5ba..9d424d009ec9 100644 --- a/api/sys_plugins.go +++ b/api/sys_plugins.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -7,20 +10,19 @@ import ( "net/http" "time" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/mitchellh/mapstructure" ) // ListPluginsInput is used as input to the ListPlugins function. type ListPluginsInput struct { // Type of the plugin. Required. - Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` } // ListPluginsResponse is the response from the ListPlugins call. type ListPluginsResponse struct { // PluginsByType is the list of plugins by type. - PluginsByType map[consts.PluginType][]string `json:"types"` + PluginsByType map[PluginType][]string `json:"types"` Details []PluginDetails `json:"details,omitempty"` @@ -34,6 +36,8 @@ type ListPluginsResponse struct { type PluginDetails struct { Type string `json:"type"` Name string `json:"name"` + OCIImage string `json:"oci_image,omitempty" mapstructure:"oci_image"` + Runtime string `json:"runtime,omitempty"` Version string `json:"version,omitempty"` Builtin bool `json:"builtin"` DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` @@ -68,11 +72,11 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } result := &ListPluginsResponse{ - PluginsByType: make(map[consts.PluginType][]string), + PluginsByType: make(map[PluginType][]string), } switch i.Type { - case consts.PluginTypeUnknown: - for _, pluginType := range consts.PluginTypes { + case PluginTypeUnknown: + for _, pluginType := range PluginTypes { pluginsRaw, ok := secret.Data[pluginType.String()] if !ok { continue @@ -113,7 +117,7 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( } switch i.Type { - case consts.PluginTypeUnknown: + case PluginTypeUnknown: result.Details = details default: // Filter for just the queried type. @@ -133,8 +137,8 @@ type GetPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type consts.PluginType `json:"type"` - Version string `json:"version"` + Type PluginType `json:"type"` + Version string `json:"version"` } // GetPluginResponse is the response from the GetPlugin call. @@ -144,6 +148,8 @@ type GetPluginResponse struct { Command string `json:"command"` Name string `json:"name"` SHA256 string `json:"sha256"` + OCIImage string `json:"oci_image,omitempty"` + Runtime string `json:"runtime,omitempty"` DeprecationStatus string `json:"deprecation_status,omitempty"` Version string `json:"version,omitempty"` } @@ -186,7 +192,7 @@ type RegisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` // Args is the list of args to spawn the process with. Args []string `json:"args,omitempty"` @@ -199,6 +205,16 @@ type RegisterPluginInput struct { // Version is the optional version of the plugin being registered Version string `json:"version,omitempty"` + + // OCIImage specifies the container image to run as a plugin. + OCIImage string `json:"oci_image,omitempty"` + + // Runtime is the Vault plugin runtime to use when running the plugin. + Runtime string `json:"runtime,omitempty"` + + // Env specifies a list of key=value pairs to add to the plugin's environment + // variables. 
+ Env []string `json:"env,omitempty"` } // RegisterPlugin wraps RegisterPluginWithContext using context.Background. @@ -231,7 +247,7 @@ type DeregisterPluginInput struct { Name string `json:"-"` // Type of the plugin. Required. - Type consts.PluginType `json:"type"` + Type PluginType `json:"type"` // Version of the plugin. Optional. Version string `json:"version,omitempty"` @@ -258,6 +274,22 @@ func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPlug return err } +// RootReloadPluginInput is used as input to the RootReloadPlugin function. +type RootReloadPluginInput struct { + Plugin string `json:"-"` // Plugin name, as registered in the plugin catalog. + Type PluginType `json:"-"` // Plugin type: auth, secret, or database. + Scope string `json:"scope,omitempty"` // Empty to reload on current node, "global" for all nodes. +} + +// RootReloadPlugin reloads plugins, possibly returning reloadID for a global +// scoped reload. This is only available in the root namespace, and reloads +// plugins across all namespaces, whereas ReloadPlugin is available in all +// namespaces but only reloads plugins in use in the request's namespace. +func (c *Sys) RootReloadPlugin(ctx context.Context, i *RootReloadPluginInput) (string, error) { + path := fmt.Sprintf("/v1/sys/plugins/reload/%s/%s", i.Type.String(), i.Plugin) + return c.reloadPluginInternal(ctx, path, i, i.Scope == "global") +} + // ReloadPluginInput is used as input to the ReloadPlugin function. type ReloadPluginInput struct { // Plugin is the name of the plugin to reload, as registered in the plugin catalog @@ -276,15 +308,20 @@ func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { } // ReloadPluginWithContext reloads mounted plugin backends, possibly returning -// reloadId for a cluster scoped reload +// reloadID for a cluster scoped reload. It is limited to reloading plugins that +// are in use in the request's namespace. See RootReloadPlugin for an API that +// can reload plugins across all namespaces. 
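+//
+// A minimal usage sketch (the plugin name and the client value are
+// illustrative, not part of this API); a reload id is only returned for
+// global scoped reloads:
+//
+//	reloadID, err := client.Sys().ReloadPluginWithContext(ctx, &ReloadPluginInput{
+//		Plugin: "my-plugin",
+//		Scope:  "global",
+//	})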
func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + return c.reloadPluginInternal(ctx, "/v1/sys/plugins/reload/backend", i, i.Scope == "global") +} + +func (c *Sys) reloadPluginInternal(ctx context.Context, path string, body any, global bool) (string, error) { ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) defer cancelFunc() - path := "/v1/sys/plugins/reload/backend" req := c.c.NewRequest(http.MethodPut, path) - if err := req.SetJSONBody(i); err != nil { + if err := req.SetJSONBody(body); err != nil { return "", err } @@ -294,7 +331,7 @@ func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) } defer resp.Body.Close() - if i.Scope == "global" { + if global { // Get the reload id secret, parseErr := ParseSecret(resp.Body) if parseErr != nil { @@ -368,11 +405,11 @@ func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInp } // catalogPathByType is a helper to construct the proper API path by plugin type -func catalogPathByType(pluginType consts.PluginType, name string) string { +func catalogPathByType(pluginType PluginType, name string) string { path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) // Backwards compat, if type is not provided then use old path - if pluginType == consts.PluginTypeUnknown { + if pluginType == PluginTypeUnknown { path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) } diff --git a/api/sys_plugins_runtimes.go b/api/sys_plugins_runtimes.go new file mode 100644 index 000000000000..b56a899f6507 --- /dev/null +++ b/api/sys_plugins_runtimes.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +// GetPluginRuntimeInput is used as input to the GetPluginRuntime function. +type GetPluginRuntimeInput struct { + Name string `json:"-"` + + // Type of the plugin runtime. Required. + Type PluginRuntimeType `json:"type"` +} + +// GetPluginRuntimeResponse is the response from the GetPluginRuntime call. +type GetPluginRuntimeResponse struct { + Type string `json:"type"` + Name string `json:"name"` + OCIRuntime string `json:"oci_runtime"` + CgroupParent string `json:"cgroup_parent"` + CPU int64 `json:"cpu_nanos"` + Memory int64 `json:"memory_bytes"` +} + +// GetPluginRuntime retrieves information about the plugin. +func (c *Sys) GetPluginRuntime(ctx context.Context, i *GetPluginRuntimeInput) (*GetPluginRuntimeResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := pluginRuntimeCatalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginRuntimeResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginRuntimeInput is used as input to the RegisterPluginRuntime function. +type RegisterPluginRuntimeInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. 
+	Type PluginRuntimeType `json:"type"`
+
+	OCIRuntime   string `json:"oci_runtime,omitempty"`
+	CgroupParent string `json:"cgroup_parent,omitempty"`
+	CPU          int64  `json:"cpu_nanos,omitempty"`
+	Memory       int64  `json:"memory_bytes,omitempty"`
+	Rootless     bool   `json:"rootless,omitempty"`
+}
+
+// RegisterPluginRuntime registers the plugin runtime with the given information.
+func (c *Sys) RegisterPluginRuntime(ctx context.Context, i *RegisterPluginRuntimeInput) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	path := pluginRuntimeCatalogPathByType(i.Type, i.Name)
+	req := c.c.NewRequest(http.MethodPut, path)
+
+	if err := req.SetJSONBody(i); err != nil {
+		return err
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, req)
+	if err == nil {
+		defer resp.Body.Close()
+	}
+	return err
+}
+
+// DeregisterPluginRuntimeInput is used as input to the DeregisterPluginRuntime function.
+type DeregisterPluginRuntimeInput struct {
+	// Name is the name of the plugin runtime. Required.
+	Name string `json:"-"`
+
+	// Type of the plugin runtime. Required.
+	Type PluginRuntimeType `json:"type"`
+}
+
+// DeregisterPluginRuntime removes the plugin runtime with the given name from the
+// plugin runtime catalog.
+func (c *Sys) DeregisterPluginRuntime(ctx context.Context, i *DeregisterPluginRuntimeInput) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	path := pluginRuntimeCatalogPathByType(i.Type, i.Name)
+	req := c.c.NewRequest(http.MethodDelete, path)
+	resp, err := c.c.rawRequestWithContext(ctx, req)
+	if err == nil {
+		defer resp.Body.Close()
+	}
+	return err
+}
+
+type PluginRuntimeDetails struct {
+	Type         string `json:"type" mapstructure:"type"`
+	Name         string `json:"name" mapstructure:"name"`
+	OCIRuntime   string `json:"oci_runtime" mapstructure:"oci_runtime"`
+	CgroupParent string `json:"cgroup_parent" mapstructure:"cgroup_parent"`
+	CPU          int64  `json:"cpu_nanos" mapstructure:"cpu_nanos"`
+	Memory       int64  `json:"memory_bytes" mapstructure:"memory_bytes"`
+}
+
+// ListPluginRuntimesInput is used as input to the ListPluginRuntimes function.
+type ListPluginRuntimesInput struct {
+	// Type of the plugin runtime. Required.
+	Type PluginRuntimeType `json:"type"`
+}
+
+// ListPluginRuntimesResponse is the response from the ListPluginRuntimes call.
+type ListPluginRuntimesResponse struct {
+	// Runtimes is the list of plugin runtimes in the catalog.
+	Runtimes []PluginRuntimeDetails `json:"runtimes"`
+}
+
+// ListPluginRuntimes lists the plugin runtimes in the catalog, returning their full
+// details rather than just their names. A nil input lists every runtime; otherwise
+// the results are filtered to the type given in the input.
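+//
+// A minimal usage sketch (the client value is illustrative, not part of
+// this API):
+//
+//	resp, err := client.Sys().ListPluginRuntimes(ctx, &ListPluginRuntimesInput{
+//		Type: PluginRuntimeTypeContainer,
+//	})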
+func (c *Sys) ListPluginRuntimes(ctx context.Context, input *ListPluginRuntimesInput) (*ListPluginRuntimesResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if input != nil && input.Type == PluginRuntimeTypeUnsupported { + return nil, fmt.Errorf("%q is not a supported runtime type", input.Type.String()) + } + + resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/runtimes/catalog")) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + if _, ok := secret.Data["runtimes"]; !ok { + return nil, fmt.Errorf("data from server response does not contain runtimes") + } + + var runtimes []PluginRuntimeDetails + if err = mapstructure.Decode(secret.Data["runtimes"], &runtimes); err != nil { + return nil, err + } + + // return all runtimes in the catalog + if input == nil { + return &ListPluginRuntimesResponse{Runtimes: runtimes}, nil + } + + result := &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{}, + } + for _, runtime := range runtimes { + if runtime.Type == input.Type.String() { + result.Runtimes = append(result.Runtimes, runtime) + } + } + return result, nil +} + +// pluginRuntimeCatalogPathByType is a helper to construct the proper API path by plugin type +func pluginRuntimeCatalogPathByType(runtimeType PluginRuntimeType, name string) string { + return fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", runtimeType, name) +} diff --git a/api/sys_plugins_runtimes_test.go b/api/sys_plugins_runtimes_test.go new file mode 100644 index 000000000000..6c3486a31a00 --- /dev/null +++ b/api/sys_plugins_runtimes_test.go @@ -0,0 +1,268 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +func TestRegisterPluginRuntime(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerRegister)) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().RegisterPluginRuntime(context.Background(), &RegisterPluginRuntimeInput{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer, + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestGetPluginRuntime(t *testing.T) { + for name, tc := range map[string]struct { + body string + expected GetPluginRuntimeResponse + }{ + "gvisor": { + body: getPluginRuntimeResponse, + expected: GetPluginRuntimeResponse{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer.String(), + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }, + }, + } { + t.Run(name, func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + input := GetPluginRuntimeInput{ + Name: "gvisor", + Type: PluginRuntimeTypeContainer, + } + + info, err := client.Sys().GetPluginRuntime(context.Background(), &input) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, *info) { + t.Errorf("expected: %#v\ngot: %#v", tc.expected, info) + } + }) + } +} + +func TestListPluginRuntimeTyped(t *testing.T) { + for _, tc := range []struct { + runtimeType PluginRuntimeType + body string + expectedResponse *ListPluginRuntimesResponse + expectedErrNil bool + }{ + { + runtimeType: PluginRuntimeTypeContainer, + body: listPluginRuntimeTypedResponse, + expectedResponse: &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{ + { + Type: "container", + Name: "gvisor", + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }, + }, + }, + expectedErrNil: true, + }, + { + runtimeType: PluginRuntimeTypeUnsupported, + body: listPluginRuntimeTypedResponse, + expectedResponse: nil, + expectedErrNil: false, + }, + } { + t.Run(tc.runtimeType.String(), func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + input := ListPluginRuntimesInput{ + Type: tc.runtimeType, + } + + list, err := client.Sys().ListPluginRuntimes(context.Background(), &input) + if tc.expectedErrNil && err != nil { + t.Fatal(err) + } + + if (tc.expectedErrNil && !reflect.DeepEqual(tc.expectedResponse, list)) || (!tc.expectedErrNil && list != nil) { + t.Errorf("expected: %#v\ngot: %#v", tc.expectedResponse, list) + } + }) + } +} + +func TestListPluginRuntimeUntyped(t *testing.T) { + for _, tc := range []struct { + body string + expectedResponse *ListPluginRuntimesResponse + expectedErrNil bool + }{ + { + body: listPluginRuntimeUntypedResponse, + expectedResponse: &ListPluginRuntimesResponse{ + Runtimes: []PluginRuntimeDetails{ + { + Type: "container", + Name: "gvisor", + OCIRuntime: "runsc", + CgroupParent: "/cpulimit/", + CPU: 1, + Memory: 10000, + }, + { + 
Type: "container", + Name: "foo", + OCIRuntime: "otherociruntime", + CgroupParent: "/memorylimit/", + CPU: 2, + Memory: 20000, + }, + { + Type: "container", + Name: "bar", + OCIRuntime: "otherociruntime", + CgroupParent: "/cpulimit/", + CPU: 3, + Memory: 30000, + }, + }, + }, + expectedErrNil: true, + }, + } { + t.Run("", func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + info, err := client.Sys().ListPluginRuntimes(context.Background(), nil) + if tc.expectedErrNil && err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expectedResponse, info) { + t.Errorf("expected: %#v\ngot: %#v", tc.expectedResponse, info) + } + }) + } +} + +const getPluginRuntimeResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + }, + "warnings": null, + "auth": null +}` + +const listPluginRuntimeTypedResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "runtimes": [ + { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + } + ] + }, + "warnings": null, + "auth": null +} +` + +const listPluginRuntimeUntypedResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c123456", + "data": { + "runtimes": [ + { + "name": "gvisor", + "type": "container", + "oci_runtime": "runsc", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 1, + "memory_bytes": 10000 + }, + { + "name": "foo", + "type": "container", + "oci_runtime": "otherociruntime", + "cgroup_parent": "/memorylimit/", + "cpu_nanos": 2, + "memory_bytes": 20000 + }, + { + "name": "bar", + "type": "container", + "oci_runtime": "otherociruntime", + "cgroup_parent": "/cpulimit/", + "cpu_nanos": 3, + "memory_bytes": 30000 + } + ] + }, + "warnings": null, + "auth": null +}` diff --git a/api/sys_plugins_test.go b/api/sys_plugins_test.go index b3b94d730289..8ba8fc571410 100644 --- a/api/sys_plugins_test.go +++ b/api/sys_plugins_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -7,8 +10,7 @@ import ( "reflect" "testing" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/go-secure-stdlib/strutil" ) func TestRegisterPlugin(t *testing.T) { @@ -43,32 +45,32 @@ func TestListPlugins(t *testing.T) { for name, tc := range map[string]struct { input ListPluginsInput - expectedPlugins map[consts.PluginType][]string + expectedPlugins map[PluginType][]string }{ "no type specified": { input: ListPluginsInput{}, - expectedPlugins: map[consts.PluginType][]string{ - consts.PluginTypeCredential: {"alicloud"}, - consts.PluginTypeDatabase: {"cassandra-database-plugin"}, - consts.PluginTypeSecrets: {"ad", "alicloud"}, + expectedPlugins: map[PluginType][]string{ + PluginTypeCredential: {"alicloud"}, + PluginTypeDatabase: {"cassandra-database-plugin"}, + PluginTypeSecrets: {"ad", "alicloud"}, }, }, "only auth plugins": { - input: ListPluginsInput{Type: consts.PluginTypeCredential}, - expectedPlugins: map[consts.PluginType][]string{ - consts.PluginTypeCredential: {"alicloud"}, + input: ListPluginsInput{Type: PluginTypeCredential}, + expectedPlugins: map[PluginType][]string{ + PluginTypeCredential: {"alicloud"}, }, }, "only database plugins": { - input: ListPluginsInput{Type: consts.PluginTypeDatabase}, - expectedPlugins: map[consts.PluginType][]string{ - consts.PluginTypeDatabase: {"cassandra-database-plugin"}, + input: ListPluginsInput{Type: PluginTypeDatabase}, + expectedPlugins: map[PluginType][]string{ + PluginTypeDatabase: {"cassandra-database-plugin"}, }, }, "only secret plugins": { - input: ListPluginsInput{Type: consts.PluginTypeSecrets}, - expectedPlugins: map[consts.PluginType][]string{ - consts.PluginTypeSecrets: {"ad", "alicloud"}, + input: ListPluginsInput{Type: PluginTypeSecrets}, + expectedPlugins: map[PluginType][]string{ + PluginTypeSecrets: {"ad", "alicloud"}, }, }, } { @@ -104,7 +106,7 @@ func TestListPlugins(t *testing.T) { } for _, actual := range resp.Details { - pluginType, err := consts.ParsePluginType(actual.Type) + pluginType, err := ParsePluginType(actual.Type) if err != nil { t.Fatal(err) } @@ -159,6 +161,21 @@ func TestGetPlugin(t *testing.T) { Version: "", }, }, + "oci image": { + version: "v0.16.0", + body: getResponseOCIImageVersion, + expected: GetPluginResponse{ + Args: []string{}, + Builtin: false, + Command: "", + Name: "jwt", + OCIImage: "hashicorp/vault-plugin-auth-jwt", + Runtime: "gvisor", + SHA256: "8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + DeprecationStatus: "", + Version: "v0.16.0", + }, + }, } { t.Run(name, func(t *testing.T) { mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) @@ -173,7 +190,7 @@ func TestGetPlugin(t *testing.T) { input := GetPluginInput{ Name: "azure", - Type: consts.PluginTypeSecrets, + Type: PluginTypeSecrets, } if tc.version != "" { input.Version = tc.version @@ -251,6 +268,25 @@ const getResponseOldServerVersion = `{ "auth": null }` +const getResponseOCIImageVersion = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c495241", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "args": [], + "builtin": false, + "name": "jwt", + "oci_image" : "hashicorp/vault-plugin-auth-jwt", + "runtime" : "gvisor", + "sha256": "8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + "version": "v0.16.0" + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + func mockVaultHandlerList(w 
http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte(listUntypedResponse)) } diff --git a/api/sys_policy.go b/api/sys_policy.go index 4a4f91b08c71..9ddffe4ec7c4 100644 --- a/api/sys_policy.go +++ b/api/sys_policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_raft.go b/api/sys_raft.go index 7806a1418df8..699f6e9fd095 100644 --- a/api/sys_raft.go +++ b/api/sys_raft.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -6,6 +9,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "io" "io/ioutil" "net/http" @@ -97,6 +101,23 @@ type AutopilotState struct { OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"` } +func (a *AutopilotState) String() string { + var result string + result += fmt.Sprintf("Healthy: %t. FailureTolerance: %d. Leader: %s. OptimisticFailureTolerance: %d\n", a.Healthy, a.FailureTolerance, a.Leader, a.OptimisticFailureTolerance) + for _, s := range a.Servers { + result += fmt.Sprintf("Server: %s\n", s) + } + result += fmt.Sprintf("Voters: %v\n", a.Voters) + result += fmt.Sprintf("NonVoters: %v\n", a.NonVoters) + + for name, zone := range a.RedundancyZones { + result += fmt.Sprintf("RedundancyZone %s: %s\n", name, &zone) + } + + result += fmt.Sprintf("Upgrade: %s", a.Upgrade) + return result +} + // AutopilotServer represents the server blocks in the response of the raft // autopilot state API. type AutopilotServer struct { @@ -116,12 +137,21 @@ type AutopilotServer struct { NodeType string `mapstructure:"node_type,omitempty"` } +func (a *AutopilotServer) String() string { + return fmt.Sprintf("ID: %s. Name: %s. Address: %s. NodeStatus: %s. LastContact: %s. LastTerm: %d. LastIndex: %d. Healthy: %t. StableSince: %s. Status: %s. Version: %s. UpgradeVersion: %s. RedundancyZone: %s. NodeType: %s", + a.ID, a.Name, a.Address, a.NodeStatus, a.LastContact, a.LastTerm, a.LastIndex, a.Healthy, a.StableSince, a.Status, a.Version, a.UpgradeVersion, a.RedundancyZone, a.NodeType) +} + type AutopilotZone struct { Servers []string `mapstructure:"servers,omitempty"` Voters []string `mapstructure:"voters,omitempty"` FailureTolerance int `mapstructure:"failure_tolerance,omitempty"` } +func (a *AutopilotZone) String() string { + return fmt.Sprintf("Servers: %v. Voters: %v. FailureTolerance: %d", a.Servers, a.Voters, a.FailureTolerance) +} + type AutopilotUpgrade struct { Status string `mapstructure:"status"` TargetVersion string `mapstructure:"target_version,omitempty"` @@ -134,6 +164,17 @@ type AutopilotUpgrade struct { RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"` } +func (a *AutopilotUpgrade) String() string { + result := fmt.Sprintf("Status: %s. TargetVersion: %s. TargetVersionVoters: %v. TargetVersionNonVoters: %v. TargetVersionReadReplicas: %v. OtherVersionVoters: %v. OtherVersionNonVoters: %v. 
OtherVersionReadReplicas: %v",
+		a.Status, a.TargetVersion, a.TargetVersionVoters, a.TargetVersionNonVoters, a.TargetVersionReadReplicas, a.OtherVersionVoters, a.OtherVersionNonVoters, a.OtherVersionReadReplicas)
+
+	for name, zone := range a.RedundancyZones {
+		result += fmt.Sprintf("Redundancy Zone %s: %s", name, zone)
+	}
+
+	return result
+}
+
 type AutopilotZoneUpgradeVersions struct {
 	TargetVersionVoters    []string `mapstructure:"target_version_voters,omitempty"`
 	TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"`
@@ -141,6 +182,11 @@ type AutopilotZoneUpgradeVersions struct {
 	OtherVersionNonVoters  []string `mapstructure:"other_version_non_voters,omitempty"`
 }
 
+func (a *AutopilotZoneUpgradeVersions) String() string {
+	return fmt.Sprintf("TargetVersionVoters: %v. TargetVersionNonVoters: %v. OtherVersionVoters: %v. OtherVersionNonVoters: %v",
+		a.TargetVersionVoters, a.TargetVersionNonVoters, a.OtherVersionVoters, a.OtherVersionNonVoters)
+}
+
 // RaftJoin wraps RaftJoinWithContext using context.Background.
 func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) {
 	return c.RaftJoinWithContext(context.Background(), opts)
@@ -273,11 +319,19 @@ func (c *Sys) RaftAutopilotState() (*AutopilotState, error) {
 	return c.RaftAutopilotStateWithContext(context.Background())
 }
 
+// RaftAutopilotStateWithDRToken wraps RaftAutopilotStateWithContext using the given token.
+func (c *Sys) RaftAutopilotStateWithDRToken(drToken string) (*AutopilotState, error) {
+	return c.RaftAutopilotStateWithContext(context.WithValue(context.Background(), "dr-token", drToken))
+}
+
 // RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot.
 func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) {
 	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
 
+	if ctx.Value("dr-token") != nil {
+		c.c.SetToken(ctx.Value("dr-token").(string))
+	}
 	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state")
 
 	resp, err := c.c.rawRequestWithContext(ctx, r)
@@ -313,11 +367,20 @@ func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) {
 	return c.RaftAutopilotConfigurationWithContext(context.Background())
 }
 
+// RaftAutopilotConfigurationWithDRToken wraps RaftAutopilotConfigurationWithContext using the given token.
+func (c *Sys) RaftAutopilotConfigurationWithDRToken(drToken string) (*AutopilotConfig, error) {
+	return c.RaftAutopilotConfigurationWithContext(context.WithValue(context.Background(), "dr-token", drToken))
+}
+
 // RaftAutopilotConfigurationWithContext fetches the autopilot config.
 func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) {
 	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
 	defer cancelFunc()
 
+	if ctx.Value("dr-token") != nil {
+		c.c.SetToken(ctx.Value("dr-token").(string))
+	}
+
 	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration")
 
 	resp, err := c.c.rawRequestWithContext(ctx, r)
diff --git a/api/sys_rekey.go b/api/sys_rekey.go
index 2ac8a4743bcf..573201751c7b 100644
--- a/api/sys_rekey.go
+++ b/api/sys_rekey.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package api
 
 import (
diff --git a/api/sys_rotate.go b/api/sys_rotate.go
index fa86886c35b8..295d989f9e2a 100644
--- a/api/sys_rotate.go
+++ b/api/sys_rotate.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_seal.go b/api/sys_seal.go index 0522f2a42b76..62002496c36b 100644 --- a/api/sys_seal.go +++ b/api/sys_seal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( @@ -106,6 +109,7 @@ type SealStatusResponse struct { ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` RecoverySeal bool `json:"recovery_seal"` + RecoverySealType string `json:"recovery_seal_type,omitempty"` StorageType string `json:"storage_type,omitempty"` HCPLinkStatus string `json:"hcp_link_status,omitempty"` HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` diff --git a/api/sys_stepdown.go b/api/sys_stepdown.go index 833f31a6f760..c55ed1e666db 100644 --- a/api/sys_stepdown.go +++ b/api/sys_stepdown.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package api import ( diff --git a/api/sys_ui_custom_message.go b/api/sys_ui_custom_message.go new file mode 100644 index 000000000000..a129efea7631 --- /dev/null +++ b/api/sys_ui_custom_message.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strconv" +) + +const ( + // baseEndpoint is the common base URL path for all endpoints used in this + // module. + baseEndpoint string = "/v1/sys/config/ui/custom-messages" +) + +// ListUICustomMessages calls ListUICustomMessagesWithContext using a background +// Context. +func (c *Sys) ListUICustomMessages(req UICustomMessageListRequest) (*Secret, error) { + return c.ListUICustomMessagesWithContext(context.Background(), req) +} + +// ListUICustomMessagesWithContext sends a request to the List custom messages +// endpoint using the provided Context and UICustomMessageListRequest value as +// the inputs. It returns a pointer to a Secret if a response was obtained from +// the server, including error responses; or an error if a response could not be +// obtained due to an error. +func (c *Sys) ListUICustomMessagesWithContext(ctx context.Context, req UICustomMessageListRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", fmt.Sprintf("%s/", baseEndpoint)) + if req.Active != nil { + r.Params.Add("active", strconv.FormatBool(*req.Active)) + } + if req.Authenticated != nil { + r.Params.Add("authenticated", strconv.FormatBool(*req.Authenticated)) + } + if req.Type != nil { + r.Params.Add("type", *req.Type) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + return secret, nil +} + +// CreateUICustomMessage calls CreateUICustomMessageWithContext using a +// background Context. +func (c *Sys) CreateUICustomMessage(req UICustomMessageRequest) (*Secret, error) { + return c.CreateUICustomMessageWithContext(context.Background(), req) +} + +// CreateUICustomMessageWithContext sends a request to the Create custom +// messages endpoint using the provided Context and UICustomMessageRequest +// values as the inputs. 
It returns a pointer to a Secret if a response was
+// obtained from the server, including error responses; or an error if a
+// response could not be obtained due to an error.
+func (c *Sys) CreateUICustomMessageWithContext(ctx context.Context, req UICustomMessageRequest) (*Secret, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPost, baseEndpoint)
+	if err := r.SetJSONBody(&req); err != nil {
+		return nil, fmt.Errorf("error encoding request body to json: %w", err)
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, fmt.Errorf("error sending request to server: %w", err)
+	}
+	defer resp.Body.Close()
+
+	secret, err := ParseSecret(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse secret from server response: %w", err)
+	}
+
+	if secret == nil || secret.Data == nil {
+		return nil, errors.New("data from server response is empty")
+	}
+
+	return secret, nil
+}
+
+// ReadUICustomMessage calls ReadUICustomMessageWithContext using a background
+// Context.
+func (c *Sys) ReadUICustomMessage(id string) (*Secret, error) {
+	return c.ReadUICustomMessageWithContext(context.Background(), id)
+}
+
+// ReadUICustomMessageWithContext sends a request to the Read custom message
+// endpoint using the provided Context and id values. It returns a pointer to a
+// Secret if a response was obtained from the server, including error responses;
+// or an error if a response could not be obtained due to an error.
+func (c *Sys) ReadUICustomMessageWithContext(ctx context.Context, id string) (*Secret, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", baseEndpoint, id))
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, fmt.Errorf("error sending request to server: %w", err)
+	}
+	defer resp.Body.Close()
+
+	secret, err := ParseSecret(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse secret from server response: %w", err)
+	}
+
+	if secret == nil || secret.Data == nil {
+		return nil, errors.New("data from server response is empty")
+	}
+
+	return secret, nil
+}
+
+// UpdateUICustomMessage calls UpdateUICustomMessageWithContext using a
+// background Context.
+func (c *Sys) UpdateUICustomMessage(id string, req UICustomMessageRequest) error {
+	return c.UpdateUICustomMessageWithContext(context.Background(), id, req)
+}
+
+// UpdateUICustomMessageWithContext sends a request to the Update custom message
+// endpoint using the provided Context, id, and UICustomMessageRequest values.
+// It returns nil if the request was successfully sent, or an error if the
+// request could not be sent or an error response was received.
+func (c *Sys) UpdateUICustomMessageWithContext(ctx context.Context, id string, req UICustomMessageRequest) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("%s/%s", baseEndpoint, id))
+	if err := r.SetJSONBody(&req); err != nil {
+		return fmt.Errorf("error encoding request body to json: %w", err)
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return fmt.Errorf("error sending request to server: %w", err)
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// DeleteUICustomMessage calls DeleteUICustomMessageWithContext using a
+// background Context.
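+//
+// A minimal usage sketch (the id value is illustrative; use the id of an
+// existing custom message):
+//
+//	err := client.Sys().DeleteUICustomMessage(id)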
+func (c *Sys) DeleteUICustomMessage(id string) error {
+	return c.DeleteUICustomMessageWithContext(context.Background(), id)
+}
+
+// DeleteUICustomMessageWithContext sends a request to the Delete custom message
+// endpoint using the provided Context and id values. It returns nil if the
+// request was successfully sent, or an error if the request could not be sent
+// or an error response was received.
+func (c *Sys) DeleteUICustomMessageWithContext(ctx context.Context, id string) error {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("%s/%s", baseEndpoint, id))
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return fmt.Errorf("error sending request to server: %w", err)
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// UICustomMessageListRequest is a struct used to contain inputs for the List
+// custom messages request. Each field is optional, so their types are pointers.
+// The With... methods can be used to easily set the fields with pointers to
+// values.
+type UICustomMessageListRequest struct {
+	Authenticated *bool
+	Type          *string
+	Active        *bool
+}
+
+// WithAuthenticated sets the Authenticated field to a pointer referencing the
+// provided bool value.
+func (r *UICustomMessageListRequest) WithAuthenticated(value bool) *UICustomMessageListRequest {
+	r.Authenticated = &value
+
+	return r
+}
+
+// WithType sets the Type field to a pointer referencing the provided string
+// value.
+func (r *UICustomMessageListRequest) WithType(value string) *UICustomMessageListRequest {
+	r.Type = &value
+
+	return r
+}
+
+// WithActive sets the Active field to a pointer referencing the provided bool
+// value.
+func (r *UICustomMessageListRequest) WithActive(value bool) *UICustomMessageListRequest {
+	r.Active = &value
+
+	return r
+}
+
+// UICustomMessageRequest is a struct containing the properties of a custom
+// message. The Link field can be set using the WithLink method.
+type UICustomMessageRequest struct {
+	Title         string               `json:"title"`
+	Message       string               `json:"message"`
+	Authenticated bool                 `json:"authenticated"`
+	Type          string               `json:"type"`
+	StartTime     string               `json:"start_time"`
+	EndTime       string               `json:"end_time,omitempty"`
+	Link          *uiCustomMessageLink `json:"link,omitempty"`
+	Options       map[string]any       `json:"options,omitempty"`
+}
+
+// WithLink sets the Link field to the address of a new uiCustomMessageLink
+// struct constructed from the provided title and href values.
+func (r *UICustomMessageRequest) WithLink(title, href string) *UICustomMessageRequest {
+	r.Link = &uiCustomMessageLink{
+		Title: title,
+		Href:  href,
+	}
+
+	return r
+}
+
+// uiCustomMessageLink is a utility struct used to represent a link associated
+// with a custom message.
+type uiCustomMessageLink struct {
+	Title string
+	Href  string
+}
+
+// MarshalJSON encodes the state of the receiver uiCustomMessageLink as JSON and
+// returns those encoded bytes or an error.
+func (l uiCustomMessageLink) MarshalJSON() ([]byte, error) {
+	m := make(map[string]string)
+
+	m[l.Title] = l.Href
+
+	return json.Marshal(m)
+}
+
+// UnmarshalJSON updates the state of the receiver uiCustomMessageLink from the
+// provided JSON encoded bytes. It returns an error if there was a failure.
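+//
+// The wire format is a single-entry JSON object that maps the link title to
+// its href, for example:
+//
+//	{"Click here":"https://www.example.org"}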
+func (l *uiCustomMessageLink) UnmarshalJSON(b []byte) error { + m := make(map[string]string) + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + for k, v := range m { + l.Title = k + l.Href = v + break + } + + return nil +} diff --git a/api/sys_ui_custom_message_test.go b/api/sys_ui_custom_message_test.go new file mode 100644 index 000000000000..0082ad55d778 --- /dev/null +++ b/api/sys_ui_custom_message_test.go @@ -0,0 +1,193 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var messageBase64 string = base64.StdEncoding.EncodeToString([]byte("message")) + +// TestUICustomMessageJsonMarshalling verifies that json marshalling (struct to +// json) works with the uiCustomMessageRequest type. +func TestUICustomMessageJsonMarshalling(t *testing.T) { + for _, testcase := range []struct { + name string + request UICustomMessageRequest + expectedJSON string + }{ + { + name: "no-link-no-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Type: "banner", + Authenticated: true, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z"}`, messageBase64), + }, + { + name: "link-no-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Type: "modal", + Authenticated: false, + Link: &uiCustomMessageLink{ + Title: "Click here", + Href: "https://www.example.org", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"}}`, messageBase64), + }, + { + name: "no-link-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Authenticated: true, + Type: "banner", + Options: map[string]any{ + "key": "value", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z","options":{"key":"value"}}`, messageBase64), + }, + { + name: "link-and-options", + request: UICustomMessageRequest{ + Title: "title", + Message: messageBase64, + StartTime: "2024-01-01T00:00:00.000Z", + EndTime: "", + Authenticated: true, + Type: "banner", + Link: &uiCustomMessageLink{ + Title: "Click here", + Href: "https://www.example.org", + }, + Options: map[string]any{ + "key": "value", + }, + }, + expectedJSON: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":true,"type":"banner","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"},"options":{"key":"value"}}`, messageBase64), + }, + } { + tc := testcase + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + bytes, err := json.Marshal(&tc.request) + assert.NoError(t, err) + assert.Equal(t, tc.expectedJSON, string(bytes)) + }) + } +} + +// TestUICustomMessageJsonUnmarshal verifies that json unmarshalling (json to +// struct) works with the uiCustomMessageRequest type. 
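+// Each case decodes a pre-encoded JSON payload and then asserts whether the
+// optional Link and Options fields were populated as expected.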
+func TestUICustomMessageJsonUnmarshal(t *testing.T) { + for _, testcase := range []struct { + name string + encodedBytes string + linkAssertion func(assert.TestingT, any, ...any) bool + checkLink bool + optionsAssertion func(assert.TestingT, any, ...any) bool + checkOptions bool + }{ + { + name: "no-link-no-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z"}`, messageBase64), + linkAssertion: assert.Nil, + optionsAssertion: assert.Nil, + }, + { + name: "link-no-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"}}`, messageBase64), + linkAssertion: assert.NotNil, + checkLink: true, + optionsAssertion: assert.Nil, + }, + { + name: "no-link-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","options":{"key":"value"}}`, messageBase64), + linkAssertion: assert.Nil, + optionsAssertion: assert.NotNil, + checkOptions: true, + }, + { + name: "link-and-options", + encodedBytes: fmt.Sprintf(`{"title":"title","message":"%s","authenticated":false,"type":"modal","start_time":"2024-01-01T00:00:00.000Z","link":{"Click here":"https://www.example.org"},"options":{"key":"value"}}`, messageBase64), + linkAssertion: assert.NotNil, + checkLink: true, + optionsAssertion: assert.NotNil, + checkOptions: true, + }, + } { + tc := testcase + t.Run(testcase.name, func(t *testing.T) { + t.Parallel() + + var request UICustomMessageRequest + + err := json.Unmarshal([]byte(tc.encodedBytes), &request) + assert.NoError(t, err) + tc.linkAssertion(t, request.Link) + tc.optionsAssertion(t, request.Options) + + if tc.checkLink { + assert.Equal(t, "Click here", request.Link.Title) + assert.Equal(t, "https://www.example.org", request.Link.Href) + } + + if tc.checkOptions { + assert.Contains(t, request.Options, "key") + } + }) + } +} + +// TestUICustomMessageListRequestOptions verifies the correct behaviour of all +// of the With... methods of the UICustomMessageListRequest. +func TestUICustomMessageListRequestOptions(t *testing.T) { + request := &UICustomMessageListRequest{} + assert.Nil(t, request.Active) + assert.Nil(t, request.Authenticated) + assert.Nil(t, request.Type) + + request = (&UICustomMessageListRequest{}).WithActive(true) + assert.NotNil(t, request.Active) + assert.True(t, *request.Active) + + request = (&UICustomMessageListRequest{}).WithActive(false) + assert.NotNil(t, request.Active) + assert.False(t, *request.Active) + + request = (&UICustomMessageListRequest{}).WithAuthenticated(true) + assert.NotNil(t, request.Authenticated) + assert.True(t, *request.Authenticated) + + request = (&UICustomMessageListRequest{}).WithAuthenticated(false) + assert.NotNil(t, request.Authenticated) + assert.False(t, *request.Authenticated) + + request = (&UICustomMessageListRequest{}).WithType("banner") + assert.NotNil(t, request.Type) + assert.Equal(t, "banner", *request.Type) + + request = (&UICustomMessageListRequest{}).WithType("modal") + assert.NotNil(t, request.Type) + assert.Equal(t, "modal", *request.Type) +} diff --git a/api/test-fixtures/agent_config.hcl b/api/test-fixtures/agent_config.hcl index 8339f53d7eaa..38d8026057f5 100644 --- a/api/test-fixtures/agent_config.hcl +++ b/api/test-fixtures/agent_config.hcl @@ -1,2 +1,5 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + vault_addr="http://127.0.0.1:8200" ssh_mount_point="ssh" diff --git a/audit/audit.go b/audit/audit.go deleted file mode 100644 index 5641b449af30..000000000000 --- a/audit/audit.go +++ /dev/null @@ -1,59 +0,0 @@ -package audit - -import ( - "context" - - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -// Backend interface must be implemented for an audit -// mechanism to be made available. Audit backends can be enabled to -// sink information to different backends such as logs, file, databases, -// or other external services. -type Backend interface { - // LogRequest is used to synchronously log a request. This is done after the - // request is authorized but before the request is executed. The arguments - // MUST not be modified in anyway. They should be deep copied if this is - // a possibility. - LogRequest(context.Context, *logical.LogInput) error - - // LogResponse is used to synchronously log a response. This is done after - // the request is processed but before the response is sent. The arguments - // MUST not be modified in anyway. They should be deep copied if this is - // a possibility. - LogResponse(context.Context, *logical.LogInput) error - - // LogTestMessage is used to check an audit backend before adding it - // permanently. It should attempt to synchronously log the given test - // message, WITHOUT using the normal Salt (which would require a storage - // operation on creation, which is currently disallowed.) - LogTestMessage(context.Context, *logical.LogInput, map[string]string) error - - // GetHash is used to return the given data with the backend's hash, - // so that a caller can determine if a value in the audit log matches - // an expected plaintext value - GetHash(context.Context, string) (string, error) - - // Reload is called on SIGHUP for supporting backends. - Reload(context.Context) error - - // Invalidate is called for path invalidation - Invalidate(context.Context) -} - -// BackendConfig contains configuration parameters used in the factory func to -// instantiate audit backends -type BackendConfig struct { - // The view to store the salt - SaltView logical.Storage - - // The salt config that should be used for any secret obfuscation - SaltConfig *salt.Config - - // Config is the opaque user configuration provided when mounting - Config map[string]string -} - -// Factory is the factory function to create an audit backend. -type Factory func(context.Context, *BackendConfig) (Backend, error) diff --git a/audit/entry_filter.go b/audit/entry_filter.go new file mode 100644 index 000000000000..e8b251694c42 --- /dev/null +++ b/audit/entry_filter.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-bexpr" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" +) + +var _ eventlogger.Node = (*EntryFilter)(nil) + +// EntryFilter should be used to filter audit requests and responses which should +// make it to a sink. +type EntryFilter struct { + // the evaluator for the bexpr expression that should be applied by the node. + evaluator *bexpr.Evaluator +} + +// NewEntryFilter should be used to create an EntryFilter node. +// The filter supplied should be in bexpr format and reference fields from logical.LogInputBexpr. 
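+//
+// Example filter expressions (over the fields exercised by the accompanying
+// tests):
+//
+//	operation == create
+//	mount_type == kv
+//	mount_point == "/auth/userpass"
+//	namespace == juan
+//	path == foo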
+func NewEntryFilter(filter string) (*EntryFilter, error) { + filter = strings.TrimSpace(filter) + if filter == "" { + return nil, fmt.Errorf("cannot create new audit filter with empty filter expression: %w", ErrExternalOptions) + } + + eval, err := bexpr.CreateEvaluator(filter) + if err != nil { + return nil, fmt.Errorf("cannot create new audit filter: %w: %w", ErrExternalOptions, err) + } + + // Validate the filter by attempting to evaluate it with an empty input. + // This prevents users providing a filter with a field that would error during + // matching, and block all auditable requests to Vault. + li := logical.LogInputBexpr{} + _, err = eval.Evaluate(li) + if err != nil { + return nil, fmt.Errorf("filter references an unsupported field: %s: %w", filter, ErrExternalOptions) + } + + return &EntryFilter{evaluator: eval}, nil +} + +// Reopen is a no-op for the filter node. +func (*EntryFilter) Reopen() error { + return nil +} + +// Type describes the type of this node (filter). +func (*EntryFilter) Type() eventlogger.NodeType { + return eventlogger.NodeTypeFilter +} + +// Process will attempt to parse the incoming event data and decide whether it +// should be filtered or remain in the pipeline and passed to the next node. +func (f *EntryFilter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + a, ok := e.Payload.(*AuditEvent) + if !ok { + return nil, fmt.Errorf("cannot parse event payload: %w", ErrInvalidParameter) + } + + // If we don't have data to process, then we're done. + if a.Data == nil { + return nil, nil + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("cannot obtain namespace: %w", err) + } + + datum := a.Data.BexprDatum(ns.Path) + + result, err := f.evaluator.Evaluate(datum) + if err != nil { + return nil, fmt.Errorf("unable to evaluate filter: %w", err) + } + + if result { + // Allow this event to carry on through the pipeline. + return e, nil + } + + // End process of this pipeline. + return nil, nil +} diff --git a/audit/entry_filter_test.go b/audit/entry_filter_test.go new file mode 100644 index 000000000000..99230d055959 --- /dev/null +++ b/audit/entry_filter_test.go @@ -0,0 +1,271 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestEntryFilter_NewEntryFilter tests that we can create EntryFilter types correctly. 
+func TestEntryFilter_NewEntryFilter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Filter string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty-filter": { + Filter: "", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter with empty filter expression: invalid configuration", + }, + "spacey-filter": { + Filter: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter with empty filter expression: invalid configuration", + }, + "bad-filter": { + Filter: "____", + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create new audit filter", + }, + "unsupported-field-filter": { + Filter: "foo == bar", + IsErrorExpected: true, + ExpectedErrorMessage: "filter references an unsupported field: foo == bar", + }, + "good-filter-operation": { + Filter: "operation == create", + IsErrorExpected: false, + }, + "good-filter-mount_type": { + Filter: "mount_type == kv", + IsErrorExpected: false, + }, + "good-filter-mount_point": { + Filter: "mount_point == \"/auth/userpass\"", + IsErrorExpected: false, + }, + "good-filter-namespace": { + Filter: "namespace == juan", + IsErrorExpected: false, + }, + "good-filter-path": { + Filter: "path == foo", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + f, err := NewEntryFilter(tc.Filter) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.ExpectedErrorMessage) + require.Nil(t, f) + default: + require.NoError(t, err) + require.NotNil(t, f) + } + }) + } +} + +// TestEntryFilter_Reopen ensures we can reopen the filter node. +func TestEntryFilter_Reopen(t *testing.T) { + t.Parallel() + + f := &EntryFilter{} + res := f.Reopen() + require.Nil(t, res) +} + +// TestEntryFilter_Type ensures we always return the right type for this node. +func TestEntryFilter_Type(t *testing.T) { + t.Parallel() + + f := &EntryFilter{} + require.Equal(t, eventlogger.NodeTypeFilter, f.Type()) +} + +// TestEntryFilter_Process_ContextDone ensures that we stop processing the event +// if the context was cancelled. +func TestEntryFilter_Process_ContextDone(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + + // Explicitly cancel the context + cancel() + + l, err := NewEntryFilter("operation == foo") + require.NoError(t, err) + + // Fake audit event + a, err := NewEvent(RequestType) + require.NoError(t, err) + + // Fake event logger event + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + e2, err := l.Process(ctx, e) + + require.Error(t, err) + require.ErrorContains(t, err, "context canceled") + + // Ensure that the pipeline won't continue. + require.Nil(t, e2) +} + +// TestEntryFilter_Process_NilEvent ensures we receive the right error when the +// event we are trying to process is nil. +func TestEntryFilter_Process_NilEvent(t *testing.T) { + t.Parallel() + + l, err := NewEntryFilter("operation == foo") + require.NoError(t, err) + e, err := l.Process(context.Background(), nil) + require.Error(t, err) + require.EqualError(t, err, "event is nil: invalid internal parameter") + + // Ensure that the pipeline won't continue. 
+ require.Nil(t, e) +} + +// TestEntryFilter_Process_BadPayload ensures we receive the correct error when +// attempting to process an event with a payload that cannot be parsed back to +// an audit event. +func TestEntryFilter_Process_BadPayload(t *testing.T) { + t.Parallel() + + l, err := NewEntryFilter("operation == foo") + require.NoError(t, err) + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: nil, + } + + e2, err := l.Process(context.Background(), e) + require.Error(t, err) + require.EqualError(t, err, "cannot parse event payload: invalid internal parameter") + + // Ensure that the pipeline won't continue. + require.Nil(t, e2) +} + +// TestEntryFilter_Process_NoAuditDataInPayload ensure we stop processing a pipeline +// when the data in the audit event is nil. +func TestEntryFilter_Process_NoAuditDataInPayload(t *testing.T) { + t.Parallel() + + l, err := NewEntryFilter("operation == foo") + require.NoError(t, err) + + a, err := NewEvent(RequestType) + require.NoError(t, err) + + // Ensure audit data is nil + a.Data = nil + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + e2, err := l.Process(context.Background(), e) + + // Make sure we get the 'nil, nil' response to stop processing this pipeline. + require.NoError(t, err) + require.Nil(t, e2) +} + +// TestEntryFilter_Process_FilterSuccess tests that when a filter matches we +// receive no error and the event is not nil so it continues in the pipeline. +func TestEntryFilter_Process_FilterSuccess(t *testing.T) { + t.Parallel() + + l, err := NewEntryFilter("mount_type == juan") + require.NoError(t, err) + + a, err := NewEvent(RequestType) + require.NoError(t, err) + + a.Data = &logical.LogInput{ + Request: &logical.Request{ + Operation: logical.CreateOperation, + MountType: "juan", + }, + } + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + e2, err := l.Process(ctx, e) + + require.NoError(t, err) + require.NotNil(t, e2) +} + +// TestEntryFilter_Process_FilterFail tests that when a filter fails to match we +// receive no error, but also the event is nil so that the pipeline completes. +func TestEntryFilter_Process_FilterFail(t *testing.T) { + t.Parallel() + + l, err := NewEntryFilter("mount_type == john and operation == create and namespace == root") + require.NoError(t, err) + + a, err := NewEvent(RequestType) + require.NoError(t, err) + + a.Data = &logical.LogInput{ + Request: &logical.Request{ + Operation: logical.CreateOperation, + MountType: "juan", + }, + } + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + e2, err := l.Process(ctx, e) + + require.NoError(t, err) + require.Nil(t, e2) +} diff --git a/audit/entry_formatter.go b/audit/entry_formatter.go new file mode 100644 index 000000000000..1d3ccc59c2d4 --- /dev/null +++ b/audit/entry_formatter.go @@ -0,0 +1,703 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( +	"context" +	"crypto/tls" +	"errors" +	"fmt" +	"reflect" +	"runtime/debug" +	"strings" +	"time" + +	"github.com/go-jose/go-jose/v3/jwt" +	"github.com/hashicorp/eventlogger" +	"github.com/hashicorp/go-hclog" +	"github.com/hashicorp/go-multierror" +	"github.com/hashicorp/vault/helper/namespace" +	"github.com/hashicorp/vault/sdk/helper/jsonutil" +	"github.com/hashicorp/vault/sdk/helper/salt" +	"github.com/hashicorp/vault/sdk/logical" +	"github.com/jefferai/jsonx" +) + +var ( +	_ Formatter        = (*EntryFormatter)(nil) +	_ eventlogger.Node = (*EntryFormatter)(nil) +) + +// timeProvider offers a way to supply a pre-configured time. +type timeProvider interface { +	// formattedTime provides the pre-configured time in a particular format. +	formattedTime() string +} + +// FormatterConfig is used to provide basic configuration to a formatter. +// Use NewFormatterConfig to initialize the FormatterConfig struct. +type FormatterConfig struct { +	Raw          bool +	HMACAccessor bool + +	// Vault lacks pagination in its APIs. As a result, certain list operations can return **very** large responses. +	// The user's chosen audit sinks may experience difficulty consuming audit records that swell to tens of megabytes +	// of JSON. The responses of list operations are typically not very interesting, as they are mostly lists of keys, +	// or, even when they include a "key_info" field, are not returning confidential information. They become even less +	// interesting once HMAC-ed by the audit system. +	// +	// Some example Vault "list" operations that are prone to becoming very large in an active Vault installation are: +	//   auth/token/accessors/ +	//   identity/entity/id/ +	//   identity/entity-alias/id/ +	//   pki/certs/ +	// +	// This option exists to let such users have response data elided from audit logs, but only when +	// the operation type is "list". For added safety, the elision only applies to the "keys" and "key_info" fields +	// within the response data - these are conventionally the only fields present in a list response - see +	// logical.ListResponse, and logical.ListResponseWithInfo. However, other fields are technically possible if a +	// plugin author writes unusual code, and these will be preserved in the audit log even with this option enabled. +	// The elision replaces the values of the "keys" and "key_info" fields with an integer count of the number of +	// entries. This allows even the elided audit logs to still be useful for answering questions like +	// "Was any data returned?" or "How many records were listed?". +	ElideListResponses bool + +	// This should only ever be used in a testing context. +	OmitTime bool + +	// The required/target format for the event (supported: JSONFormat and JSONxFormat). +	RequiredFormat format + +	// headerFormatter specifies the formatter used for headers that exist in any incoming audit request. +	headerFormatter HeaderFormatter + +	// Prefix specifies a prefix that should be prepended to any formatted request or response before serialization. +	Prefix string +} + +// EntryFormatter should be used to format audit requests and responses. +// NOTE: Use NewEntryFormatter to initialize the EntryFormatter struct. +type EntryFormatter struct { +	config FormatterConfig +	salter Salter +	logger hclog.Logger +	name   string +} + +// NewFormatterConfig should be used to create a FormatterConfig. +// Accepted options: WithElision, WithFormat, WithHMACAccessor, WithOmitTime, WithPrefix, WithRaw.
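+// +// As a rough usage sketch only (the headerFormatter, salter and logger values +// here are placeholders the caller is assumed to provide): +// +//	cfg, err := NewFormatterConfig(headerFormatter, WithFormat(JSONFormat.String())) +//	if err != nil { +//		return err +//	} +//	f, err := NewEntryFormatter("audit-device", cfg, salter, logger)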
+func NewFormatterConfig(headerFormatter HeaderFormatter, opt ...Option) (FormatterConfig, error) { + if headerFormatter == nil || reflect.ValueOf(headerFormatter).IsNil() { + return FormatterConfig{}, fmt.Errorf("header formatter is required: %w", ErrInvalidParameter) + } + + opts, err := getOpts(opt...) + if err != nil { + return FormatterConfig{}, err + } + + return FormatterConfig{ + headerFormatter: headerFormatter, + ElideListResponses: opts.withElision, + HMACAccessor: opts.withHMACAccessor, + OmitTime: opts.withOmitTime, + Prefix: opts.withPrefix, + Raw: opts.withRaw, + RequiredFormat: opts.withFormat, + }, nil +} + +// NewEntryFormatter should be used to create an EntryFormatter. +func NewEntryFormatter(name string, config FormatterConfig, salter Salter, logger hclog.Logger) (*EntryFormatter, error) { + name = strings.TrimSpace(name) + if name == "" { + return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter) + } + + if salter == nil { + return nil, fmt.Errorf("cannot create a new audit formatter with nil salter: %w", ErrInvalidParameter) + } + + if logger == nil || reflect.ValueOf(logger).IsNil() { + return nil, fmt.Errorf("cannot create a new audit formatter with nil logger: %w", ErrInvalidParameter) + } + + return &EntryFormatter{ + config: config, + salter: salter, + logger: logger, + name: name, + }, nil +} + +// Reopen is a no-op for the formatter node. +func (*EntryFormatter) Reopen() error { + return nil +} + +// Type describes the type of this node (formatter). +func (*EntryFormatter) Type() eventlogger.NodeType { + return eventlogger.NodeTypeFormatter +} + +// Process will attempt to parse the incoming event data into a corresponding +// audit Request/Response which is serialized to JSON/JSONx and stored within the event. +func (f *EntryFormatter) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { + // Return early if the context was cancelled, eventlogger will not carry on + // asking nodes to process, so any sink node in the pipeline won't be called. + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + // Perform validation on the event, then retrieve the underlying AuditEvent + // and LogInput (from the AuditEvent Data). + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + a, ok := e.Payload.(*AuditEvent) + if !ok { + return nil, fmt.Errorf("cannot parse event payload: %w", ErrInvalidParameter) + } + + if a.Data == nil { + return nil, fmt.Errorf("cannot audit event (%s) with no data: %w", a.Subtype, ErrInvalidParameter) + } + + // Handle panics + defer func() { + r := recover() + if r == nil { + return + } + + f.logger.Error("panic during logging", + "request_path", a.Data.Request.Path, + "audit_device_path", f.name, + "error", r, + "stacktrace", string(debug.Stack())) + + // Ensure that we add this error onto any pre-existing error that was being returned. + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log: %q", f.name)).ErrorOrNil() + }() + + // Take a copy of the event data before we modify anything. + data, err := a.Data.Clone() + if err != nil { + return nil, fmt.Errorf("unable to clone audit event data: %w", err) + } + + // If the request is present in the input data, apply header configuration + // regardless. We shouldn't be in a situation where the header formatter isn't + // present as it's required. 
+ if data.Request != nil { + // Ensure that any headers in the request, are formatted as required, and are + // only present if they have been configured to appear in the audit log. + // e.g. via: /sys/config/auditing/request-headers/:name + data.Request.Headers, err = f.config.headerFormatter.ApplyConfig(ctx, data.Request.Headers, f.salter) + if err != nil { + return nil, fmt.Errorf("unable to transform headers for auditing: %w", err) + } + } + + // If the request contains a Server-Side Consistency Token (SSCT), and we + // have an auth response, overwrite the existing client token with the SSCT, + // so that the SSCT appears in the audit log for this entry. + if data.Request != nil && data.Request.InboundSSCToken != "" && data.Auth != nil { + data.Auth.ClientToken = data.Request.InboundSSCToken + } + + // Using 'any' as we have two different types that we can get back from either + // FormatRequest or FormatResponse, but the JSON encoder doesn't care about types. + var entry any + + switch a.Subtype { + case RequestType: + entry, err = f.FormatRequest(ctx, data, a) + case ResponseType: + entry, err = f.FormatResponse(ctx, data, a) + default: + return nil, fmt.Errorf("unknown audit event subtype: %q", a.Subtype) + } + if err != nil { + return nil, fmt.Errorf("unable to parse %s from audit event: %w", a.Subtype, err) + } + + result, err := jsonutil.EncodeJSON(entry) + if err != nil { + return nil, fmt.Errorf("unable to format %s: %w", a.Subtype, err) + } + + if f.config.RequiredFormat == JSONxFormat { + var err error + result, err = jsonx.EncodeJSONBytes(result) + if err != nil { + return nil, fmt.Errorf("unable to encode JSONx using JSON data: %w", err) + } + if result == nil { + return nil, fmt.Errorf("encoded JSONx was nil: %w", err) + } + } + + // This makes a bit of a mess of the 'format' since both JSON and XML (JSONx) + // don't support a prefix just sitting there. + // However, this would be a breaking change to how Vault currently works to + // include the prefix as part of the JSON object or XML document. + if f.config.Prefix != "" { + result = append([]byte(f.config.Prefix), result...) + } + + // Copy some properties from the event (and audit event) and store the + // format for the next (sink) node to Process. + a2 := &AuditEvent{ + ID: a.ID, + Version: a.Version, + Subtype: a.Subtype, + Timestamp: a.Timestamp, + Data: data, // Use the cloned data here rather than a pointer to the original. + } + + e2 := &eventlogger.Event{ + Type: e.Type, + CreatedAt: e.CreatedAt, + Formatted: make(map[string][]byte), // we are about to set this ourselves. + Payload: a2, + } + + e2.FormattedAs(f.config.RequiredFormat.String(), result) + + return e2, nil +} + +// FormatRequest attempts to format the specified logical.LogInput into a RequestEntry. 
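+// +// Hedged call sketch: in is a *logical.LogInput and provider is anything that +// satisfies the unexported timeProvider interface (an *AuditEvent does), so a +// caller might do: +// +//	entry, err := f.FormatRequest(ctx, in, auditEvent)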
+func (f *EntryFormatter) FormatRequest(ctx context.Context, in *logical.LogInput, provider timeProvider) (*RequestEntry, error) { + switch { + case in == nil || in.Request == nil: + return nil, errors.New("request to request-audit a nil request") + case f.salter == nil: + return nil, errors.New("salt func not configured") + } + + // Set these to the input values at first + auth := in.Auth + req := in.Request + var connState *tls.ConnectionState + if auth == nil { + auth = new(logical.Auth) + } + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + if !f.config.Raw { + var err error + auth, err = HashAuth(ctx, f.salter, auth, f.config.HMACAccessor) + if err != nil { + return nil, err + } + + req, err = HashRequest(ctx, f.salter, req, f.config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return nil, err + } + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + reqType := in.Type + if reqType == "" { + reqType = "request" + } + reqEntry := &RequestEntry{ + Type: reqType, + Error: errString, + ForwardedFrom: req.ForwardedFrom, + Auth: &Auth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + TokenPolicies: auth.TokenPolicies, + IdentityPolicies: auth.IdentityPolicies, + ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, + Metadata: auth.Metadata, + EntityID: auth.EntityID, + RemainingUses: req.ClientTokenRemainingUses, + TokenType: auth.TokenType.String(), + TokenTTL: int64(auth.TTL.Seconds()), + }, + + Request: &Request{ + ID: req.ID, + ClientID: req.ClientID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Operation: req.Operation, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, + MountRunningVersion: req.MountRunningVersion(), + MountRunningSha256: req.MountRunningSha256(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountClass: req.MountClass(), + Namespace: &Namespace{ + ID: ns.ID, + Path: ns.Path, + }, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + RemotePort: getRemotePort(req), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + }, + } + + if req.HTTPRequest != nil && req.HTTPRequest.RequestURI != req.Path { + reqEntry.Request.RequestURI = req.HTTPRequest.RequestURI + } + + if !auth.IssueTime.IsZero() { + reqEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) + } + + if auth.PolicyResults != nil { + reqEntry.Auth.PolicyResults = &PolicyResults{ + Allowed: auth.PolicyResults.Allowed, + } + + for _, p := range auth.PolicyResults.GrantingPolicies { + reqEntry.Auth.PolicyResults.GrantingPolicies = append(reqEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ + Name: p.Name, + NamespaceId: p.NamespaceId, + NamespacePath: p.NamespacePath, + Type: p.Type, + }) + } + } + + if req.WrapInfo != nil { + reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) + } + + if !f.config.OmitTime { + // Use the time provider to supply the time for this entry. 
+ reqEntry.Time = provider.formattedTime() + } + + return reqEntry, nil +} + +// FormatResponse attempts to format the specified logical.LogInput into a ResponseEntry. +func (f *EntryFormatter) FormatResponse(ctx context.Context, in *logical.LogInput, provider timeProvider) (*ResponseEntry, error) { + switch { + case f == nil: + return nil, errors.New("formatter is nil") + case in == nil || in.Request == nil: + return nil, errors.New("request to response-audit a nil request") + case f.salter == nil: + return nil, errors.New("salt func not configured") + } + + // Set these to the input values at first + auth, req, resp := in.Auth, in.Request, in.Response + if auth == nil { + auth = new(logical.Auth) + } + if resp == nil { + resp = new(logical.Response) + } + var connState *tls.ConnectionState + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + elideListResponseData := f.config.ElideListResponses && req.Operation == logical.ListOperation + + var respData map[string]interface{} + if f.config.Raw { + // In the non-raw case, elision of list response data occurs inside HashResponse, to avoid redundant deep + // copies and hashing of data only to elide it later. In the raw case, we need to do it here. + if elideListResponseData && resp.Data != nil { + // Copy the data map before making changes, but we only need to go one level deep in this case + respData = make(map[string]interface{}, len(resp.Data)) + for k, v := range resp.Data { + respData[k] = v + } + + doElideListResponseData(respData) + } else { + respData = resp.Data + } + } else { + var err error + auth, err = HashAuth(ctx, f.salter, auth, f.config.HMACAccessor) + if err != nil { + return nil, err + } + + req, err = HashRequest(ctx, f.salter, req, f.config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return nil, err + } + + resp, err = HashResponse(ctx, f.salter, resp, f.config.HMACAccessor, in.NonHMACRespDataKeys, elideListResponseData) + if err != nil { + return nil, err + } + + respData = resp.Data + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + var respAuth *Auth + if resp.Auth != nil { + respAuth = &Auth{ + ClientToken: resp.Auth.ClientToken, + Accessor: resp.Auth.Accessor, + DisplayName: resp.Auth.DisplayName, + Policies: resp.Auth.Policies, + TokenPolicies: resp.Auth.TokenPolicies, + IdentityPolicies: resp.Auth.IdentityPolicies, + ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, + NoDefaultPolicy: resp.Auth.NoDefaultPolicy, + Metadata: resp.Auth.Metadata, + NumUses: resp.Auth.NumUses, + EntityID: resp.Auth.EntityID, + TokenType: resp.Auth.TokenType.String(), + TokenTTL: int64(resp.Auth.TTL.Seconds()), + } + if !resp.Auth.IssueTime.IsZero() { + respAuth.TokenIssueTime = resp.Auth.IssueTime.Format(time.RFC3339) + } + } + + var respSecret *Secret + if resp.Secret != nil { + respSecret = &Secret{ + LeaseID: resp.Secret.LeaseID, + } + } + + var respWrapInfo *ResponseWrapInfo + if resp.WrapInfo != nil { + token := resp.WrapInfo.Token + if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { + token = *jwtToken + } + respWrapInfo = &ResponseWrapInfo{ + TTL: int(resp.WrapInfo.TTL / time.Second), + Token: token, + Accessor: resp.WrapInfo.Accessor, + CreationTime: resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, + WrappedAccessor: 
resp.WrapInfo.WrappedAccessor, +		} +	} + +	respType := in.Type +	if respType == "" { +		respType = "response" +	} +	respEntry := &ResponseEntry{ +		Type:      respType, +		Error:     errString, +		Forwarded: req.ForwardedFrom != "", +		Auth: &Auth{ +			ClientToken:               auth.ClientToken, +			Accessor:                  auth.Accessor, +			DisplayName:               auth.DisplayName, +			Policies:                  auth.Policies, +			TokenPolicies:             auth.TokenPolicies, +			IdentityPolicies:          auth.IdentityPolicies, +			ExternalNamespacePolicies: auth.ExternalNamespacePolicies, +			NoDefaultPolicy:           auth.NoDefaultPolicy, +			Metadata:                  auth.Metadata, +			RemainingUses:             req.ClientTokenRemainingUses, +			EntityID:                  auth.EntityID, +			EntityCreated:             auth.EntityCreated, +			TokenType:                 auth.TokenType.String(), +			TokenTTL:                  int64(auth.TTL.Seconds()), +		}, + +		Request: &Request{ +			ID:                            req.ID, +			ClientToken:                   req.ClientToken, +			ClientTokenAccessor:           req.ClientTokenAccessor, +			ClientID:                      req.ClientID, +			Operation:                     req.Operation, +			MountPoint:                    req.MountPoint, +			MountType:                     req.MountType, +			MountAccessor:                 req.MountAccessor, +			MountRunningVersion:           req.MountRunningVersion(), +			MountRunningSha256:            req.MountRunningSha256(), +			MountIsExternalPlugin:         req.MountIsExternalPlugin(), +			MountClass:                    req.MountClass(), +			Namespace: &Namespace{ +				ID:   ns.ID, +				Path: ns.Path, +			}, +			Path:                          req.Path, +			Data:                          req.Data, +			PolicyOverride:                req.PolicyOverride, +			RemoteAddr:                    getRemoteAddr(req), +			RemotePort:                    getRemotePort(req), +			ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), +			ReplicationCluster:            req.ReplicationCluster, +			Headers:                       req.Headers, +		}, + +		Response: &Response{ +			MountPoint:            req.MountPoint, +			MountType:             req.MountType, +			MountAccessor:         req.MountAccessor, +			MountRunningVersion:   req.MountRunningVersion(), +			MountRunningSha256:    req.MountRunningSha256(), +			MountIsExternalPlugin: req.MountIsExternalPlugin(), +			MountClass:            req.MountClass(), +			Auth:                  respAuth, +			Secret:                respSecret, +			Data:                  respData, +			Warnings:              resp.Warnings, +			Redirect:              resp.Redirect, +			WrapInfo:              respWrapInfo, +			Headers:               resp.Headers, +		}, +	} + +	if req.HTTPRequest != nil && req.HTTPRequest.RequestURI != req.Path { +		respEntry.Request.RequestURI = req.HTTPRequest.RequestURI +	} + +	if auth.PolicyResults != nil { +		respEntry.Auth.PolicyResults = &PolicyResults{ +			Allowed: auth.PolicyResults.Allowed, +		} + +		for _, p := range auth.PolicyResults.GrantingPolicies { +			respEntry.Auth.PolicyResults.GrantingPolicies = append(respEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ +				Name:          p.Name, +				NamespaceId:   p.NamespaceId, +				NamespacePath: p.NamespacePath, +				Type:          p.Type, +			}) +		} +	} + +	if !auth.IssueTime.IsZero() { +		respEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) +	} +	if req.WrapInfo != nil { +		respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) +	} + +	if !f.config.OmitTime { +		// Use the time provider to supply the time for this entry. +		respEntry.Time = provider.formattedTime() +	} + +	return respEntry, nil +} + +// getRemoteAddr safely gets the remote address avoiding a nil pointer +func getRemoteAddr(req *logical.Request) string { +	if req != nil && req.Connection != nil { +		return req.Connection.RemoteAddr +	} +	return "" +} + +// getRemotePort safely gets the remote port avoiding a nil pointer +func getRemotePort(req *logical.Request) int { +	if req != nil && req.Connection != nil { +		return req.Connection.RemotePort +	} +	return 0 +} + +// getClientCertificateSerialNumber attempts to retrieve the serial number of +// the peer certificate from the specified tls.ConnectionState.
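+// Only verified chains are consulted, so an unverified peer certificate results +// in an empty string.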
+func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { + if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { + return "" + } + + return connState.VerifiedChains[0][0].SerialNumber.String() +} + +// parseVaultTokenFromJWT returns a string iff the token was a JWT, and we could +// extract the original token ID from inside +func parseVaultTokenFromJWT(token string) *string { + if strings.Count(token, ".") != 2 { + return nil + } + + parsedJWT, err := jwt.ParseSigned(token) + if err != nil { + return nil + } + + var claims jwt.Claims + if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { + return nil + } + + return &claims.ID +} + +// doElideListResponseData performs the actual elision of list operation response data, once surrounding code has +// determined it should apply to a particular request. The data map that is passed in must be a copy that is safe to +// modify in place, but need not be a full recursive deep copy, as only top-level keys are changed. +// +// See the documentation of the controlling option in FormatterConfig for more information on the purpose. +func doElideListResponseData(data map[string]interface{}) { + for k, v := range data { + if k == "keys" { + if vSlice, ok := v.([]string); ok { + data[k] = len(vSlice) + } + } else if k == "key_info" { + if vMap, ok := v.(map[string]interface{}); ok { + data[k] = len(vMap) + } + } + } +} + +// newTemporaryEntryFormatter creates a cloned EntryFormatter instance with a non-persistent Salter. +func newTemporaryEntryFormatter(n *EntryFormatter) *EntryFormatter { + return &EntryFormatter{ + salter: &nonPersistentSalt{}, + config: n.config, + } +} + +// Salt returns a new salt with default configuration and no storage usage, and no error. +func (s *nonPersistentSalt) Salt(_ context.Context) (*salt.Salt, error) { + return salt.NewNonpersistentSalt(), nil +} diff --git a/audit/entry_formatter_test.go b/audit/entry_formatter_test.go new file mode 100644 index 000000000000..349a3b1911e8 --- /dev/null +++ b/audit/entry_formatter_test.go @@ -0,0 +1,1293 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const testFormatJSONReqBasicStrFmt = ` +{ + "time": "2015-08-05T13:45:46Z", + "type": "request", + "auth": { + "client_token": "%s", + "accessor": "bar", + "display_name": "testtoken", + "policies": [ + "root" + ], + "no_default_policy": true, + "metadata": null, + "entity_id": "foobarentity", + "token_type": "service", + "token_ttl": 14400, + "token_issue_time": "2020-05-28T13:40:18-05:00" + }, + "request": { + "operation": "update", + "path": "/foo", + "data": null, + "wrap_ttl": 60, + "remote_address": "127.0.0.1", + "headers": { + "foo": [ + "bar" + ] + } + }, + "error": "this is an error" +} +` + +// testHeaderFormatter is a stub to prevent the need to import the vault package +// to bring in vault.AuditedHeadersConfig for testing. 
+type testHeaderFormatter struct { + shouldReturnEmpty bool +} + +// ApplyConfig satisfies the HeaderFormatter interface for testing. +// It will either return the headers it was supplied or empty headers depending +// on how it is configured. +// ignore-nil-nil-function-check. +func (f *testHeaderFormatter) ApplyConfig(_ context.Context, headers map[string][]string, salter Salter) (result map[string][]string, retErr error) { + if f.shouldReturnEmpty { + return make(map[string][]string), nil + } + + return headers, nil +} + +// testTimeProvider is just a test struct used to imitate an AuditEvent's ability +// to provide a formatted time. +type testTimeProvider struct{} + +// formattedTime always returns the same value for 22nd March 2024 at 10:00:05 (and 10 nanos). +func (p *testTimeProvider) formattedTime() string { + return time.Date(2024, time.March, 22, 10, 0o0, 5, 10, time.UTC).UTC().Format(time.RFC3339Nano) +} + +// TestNewEntryFormatter ensures we can create new EntryFormatter structs. +func TestNewEntryFormatter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Name string + UseStaticSalt bool + Logger hclog.Logger + Options []Option // Only supports WithPrefix + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedFormat format + ExpectedPrefix string + }{ + "empty-name": { + Name: "", + IsErrorExpected: true, + ExpectedErrorMessage: "name is required: invalid internal parameter", + }, + "spacey-name": { + Name: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "name is required: invalid internal parameter", + }, + "nil-salter": { + Name: "juan", + UseStaticSalt: false, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create a new audit formatter with nil salter: invalid internal parameter", + }, + "nil-logger": { + Name: "juan", + UseStaticSalt: true, + Logger: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot create a new audit formatter with nil logger: invalid internal parameter", + }, + "static-salter": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + IsErrorExpected: false, + Options: []Option{ + WithFormat(JSONFormat.String()), + }, + ExpectedFormat: JSONFormat, + }, + "default": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + IsErrorExpected: false, + ExpectedFormat: JSONFormat, + }, + "config-json": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: []Option{ + WithFormat(JSONFormat.String()), + }, + IsErrorExpected: false, + ExpectedFormat: JSONFormat, + }, + "config-jsonx": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: []Option{ + WithFormat(JSONxFormat.String()), + }, + IsErrorExpected: false, + ExpectedFormat: JSONxFormat, + }, + "config-json-prefix": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: []Option{ + WithPrefix("foo"), + WithFormat(JSONFormat.String()), + }, + IsErrorExpected: false, + ExpectedFormat: JSONFormat, + ExpectedPrefix: "foo", + }, + "config-jsonx-prefix": { + Name: "juan", + UseStaticSalt: true, + Logger: hclog.NewNullLogger(), + Options: []Option{ + WithPrefix("foo"), + WithFormat(JSONxFormat.String()), + }, + IsErrorExpected: false, + ExpectedFormat: JSONxFormat, + ExpectedPrefix: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + var ss Salter + if tc.UseStaticSalt { + ss = newStaticSalt(t) + } + + cfg, err := NewFormatterConfig(&testHeaderFormatter{}, 
tc.Options...) + require.NoError(t, err) + f, err := NewEntryFormatter(tc.Name, cfg, ss, tc.Logger) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, f) + default: + require.NoError(t, err) + require.NotNil(t, f) + require.Equal(t, tc.ExpectedFormat, f.config.RequiredFormat) + require.Equal(t, tc.ExpectedPrefix, f.config.Prefix) + } + }) + } +} + +// TestEntryFormatter_Reopen ensures that we do not get an error when calling Reopen. +func TestEntryFormatter_Reopen(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := NewFormatterConfig(&testHeaderFormatter{}) + require.NoError(t, err) + + f, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, f) + require.NoError(t, f.Reopen()) +} + +// TestEntryFormatter_Type ensures that the node is a 'formatter' type. +func TestEntryFormatter_Type(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := NewFormatterConfig(&testHeaderFormatter{}) + require.NoError(t, err) + + f, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, f) + require.Equal(t, eventlogger.NodeTypeFormatter, f.Type()) +} + +// TestEntryFormatter_Process attempts to run the Process method to convert the +// logical.LogInput within an audit event to JSON and JSONx (RequestEntry or ResponseEntry). +func TestEntryFormatter_Process(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + IsErrorExpected bool + ExpectedErrorMessage string + Subtype subtype + RequiredFormat format + Data *logical.LogInput + RootNamespace bool + }{ + "json-request-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit event (request) with no data: invalid internal parameter", + Subtype: RequestType, + RequiredFormat: JSONFormat, + Data: nil, + }, + "json-response-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit event (response) with no data: invalid internal parameter", + Subtype: ResponseType, + RequiredFormat: JSONFormat, + Data: nil, + }, + "json-request-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from audit event: request to request-audit a nil request", + Subtype: RequestType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Type: "magic"}, + }, + "json-response-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse response from audit event: request to response-audit a nil request", + Subtype: ResponseType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Type: "magic"}, + }, + "json-request-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from audit event: no namespace", + Subtype: RequestType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "json-response-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse response from audit event: no namespace", + Subtype: ResponseType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "json-request-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: RequestType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + "json-response-basic-input-and-request-with-ns": { + IsErrorExpected: false, + 
Subtype: ResponseType, + RequiredFormat: JSONFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + "jsonx-request-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit event (request) with no data: invalid internal parameter", + Subtype: RequestType, + RequiredFormat: JSONxFormat, + Data: nil, + }, + "jsonx-response-no-data": { + IsErrorExpected: true, + ExpectedErrorMessage: "cannot audit event (response) with no data: invalid internal parameter", + Subtype: ResponseType, + RequiredFormat: JSONxFormat, + Data: nil, + }, + "jsonx-request-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from audit event: request to request-audit a nil request", + Subtype: RequestType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Type: "magic"}, + }, + "jsonx-response-basic-input": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse response from audit event: request to response-audit a nil request", + Subtype: ResponseType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Type: "magic"}, + }, + "jsonx-request-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse request from audit event: no namespace", + Subtype: RequestType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "jsonx-response-basic-input-and-request-no-ns": { + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse response from audit event: no namespace", + Subtype: ResponseType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + }, + "jsonx-request-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: RequestType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + "jsonx-response-basic-input-and-request-with-ns": { + IsErrorExpected: false, + Subtype: ResponseType, + RequiredFormat: JSONxFormat, + Data: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + e := fakeEvent(t, tc.Subtype, tc.Data) + require.NotNil(t, e) + + ss := newStaticSalt(t) + cfg, err := NewFormatterConfig(&testHeaderFormatter{}, WithFormat(tc.RequiredFormat.String())) + require.NoError(t, err) + + f, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, f) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = namespace.RootContext(context.Background()) + default: + ctx = context.Background() + } + + processed, err := f.Process(ctx, e) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, processed) + default: + require.NoError(t, err) + require.NotNil(t, processed) + b, found := processed.Format(string(tc.RequiredFormat)) + require.True(t, found) + require.NotNil(t, b) + } + }) + } +} + +// BenchmarkAuditFileSink_Process benchmarks the EntryFormatter and then event.FileSink calling Process. +// This should replicate the original benchmark testing which used to perform both of these roles together. 
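+// +// If needed, it can be run in isolation with something like: +// +//	go test -run NONE -bench BenchmarkAuditFileSink_Process ./audit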
+func BenchmarkAuditFileSink_Process(b *testing.B) { + // Base input + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + } + + ctx := namespace.RootContext(context.Background()) + + // Create the formatter node. + cfg, err := NewFormatterConfig(&testHeaderFormatter{}) + require.NoError(b, err) + ss := newStaticSalt(b) + formatter, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(b, err) + require.NotNil(b, formatter) + + // Create the sink node. + sink, err := event.NewFileSink("/dev/null", JSONFormat.String()) + require.NoError(b, err) + require.NotNil(b, sink) + + // Generate the event + e := fakeEvent(b, RequestType, in) + require.NotNil(b, e) + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + e, err = formatter.Process(ctx, e) + if err != nil { + panic(err) + } + _, err := sink.Process(ctx, e) + if err != nil { + panic(err) + } + } + }) +} + +// TestEntryFormatter_FormatRequest exercises EntryFormatter.FormatRequest with +// varying inputs. +func TestEntryFormatter_FormatRequest(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Input *logical.LogInput + ShouldOmitTime bool + IsErrorExpected bool + ExpectedErrorMessage string + RootNamespace bool + }{ + "nil": { + Input: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "request to request-audit a nil request", + }, + "basic-input": { + Input: &logical.LogInput{}, + IsErrorExpected: true, + ExpectedErrorMessage: "request to request-audit a nil request", + }, + "input-and-request-no-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: true, + ExpectedErrorMessage: "no namespace", + RootNamespace: false, + }, + "input-and-request-with-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: false, + RootNamespace: true, + }, + "omit-time": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + ShouldOmitTime: true, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := NewFormatterConfig(&testHeaderFormatter{}, WithOmitTime(tc.ShouldOmitTime)) + require.NoError(t, err) + f, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = namespace.RootContext(context.Background()) + default: + ctx = context.Background() + } + + entry, err := f.FormatRequest(ctx, tc.Input, &testTimeProvider{}) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, entry) + case tc.ShouldOmitTime: + require.NoError(t, err) + require.NotNil(t, entry) + require.Zero(t, entry.Time) + default: + require.NoError(t, err) + require.NotNil(t, entry) + require.NotZero(t, entry.Time) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", entry.Time) + } + }) + } +} + +// TestEntryFormatter_FormatResponse exercises 
EntryFormatter.FormatResponse with +// varying inputs. +func TestEntryFormatter_FormatResponse(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Input *logical.LogInput + ShouldOmitTime bool + IsErrorExpected bool + ExpectedErrorMessage string + RootNamespace bool + }{ + "nil": { + Input: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "request to response-audit a nil request", + }, + "basic-input": { + Input: &logical.LogInput{}, + IsErrorExpected: true, + ExpectedErrorMessage: "request to response-audit a nil request", + }, + "input-and-request-no-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: true, + ExpectedErrorMessage: "no namespace", + RootNamespace: false, + }, + "input-and-request-with-ns": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + IsErrorExpected: false, + RootNamespace: true, + }, + "omit-time": { + Input: &logical.LogInput{Request: &logical.Request{ID: "123"}}, + ShouldOmitTime: true, + IsErrorExpected: false, + RootNamespace: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + cfg, err := NewFormatterConfig(&testHeaderFormatter{}, WithOmitTime(tc.ShouldOmitTime)) + require.NoError(t, err) + f, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + var ctx context.Context + switch { + case tc.RootNamespace: + ctx = namespace.RootContext(context.Background()) + default: + ctx = context.Background() + } + + entry, err := f.FormatResponse(ctx, tc.Input, &testTimeProvider{}) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, entry) + case tc.ShouldOmitTime: + require.NoError(t, err) + require.NotNil(t, entry) + require.Zero(t, entry.Time) + default: + require.NoError(t, err) + require.NotNil(t, entry) + require.NotZero(t, entry.Time) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", entry.Time) + } + }) + } +} + +// TestEntryFormatter_Process_JSON ensures that the JSON output we get matches what +// we expect for the specified LogInput. 
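+// +// The expected output is built from testFormatJSONReqBasicStrFmt, with the salted +// client token substituted in via fmt.Sprintf.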
+func TestEntryFormatter_Process_JSON(t *testing.T) { + t.Parallel() + + ss := newStaticSalt(t) + + expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, ss.salt.GetIdentifiedHMAC("foo")) + + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "", + expectedResultStr, + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "@cee: ", + expectedResultStr, + }, + } + + for name, tc := range cases { + cfg, err := NewFormatterConfig(&testHeaderFormatter{}, WithHMACAccessor(false), WithPrefix(tc.Prefix)) + require.NoError(t, err) + formatter, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + + // Create an audit event and more generic eventlogger.event to allow us + // to process (format). 
+ auditEvent, err := NewEvent(RequestType) + require.NoError(t, err) + auditEvent.Data = in + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := formatter.Process(namespace.RootContext(nil), e) + require.NoErrorf(t, err, "bad: %s\nerr: %s", name, err) + + jsonBytes, ok := e2.Format(JSONFormat.String()) + require.True(t, ok) + require.Positive(t, len(jsonBytes)) + + if !strings.HasPrefix(string(jsonBytes), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) + } + + expectedJSON := new(RequestEntry) + + if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedJSON); err != nil { + t.Fatalf("bad json: %s", err) + } + expectedJSON.Request.Namespace = &Namespace{ID: "root"} + + actualJSON := new(RequestEntry) + if err := jsonutil.DecodeJSON(jsonBytes[len(tc.Prefix):], &actualJSON); err != nil { + t.Fatalf("bad json: %s", err) + } + + expectedJSON.Time = actualJSON.Time + + expectedBytes, err := json.Marshal(expectedJSON) + if err != nil { + t.Fatalf("unable to marshal json: %s", err) + } + + if !strings.HasSuffix(strings.TrimSpace(string(jsonBytes)), string(expectedBytes)) { + t.Fatalf("bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", name, string(jsonBytes), string(expectedBytes)) + } + } +} + +// TestEntryFormatter_Process_JSONx ensures that the JSONx output we get matches what +// we expect for the specified LogInput. +func TestEntryFormatter_Process_JSONx(t *testing.T) { + t.Parallel() + + s, err := salt.NewSalt(context.Background(), nil, nil) + require.NoError(t, err) + tempStaticSalt := &staticSalt{salt: s} + + fooSalted := s.GetIdentifiedHMAC("foo") + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + Result string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + NoDefaultPolicy: true, + EntityID: "foobarentity", + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, 
+ PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "@cee: ", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + } + + for name, tc := range cases { + cfg, err := NewFormatterConfig( + &testHeaderFormatter{}, + WithOmitTime(true), + WithHMACAccessor(false), + WithFormat(JSONxFormat.String()), + WithPrefix(tc.Prefix), + ) + require.NoError(t, err) + formatter, err := NewEntryFormatter("juan", cfg, tempStaticSalt, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + + // Create an audit event and more generic eventlogger.event to allow us + // to process (format). + auditEvent, err := NewEvent(RequestType) + require.NoError(t, err) + auditEvent.Data = in + + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: auditEvent, + } + + e2, err := formatter.Process(namespace.RootContext(nil), e) + require.NoErrorf(t, err, "bad: %s\nerr: %s", name, err) + + jsonxBytes, ok := e2.Format(JSONxFormat.String()) + require.True(t, ok) + require.Positive(t, len(jsonxBytes)) + + if !strings.HasPrefix(string(jsonxBytes), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) + } + + if !strings.HasSuffix(strings.TrimSpace(string(jsonxBytes)), string(tc.ExpectedStr)) { + t.Fatalf( + "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", + name, strings.TrimSpace(string(jsonxBytes)), string(tc.ExpectedStr)) + } + } +} + +// TestEntryFormatter_FormatResponse_ElideListResponses ensures that we correctly +// elide data in responses to LIST operations. 
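+// +// For example, with eliding enabled, response data such as: +// +//	{"keys": ["a", "b"], "key_info": {"a": "x", "b": "y"}} +// +// should be reduced to entry counts: +// +//	{"keys": 2, "key_info": 2}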
+func TestEntryFormatter_FormatResponse_ElideListResponses(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + inputData map[string]any + expectedData map[string]any + }{ + "nil data": { + nil, + nil, + }, + "Normal list (keys only)": { + map[string]any{ + "keys": []string{"foo", "bar", "baz"}, + }, + map[string]any{ + "keys": 3, + }, + }, + "Enhanced list (has key_info)": { + map[string]any{ + "keys": []string{"foo", "bar", "baz", "quux"}, + "key_info": map[string]any{ + "foo": "alpha", + "bar": "beta", + "baz": "gamma", + "quux": "delta", + }, + }, + map[string]any{ + "keys": 4, + "key_info": 4, + }, + }, + "Unconventional other values in a list response are not touched": { + map[string]any{ + "keys": []string{"foo", "bar"}, + "something_else": "baz", + }, + map[string]any{ + "keys": 2, + "something_else": "baz", + }, + }, + "Conventional values in a list response are not elided if their data types are unconventional": { + map[string]any{ + "keys": map[string]any{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + map[string]any{ + "keys": map[string]any{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + }, + } + + oneInterestingTestCase := tests["Enhanced list (has key_info)"] + + ss := newStaticSalt(t) + ctx := namespace.RootContext(context.Background()) + var formatter *EntryFormatter + var err error + + format := func(t *testing.T, config FormatterConfig, operation logical.Operation, inputData map[string]any) *ResponseEntry { + formatter, err = NewEntryFormatter("juan", config, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Request: &logical.Request{Operation: operation}, + Response: &logical.Response{Data: inputData}, + } + + resp, err := formatter.FormatResponse(ctx, in, &testTimeProvider{}) + require.NoError(t, err) + + return resp + } + + t.Run("Default case", func(t *testing.T) { + config, err := NewFormatterConfig(&testHeaderFormatter{}, WithElision(true)) + require.NoError(t, err) + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + entry := format(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, formatter.hashExpectedValueForComparison(tc.expectedData), entry.Response.Data) + }) + } + }) + + t.Run("When Operation is not list, eliding does not happen", func(t *testing.T) { + config, err := NewFormatterConfig(&testHeaderFormatter{}, WithElision(true)) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, logical.ReadOperation, tc.inputData) + assert.Equal(t, formatter.hashExpectedValueForComparison(tc.inputData), entry.Response.Data) + }) + + t.Run("When ElideListResponses is false, eliding does not happen", func(t *testing.T) { + config, err := NewFormatterConfig(&testHeaderFormatter{}, WithElision(false), WithFormat(JSONFormat.String())) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, formatter.hashExpectedValueForComparison(tc.inputData), entry.Response.Data) + }) + + t.Run("When Raw is true, eliding still happens", func(t *testing.T) { + config, err := NewFormatterConfig(&testHeaderFormatter{}, WithElision(true), WithRaw(true), WithFormat(JSONFormat.String())) + require.NoError(t, err) + tc := oneInterestingTestCase + entry := format(t, config, 
logical.ListOperation, tc.inputData) + assert.Equal(t, tc.expectedData, entry.Response.Data) + }) +} + +// TestEntryFormatter_Process_NoMutation tests that the event returned by an +// EntryFormatter.Process method is not the same as the one that it accepted. +func TestEntryFormatter_Process_NoMutation(t *testing.T) { + t.Parallel() + + // Create the formatter node. + cfg, err := NewFormatterConfig(&testHeaderFormatter{}) + require.NoError(t, err) + ss := newStaticSalt(t) + formatter, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + } + + e := fakeEvent(t, RequestType, in) + + e2, err := formatter.Process(namespace.RootContext(nil), e) + require.NoError(t, err) + require.NotNil(t, e2) + + // Ensure the pointers are different. + require.NotEqual(t, e2, e) + + // Do the same for the audit event in the payload. + a, ok := e.Payload.(*AuditEvent) + require.True(t, ok) + require.NotNil(t, a) + + a2, ok := e2.Payload.(*AuditEvent) + require.True(t, ok) + require.NotNil(t, a2) + + require.NotEqual(t, a2, a) +} + +// TestEntryFormatter_Process_Panic tries to send data into the EntryFormatter +// which will currently cause a panic when a response is formatted due to the +// underlying hashing that is done with reflectwalk. +func TestEntryFormatter_Process_Panic(t *testing.T) { + t.Parallel() + + // Create the formatter node. + cfg, err := NewFormatterConfig(&testHeaderFormatter{}) + require.NoError(t, err) + ss := newStaticSalt(t) + formatter, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) + require.NoError(t, err) + require.NotNil(t, formatter) + + // The secret sauce, create a bad addr. + // see: https://github.com/hashicorp/vault/issues/16462 + badAddr, err := sockaddr.NewSockAddr("10.10.10.2/32 10.10.10.3/32") + require.NoError(t, err) + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + Data: map[string]interface{}{}, + }, + Response: &logical.Response{ + Data: map[string]any{ + "token_bound_cidrs": []*sockaddr.SockAddrMarshaler{ + {SockAddr: badAddr}, + }, + }, + }, + } + + e := fakeEvent(t, ResponseType, in) + + e2, err := formatter.Process(namespace.RootContext(nil), e) + require.Error(t, err) + require.Contains(t, err.Error(), "panic generating audit log: \"juan\"") + require.Nil(t, e2) +} + +// TestEntryFormatter_NewFormatterConfig_NilHeaderFormatter ensures we cannot +// create a FormatterConfig using NewFormatterConfig if we supply a nil formatter. 
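+// +// (A header formatter is required because EntryFormatter.Process relies on it to +// apply the audited-headers configuration to incoming requests.)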
+func TestEntryFormatter_NewFormatterConfig_NilHeaderFormatter(t *testing.T) { +	_, err := NewFormatterConfig(nil) +	require.Error(t, err) +} + +// TestEntryFormatter_Process_NeverLeaksHeaders ensures that we never accidentally +// leak headers if applying them means we don't have any. This is more like a sense +// check to ensure the returned event doesn't somehow end up with the headers 'back'. +func TestEntryFormatter_Process_NeverLeaksHeaders(t *testing.T) { +	t.Parallel() + +	// Create the formatter node. +	cfg, err := NewFormatterConfig(&testHeaderFormatter{shouldReturnEmpty: true}) +	require.NoError(t, err) +	ss := newStaticSalt(t) +	formatter, err := NewEntryFormatter("juan", cfg, ss, hclog.NewNullLogger()) +	require.NoError(t, err) +	require.NotNil(t, formatter) + +	// Set up the input and verify we have a single foo:bar header. +	var input *logical.LogInput +	err = json.Unmarshal([]byte(testFormatJSONReqBasicStrFmt), &input) +	require.NoError(t, err) +	require.NotNil(t, input) +	require.ElementsMatch(t, input.Request.Headers["foo"], []string{"bar"}) + +	e := fakeEvent(t, RequestType, input) + +	// Process the node. +	ctx := namespace.RootContext(context.Background()) +	e2, err := formatter.Process(ctx, e) +	require.NoError(t, err) +	require.NotNil(t, e2) + +	// Now check we can retrieve the formatted JSON. +	jsonFormatted, b2 := e2.Format(JSONFormat.String()) +	require.True(t, b2) +	require.NotNil(t, jsonFormatted) +	var input2 *logical.LogInput +	err = json.Unmarshal(jsonFormatted, &input2) +	require.NoError(t, err) +	require.NotNil(t, input2) +	require.Len(t, input2.Request.Headers, 0) +} + +// hashExpectedValueForComparison replicates enough of the audit HMAC process on a piece of expected data in a test, +// so that we can use assert.Equal to compare the expected and output values. +func (f *EntryFormatter) hashExpectedValueForComparison(input map[string]any) map[string]any { +	// Copy input before modifying, since we may re-use the same data in another test +	copied, err := copystructure.Copy(input) +	if err != nil { +		panic(err) +	} +	copiedAsMap := copied.(map[string]any) + +	s, err := f.salter.Salt(context.Background()) +	if err != nil { +		panic(err) +	} + +	err = hashMap(s.GetIdentifiedHMAC, copiedAsMap, nil) +	if err != nil { +		panic(err) +	} + +	return copiedAsMap +} + +// fakeEvent will return a new fake event containing audit data based on the +// specified subtype and logical.LogInput. +func fakeEvent(tb testing.TB, subtype subtype, input *logical.LogInput) *eventlogger.Event { +	tb.Helper() + +	date := time.Date(2023, time.July, 11, 15, 49, 10, 0o0, time.Local) + +	auditEvent, err := NewEvent(subtype, +		WithID("123"), +		WithNow(date), +	) +	require.NoError(tb, err) +	require.NotNil(tb, auditEvent) +	require.Equal(tb, "123", auditEvent.ID) +	require.Equal(tb, "v0.1", auditEvent.Version) +	require.Equal(tb, subtype, auditEvent.Subtype) +	require.Equal(tb, date, auditEvent.Timestamp) + +	auditEvent.Data = input + +	e := &eventlogger.Event{ +		Type:      eventlogger.EventType(event.AuditType), +		CreatedAt: auditEvent.Timestamp, +		Formatted: make(map[string][]byte), +		Payload:   auditEvent, +	} + +	return e +} + +// newStaticSalt returns a new staticSalt for use in testing. +func newStaticSalt(tb testing.TB) *staticSalt { +	s, err := salt.NewSalt(context.Background(), nil, nil) +	require.NoError(tb, err) + +	return &staticSalt{salt: s} +} + +// staticSalt is a struct which can be used to obtain a static salt.
+// a salt must be assigned when the struct is initialized. +type staticSalt struct { + salt *salt.Salt +} + +// Salt returns the static salt and no error. +func (s *staticSalt) Salt(_ context.Context) (*salt.Salt, error) { + return s.salt, nil +} diff --git a/audit/errors.go b/audit/errors.go new file mode 100644 index 000000000000..be4e879fd2f1 --- /dev/null +++ b/audit/errors.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import "errors" + +var ( + // ErrInternal should be used to represent an unexpected error that occurred + // within the audit system. + ErrInternal = errors.New("audit system internal error") + + // ErrInvalidParameter should be used to represent an error in which the + // internal audit system is receiving invalid parameters from other parts of + // Vault which should have already been validated. + ErrInvalidParameter = errors.New("invalid internal parameter") + + // ErrExternalOptions should be used to represent an error related to + // invalid configuration provided to Vault (i.e. by the Vault Operator). + ErrExternalOptions = errors.New("invalid configuration") +) + +// ConvertToExternalError handles converting an error that was generated in Vault +// and should appear as-is in the server logs, to an error that can be returned to +// calling clients (via the API/CLI). +func ConvertToExternalError(err error) error { + // If the error is an internal error, the contents will have been logged, and + // we should probably shield the caller from the details. + if errors.Is(err, ErrInternal) { + return ErrInternal + } + + return err +} diff --git a/audit/errors_test.go b/audit/errors_test.go new file mode 100644 index 000000000000..2d6314843402 --- /dev/null +++ b/audit/errors_test.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestErrors_ConvertToExternalError is used to check that we 'mute' errors which +// have an internal error in their tree. +func TestErrors_ConvertToExternalError(t *testing.T) { + t.Parallel() + + err := fmt.Errorf("wrap this error: %w", ErrInternal) + res := ConvertToExternalError(err) + require.EqualError(t, res, "audit system internal error") + + err = fmt.Errorf("test: %w", errors.New("this is just an error")) + res = ConvertToExternalError(err) + require.Equal(t, "test: this is just an error", res.Error()) +} diff --git a/audit/event.go b/audit/event.go new file mode 100644 index 000000000000..f3d0d3cbcd0e --- /dev/null +++ b/audit/event.go @@ -0,0 +1,167 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" +) + +// version defines the version of audit events. +const version = "v0.1" + +// Audit subtypes. +const ( + RequestType subtype = "AuditRequest" + ResponseType subtype = "AuditResponse" +) + +// Audit formats. +const ( + JSONFormat format = "json" + JSONxFormat format = "jsonx" +) + +// Check AuditEvent implements the timeProvider at compile time. +var _ timeProvider = (*AuditEvent)(nil) + +// AuditEvent is the audit event. +type AuditEvent struct { + ID string `json:"id"` + Version string `json:"version"` + Subtype subtype `json:"subtype"` // the subtype of the audit event. 
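+ Timestamp time.Time `json:"timestamp"` + Data *logical.LogInput `json:"data"` +}

The errors.go additions above establish sentinel errors plus ConvertToExternalError, so internal failures can be logged in full while API clients only ever see the sanitized sentinel. A hedged sketch of the intended call-site pattern; handleAuditFailure and its logger are hypothetical, assumed to live in package audit with fmt and hclog imported:

```go
// handleAuditFailure is hypothetical; it shows the intended split between
// what gets logged server-side and what is returned to the API caller.
func handleAuditFailure(logger hclog.Logger) error {
	// Wrap the sentinel so errors.Is(err, ErrInternal) holds.
	err := fmt.Errorf("salt lookup failed: %w", ErrInternal)

	// The full detail is logged where operators can see it...
	logger.Error("audit request failed", "error", err)

	// ...while the caller receives only "audit system internal error".
	return ConvertToExternalError(err)
}
```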
+// format defines the types of format that audit events support. +type format string + +// subtype defines the type of audit event. +type subtype string + +// NewEvent should be used to create an audit event. The subtype is required; +// an ID will be generated if none is supplied. Supported options: WithID, WithNow. +func NewEvent(s subtype, opt ...Option) (*AuditEvent, error) { + // Get the default options + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + if opts.withID == "" { + var err error + + opts.withID, err = event.NewID(string(event.AuditType)) + if err != nil { + return nil, fmt.Errorf("error creating ID for event: %w", err) + } + } + + audit := &AuditEvent{ + ID: opts.withID, + Timestamp: opts.withNow, + Version: version, + Subtype: s, + } + + if err := audit.validate(); err != nil { + return nil, err + } + return audit, nil +} + +// validate attempts to ensure the audit event in its present state is valid. +func (a *AuditEvent) validate() error { + if a == nil { + return fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + if a.ID == "" { + return fmt.Errorf("missing ID: %w", ErrInvalidParameter) + } + + if a.Version != version { + return fmt.Errorf("event version unsupported: %w", ErrInvalidParameter) + } + + if a.Timestamp.IsZero() { + return fmt.Errorf("event timestamp cannot be the zero time instant: %w", ErrInvalidParameter) + } + + err := a.Subtype.validate() + if err != nil { + return err + } + + return nil +} + +// validate ensures that subtype is one of the set of allowed event subtypes. +func (t subtype) validate() error { + switch t { + case RequestType, ResponseType: + return nil + default: + return fmt.Errorf("invalid event subtype %q: %w", t, ErrInvalidParameter) + } +} + +// validate ensures that format is one of the set of allowed event formats. +func (f format) validate() error { + switch f { + case JSONFormat, JSONxFormat: + return nil + default: + return fmt.Errorf("invalid format %q: %w", f, ErrInvalidParameter) + } +} + +// String returns the string version of a format. +func (f format) String() string { + return string(f) +} + +// MetricTag returns a tag corresponding to this subtype to include in metrics. +// If a tag cannot be found, the value is returned 'as-is' in string format. +func (t subtype) MetricTag() string { + switch t { + case RequestType: + return "log_request" + case ResponseType: + return "log_response" + } + + return t.String() +} + +// String returns the subtype as a human-readable string. +func (t subtype) String() string { + switch t { + case RequestType: + return "request" + case ResponseType: + return "response" + } + + return string(t) +} + +// formattedTime returns the UTC time the AuditEvent was created in the RFC3339Nano +// format (which removes trailing zeros from the seconds field). +func (a *AuditEvent) formattedTime() string { + return a.Timestamp.UTC().Format(time.RFC3339Nano) +} + +// IsValidFormat provides a means to validate whether the supplied format is valid. +// Examples of valid formats are JSON and JSONx. +func IsValidFormat(v string) bool { + err := format(strings.TrimSpace(strings.ToLower(v))).validate() + return err == nil +}
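Tying the event.go pieces together, a short in-package sketch of creating and inspecting an event (the ID value is illustrative):

```go
// a minimal sketch, assuming it runs inside package audit
e, err := NewEvent(RequestType, WithID("audit_123"), WithNow(time.Now()))
if err != nil {
	return err
}

fmt.Println(e.Version)             // v0.1
fmt.Println(e.Subtype.String())    // request
fmt.Println(e.Subtype.MetricTag()) // log_request
```

diff --git a/audit/event_test.go b/audit/event_test.go new file mode 100644 index 000000000000..9dc8d6b6a726 --- /dev/null +++ b/audit/event_test.go @@ -0,0 +1,446 @@ +// Copyright (c) HashiCorp, Inc.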
+// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestAuditEvent_new exercises the newEvent func to create audit events. +func TestAuditEvent_new(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Options []Option + Subtype subtype + Format format + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + ExpectedFormat format + ExpectedSubtype subtype + ExpectedTimestamp time.Time + IsNowExpected bool + }{ + "nil": { + Options: nil, + Subtype: subtype(""), + Format: format(""), + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "empty-Option": { + Options: []Option{}, + Subtype: subtype(""), + Format: format(""), + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "bad-id": { + Options: []Option{WithID("")}, + Subtype: ResponseType, + Format: JSONFormat, + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "good": { + Options: []Option{ + WithID("audit_123"), + WithFormat(string(JSONFormat)), + WithSubtype(string(ResponseType)), + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + Subtype: RequestType, + Format: JSONxFormat, + IsErrorExpected: false, + ExpectedID: "audit_123", + ExpectedTimestamp: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + ExpectedSubtype: RequestType, + ExpectedFormat: JSONxFormat, + }, + "good-no-time": { + Options: []Option{ + WithID("audit_123"), + WithFormat(string(JSONFormat)), + WithSubtype(string(ResponseType)), + }, + Subtype: RequestType, + Format: JSONxFormat, + IsErrorExpected: false, + ExpectedID: "audit_123", + ExpectedSubtype: RequestType, + ExpectedFormat: JSONxFormat, + IsNowExpected: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + audit, err := NewEvent(tc.Subtype, tc.Options...) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, audit) + default: + require.NoError(t, err) + require.NotNil(t, audit) + require.Equal(t, tc.ExpectedID, audit.ID) + require.Equal(t, tc.ExpectedSubtype, audit.Subtype) + switch { + case tc.IsNowExpected: + require.True(t, time.Now().After(audit.Timestamp)) + require.False(t, audit.Timestamp.IsZero()) + default: + require.Equal(t, tc.ExpectedTimestamp, audit.Timestamp) + } + } + }) + } +} + +// TestAuditEvent_Validate exercises the validation for an audit event. 
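The Options exercised above (WithID, WithNow, and the WithFormat/WithSubtype variants) follow Go's functional-options pattern. Their real implementations live in this package's options file, which is not part of this diff; the sketch below is only a guess at their shape, kept consistent with the "id cannot be empty" error asserted in the bad-id case:

```go
// illustrative only: not the actual options.go from this package.
type options struct {
	withID  string
	withNow time.Time
}

type Option func(*options) error

func WithID(id string) Option {
	return func(o *options) error {
		if strings.TrimSpace(id) == "" {
			return errors.New("id cannot be empty")
		}
		o.withID = id
		return nil
	}
}

func getOpts(opt ...Option) (options, error) {
	// Default the timestamp to now, matching the "good-no-time" case above.
	opts := options{withNow: time.Now()}
	for _, o := range opt {
		if err := o(&opts); err != nil {
			return options{}, err
		}
	}
	return opts, nil
}
```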
+func TestAuditEvent_Validate(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value *AuditEvent + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "nil": { + Value: nil, + IsErrorExpected: true, + ExpectedErrorMessage: "event is nil: invalid internal parameter", + }, + "default": { + Value: &AuditEvent{}, + IsErrorExpected: true, + ExpectedErrorMessage: "missing ID: invalid internal parameter", + }, + "id-empty": { + Value: &AuditEvent{ + ID: "", + Version: version, + Subtype: RequestType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "missing ID: invalid internal parameter", + }, + "version-fiddled": { + Value: &AuditEvent{ + ID: "audit_123", + Version: "magic-v2", + Subtype: RequestType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "event version unsupported: invalid internal parameter", + }, + "subtype-fiddled": { + Value: &AuditEvent{ + ID: "audit_123", + Version: version, + Subtype: subtype("moon"), + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"moon\": invalid internal parameter", + }, + "default-time": { + Value: &AuditEvent{ + ID: "audit_123", + Version: version, + Subtype: ResponseType, + Timestamp: time.Time{}, + Data: nil, + }, + IsErrorExpected: true, + ExpectedErrorMessage: "event timestamp cannot be the zero time instant: invalid internal parameter", + }, + "valid": { + Value: &AuditEvent{ + ID: "audit_123", + Version: version, + Subtype: ResponseType, + Timestamp: time.Now(), + Data: nil, + }, + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := tc.Value.validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Validate_Subtype exercises the validation for an audit event's subtype. +func TestAuditEvent_Validate_Subtype(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"\": invalid internal parameter", + }, + "unsupported": { + Value: "foo", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid event subtype \"foo\": invalid internal parameter", + }, + "request": { + Value: "AuditRequest", + IsErrorExpected: false, + }, + "response": { + Value: "AuditResponse", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := subtype(tc.Value).validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Validate_Format exercises the validation for an audit event's format. 
+func TestAuditEvent_Validate_Format(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid format \"\": invalid internal parameter", + }, + "unsupported": { + Value: "foo", + IsErrorExpected: true, + ExpectedErrorMessage: "invalid format \"foo\": invalid internal parameter", + }, + "json": { + Value: "json", + IsErrorExpected: false, + }, + "jsonx": { + Value: "jsonx", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := format(tc.Value).validate() + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + } + }) + } +} + +// TestAuditEvent_Subtype_MetricTag is used to ensure that we get the string value +// we expect for a subtype when we want to use it as a metrics tag. +// In some strange scenario where the subtype was never validated, it is technically +// possible to get a value that isn't related to request/response, but this shouldn't +// really be happening, so we will return it as is. +func TestAuditEvent_Subtype_MetricTag(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expectedOutput string + }{ + "request": { + input: "AuditRequest", + expectedOutput: "log_request", + }, + "response": { + input: "AuditResponse", + expectedOutput: "log_response", + }, + "non-validated": { + input: "juan", + expectedOutput: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + st := subtype(tc.input) + tag := st.MetricTag() + require.Equal(t, tc.expectedOutput, tag) + }) + } +} + +// TestAuditEvent_Subtype_String is used to ensure that we get the string value +// we expect for a subtype when it is used with the Stringer interface. +// e.g. an AuditRequest subtype is 'request' +func TestAuditEvent_Subtype_String(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expectedOutput string + }{ + "request": { + input: "AuditRequest", + expectedOutput: "request", + }, + "response": { + input: "AuditResponse", + expectedOutput: "response", + }, + "non-validated": { + input: "juan", + expectedOutput: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + st := subtype(tc.input) + require.Equal(t, tc.expectedOutput, st.String()) + }) + } +} + +// TestAuditEvent_formattedTime is used to check the output from the formattedTime +// method returns the correct format. +func TestAuditEvent_formattedTime(t *testing.T) { + theTime := time.Date(2024, time.March, 22, 10, 0o0, 5, 10, time.UTC) + a, err := NewEvent(ResponseType, WithNow(theTime)) + require.NoError(t, err) + require.NotNil(t, a) + require.Equal(t, "2024-03-22T10:00:05.00000001Z", a.formattedTime()) +} + +// TestEvent_IsValidFormat ensures that we can correctly determine valid and +// invalid formats. 
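As the next test shows, IsValidFormat is forgiving about case and whitespace because it normalizes input with strings.TrimSpace and strings.ToLower before validating. A quick in-package, test-style illustration:

```go
// illustrative snippet, assuming it sits inside a test in package audit
require.True(t, IsValidFormat(" JSONX ")) // trimmed and lower-cased to "jsonx"
require.True(t, IsValidFormat("Json"))    // lower-cased to "json"
require.False(t, IsValidFormat("yaml"))   // not a supported audit format
```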
+func TestEvent_IsValidFormat(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + input string + expected bool + }{ + "empty": { + input: "", + expected: false, + }, + "whitespace": { + input: " ", + expected: false, + }, + "invalid-test": { + input: "test", + expected: false, + }, + "valid-json": { + input: "json", + expected: true, + }, + "upper-json": { + input: "JSON", + expected: true, + }, + "mixed-json": { + input: "Json", + expected: true, + }, + "spacey-json": { + input: " json ", + expected: true, + }, + "valid-jsonx": { + input: "jsonx", + expected: true, + }, + "upper-jsonx": { + input: "JSONX", + expected: true, + }, + "mixed-jsonx": { + input: "JsonX", + expected: true, + }, + "spacey-jsonx": { + input: " jsonx ", + expected: true, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + res := IsValidFormat(tc.input) + require.Equal(t, tc.expected, res) + }) + } +} diff --git a/audit/format.go b/audit/format.go deleted file mode 100644 index cbc7f8a06ba4..000000000000 --- a/audit/format.go +++ /dev/null @@ -1,518 +0,0 @@ -package audit - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "strings" - "time" - - squarejwt "gopkg.in/square/go-jose.v2/jwt" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -type AuditFormatWriter interface { - // WriteRequest writes the request entry to the writer or returns an error. - WriteRequest(io.Writer, *AuditRequestEntry) error - // WriteResponse writes the response entry to the writer or returns an error. - WriteResponse(io.Writer, *AuditResponseEntry) error - // Salt returns a non-nil salt or an error. - Salt(context.Context) (*salt.Salt, error) -} - -// AuditFormatter implements the Formatter interface, and allows the underlying -// marshaller to be swapped out -type AuditFormatter struct { - AuditFormatWriter -} - -var _ Formatter = (*AuditFormatter)(nil) - -func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { - if in == nil || in.Request == nil { - return fmt.Errorf("request to request-audit a nil request") - } - - if w == nil { - return fmt.Errorf("writer for audit request is nil") - } - - if f.AuditFormatWriter == nil { - return fmt.Errorf("no format writer specified") - } - - salt, err := f.Salt(ctx) - if err != nil { - return fmt.Errorf("error fetching salt: %w", err) - } - - // Set these to the input values at first - auth := in.Auth - req := in.Request - var connState *tls.ConnectionState - if auth == nil { - auth = new(logical.Auth) - } - - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - connState = in.Request.Connection.ConnState - } - - if !config.Raw { - auth, err = HashAuth(salt, auth, config.HMACAccessor) - if err != nil { - return err - } - - req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) - if err != nil { - return err - } - } - - var errString string - if in.OuterErr != nil { - errString = in.OuterErr.Error() - } - - ns, err := namespace.FromContext(ctx) - if err != nil { - return err - } - - reqType := in.Type - if reqType == "" { - reqType = "request" - } - reqEntry := &AuditRequestEntry{ - Type: reqType, - Error: errString, - - Auth: &AuditAuth{ - ClientToken: auth.ClientToken, - Accessor: auth.Accessor, - DisplayName: auth.DisplayName, - Policies: auth.Policies, - TokenPolicies: auth.TokenPolicies, - 
IdentityPolicies: auth.IdentityPolicies, - ExternalNamespacePolicies: auth.ExternalNamespacePolicies, - NoDefaultPolicy: auth.NoDefaultPolicy, - Metadata: auth.Metadata, - EntityID: auth.EntityID, - RemainingUses: req.ClientTokenRemainingUses, - TokenType: auth.TokenType.String(), - TokenTTL: int64(auth.TTL.Seconds()), - }, - - Request: &AuditRequest{ - ID: req.ID, - ClientID: req.ClientID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - Operation: req.Operation, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Namespace: &AuditNamespace{ - ID: ns.ID, - Path: ns.Path, - }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - RemotePort: getRemotePort(req), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, - ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), - }, - } - - if !auth.IssueTime.IsZero() { - reqEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) - } - - if auth.PolicyResults != nil { - reqEntry.Auth.PolicyResults = &AuditPolicyResults{ - Allowed: auth.PolicyResults.Allowed, - } - - for _, p := range auth.PolicyResults.GrantingPolicies { - reqEntry.Auth.PolicyResults.GrantingPolicies = append(reqEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ - Name: p.Name, - NamespaceId: p.NamespaceId, - Type: p.Type, - }) - } - } - - if req.WrapInfo != nil { - reqEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) - } - - if !config.OmitTime { - reqEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) - } - - return f.AuditFormatWriter.WriteRequest(w, reqEntry) -} - -func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { - if in == nil || in.Request == nil { - return fmt.Errorf("request to response-audit a nil request") - } - - if w == nil { - return fmt.Errorf("writer for audit request is nil") - } - - if f.AuditFormatWriter == nil { - return fmt.Errorf("no format writer specified") - } - - salt, err := f.Salt(ctx) - if err != nil { - return fmt.Errorf("error fetching salt: %w", err) - } - - // Set these to the input values at first - auth, req, resp := in.Auth, in.Request, in.Response - if auth == nil { - auth = new(logical.Auth) - } - if resp == nil { - resp = new(logical.Response) - } - var connState *tls.ConnectionState - - if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { - connState = in.Request.Connection.ConnState - } - - if !config.Raw { - auth, err = HashAuth(salt, auth, config.HMACAccessor) - if err != nil { - return err - } - - req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) - if err != nil { - return err - } - - resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys) - if err != nil { - return err - } - } - - var errString string - if in.OuterErr != nil { - errString = in.OuterErr.Error() - } - - ns, err := namespace.FromContext(ctx) - if err != nil { - return err - } - - var respAuth *AuditAuth - if resp.Auth != nil { - respAuth = &AuditAuth{ - ClientToken: resp.Auth.ClientToken, - Accessor: resp.Auth.Accessor, - DisplayName: resp.Auth.DisplayName, - Policies: resp.Auth.Policies, - TokenPolicies: resp.Auth.TokenPolicies, - IdentityPolicies: resp.Auth.IdentityPolicies, - ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, - NoDefaultPolicy: resp.Auth.NoDefaultPolicy, - Metadata: resp.Auth.Metadata, - NumUses: resp.Auth.NumUses, - EntityID: 
resp.Auth.EntityID, - TokenType: resp.Auth.TokenType.String(), - TokenTTL: int64(resp.Auth.TTL.Seconds()), - } - if !resp.Auth.IssueTime.IsZero() { - respAuth.TokenIssueTime = resp.Auth.IssueTime.Format(time.RFC3339) - } - } - - var respSecret *AuditSecret - if resp.Secret != nil { - respSecret = &AuditSecret{ - LeaseID: resp.Secret.LeaseID, - } - } - - var respWrapInfo *AuditResponseWrapInfo - if resp.WrapInfo != nil { - token := resp.WrapInfo.Token - if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { - token = *jwtToken - } - respWrapInfo = &AuditResponseWrapInfo{ - TTL: int(resp.WrapInfo.TTL / time.Second), - Token: token, - Accessor: resp.WrapInfo.Accessor, - CreationTime: resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), - CreationPath: resp.WrapInfo.CreationPath, - WrappedAccessor: resp.WrapInfo.WrappedAccessor, - } - } - - respType := in.Type - if respType == "" { - respType = "response" - } - respEntry := &AuditResponseEntry{ - Type: respType, - Error: errString, - Auth: &AuditAuth{ - ClientToken: auth.ClientToken, - Accessor: auth.Accessor, - DisplayName: auth.DisplayName, - Policies: auth.Policies, - TokenPolicies: auth.TokenPolicies, - IdentityPolicies: auth.IdentityPolicies, - ExternalNamespacePolicies: auth.ExternalNamespacePolicies, - NoDefaultPolicy: auth.NoDefaultPolicy, - Metadata: auth.Metadata, - RemainingUses: req.ClientTokenRemainingUses, - EntityID: auth.EntityID, - EntityCreated: auth.EntityCreated, - TokenType: auth.TokenType.String(), - TokenTTL: int64(auth.TTL.Seconds()), - }, - - Request: &AuditRequest{ - ID: req.ID, - ClientToken: req.ClientToken, - ClientTokenAccessor: req.ClientTokenAccessor, - ClientID: req.ClientID, - Operation: req.Operation, - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Namespace: &AuditNamespace{ - ID: ns.ID, - Path: ns.Path, - }, - Path: req.Path, - Data: req.Data, - PolicyOverride: req.PolicyOverride, - RemoteAddr: getRemoteAddr(req), - RemotePort: getRemotePort(req), - ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), - ReplicationCluster: req.ReplicationCluster, - Headers: req.Headers, - }, - - Response: &AuditResponse{ - MountType: req.MountType, - MountAccessor: req.MountAccessor, - Auth: respAuth, - Secret: respSecret, - Data: resp.Data, - Warnings: resp.Warnings, - Redirect: resp.Redirect, - WrapInfo: respWrapInfo, - Headers: resp.Headers, - }, - } - - if auth.PolicyResults != nil { - respEntry.Auth.PolicyResults = &AuditPolicyResults{ - Allowed: auth.PolicyResults.Allowed, - } - - for _, p := range auth.PolicyResults.GrantingPolicies { - respEntry.Auth.PolicyResults.GrantingPolicies = append(respEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ - Name: p.Name, - NamespaceId: p.NamespaceId, - Type: p.Type, - }) - } - } - - if !auth.IssueTime.IsZero() { - respEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) - } - if req.WrapInfo != nil { - respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) - } - - if !config.OmitTime { - respEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) - } - - return f.AuditFormatWriter.WriteResponse(w, respEntry) -} - -// AuditRequestEntry is the structure of a request audit log entry in Audit. 
-type AuditRequestEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Error string `json:"error,omitempty"` -} - -// AuditResponseEntry is the structure of a response audit log entry in Audit. -type AuditResponseEntry struct { - Time string `json:"time,omitempty"` - Type string `json:"type,omitempty"` - Auth *AuditAuth `json:"auth,omitempty"` - Request *AuditRequest `json:"request,omitempty"` - Response *AuditResponse `json:"response,omitempty"` - Error string `json:"error,omitempty"` -} - -type AuditRequest struct { - ID string `json:"id,omitempty"` - ClientID string `json:"client_id,omitempty"` - ReplicationCluster string `json:"replication_cluster,omitempty"` - Operation logical.Operation `json:"operation,omitempty"` - MountType string `json:"mount_type,omitempty"` - MountAccessor string `json:"mount_accessor,omitempty"` - ClientToken string `json:"client_token,omitempty"` - ClientTokenAccessor string `json:"client_token_accessor,omitempty"` - Namespace *AuditNamespace `json:"namespace,omitempty"` - Path string `json:"path,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - PolicyOverride bool `json:"policy_override,omitempty"` - RemoteAddr string `json:"remote_address,omitempty"` - RemotePort int `json:"remote_port,omitempty"` - WrapTTL int `json:"wrap_ttl,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` - ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` -} - -type AuditResponse struct { - Auth *AuditAuth `json:"auth,omitempty"` - MountType string `json:"mount_type,omitempty"` - MountAccessor string `json:"mount_accessor,omitempty"` - Secret *AuditSecret `json:"secret,omitempty"` - Data map[string]interface{} `json:"data,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Redirect string `json:"redirect,omitempty"` - WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` - Headers map[string][]string `json:"headers,omitempty"` -} - -type AuditAuth struct { - ClientToken string `json:"client_token,omitempty"` - Accessor string `json:"accessor,omitempty"` - DisplayName string `json:"display_name,omitempty"` - Policies []string `json:"policies,omitempty"` - TokenPolicies []string `json:"token_policies,omitempty"` - IdentityPolicies []string `json:"identity_policies,omitempty"` - ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` - NoDefaultPolicy bool `json:"no_default_policy,omitempty"` - PolicyResults *AuditPolicyResults `json:"policy_results,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - NumUses int `json:"num_uses,omitempty"` - RemainingUses int `json:"remaining_uses,omitempty"` - EntityID string `json:"entity_id,omitempty"` - EntityCreated bool `json:"entity_created,omitempty"` - TokenType string `json:"token_type,omitempty"` - TokenTTL int64 `json:"token_ttl,omitempty"` - TokenIssueTime string `json:"token_issue_time,omitempty"` -} - -type AuditPolicyResults struct { - Allowed bool `json:"allowed"` - GrantingPolicies []PolicyInfo `json:"granting_policies,omitempty"` -} - -type PolicyInfo struct { - Name string `json:"name,omitempty"` - NamespaceId string `json:"namespace_id,omitempty"` - Type string `json:"type"` -} - -type AuditSecret struct { - LeaseID string `json:"lease_id,omitempty"` -} - -type AuditResponseWrapInfo struct { - TTL int `json:"ttl,omitempty"` - Token string 
`json:"token,omitempty"` - Accessor string `json:"accessor,omitempty"` - CreationTime string `json:"creation_time,omitempty"` - CreationPath string `json:"creation_path,omitempty"` - WrappedAccessor string `json:"wrapped_accessor,omitempty"` -} - -type AuditNamespace struct { - ID string `json:"id,omitempty"` - Path string `json:"path,omitempty"` -} - -// getRemoteAddr safely gets the remote address avoiding a nil pointer -func getRemoteAddr(req *logical.Request) string { - if req != nil && req.Connection != nil { - return req.Connection.RemoteAddr - } - return "" -} - -// getRemotePort safely gets the remote port avoiding a nil pointer -func getRemotePort(req *logical.Request) int { - if req != nil && req.Connection != nil { - return req.Connection.RemotePort - } - return 0 -} - -func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { - if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { - return "" - } - - return connState.VerifiedChains[0][0].SerialNumber.String() -} - -// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could -// extract the original token ID from inside -func parseVaultTokenFromJWT(token string) *string { - if strings.Count(token, ".") != 2 { - return nil - } - - parsedJWT, err := squarejwt.ParseSigned(token) - if err != nil { - return nil - } - - var claims squarejwt.Claims - if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { - return nil - } - - return &claims.ID -} - -// Create a formatter not backed by a persistent salt. -func NewTemporaryFormatter(format, prefix string) *AuditFormatter { - temporarySalt := func(ctx context.Context) (*salt.Salt, error) { - return salt.NewNonpersistentSalt(), nil - } - ret := &AuditFormatter{} - - switch format { - case "jsonx": - ret.AuditFormatWriter = &JSONxFormatWriter{ - Prefix: prefix, - SaltFunc: temporarySalt, - } - default: - ret.AuditFormatWriter = &JSONFormatWriter{ - Prefix: prefix, - SaltFunc: temporarySalt, - } - } - return ret -} diff --git a/audit/format_json.go b/audit/format_json.go deleted file mode 100644 index 4003c05a7217..000000000000 --- a/audit/format_json.go +++ /dev/null @@ -1,53 +0,0 @@ -package audit - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/vault/sdk/helper/salt" -) - -// JSONFormatWriter is an AuditFormatWriter implementation that structures data into -// a JSON format. 
-type JSONFormatWriter struct { - Prefix string - SaltFunc func(context.Context) (*salt.Salt, error) -} - -func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { - if req == nil { - return fmt.Errorf("request entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - enc := json.NewEncoder(w) - return enc.Encode(req) -} - -func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { - if resp == nil { - return fmt.Errorf("response entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - enc := json.NewEncoder(w) - return enc.Encode(resp) -} - -func (f *JSONFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - return f.SaltFunc(ctx) -} diff --git a/audit/format_json_test.go b/audit/format_json_test.go deleted file mode 100644 index e4a703d12ad4..000000000000 --- a/audit/format_json_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package audit - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestFormatJSON_formatRequest(t *testing.T) { - salter, err := salt.NewSalt(context.Background(), nil, nil) - if err != nil { - t.Fatal(err) - } - saltFunc := func(context.Context) (*salt.Salt, error) { - return salter, nil - } - - expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo")) - - issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") - cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - ExpectedStr string - }{ - "auth, request": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - EntityID: "foobarentity", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - }, - errors.New("this is an error"), - "", - expectedResultStr, - }, - "auth, request with prefix": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - EntityID: "foobarentity", - DisplayName: "testtoken", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - }, - errors.New("this is an error"), - "@cee: ", - expectedResultStr, - }, - } - - for name, tc := range cases { - var buf bytes.Buffer - formatter := AuditFormatter{ - AuditFormatWriter: &JSONFormatWriter{ - Prefix: tc.Prefix, - SaltFunc: saltFunc, - }, - } - config := FormatterConfig{ - HMACAccessor: false, - } - in := &logical.LogInput{ - Auth: tc.Auth, - Request: 
tc.Req, - OuterErr: tc.Err, - } - if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { - t.Fatalf("bad: %s\nerr: %s", name, err) - } - - if !strings.HasPrefix(buf.String(), tc.Prefix) { - t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) - } - - expectedjson := new(AuditRequestEntry) - - if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { - t.Fatalf("bad json: %s", err) - } - expectedjson.Request.Namespace = &AuditNamespace{ID: "root"} - - actualjson := new(AuditRequestEntry) - if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil { - t.Fatalf("bad json: %s", err) - } - - expectedjson.Time = actualjson.Time - - expectedBytes, err := json.Marshal(expectedjson) - if err != nil { - t.Fatalf("unable to marshal json: %s", err) - } - - if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) { - t.Fatalf( - "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", - name, buf.String(), string(expectedBytes)) - } - } -} - -const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"no_default_policy":true,"metadata":null,"entity_id":"foobarentity","token_type":"service", "token_ttl": 14400, "token_issue_time": "2020-05-28T13:40:18-05:00"},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} -` diff --git a/audit/format_jsonx.go b/audit/format_jsonx.go deleted file mode 100644 index bff244099a9a..000000000000 --- a/audit/format_jsonx.go +++ /dev/null @@ -1,74 +0,0 @@ -package audit - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/jefferai/jsonx" -) - -// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into -// a XML format. 
-type JSONxFormatWriter struct { - Prefix string - SaltFunc func(context.Context) (*salt.Salt, error) -} - -func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { - if req == nil { - return fmt.Errorf("request entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - jsonBytes, err := json.Marshal(req) - if err != nil { - return err - } - - xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) - if err != nil { - return err - } - - _, err = w.Write(xmlBytes) - return err -} - -func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { - if resp == nil { - return fmt.Errorf("response entry was nil, cannot encode") - } - - if len(f.Prefix) > 0 { - _, err := w.Write([]byte(f.Prefix)) - if err != nil { - return err - } - } - - jsonBytes, err := json.Marshal(resp) - if err != nil { - return err - } - - xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) - if err != nil { - return err - } - - _, err = w.Write(xmlBytes) - return err -} - -func (f *JSONxFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - return f.SaltFunc(ctx) -} diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go deleted file mode 100644 index 00921c0c71a9..000000000000 --- a/audit/format_jsonx_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package audit - -import ( - "bytes" - "context" - "errors" - "fmt" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestFormatJSONx_formatRequest(t *testing.T) { - salter, err := salt.NewSalt(context.Background(), nil, nil) - if err != nil { - t.Fatal(err) - } - saltFunc := func(context.Context) (*salt.Salt, error) { - return salter, nil - } - - fooSalted := salter.GetIdentifiedHMAC("foo") - issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") - - cases := map[string]struct { - Auth *logical.Auth - Req *logical.Request - Err error - Prefix string - Result string - ExpectedStr string - }{ - "auth, request": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - EntityID: "foobarentity", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - ID: "request", - ClientToken: "foo", - ClientTokenAccessor: "bar", - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - PolicyOverride: true, - }, - errors.New("this is an error"), - "", - "", - fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, - fooSalted, fooSalted), - }, - "auth, request with prefix": { - &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - DisplayName: "testtoken", - NoDefaultPolicy: true, - EntityID: "foobarentity", - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, - LeaseOptions: logical.LeaseOptions{ - TTL: time.Hour * 4, - IssueTime: issueTime, - }, - }, - &logical.Request{ - ID: "request", - ClientToken: "foo", - ClientTokenAccessor: "bar", - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: 
&logical.Connection{ - RemoteAddr: "127.0.0.1", - }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, - }, - Headers: map[string][]string{ - "foo": {"bar"}, - }, - PolicyOverride: true, - }, - errors.New("this is an error"), - "", - "@cee: ", - fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, - fooSalted, fooSalted), - }, - } - - for name, tc := range cases { - var buf bytes.Buffer - formatter := AuditFormatter{ - AuditFormatWriter: &JSONxFormatWriter{ - Prefix: tc.Prefix, - SaltFunc: saltFunc, - }, - } - config := FormatterConfig{ - OmitTime: true, - HMACAccessor: false, - } - in := &logical.LogInput{ - Auth: tc.Auth, - Request: tc.Req, - OuterErr: tc.Err, - } - if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { - t.Fatalf("bad: %s\nerr: %s", name, err) - } - - if !strings.HasPrefix(buf.String(), tc.Prefix) { - t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) - } - - if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) { - t.Fatalf( - "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", - name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) - } - } -} diff --git a/audit/format_test.go b/audit/format_test.go deleted file mode 100644 index 4f3cc5cbb2e5..000000000000 --- a/audit/format_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package audit - -import ( - "context" - "io" - "io/ioutil" - "testing" - - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -type noopFormatWriter struct { - salt *salt.Salt - SaltFunc func() (*salt.Salt, error) -} - -func (n *noopFormatWriter) WriteRequest(_ io.Writer, _ *AuditRequestEntry) error { - return nil -} - -func (n *noopFormatWriter) WriteResponse(_ io.Writer, _ *AuditResponseEntry) error { - return nil -} - -func (n *noopFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { - if n.salt != nil { - return n.salt, nil - } - var err error - n.salt, err = salt.NewSalt(ctx, nil, nil) - if err != nil { - return nil, err - } - return n.salt, nil -} - -func TestFormatRequestErrors(t *testing.T) { - config := FormatterConfig{} - formatter := AuditFormatter{ - AuditFormatWriter: &noopFormatWriter{}, - } - - if err := formatter.FormatRequest(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { - t.Fatal("expected error due to nil request") - } - - in := &logical.LogInput{ - Request: &logical.Request{}, - } - if err := formatter.FormatRequest(context.Background(), nil, config, in); err == nil { - t.Fatal("expected error due to nil writer") - } -} - -func TestFormatResponseErrors(t *testing.T) { - config := FormatterConfig{} - formatter := AuditFormatter{ - AuditFormatWriter: &noopFormatWriter{}, - } - - if err := formatter.FormatResponse(context.Background(), ioutil.Discard, config, &logical.LogInput{}); err == nil { - t.Fatal("expected error due to nil request") - } - - in := &logical.LogInput{ - Request: &logical.Request{}, - } - if err := formatter.FormatResponse(context.Background(), nil, config, in); err == nil { - t.Fatal("expected error due to nil writer") - } -} diff --git a/audit/formatter.go b/audit/formatter.go deleted file mode 100644 index c27035768d35..000000000000 --- a/audit/formatter.go +++ /dev/null @@ -1,26 +0,0 @@ -package audit - -import ( - "context" - "io" - - "github.com/hashicorp/vault/sdk/logical" -) - -// Formatter is an interface that is responsible 
for formating a -// request/response into some format. Formatters write their output -// to an io.Writer. -// -// It is recommended that you pass data through Hash prior to formatting it. -type Formatter interface { - FormatRequest(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error - FormatResponse(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error -} - -type FormatterConfig struct { - Raw bool - HMACAccessor bool - - // This should only ever be used in a testing context - OmitTime bool -} diff --git a/audit/hashstructure.go b/audit/hashstructure.go index 11c6214ff7b3..d49c02865c11 100644 --- a/audit/hashstructure.go +++ b/audit/hashstructure.go @@ -1,13 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package audit import ( + "context" "encoding/json" "errors" "reflect" "time" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/copystructure" @@ -15,17 +18,27 @@ import ( ) // HashString hashes the given opaque string and returns it -func HashString(salter *salt.Salt, data string) string { - return salter.GetIdentifiedHMAC(data) +func HashString(ctx context.Context, salter Salter, data string) (string, error) { + salt, err := salter.Salt(ctx) + if err != nil { + return "", err + } + + return salt.GetIdentifiedHMAC(data), nil } // HashAuth returns a hashed copy of the logical.Auth input. -func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) { +func HashAuth(ctx context.Context, salter Salter, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) { if in == nil { return nil, nil } - fn := salter.GetIdentifiedHMAC + salt, err := salter.Salt(ctx) + if err != nil { + return nil, err + } + + fn := salt.GetIdentifiedHMAC auth := *in if auth.ClientToken != "" { @@ -38,12 +51,17 @@ func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical. } // HashRequest returns a hashed copy of the logical.Request input. 
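These hashstructure.go changes move salt acquisition behind the Salter interface: the helpers now take a context.Context plus a Salter and fetch the salt per call rather than holding a *salt.Salt. A small caller sketch under the new signatures; hashToken is a hypothetical name:

```go
// hashToken is hypothetical; it shows the new HashString calling convention.
func hashToken(ctx context.Context, salter Salter, token string) (string, error) {
	hashed, err := HashString(ctx, salter, token)
	if err != nil {
		return "", fmt.Errorf("error hashing token: %w", err)
	}
	return hashed, nil
}
```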
-func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) { +func HashRequest(ctx context.Context, salter Salter, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) { if in == nil { return nil, nil } - fn := salter.GetIdentifiedHMAC + salt, err := salter.Salt(ctx) + if err != nil { + return nil, err + } + + fn := salt.GetIdentifiedHMAC req := *in if req.Auth != nil { @@ -52,7 +70,7 @@ func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonH return nil, err } - req.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) + req.Auth, err = HashAuth(ctx, salter, cp.(*logical.Auth), HMACAccessor) if err != nil { return nil, err } @@ -81,11 +99,11 @@ func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonH return &req, nil } -func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKeys []string) error { +func hashMap(hashFunc HashCallback, data map[string]interface{}, nonHMACDataKeys []string) error { for k, v := range data { if o, ok := v.(logical.OptMarshaler); ok { marshaled, err := o.MarshalJSONWithOptions(&logical.MarshalOptions{ - ValueHasher: fn, + ValueHasher: hashFunc, }) if err != nil { return err @@ -94,16 +112,21 @@ func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKey } } - return HashStructure(data, fn, nonHMACDataKeys) + return HashStructure(data, hashFunc, nonHMACDataKeys) } // HashResponse returns a hashed copy of the logical.Request input. -func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Response, error) { +func HashResponse(ctx context.Context, salter Salter, in *logical.Response, HMACAccessor bool, nonHMACDataKeys []string, elideListResponseData bool) (*logical.Response, error) { if in == nil { return nil, nil } - fn := salter.GetIdentifiedHMAC + salt, err := salter.Salt(ctx) + if err != nil { + return nil, err + } + + fn := salt.GetIdentifiedHMAC resp := *in if resp.Auth != nil { @@ -112,7 +135,7 @@ func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, no return nil, err } - resp.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) + resp.Auth, err = HashAuth(ctx, salter, cp.(*logical.Auth), HMACAccessor) if err != nil { return nil, err } @@ -129,15 +152,23 @@ func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, no mapCopy[logical.HTTPRawBody] = string(b) } + // Processing list response data elision takes place at this point in the code for performance reasons: + // - take advantage of the deep copy of resp.Data that was going to be done anyway for hashing + // - but elide data before potentially spending time hashing it + if elideListResponseData { + doElideListResponseData(mapCopy) + } + err = hashMap(fn, mapCopy, nonHMACDataKeys) if err != nil { return nil, err } resp.Data = mapCopy } + if resp.WrapInfo != nil { var err error - resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor) + resp.WrapInfo, err = hashWrapInfo(fn, resp.WrapInfo, HMACAccessor) if err != nil { return nil, err } @@ -146,22 +177,21 @@ func HashResponse(salter *salt.Salt, in *logical.Response, HMACAccessor bool, no return &resp, nil } -// HashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input. 
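The body of doElideListResponseData is not part of this hunk, so the following is only a guess at the kind of work the surrounding comment describes: trimming bulky list results before any hashing cost is paid. The key name and replacement value are assumptions, and the real implementation may differ:

```go
// hypothetical sketch: replace a potentially huge "keys" slice in a
// list response with just its length, so it is never hashed or logged.
func doElideListResponseDataSketch(data map[string]interface{}) {
	if keys, ok := data["keys"].([]string); ok {
		data["keys"] = len(keys)
	}
}
```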
-func HashWrapInfo(salter *salt.Salt, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) { +// hashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input. +func hashWrapInfo(hashFunc HashCallback, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) { if in == nil { return nil, nil } - fn := salter.GetIdentifiedHMAC wrapinfo := *in - wrapinfo.Token = fn(wrapinfo.Token) + wrapinfo.Token = hashFunc(wrapinfo.Token) if HMACAccessor { - wrapinfo.Accessor = fn(wrapinfo.Accessor) + wrapinfo.Accessor = hashFunc(wrapinfo.Accessor) if wrapinfo.WrappedAccessor != "" { - wrapinfo.WrappedAccessor = fn(wrapinfo.WrappedAccessor) + wrapinfo.WrappedAccessor = hashFunc(wrapinfo.WrappedAccessor) } } diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go index 7f6c5a869485..771da02033ba 100644 --- a/audit/hashstructure_test.go +++ b/audit/hashstructure_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package audit import ( @@ -95,20 +98,32 @@ func TestCopy_response(t *testing.T) { } } -func TestHashString(t *testing.T) { +// TestSalter is a structure that implements the Salter interface in a trivial +// manner. +type TestSalter struct{} + +// Salt returns a salt.Salt pointer based on dummy data stored in an in-memory +// storage instance. +func (*TestSalter) Salt(ctx context.Context) (*salt.Salt, error) { inmemStorage := &logical.InmemStorage{} inmemStorage.Put(context.Background(), &logical.StorageEntry{ Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + + return salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ HMAC: sha256.New, HMACType: "hmac-sha256", }) +} + +func TestHashString(t *testing.T) { + salter := &TestSalter{} + + out, err := HashString(context.Background(), salter, "foo") if err != nil { t.Fatalf("Error instantiating salt: %s", err) } - out := HashString(localSalt, "foo") if out != "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a" { t.Fatalf("err: HashString output did not match expected") } @@ -149,16 +164,10 @@ func TestHashAuth(t *testing.T) { Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } + salter := &TestSalter{} for _, tc := range cases { input := fmt.Sprintf("%#v", tc.Input) - out, err := HashAuth(localSalt, tc.Input, tc.HMACAccessor) + out, err := HashAuth(context.Background(), salter, tc.Input, tc.HMACAccessor) if err != nil { t.Fatalf("err: %s\n\n%s", err, input) } @@ -213,16 +222,10 @@ func TestHashRequest(t *testing.T) { Key: "salt", Value: []byte("foo"), }) - localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } + salter := &TestSalter{} for _, tc := range cases { input := fmt.Sprintf("%#v", tc.Input) - out, err := HashRequest(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) + out, err := HashRequest(context.Background(), salter, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) if err != nil { t.Fatalf("err: %s\n\n%s", err, input) } @@ -284,16 +287,10 @@ func TestHashResponse(t *testing.T) { Key: "salt", Value: []byte("foo"), }) - localSalt, err := 
salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }) - if err != nil { - t.Fatalf("Error instantiating salt: %s", err) - } + salter := &TestSalter{} for _, tc := range cases { input := fmt.Sprintf("%#v", tc.Input) - out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) + out, err := HashResponse(context.Background(), salter, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys, false) if err != nil { t.Fatalf("err: %s\n\n%s", err, input) } diff --git a/audit/nodes.go b/audit/nodes.go new file mode 100644 index 000000000000..6b6b0b845871 --- /dev/null +++ b/audit/nodes.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" +) + +// ProcessManual will attempt to create an (audit) event with the specified data +// and manually iterate over the supplied nodes calling Process on each until the +// event is nil (which indicates the pipeline has completed). +// Order of IDs in the NodeID slice determines the order they are processed. +// (Audit) Event will be of RequestType (as opposed to ResponseType). +// The last node must be a filter node (eventlogger.NodeTypeFilter) or +// sink node (eventlogger.NodeTypeSink). +func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogger.NodeID, nodes map[eventlogger.NodeID]eventlogger.Node) error { + switch { + case data == nil: + return errors.New("data cannot be nil") + case len(ids) < 2: + return errors.New("minimum of 2 ids are required") + case nodes == nil: + return errors.New("nodes cannot be nil") + case len(nodes) == 0: + return errors.New("nodes are required") + } + + // Create an audit event. + a, err := NewEvent(RequestType) + if err != nil { + return err + } + + // Insert the data into the audit event. + a.Data = data + + // Create an eventlogger event with the audit event as the payload. + e := &eventlogger.Event{ + Type: event.AuditType.AsEventType(), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: a, + } + + var lastSeen eventlogger.NodeType + + // Process nodes in order, updating the event with the result. + // This means we *should* do: + // 1. filter (optional if configured) + // 2. formatter (temporary) + // 3. sink + for _, id := range ids { + // If the event is nil, we've completed processing the pipeline (hopefully + // by either a filter node or a sink node). + if e == nil { + break + } + node, ok := nodes[id] + if !ok { + return fmt.Errorf("node not found: %v", id) + } + + switch node.Type() { + case eventlogger.NodeTypeFormatter: + // Use a temporary formatter node which doesn't persist its salt anywhere. + if formatNode, ok := node.(*EntryFormatter); ok && formatNode != nil { + e, err = newTemporaryEntryFormatter(formatNode).Process(ctx, e) + } + default: + e, err = node.Process(ctx, e) + } + + if err != nil { + return err + } + + // Track the last node we have processed, as we should end with a filter or sink. 
+ lastSeen = node.Type() + } + + switch lastSeen { + case eventlogger.NodeTypeSink, eventlogger.NodeTypeFilter: + default: + return errors.New("last node must be a filter or sink") + } + + return nil +} diff --git a/audit/nodes_test.go b/audit/nodes_test.go new file mode 100644 index 000000000000..c425f0418182 --- /dev/null +++ b/audit/nodes_test.go @@ -0,0 +1,337 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/helper/namespace" + + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/eventlogger" + + "github.com/hashicorp/vault/internal/observability/event" + "github.com/stretchr/testify/require" +) + +// TestProcessManual_NilData tests ProcessManual when nil data is supplied. +func TestProcessManual_NilData(t *testing.T) { + t.Parallel() + + var ids []eventlogger.NodeID + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, formatterNode := newFormatterNode(t) + ids = append(ids, formatterId) + nodes[formatterId] = formatterNode + + // Sink node + sinkId, sinkNode := newSinkNode(t) + ids = append(ids, sinkId) + nodes[sinkId] = sinkNode + + err := ProcessManual(namespace.RootContext(context.Background()), nil, ids, nodes) + require.Error(t, err) + require.EqualError(t, err, "data cannot be nil") +} + +// TestProcessManual_BadIDs tests ProcessManual when different bad values are +// supplied for the ID parameter. +func TestProcessManual_BadIDs(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + IDs []eventlogger.NodeID + ExpectedErrorMessage string + }{ + "nil": { + IDs: nil, + ExpectedErrorMessage: "minimum of 2 ids are required", + }, + "one": { + IDs: []eventlogger.NodeID{"1"}, + ExpectedErrorMessage: "minimum of 2 ids are required", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, formatterNode := newFormatterNode(t) + nodes[formatterId] = formatterNode + + // Sink node + sinkId, sinkNode := newSinkNode(t) + nodes[sinkId] = sinkNode + + // Data + requestId, err := uuid.GenerateUUID() + require.NoError(t, err) + data := newData(requestId) + + err = ProcessManual(namespace.RootContext(context.Background()), data, tc.IDs, nodes) + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + }) + } +} + +// TestProcessManual_NoNodes tests ProcessManual when no nodes are supplied. +func TestProcessManual_NoNodes(t *testing.T) { + t.Parallel() + + var ids []eventlogger.NodeID + nodes := make(map[eventlogger.NodeID]eventlogger.Node) + + // Formatter node + formatterId, _ := newFormatterNode(t) + ids = append(ids, formatterId) + + // Sink node + sinkId, _ := newSinkNode(t) + ids = append(ids, sinkId) + + // Data + requestId, err := uuid.GenerateUUID() + require.NoError(t, err) + data := newData(requestId) + + err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes) + require.Error(t, err) + require.EqualError(t, err, "nodes are required") +} + +// TestProcessManual_IdNodeMismatch tests ProcessManual when IDs don't match with +// the nodes in the supplied map. 
+func TestProcessManual_IdNodeMismatch(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Sink node
+	sinkId, _ := newSinkNode(t)
+	ids = append(ids, sinkId)
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+	require.Error(t, err)
+	require.ErrorContains(t, err, "node not found: ")
+}
+
+// TestProcessManual_NotEnoughNodes tests ProcessManual when there is only one
+// node provided.
+func TestProcessManual_NotEnoughNodes(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+	require.Error(t, err)
+	require.EqualError(t, err, "minimum of 2 ids are required")
+}
+
+// TestProcessManual_LastNodeNotSink tests ProcessManual when the last node is
+// not a Sink node.
+func TestProcessManual_LastNodeNotSink(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Another Formatter node
+	formatterId, formatterNode = newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+	require.Error(t, err)
+	require.EqualError(t, err, "last node must be a filter or sink")
+}
+
+// TestProcessManualEndWithSink ensures that the manual processing of a test
+// message works as expected with proper inputs, which means processing ends
+// with the sink node.
+func TestProcessManualEndWithSink(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Sink node
+	sinkId, sinkNode := newSinkNode(t)
+	ids = append(ids, sinkId)
+	nodes[sinkId] = sinkNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+	require.NoError(t, err)
+}
+
+// TestProcessManual_EndWithFilter ensures that the manual processing of a test
+// message works as expected with proper inputs, which means processing ends
+// with the filter node (the filter short-circuits the pipeline before the sink).
+func TestProcessManual_EndWithFilter(t *testing.T) {
+	t.Parallel()
+
+	var ids []eventlogger.NodeID
+	nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+	// Filter node
+	filterId, filterNode := newFilterNode(t)
+	ids = append(ids, filterId)
+	nodes[filterId] = filterNode
+
+	// Formatter node
+	formatterId, formatterNode := newFormatterNode(t)
+	ids = append(ids, formatterId)
+	nodes[formatterId] = formatterNode
+
+	// Sink node
+	sinkId, sinkNode := newSinkNode(t)
+	ids = append(ids, sinkId)
+	nodes[sinkId] = sinkNode
+
+	// Data
+	requestId, err := uuid.GenerateUUID()
+	require.NoError(t, err)
+	data := newData(requestId)
+
+	err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+	require.NoError(t, err)
+}
+
+// newSinkNode creates a new UUID and NoopSink (sink node).
+func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
+	t.Helper()
+
+	sinkId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	sinkNode := event.NewNoopSink()
+
+	return sinkId, sinkNode
+}
+
+// TestFilter is a trivial implementation of eventlogger.Node used as a placeholder
+// for Filter nodes in tests.
+type TestFilter struct{}
+
+// Process trivially filters the event, preventing it from being processed by subsequent nodes.
+func (f *TestFilter) Process(_ context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	return nil, nil
+}
+
+// Reopen does nothing.
+func (f *TestFilter) Reopen() error {
+	return nil
+}
+
+// Type returns the eventlogger.NodeTypeFilter type.
+func (f *TestFilter) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeFilter
+}
+
+// TestFormatter is a trivial implementation of the eventlogger.Node interface
+// used as a placeholder for Formatter nodes in tests.
+type TestFormatter struct{}
+
+// Process trivially formats the event by storing "test" as a byte slice under
+// the test format type.
+func (f *TestFormatter) Process(_ context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	e.FormattedAs("test", []byte("test"))
+
+	return e, nil
+}
+
+// Reopen does nothing.
+func (f *TestFormatter) Reopen() error {
+	return nil
+}
+
+// Type returns the eventlogger.NodeTypeFormatter type.
+func (f *TestFormatter) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeFormatter
+}
+
+// newFilterNode creates a new TestFilter (filter node).
+func newFilterNode(t *testing.T) (eventlogger.NodeID, *TestFilter) {
+	nodeId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	node := &TestFilter{}
+
+	return nodeId, node
+}
+
+// newFormatterNode creates a new TestFormatter (formatter node).
+func newFormatterNode(t *testing.T) (eventlogger.NodeID, *TestFormatter) {
+	nodeId, err := event.GenerateNodeID()
+	require.NoError(t, err)
+	node := &TestFormatter{}
+
+	return nodeId, node
+}
+
+// newData creates a sample logical.LogInput to be used as data for tests.
+func newData(id string) *logical.LogInput {
+	return &logical.LogInput{
+		Type: "request",
+		Auth: nil,
+		Request: &logical.Request{
+			ID:        id,
+			Operation: "update",
+			Path:      "sys/audit/test",
+		},
+		Response: nil,
+		OuterErr: nil,
+	}
+}
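As an aside for reviewers: the sketch below shows roughly how a caller is expected to drive ProcessManual, mirroring what the file backend's LogTestMessage does later in this diff. It is illustrative only and not part of the change; the node IDs and nodes are assumed to have been created elsewhere (for example via event.GenerateNodeID and audit.NewEntryFormatter).

```go
package example

import (
	"context"

	"github.com/hashicorp/eventlogger"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/sdk/logical"
)

// logTestEntry manually drives a two-node pipeline: the formatter renders the
// event and the sink writes it. ProcessManual walks the IDs in slice order and
// requires the pipeline to end with a filter or sink node.
func logTestEntry(ctx context.Context, formatterID, sinkID eventlogger.NodeID, nodes map[eventlogger.NodeID]eventlogger.Node) error {
	in := &logical.LogInput{
		Type:    "request",
		Request: &logical.Request{Operation: "update", Path: "sys/audit/test"},
	}

	// Order is significant: the formatter must run before the sink.
	return audit.ProcessManual(ctx, in, []eventlogger.NodeID{formatterID, sinkID}, nodes)
}
```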
diff --git a/audit/options.go b/audit/options.go new file mode 100644 index 000000000000..a48d76cd0227 --- /dev/null +++ b/audit/options.go @@ -0,0 +1,163 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"errors"
+	"strings"
+	"time"
+)
+
+// Option is how options are passed as arguments.
+type Option func(*options) error
+
+// options are used to represent configuration for audit-related nodes.
+type options struct {
+	withID           string
+	withNow          time.Time
+	withSubtype      subtype
+	withFormat       format
+	withPrefix       string
+	withRaw          bool
+	withElision      bool
+	withOmitTime     bool
+	withHMACAccessor bool
+}
+
+// getDefaultOptions returns options with their default values.
+func getDefaultOptions() options {
+	return options{
+		withNow:          time.Now(),
+		withFormat:       JSONFormat,
+		withHMACAccessor: true,
+	}
+}
+
+// getOpts applies each supplied Option and returns the fully configured options.
+// Each Option is applied in the order it appears in the argument list, so it is
+// possible to supply the same Option numerous times, in which case 'last write wins'.
+func getOpts(opt ...Option) (options, error) {
+	opts := getDefaultOptions()
+	for _, o := range opt {
+		if o == nil {
+			continue
+		}
+		if err := o(&opts); err != nil {
+			return options{}, err
+		}
+	}
+	return opts, nil
+}
+
+// WithID provides an optional ID.
+func WithID(id string) Option {
+	return func(o *options) error {
+		var err error
+
+		id := strings.TrimSpace(id)
+		switch {
+		case id == "":
+			err = errors.New("id cannot be empty")
+		default:
+			o.withID = id
+		}
+
+		return err
+	}
+}
+
+// WithNow provides an Option to represent 'now'.
+func WithNow(now time.Time) Option {
+	return func(o *options) error {
+		var err error
+
+		switch {
+		case now.IsZero():
+			err = errors.New("cannot specify 'now' to be the zero time instant")
+		default:
+			o.withNow = now
+		}
+
+		return err
+	}
+}
+
+// WithSubtype provides an Option to represent the event subtype.
+func WithSubtype(s string) Option {
+	return func(o *options) error {
+		s := strings.TrimSpace(s)
+		if s == "" {
+			return errors.New("subtype cannot be empty")
+		}
+		parsed := subtype(s)
+		err := parsed.validate()
+		if err != nil {
+			return err
+		}
+
+		o.withSubtype = parsed
+		return nil
+	}
+}
+
+// WithFormat provides an Option to represent event format.
+func WithFormat(f string) Option {
+	return func(o *options) error {
+		f := strings.TrimSpace(strings.ToLower(f))
+		if f == "" {
+			// Return early; we won't attempt to apply this option if it's empty.
+			return nil
+		}
+
+		parsed := format(f)
+		err := parsed.validate()
+		if err != nil {
+			return err
+		}
+
+		o.withFormat = parsed
+		return nil
+	}
+}
+
+// WithPrefix provides an Option to represent a prefix for a file sink.
+func WithPrefix(prefix string) Option {
+	return func(o *options) error {
+		o.withPrefix = prefix
+
+		return nil
+	}
+}
+
+// WithRaw provides an Option to represent whether 'raw' is required.
+func WithRaw(r bool) Option {
+	return func(o *options) error {
+		o.withRaw = r
+		return nil
+	}
+}
+
+// WithElision provides an Option to represent whether elision (...) is required.
+func WithElision(e bool) Option {
+	return func(o *options) error {
+		o.withElision = e
+		return nil
+	}
+}
+
+// WithOmitTime provides an Option to represent whether to omit time.
+func WithOmitTime(t bool) Option {
+	return func(o *options) error {
+		o.withOmitTime = t
+		return nil
+	}
+}
+
+// WithHMACAccessor provides an Option to represent whether an HMAC accessor is applicable.
+func WithHMACAccessor(h bool) Option {
+	return func(o *options) error {
+		o.withHMACAccessor = h
+		return nil
+	}
+}
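To make the 'last write wins' semantics of getOpts concrete, here is a hypothetical snippet (not part of the change; it would have to live inside the audit package, because options and getOpts are unexported):

```go
package audit

import "fmt"

// exampleGetOpts is illustrative only: it shows option ordering and nil-skipping.
func exampleGetOpts() {
	// Later options override earlier ones, and nil options are skipped.
	opts, err := getOpts(
		WithID("first"),
		nil,
		WithID("second"), // "second" wins
		WithFormat("jsonx"),
	)
	if err != nil {
		fmt.Println("invalid option:", err)
		return
	}

	fmt.Println(opts.withID, opts.withFormat) // second jsonx
}
```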
diff --git a/audit/options_test.go b/audit/options_test.go new file mode 100644 index 000000000000..33de069faeee --- /dev/null +++ b/audit/options_test.go @@ -0,0 +1,509 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestOptions_WithFormat exercises WithFormat Option to ensure it performs as expected.
+func TestOptions_WithFormat(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		Value                string
+		IsErrorExpected      bool
+		ExpectedErrorMessage string
+		ExpectedValue        format
+	}{
+		"empty": {
+			Value:           "",
+			IsErrorExpected: false,
+			ExpectedValue:   format(""),
+		},
+		"whitespace": {
+			Value:           "  ",
+			IsErrorExpected: false,
+			ExpectedValue:   format(""),
+		},
+		"invalid-test": {
+			Value:                "test",
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "invalid format \"test\": invalid internal parameter",
+		},
+		"valid-json": {
+			Value:           "json",
+			IsErrorExpected: false,
+			ExpectedValue:   JSONFormat,
+		},
+		"valid-jsonx": {
+			Value:           "jsonx",
+			IsErrorExpected: false,
+			ExpectedValue:   JSONxFormat,
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			opts := &options{}
+			applyOption := WithFormat(tc.Value)
+			err := applyOption(opts)
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				require.Equal(t, tc.ExpectedValue, opts.withFormat)
+			}
+		})
+	}
+}
+
+// TestOptions_WithSubtype exercises WithSubtype Option to ensure it performs as expected.
+func TestOptions_WithSubtype(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		Value                string
+		IsErrorExpected      bool
+		ExpectedErrorMessage string
+		ExpectedValue        subtype
+	}{
+		"empty": {
+			Value:                "",
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "subtype cannot be empty",
+		},
+		"whitespace": {
+			Value:                "  ",
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "subtype cannot be empty",
+		},
+		"valid": {
+			Value:           "AuditResponse",
+			IsErrorExpected: false,
+			ExpectedValue:   ResponseType,
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			opts := &options{}
+			applyOption := WithSubtype(tc.Value)
+			err := applyOption(opts)
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				require.Equal(t, tc.ExpectedValue, opts.withSubtype)
+			}
+		})
+	}
+}
+
+// TestOptions_WithNow exercises WithNow Option to ensure it performs as expected.
+func TestOptions_WithNow(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		Value                time.Time
+		IsErrorExpected      bool
+		ExpectedErrorMessage string
+		ExpectedValue        time.Time
+	}{
+		"default-time": {
+			Value:                time.Time{},
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant",
+		},
+		"valid-time": {
+			Value:           time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local),
+			IsErrorExpected: false,
+			ExpectedValue:   time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local),
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			opts := &options{}
+			applyOption := WithNow(tc.Value)
+			err := applyOption(opts)
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				require.Equal(t, tc.ExpectedValue, opts.withNow)
+			}
+		})
+	}
+}
+
+// TestOptions_WithID exercises WithID Option to ensure it performs as expected.
+func TestOptions_WithID(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "whitespace": { + Value: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithID(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withID) + } + }) + } +} + +// TestOptions_WithPrefix exercises WithPrefix Option to ensure it performs as expected. +func TestOptions_WithPrefix(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: false, + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + IsErrorExpected: false, + ExpectedValue: " ", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithPrefix(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withPrefix) + } + }) + } +} + +// TestOptions_WithRaw exercises WithRaw Option to ensure it performs as expected. +func TestOptions_WithRaw(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithRaw(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRaw) + }) + } +} + +// TestOptions_WithElision exercises WithElision Option to ensure it performs as expected. +func TestOptions_WithElision(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithElision(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withElision) + }) + } +} + +// TestOptions_WithHMACAccessor exercises WithHMACAccessor Option to ensure it performs as expected. 
+func TestOptions_WithHMACAccessor(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithHMACAccessor(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withHMACAccessor) + }) + } +} + +// TestOptions_WithOmitTime exercises WithOmitTime Option to ensure it performs as expected. +func TestOptions_WithOmitTime(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithOmitTime(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withOmitTime) + }) + } +} + +// TestOptions_Default exercises getDefaultOptions to assert the default values. +func TestOptions_Default(t *testing.T) { + t.Parallel() + + opts := getDefaultOptions() + require.NotNil(t, opts) + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) +} + +// TestOptions_Opts exercises GetOpts with various Option values. +func TestOptions_Opts(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + opts []Option + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + ExpectedSubtype subtype + ExpectedFormat format + IsNowExpected bool + ExpectedNow time.Time + }{ + "nil-options": { + opts: nil, + IsErrorExpected: false, + IsNowExpected: true, + ExpectedFormat: JSONFormat, + }, + "empty-options": { + opts: []Option{}, + IsErrorExpected: false, + IsNowExpected: true, + ExpectedFormat: JSONFormat, + }, + "with-multiple-valid-id": { + opts: []Option{ + WithID("qwerty"), + WithID("juan"), + }, + IsErrorExpected: false, + ExpectedID: "juan", + IsNowExpected: true, + ExpectedFormat: JSONFormat, + }, + "with-multiple-valid-subtype": { + opts: []Option{ + WithSubtype("AuditRequest"), + WithSubtype("AuditResponse"), + }, + IsErrorExpected: false, + ExpectedSubtype: ResponseType, + IsNowExpected: true, + ExpectedFormat: JSONFormat, + }, + "with-multiple-valid-format": { + opts: []Option{ + WithFormat("json"), + WithFormat("jsonx"), + }, + IsErrorExpected: false, + ExpectedFormat: JSONxFormat, + IsNowExpected: true, + }, + "with-multiple-valid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedNow: time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local), + IsNowExpected: false, + ExpectedFormat: JSONFormat, + }, + "with-multiple-valid-then-invalid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Time{}), + }, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant", + ExpectedFormat: JSONFormat, + }, + "with-multiple-valid-options": { + opts: []Option{ + WithID("qwerty"), + WithSubtype("AuditRequest"), + WithFormat("json"), + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + IsErrorExpected: 
false,
+			ExpectedID:      "qwerty",
+			ExpectedSubtype: RequestType,
+			ExpectedFormat:  JSONFormat,
+			ExpectedNow:     time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local),
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			opts, err := getOpts(tc.opts...)
+
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NotNil(t, opts)
+				require.NoError(t, err)
+				require.Equal(t, tc.ExpectedID, opts.withID)
+				require.Equal(t, tc.ExpectedSubtype, opts.withSubtype)
+				require.Equal(t, tc.ExpectedFormat, opts.withFormat)
+				switch {
+				case tc.IsNowExpected:
+					require.True(t, time.Now().After(opts.withNow))
+					require.False(t, opts.withNow.IsZero())
+				default:
+					require.Equal(t, tc.ExpectedNow, opts.withNow)
+				}
+			}
+		})
+	}
+}
diff --git a/audit/sink_metric_labeler.go b/audit/sink_metric_labeler.go new file mode 100644 index 000000000000..1c74a827f74f --- /dev/null +++ b/audit/sink_metric_labeler.go @@ -0,0 +1,57 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/vault/internal/observability/event"
+)
+
+var (
+	_ event.Labeler = (*MetricLabelerAuditSink)(nil)
+	_ event.Labeler = (*MetricLabelerAuditFallback)(nil)
+)
+
+var (
+	metricLabelAuditSinkSuccess     = []string{"audit", "sink", "success"}
+	metricLabelAuditSinkFailure     = []string{"audit", "sink", "failure"}
+	metricLabelAuditFallbackSuccess = []string{"audit", "fallback", "success"}
+	metricLabelAuditFallbackMiss    = []string{"audit", "fallback", "miss"}
+)
+
+// MetricLabelerAuditSink can be used to provide labels for the success or failure
+// of a sink node used for a normal audit device.
+type MetricLabelerAuditSink struct{}
+
+// MetricLabelerAuditFallback can be used to provide labels for the success or failure
+// of a sink node used for an audit fallback device.
+type MetricLabelerAuditFallback struct{}
+
+// Labels provides the success and failure labels for an audit sink, based on the error supplied.
+// Success: 'vault.audit.sink.success'
+// Failure: 'vault.audit.sink.failure'
+func (m MetricLabelerAuditSink) Labels(_ *eventlogger.Event, err error) []string {
+	if err != nil {
+		return metricLabelAuditSinkFailure
+	}
+
+	return metricLabelAuditSinkSuccess
+}
+
+// Labels provides the success and failure labels for an audit fallback sink, based on the error supplied.
+// Success: 'vault.audit.fallback.success'
+// Failure: 'vault.audit.sink.failure'
+func (m MetricLabelerAuditFallback) Labels(_ *eventlogger.Event, err error) []string {
+	if err != nil {
+		return metricLabelAuditSinkFailure
+	}
+
+	return metricLabelAuditFallbackSuccess
+}
+
+// MetricLabelsFallbackMiss returns the labels which indicate an audit entry was missed.
+// 'vault.audit.fallback.miss'
+func MetricLabelsFallbackMiss() []string {
+	return metricLabelAuditFallbackMiss
+}
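Anything satisfying event.Labeler can be wired into event.NewMetricsCounter in place of these labelers (the file backend later in this diff does exactly that in configureSinkNode). A hypothetical custom labeler, for illustration only and under the assumption that event.Labeler requires only the Labels method shown here:

```go
package audit

import (
	"github.com/hashicorp/eventlogger"
	"github.com/hashicorp/vault/internal/observability/event"
)

// staticLabeler is a hypothetical event.Labeler that reports a fixed label
// regardless of whether the sink reported an error.
type staticLabeler struct {
	label []string
}

var _ event.Labeler = (*staticLabeler)(nil)

// Labels implements event.Labeler and ignores both the event and the error.
func (s *staticLabeler) Labels(_ *eventlogger.Event, _ error) []string {
	return s.label
}
```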
diff --git a/audit/sink_metric_labeler_test.go b/audit/sink_metric_labeler_test.go new file mode 100644 index 000000000000..44f60a6d014c --- /dev/null +++ b/audit/sink_metric_labeler_test.go @@ -0,0 +1,75 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestMetricLabelerAuditSink_Label ensures we always get the right label based
+// on the input value of the error.
+func TestMetricLabelerAuditSink_Label(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		err      error
+		expected []string
+	}{
+		"nil": {
+			err:      nil,
+			expected: []string{"audit", "sink", "success"},
+		},
+		"error": {
+			err:      errors.New("I am an error"),
+			expected: []string{"audit", "sink", "failure"},
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			m := &MetricLabelerAuditSink{}
+			result := m.Labels(nil, tc.err)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
+
+// TestMetricLabelerAuditFallback_Label ensures we always get the right label based
+// on the input value of the error for fallback devices.
+func TestMetricLabelerAuditFallback_Label(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		err      error
+		expected []string
+	}{
+		"nil": {
+			err:      nil,
+			expected: []string{"audit", "fallback", "success"},
+		},
+		"error": {
+			err:      errors.New("I am an error"),
+			expected: []string{"audit", "sink", "failure"},
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			m := &MetricLabelerAuditFallback{}
+			result := m.Labels(nil, tc.err)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
diff --git a/audit/sink_metric_timer.go b/audit/sink_metric_timer.go new file mode 100644 index 000000000000..57a282ae04ea --- /dev/null +++ b/audit/sink_metric_timer.go @@ -0,0 +1,76 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*SinkMetricTimer)(nil)
+
+// SinkMetricTimer is a wrapper for any kind of eventlogger.NodeTypeSink node that
+// processes events containing an AuditEvent payload.
+// It decorates the implemented eventlogger.Node Process method in order to emit
+// timing metrics for the duration between the creation time of the event and the
+// time the node completes processing.
+type SinkMetricTimer struct {
+	Name string
+	Sink eventlogger.Node
+}
+
+// NewSinkMetricTimer should be used to create the SinkMetricTimer.
+// It expects that an eventlogger.NodeTypeSink should be supplied as the sink.
+func NewSinkMetricTimer(name string, sink eventlogger.Node) (*SinkMetricTimer, error) {
+	name = strings.TrimSpace(name)
+	if name == "" {
+		return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter)
+	}
+
+	if sink == nil || reflect.ValueOf(sink).IsNil() {
+		return nil, fmt.Errorf("sink node is required: %w", ErrInvalidParameter)
+	}
+
+	if sink.Type() != eventlogger.NodeTypeSink {
+		return nil, fmt.Errorf("sink node must be of type 'sink': %w", ErrInvalidParameter)
+	}
+
+	return &SinkMetricTimer{
+		Name: name,
+		Sink: sink,
+	}, nil
+}
+
+// Process wraps the Process method of the underlying sink (eventlogger.Node).
+// Additionally, when the supplied eventlogger.Event has an AuditEvent as its payload,
+// it measures the elapsed time between the creation of the eventlogger.Event and
+// the completion of processing, emitting this as a metric.
+// Examples: +// 'vault.audit.{DEVICE}.log_request' +// 'vault.audit.{DEVICE}.log_response' +func (s *SinkMetricTimer) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + defer func() { + auditEvent, ok := e.Payload.(*AuditEvent) + if ok { + metrics.MeasureSince([]string{"audit", s.Name, auditEvent.Subtype.MetricTag()}, e.CreatedAt) + } + }() + + return s.Sink.Process(ctx, e) +} + +// Reopen wraps the Reopen method of this underlying sink (eventlogger.Node). +func (s *SinkMetricTimer) Reopen() error { + return s.Sink.Reopen() +} + +// Type wraps the Type method of this underlying sink (eventlogger.Node). +func (s *SinkMetricTimer) Type() eventlogger.NodeType { + return s.Sink.Type() +} diff --git a/audit/sink_metric_timer_test.go b/audit/sink_metric_timer_test.go new file mode 100644 index 000000000000..f65bbb9b52a5 --- /dev/null +++ b/audit/sink_metric_timer_test.go @@ -0,0 +1,68 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/stretchr/testify/require" +) + +// TestNewSinkMetricTimer ensures that parameters are checked correctly and errors +// reported as expected when attempting to create a SinkMetricTimer. +func TestNewSinkMetricTimer(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + node eventlogger.Node + isErrorExpected bool + expectedErrorMessage string + }{ + "happy": { + name: "foo", + node: &event.FileSink{}, + isErrorExpected: false, + }, + "no-name": { + name: "", + isErrorExpected: true, + expectedErrorMessage: "name is required: invalid internal parameter", + }, + "no-node": { + name: "foo", + node: nil, + isErrorExpected: true, + expectedErrorMessage: "sink node is required: invalid internal parameter", + }, + "bad-node": { + name: "foo", + node: &EntryFormatter{}, + isErrorExpected: true, + expectedErrorMessage: "sink node must be of type 'sink': invalid internal parameter", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + m, err := NewSinkMetricTimer(tc.name, tc.node) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + require.Nil(t, m) + default: + require.NoError(t, err) + require.NotNil(t, m) + } + }) + } +} diff --git a/audit/types.go b/audit/types.go new file mode 100644 index 000000000000..448a17393e8a --- /dev/null +++ b/audit/types.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package audit + +import ( + "context" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +// Backend interface must be implemented for an audit +// mechanism to be made available. Audit backends can be enabled to +// sink information to different backends such as logs, file, databases, +// or other external services. +type Backend interface { + // Salter interface must be implemented by anything implementing Backend. + Salter + + // The PipelineReader interface allows backends to surface information about their + // nodes for node and pipeline registration. 
+	event.PipelineReader
+
+	// IsFallback can be used to determine if this audit backend device is intended to
+	// be used as a fallback to catch all events that are not written when only using
+	// filtered pipelines.
+	IsFallback() bool
+
+	// LogTestMessage is used to check an audit backend before adding it
+	// permanently. It should attempt to synchronously log the given test
+	// message, WITHOUT using the normal Salt (which would require a storage
+	// operation on creation, which is currently disallowed).
+	LogTestMessage(context.Context, *logical.LogInput) error
+
+	// Reload is called on SIGHUP for supporting backends.
+	Reload(context.Context) error
+
+	// Invalidate is called for path invalidation.
+	Invalidate(context.Context)
+}
+
+// Salter is an interface that provides a way to obtain a Salt for hashing.
+type Salter interface {
+	// Salt returns a non-nil salt or an error.
+	Salt(context.Context) (*salt.Salt, error)
+}
+
+// Formatter is an interface that is responsible for formatting a request/response into some format.
+// It is recommended that you pass data through Hash prior to formatting it.
+type Formatter interface {
+	// FormatRequest formats the logical.LogInput into a RequestEntry.
+	FormatRequest(context.Context, *logical.LogInput, timeProvider) (*RequestEntry, error)
+	// FormatResponse formats the logical.LogInput into a ResponseEntry.
+	FormatResponse(context.Context, *logical.LogInput, timeProvider) (*ResponseEntry, error)
+}
+
+// HeaderFormatter is an interface defining the methods of the
+// vault.AuditedHeadersConfig structure needed in this package.
+type HeaderFormatter interface {
+	// ApplyConfig returns a map of header values that consists of the
+	// intersection of the provided set of header values with a configured
+	// set of headers and will hash headers that have been configured as such.
+	ApplyConfig(context.Context, map[string][]string, Salter) (map[string][]string, error)
+}
+
+// RequestEntry is the structure of a request audit log entry.
+type RequestEntry struct {
+	Auth          *Auth    `json:"auth,omitempty"`
+	Error         string   `json:"error,omitempty"`
+	ForwardedFrom string   `json:"forwarded_from,omitempty"` // Populated in Enterprise when a request is forwarded
+	Request       *Request `json:"request,omitempty"`
+	Time          string   `json:"time,omitempty"`
+	Type          string   `json:"type,omitempty"`
+}
+
+// ResponseEntry is the structure of a response audit log entry.
+type ResponseEntry struct { + Auth *Auth `json:"auth,omitempty"` + Error string `json:"error,omitempty"` + Forwarded bool `json:"forwarded,omitempty"` + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Request *Request `json:"request,omitempty"` + Response *Response `json:"response,omitempty"` +} + +type Request struct { + ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` + ClientID string `json:"client_id,omitempty"` + ClientToken string `json:"client_token,omitempty"` + ClientTokenAccessor string `json:"client_token_accessor,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + ID string `json:"id,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountClass string `json:"mount_class,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountType string `json:"mount_type,omitempty"` + MountRunningVersion string `json:"mount_running_version,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + Namespace *Namespace `json:"namespace,omitempty"` + Operation logical.Operation `json:"operation,omitempty"` + Path string `json:"path,omitempty"` + PolicyOverride bool `json:"policy_override,omitempty"` + RemoteAddr string `json:"remote_address,omitempty"` + RemotePort int `json:"remote_port,omitempty"` + ReplicationCluster string `json:"replication_cluster,omitempty"` + RequestURI string `json:"request_uri,omitempty"` + WrapTTL int `json:"wrap_ttl,omitempty"` +} + +type Response struct { + Auth *Auth `json:"auth,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountClass string `json:"mount_class,omitempty"` + MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountRunningVersion string `json:"mount_running_plugin_version,omitempty"` + MountType string `json:"mount_type,omitempty"` + Redirect string `json:"redirect,omitempty"` + Secret *Secret `json:"secret,omitempty"` + WrapInfo *ResponseWrapInfo `json:"wrap_info,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +type Auth struct { + Accessor string `json:"accessor,omitempty"` + ClientToken string `json:"client_token,omitempty"` + DisplayName string `json:"display_name,omitempty"` + EntityCreated bool `json:"entity_created,omitempty"` + EntityID string `json:"entity_id,omitempty"` + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + NumUses int `json:"num_uses,omitempty"` + Policies []string `json:"policies,omitempty"` + PolicyResults *PolicyResults `json:"policy_results,omitempty"` + RemainingUses int `json:"remaining_uses,omitempty"` + TokenPolicies []string `json:"token_policies,omitempty"` + TokenIssueTime string `json:"token_issue_time,omitempty"` + TokenTTL int64 `json:"token_ttl,omitempty"` + TokenType string `json:"token_type,omitempty"` +} + +type PolicyResults struct { + Allowed bool `json:"allowed"` + GrantingPolicies []PolicyInfo 
`json:"granting_policies,omitempty"` +} + +type PolicyInfo struct { + Name string `json:"name,omitempty"` + NamespaceId string `json:"namespace_id,omitempty"` + NamespacePath string `json:"namespace_path,omitempty"` + Type string `json:"type"` +} + +type Secret struct { + LeaseID string `json:"lease_id,omitempty"` +} + +type ResponseWrapInfo struct { + Accessor string `json:"accessor,omitempty"` + CreationPath string `json:"creation_path,omitempty"` + CreationTime string `json:"creation_time,omitempty"` + Token string `json:"token,omitempty"` + TTL int `json:"ttl,omitempty"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type Namespace struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` +} + +// nonPersistentSalt is used for obtaining a salt that is not persisted. +type nonPersistentSalt struct{} + +// BackendConfig contains configuration parameters used in the factory func to +// instantiate audit backends +type BackendConfig struct { + // The view to store the salt + SaltView logical.Storage + + // The salt config that should be used for any secret obfuscation + SaltConfig *salt.Config + + // Config is the opaque user configuration provided when mounting + Config map[string]string + + // MountPath is the path where this Backend is mounted + MountPath string + + // Logger is used to emit log messages usually captured in the server logs. + Logger hclog.Logger +} + +// Factory is the factory function to create an audit backend. +type Factory func(context.Context, *BackendConfig, HeaderFormatter) (Backend, error) diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 000000000000..bc2e940659e0 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +version: v1 +plugins: + - plugin: go + out: . + opt: + - paths=source_relative + - plugin: go-grpc + out: . + opt: + - paths=source_relative + diff --git a/buf.lock b/buf.lock new file mode 100644 index 000000000000..37619defaeec --- /dev/null +++ b/buf.lock @@ -0,0 +1,8 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: hashicorp + repository: go-kms-wrapping + commit: b117606343c8401082b98ec432af4cce + digest: shake256:6d6ec23f81669bf1d380b0783e6b4b86805f28733aed46222e7358441402b71760689ea10b45592db98caa3215a115120e03b1319192dfc918f966ccdc845715 diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 000000000000..bbaf0d4e367c --- /dev/null +++ b/buf.yaml @@ -0,0 +1,123 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +version: v1 +breaking: + use: + - FILE +deps: + - buf.build/hashicorp/go-kms-wrapping +lint: + ignore_only: + ENUM_VALUE_PREFIX: + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + ENUM_ZERO_VALUE_SUFFIX: + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + FIELD_LOWER_SNAKE_CASE: + - enthelpers/wal/types_ent.proto + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/identity.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + IMPORT_USED: + - vault/hcp_link/proto/node_status/status.proto + - vault/replication_services_ent.proto + PACKAGE_DIRECTORY_MATCH: + - enthelpers/merkle/types_ent.proto + - enthelpers/wal/types_ent.proto + - helper/forwarding/types.proto + - helper/identity/mfa/types.proto + - helper/identity/types.proto + - helper/storagepacker/types.proto + - physical/raft/types.proto + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/event.proto + - sdk/logical/identity.proto + - sdk/logical/plugin.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/activity/activity_log.proto + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/seal/multi_wrap_value.proto + - vault/tokens/token.proto + PACKAGE_SAME_DIRECTORY: + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + PACKAGE_SAME_GO_PACKAGE: + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + PACKAGE_VERSION_SUFFIX: + - enthelpers/merkle/types_ent.proto + - enthelpers/wal/types_ent.proto + - helper/forwarding/types.proto + - helper/identity/mfa/types.proto + - helper/identity/types.proto + - helper/storagepacker/types.proto + - physical/raft/types.proto + - sdk/database/dbplugin/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/event.proto + - sdk/logical/identity.proto + - sdk/logical/plugin.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/activity/activity_log.proto + - sdk/helper/clientcountutil/generation/generate_data.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/hcp_link/proto/node_status/status.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + - vault/seal/multi_wrap_value.proto + - vault/tokens/token.proto + RPC_REQUEST_RESPONSE_UNIQUE: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/plugin/pb/backend.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + RPC_REQUEST_STANDARD_NAME: + - sdk/database/dbplugin/database.proto + - 
sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + RPC_RESPONSE_STANDARD_NAME: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto + SERVICE_SUFFIX: + - sdk/database/dbplugin/database.proto + - sdk/database/dbplugin/v5/proto/database.proto + - sdk/helper/pluginutil/multiplexing.proto + - sdk/logical/version.proto + - sdk/plugin/pb/backend.proto + - vault/hcp_link/proto/link_control/link_control.proto + - vault/hcp_link/proto/meta/meta.proto + - vault/replication/replication_resolver_ent.proto + - vault/replication_services_ent.proto + - vault/request_forwarding_service.proto diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go index 9e7d7c36c710..7bede8f564cf 100644 --- a/builtin/audit/file/backend.go +++ b/builtin/audit/file/backend.go @@ -1,167 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package file import ( - "bytes" "context" "fmt" - "io" - "os" - "path/filepath" + "reflect" "strconv" "strings" "sync" "sync/atomic" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/internal/observability/event" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" ) -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { +const ( + stdout = "stdout" + discard = "discard" +) + +var _ audit.Backend = (*Backend)(nil) + +// Backend is the audit backend for the file-based audit store. +// +// NOTE: This audit backend is currently very simple: it appends to a file. +// It doesn't do anything more at the moment to assist with rotation +// or reset the write cursor, this should be done in the future. 
+type Backend struct { + fallback bool + name string + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + salt *atomic.Value + saltConfig *salt.Config + saltMutex sync.RWMutex + saltView logical.Storage +} + +func Factory(_ context.Context, conf *audit.BackendConfig, headersConfig audit.HeaderFormatter) (audit.Backend, error) { if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") - } - if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") + return nil, fmt.Errorf("nil salt config: %w", audit.ErrInvalidParameter) } - path, ok := conf.Config["file_path"] - if !ok { - path, ok = conf.Config["path"] - if !ok { - return nil, fmt.Errorf("file_path is required") - } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view: %w", audit.ErrInvalidParameter) } - // normalize path if configured for stdout - if strings.EqualFold(path, "stdout") { - path = "stdout" - } - if strings.EqualFold(path, "discard") { - path = "discard" + if conf.Logger == nil || reflect.ValueOf(conf.Logger).IsNil() { + return nil, fmt.Errorf("nil logger: %w", audit.ErrInvalidParameter) } - format, ok := conf.Config["format"] - if !ok { - format = "json" - } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) + if conf.MountPath == "" { + return nil, fmt.Errorf("mount path cannot be empty: %w", audit.ErrInvalidParameter) } - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) + // The config options 'fallback' and 'filter' are mutually exclusive, a fallback + // device catches everything, so it cannot be allowed to filter. + var fallback bool + var err error + if fallbackRaw, ok := conf.Config["fallback"]; ok { + fallback, err = parseutil.ParseBool(fallbackRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to parse 'fallback': %w", audit.ErrExternalOptions) } - hmacAccessor = value } - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b + if _, ok := conf.Config["filter"]; ok && fallback { + return nil, fmt.Errorf("cannot configure a fallback device with a filter: %w", audit.ErrExternalOptions) } - // Check if mode is provided - mode := os.FileMode(0o600) - if modeRaw, ok := conf.Config["mode"]; ok { - m, err := strconv.ParseUint(modeRaw, 8, 32) - if err != nil { - return nil, err - } - switch m { - case 0: - // if mode is 0000, then do not modify file mode - if path != "stdout" && path != "discard" { - fileInfo, err := os.Stat(path) - if err != nil { - return nil, err - } - mode = fileInfo.Mode() - } - default: - mode = os.FileMode(m) + // Get file path from config or fall back to the old option name ('path') for compatibility + // (see commit bac4fe0799a372ba1245db642f3f6cd1f1d02669). 
+ var filePath string + if p, ok := conf.Config["file_path"]; ok { + filePath = p + } else if p, ok = conf.Config["path"]; ok { + filePath = p + } else { + return nil, fmt.Errorf("file_path is required: %w", audit.ErrExternalOptions) + } - } + // normalize file path if configured for stdout + if strings.EqualFold(filePath, stdout) { + filePath = stdout + } + if strings.EqualFold(filePath, discard) { + filePath = discard + } + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err } b := &Backend{ - path: path, - mode: mode, + fallback: fallback, + name: conf.MountPath, saltConfig: conf.SaltConfig, saltView: conf.SaltView, salt: new(atomic.Value), - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - }, + nodeIDList: []eventlogger.NodeID{}, + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node), } // Ensure we are working with the right type by explicitly storing a nil of // the right type b.salt.Store((*salt.Salt)(nil)) - switch format { - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } + err = b.configureFilterNode(conf.Config["filter"]) + if err != nil { + return nil, err } - switch path { - case "stdout", "discard": - // no need to test opening file if outputting to stdout or discarding - default: - // Ensure that the file can be successfully opened for writing; - // otherwise it will be too late to catch later without problems - // (ref: https://github.com/hashicorp/vault/issues/550) - if err := b.open(); err != nil { - return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", path, err) - } + err = b.configureFormatterNode(conf.MountPath, cfg, conf.Logger) + if err != nil { + return nil, err } - return b, nil -} - -// Backend is the audit backend for the file-based audit store. -// -// NOTE: This audit backend is currently very simple: it appends to a file. -// It doesn't do anything more at the moment to assist with rotation -// or reset the write cursor, this should be done in the future. 
-type Backend struct {
-	path string
-
-	formatter    audit.AuditFormatter
-	formatConfig audit.FormatterConfig
-
-	fileLock sync.RWMutex
-	f        *os.File
-	mode     os.FileMode
+	err = b.configureSinkNode(conf.MountPath, filePath, conf.Config["mode"], cfg.RequiredFormat.String())
+	if err != nil {
+		return nil, fmt.Errorf("error configuring sink node: %w", err)
+	}
 
-	saltMutex  sync.RWMutex
-	salt       *atomic.Value
-	saltConfig *salt.Config
-	saltView   logical.Storage
+	return b, nil
 }
 
-var _ audit.Backend = (*Backend)(nil)
-
 func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
 	s := b.salt.Load().(*salt.Salt)
 	if s != nil {
@@ -186,164 +156,201 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
 	return newSalt, nil
 }
 
-func (b *Backend) GetHash(ctx context.Context, data string) (string, error) {
-	salt, err := b.Salt(ctx)
-	if err != nil {
-		return "", err
+func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput) error {
+	if len(b.nodeIDList) > 0 {
+		return audit.ProcessManual(ctx, in, b.nodeIDList, b.nodeMap)
 	}
-	return audit.HashString(salt, data), nil
+	return nil
 }
 
-func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
-	var writer io.Writer
-	switch b.path {
-	case "stdout":
-		writer = os.Stdout
-	case "discard":
-		return nil
+func (b *Backend) Reload(_ context.Context) error {
+	for _, n := range b.nodeMap {
+		if n.Type() == eventlogger.NodeTypeSink {
+			return n.Reopen()
+		}
 	}
-	buf := bytes.NewBuffer(make([]byte, 0, 2000))
-	err := b.formatter.FormatRequest(ctx, buf, b.formatConfig, in)
-	if err != nil {
-		return err
-	}
+	return nil
+}
 
-	return b.log(ctx, buf, writer)
+func (b *Backend) Invalidate(_ context.Context) {
+	b.saltMutex.Lock()
+	defer b.saltMutex.Unlock()
+	b.salt.Store((*salt.Salt)(nil))
 }
 
-func (b *Backend) log(ctx context.Context, buf *bytes.Buffer, writer io.Writer) error {
-	reader := bytes.NewReader(buf.Bytes())
+// newFormatterConfig creates the configuration required by a formatter node using
+// the config map supplied to the factory.
+func newFormatterConfig(headerFormatter audit.HeaderFormatter, config map[string]string) (audit.FormatterConfig, error) {
+	var opts []audit.Option
+
+	if format, ok := config["format"]; ok {
+		if !audit.IsValidFormat(format) {
+			return audit.FormatterConfig{}, fmt.Errorf("unsupported 'format': %w", audit.ErrExternalOptions)
+		}
 
-	b.fileLock.Lock()
+		opts = append(opts, audit.WithFormat(format))
+	}
 
-	if writer == nil {
-		if err := b.open(); err != nil {
-			b.fileLock.Unlock()
-			return err
+	// Check if hashing of accessor is disabled
+	if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
+		v, err := strconv.ParseBool(hmacAccessorRaw)
+		if err != nil {
+			return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'hmac_accessor': %w", audit.ErrExternalOptions)
 		}
-		writer = b.f
+		opts = append(opts, audit.WithHMACAccessor(v))
 	}
 
-	if _, err := reader.WriteTo(writer); err == nil {
-		b.fileLock.Unlock()
-		return nil
-	} else if b.path == "stdout" {
-		b.fileLock.Unlock()
-		return err
+	// Check if raw logging is enabled
+	if raw, ok := config["log_raw"]; ok {
+		v, err := strconv.ParseBool(raw)
+		if err != nil {
+			return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'log_raw': %w", audit.ErrExternalOptions)
+		}
+		opts = append(opts, audit.WithRaw(v))
 	}
 
-	// If writing to stdout there's no real reason to think anything would have
-	// changed so return above. Otherwise, opportunistically try to re-open the
-	// FD, once per call.
- b.f.Close() - b.f = nil + if elideListResponsesRaw, ok := config["elide_list_responses"]; ok { + v, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'elide_list_responses': %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithElision(v)) + } - if err := b.open(); err != nil { - b.fileLock.Unlock() - return err + if prefix, ok := config["prefix"]; ok { + opts = append(opts, audit.WithPrefix(prefix)) } - reader.Seek(0, io.SeekStart) - _, err := reader.WriteTo(writer) - b.fileLock.Unlock() - return err + return audit.NewFormatterConfig(headerFormatter, opts...) } -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var writer io.Writer - switch b.path { - case "stdout": - writer = os.Stdout - case "discard": - return nil +// configureFormatterNode is used to configure a formatter node and associated ID on the Backend. +func (b *Backend) configureFormatterNode(name string, formatConfig audit.FormatterConfig, logger hclog.Logger) error { + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for formatter node: %w: %w", audit.ErrInternal, err) } - buf := bytes.NewBuffer(make([]byte, 0, 6000)) - err := b.formatter.FormatResponse(ctx, buf, b.formatConfig, in) + formatterNode, err := audit.NewEntryFormatter(name, formatConfig, b, logger) if err != nil { - return err + return fmt.Errorf("error creating formatter: %w", err) } - return b.log(ctx, buf, writer) + b.nodeIDList = append(b.nodeIDList, formatterNodeID) + b.nodeMap[formatterNodeID] = formatterNode + + return nil } -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var writer io.Writer - switch b.path { - case "stdout": - writer = os.Stdout - case "discard": - return nil +// configureSinkNode is used to configure a sink node and associated ID on the Backend. 
+func (b *Backend) configureSinkNode(name string, filePath string, mode string, format string) error { + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("name is required: %w", audit.ErrExternalOptions) } - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err + filePath = strings.TrimSpace(filePath) + if filePath == "" { + return fmt.Errorf("file path is required: %w", audit.ErrExternalOptions) } - return b.log(ctx, &buf, writer) -} + format = strings.TrimSpace(format) + if format == "" { + return fmt.Errorf("format is required: %w", audit.ErrInvalidParameter) + } -// The file lock must be held before calling this -func (b *Backend) open() error { - if b.f != nil { - return nil + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for sink node: %w: %w", audit.ErrInternal, err) } - if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil { - return err + + // normalize file path if configured for stdout or discard + if strings.EqualFold(filePath, stdout) { + filePath = stdout + } else if strings.EqualFold(filePath, discard) { + filePath = discard } - var err error - b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode) + var sinkNode eventlogger.Node + var sinkName string + + switch filePath { + case stdout: + sinkName = stdout + sinkNode, err = event.NewStdoutSinkNode(format) + case discard: + sinkName = discard + sinkNode = event.NewNoopSink() + default: + // The NewFileSink function attempts to open the file and will return an error if it can't. + sinkName = name + sinkNode, err = event.NewFileSink(filePath, format, []event.Option{event.WithFileMode(mode)}...) + } + + if err != nil { + return fmt.Errorf("file sink creation failed for path %q: %w", filePath, err) + } + + // Wrap the sink node with metrics middleware + sinkMetricTimer, err := audit.NewSinkMetricTimer(sinkName, sinkNode) if err != nil { - return err + return fmt.Errorf("unable to add timing metrics to sink for path %q: %w", filePath, err) } - // Change the file mode in case the log file already existed. We special - // case /dev/null since we can't chmod it and bypass if the mode is zero - switch b.path { - case "/dev/null": + // Decide what kind of labels we want and wrap the sink node inside a metrics counter. + var metricLabeler event.Labeler + switch { + case b.fallback: + metricLabeler = &audit.MetricLabelerAuditFallback{} default: - if b.mode != 0 { - err = os.Chmod(b.path, b.mode) - if err != nil { - return err - } - } + metricLabeler = &audit.MetricLabelerAuditSink{} + } + + sinkMetricCounter, err := event.NewMetricsCounter(sinkName, sinkMetricTimer, metricLabeler) + if err != nil { + return fmt.Errorf("unable to add counting metrics to sink for path %q: %w", filePath, err) } + b.nodeIDList = append(b.nodeIDList, sinkNodeID) + b.nodeMap[sinkNodeID] = sinkMetricCounter + return nil } -func (b *Backend) Reload(_ context.Context) error { - switch b.path { - case "stdout", "discard": - return nil - } +// Name for this backend, this would ideally correspond to the mount path for the audit device. +func (b *Backend) Name() string { + return b.name +} - b.fileLock.Lock() - defer b.fileLock.Unlock() +// Nodes returns the nodes which should be used by the event framework to process audit entries. 
+func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return b.nodeMap +} - if b.f == nil { - return b.open() - } +// NodeIDs returns the IDs of the nodes, in the order they are required. +func (b *Backend) NodeIDs() []eventlogger.NodeID { + return b.nodeIDList +} - err := b.f.Close() - // Set to nil here so that even if we error out, on the next access open() - // will be tried - b.f = nil - if err != nil { - return err +// EventType returns the event type for the backend. +func (b *Backend) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter. +func (b *Backend) HasFiltering() bool { + if b.nodeMap == nil { + return false } - return b.open() + return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter } -func (b *Backend) Invalidate(_ context.Context) { - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - b.salt.Store((*salt.Salt)(nil)) +// IsFallback can be used to determine if this audit backend device is intended to +// be used as a fallback to catch all events that are not written when only using +// filtered pipelines. +func (b *Backend) IsFallback() bool { + return b.fallback } diff --git a/builtin/audit/file/backend_filter_node.go b/builtin/audit/file/backend_filter_node.go new file mode 100644 index 000000000000..6ab19bd9a3be --- /dev/null +++ b/builtin/audit/file/backend_filter_node.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package file + +// configureFilterNode is used to configure a filter node and associated ID on the Backend. +func (b *Backend) configureFilterNode(_ string) error { + return nil +} diff --git a/builtin/audit/file/backend_filter_node_test.go b/builtin/audit/file/backend_filter_node_test.go new file mode 100644 index 000000000000..3b05b6702f0d --- /dev/null +++ b/builtin/audit/file/backend_filter_node_test.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package file + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/stretchr/testify/require" +) + +// TestBackend_configureFilterNode ensures that configureFilterNode handles various +// filter values as expected. Empty (including whitespace) strings should return +// no error but skip configuration of the node. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. 
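+// For reference, the filter values exercised below are boolean expressions in
+// the go-bexpr style evaluated against audit entry metadata (assuming the
+// Enterprise filter implementation), for example:
+//
+//	operation == update
+//	mount_type == kv
+//
+// In the community edition configureFilterNode is a no-op, so every value here,
+// valid or not, should succeed without adding a node.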
+func TestBackend_configureFilterNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + filter string + }{ + "happy": { + filter: "operation == update", + }, + "empty": { + filter: "", + }, + "spacey": { + filter: " ", + }, + "bad": { + filter: "___qwerty", + }, + "unsupported-field": { + filter: "foo == bar", + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureFilterNode(tc.filter) + require.NoError(t, err) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + }) + } +} + +// TestBackend_configureFilterFormatterSink ensures that configuring all three +// types of nodes on a Backend works as expected, i.e. we have only formatter and sink +// nodes at the end and nothing gets overwritten. The order of calls influences the +// slice of IDs on the Backend. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. +func TestBackend_configureFilterFormatterSink(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFilterNode("path == bar") + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + require.NoError(t, err) + + err = b.configureSinkNode("foo", "/tmp/foo", "0777", "json") + require.NoError(t, err) + + require.Len(t, b.nodeIDList, 2) + require.Len(t, b.nodeMap, 2) + + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) + + id = b.nodeIDList[1] + node = b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) +} diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go index 817518c50bd8..004513f1adc1 100644 --- a/builtin/audit/file/backend_test.go +++ b/builtin/audit/file/backend_test.go @@ -1,183 +1,561 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package file import ( "context" - "io/ioutil" "os" "path/filepath" "strconv" "testing" - "time" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internal/observability/event" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" ) +// TestAuditFile_fileModeNew verifies that the backend Factory correctly sets +// the file mode when the mode argument is set. 
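+// The mode string is an octal literal, so it is parsed in base 8. A minimal
+// sketch of the conversion the test relies on:
+//
+//	mode, err := strconv.ParseUint("0777", 8, 32) // 0o777 == 511
+//	fileMode := os.FileMode(mode)                 // renders as -rwxrwxrwx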
func TestAuditFile_fileModeNew(t *testing.T) { + t.Parallel() + modeStr := "0777" mode, err := strconv.ParseUint(modeStr, 8, 32) - if err != nil { - t.Fatal(err) - } - - path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new") - if err != nil { - t.Fatal(err) - } - - defer os.RemoveAll(path) + require.NoError(t, err) - file := filepath.Join(path, "auditTest.txt") - - config := map[string]string{ - "path": file, - "mode": modeStr, - } + file := filepath.Join(t.TempDir(), "auditTest.txt") - _, err = Factory(context.Background(), &audit.BackendConfig{ + backendConfig := &audit.BackendConfig{ + Config: map[string]string{ + "path": file, + "mode": modeStr, + }, + MountPath: "foo/bar", SaltConfig: &salt.Config{}, SaltView: &logical.InmemStorage{}, - Config: config, - }) - if err != nil { - t.Fatal(err) + Logger: hclog.NewNullLogger(), } + _, err = Factory(context.Background(), backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) info, err := os.Stat(file) - if err != nil { - t.Fatalf("Cannot retrieve file mode from `Stat`") - } - if info.Mode() != os.FileMode(mode) { - t.Fatalf("File mode does not match.") - } + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`") + require.Equalf(t, os.FileMode(mode), info.Mode(), "File mode does not match.") } +// TestAuditFile_fileModeExisting verifies that the backend Factory correctly sets +// the mode on an existing file. func TestAuditFile_fileModeExisting(t *testing.T) { - f, err := ioutil.TempFile("", "test") - if err != nil { - t.Fatalf("Failure to create test file.") - } - defer os.Remove(f.Name()) + t.Parallel() + + dir := t.TempDir() + f, err := os.CreateTemp(dir, "auditTest.log") + require.NoErrorf(t, err, "Failure to create test file.") err = os.Chmod(f.Name(), 0o777) - if err != nil { - t.Fatalf("Failure to chmod temp file for testing.") - } + require.NoErrorf(t, err, "Failure to chmod temp file for testing.") err = f.Close() - if err != nil { - t.Fatalf("Failure to close temp file for test.") - } + require.NoErrorf(t, err, "Failure to close temp file for test.") - config := map[string]string{ - "path": f.Name(), - } - - _, err = Factory(context.Background(), &audit.BackendConfig{ - Config: config, + backendConfig := &audit.BackendConfig{ + Config: map[string]string{ + "path": f.Name(), + }, + MountPath: "foo/bar", SaltConfig: &salt.Config{}, SaltView: &logical.InmemStorage{}, - }) - if err != nil { - t.Fatal(err) + Logger: hclog.NewNullLogger(), } + _, err = Factory(context.Background(), backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + info, err := os.Stat(f.Name()) - if err != nil { - t.Fatalf("cannot retrieve file mode from `Stat`") - } - if info.Mode() != os.FileMode(0o600) { - t.Fatalf("File mode does not match.") - } + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`") + require.Equalf(t, os.FileMode(0o600), info.Mode(), "File mode does not match.") } +// TestAuditFile_fileMode0000 verifies that setting the audit file mode to +// "0000" prevents Vault from modifying the permissions of the file. func TestAuditFile_fileMode0000(t *testing.T) { - f, err := ioutil.TempFile("", "test") - if err != nil { - t.Fatalf("Failure to create test file. The error is %v", err) - } - defer os.Remove(f.Name()) + t.Parallel() + + dir := t.TempDir() + f, err := os.CreateTemp(dir, "auditTest.log") + require.NoErrorf(t, err, "Failure to create test file.") err = os.Chmod(f.Name(), 0o777) - if err != nil { - t.Fatalf("Failure to chmod temp file for testing. 
The error is %v", err) - } + require.NoErrorf(t, err, "Failure to chmod temp file for testing.") err = f.Close() - if err != nil { - t.Fatalf("Failure to close temp file for test. The error is %v", err) - } + require.NoErrorf(t, err, "Failure to close temp file for test.") - config := map[string]string{ - "path": f.Name(), - "mode": "0000", + backendConfig := &audit.BackendConfig{ + Config: map[string]string{ + "path": f.Name(), + "mode": "0000", + }, + MountPath: "foo/bar", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), } - _, err = Factory(context.Background(), &audit.BackendConfig{ - Config: config, + _, err = Factory(context.Background(), backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(f.Name()) + require.NoErrorf(t, err, "cannot retrieve file mode from `Stat`. The error is %v", err) + require.Equalf(t, os.FileMode(0o777), info.Mode(), "File mode does not match.") +} + +// TestAuditFile_EventLogger_fileModeNew verifies that the Factory function +// correctly sets the file mode when the useEventLogger argument is set to +// true. +func TestAuditFile_EventLogger_fileModeNew(t *testing.T) { + modeStr := "0777" + mode, err := strconv.ParseUint(modeStr, 8, 32) + require.NoError(t, err) + + file := filepath.Join(t.TempDir(), "auditTest.txt") + + backendConfig := &audit.BackendConfig{ + Config: map[string]string{ + "path": file, + "mode": modeStr, + }, + MountPath: "foo/bar", SaltConfig: &salt.Config{}, SaltView: &logical.InmemStorage{}, - }) - if err != nil { - t.Fatal(err) + Logger: hclog.NewNullLogger(), } - info, err := os.Stat(f.Name()) - if err != nil { - t.Fatalf("cannot retrieve file mode from `Stat`. The error is %v", err) + _, err = Factory(context.Background(), backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + info, err := os.Stat(file) + require.NoErrorf(t, err, "Cannot retrieve file mode from `Stat`") + require.Equalf(t, os.FileMode(mode), info.Mode(), "File mode does not match.") +} + +// TestBackend_newFormatterConfig ensures that all the configuration values are parsed correctly. 
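+// As a sketch (hypothetical option values), a happy-path invocation looks like:
+//
+//	cfg, err := newFormatterConfig(&corehelpers.NoopHeaderFormatter{}, map[string]string{
+//		"format":        "json",
+//		"hmac_accessor": "false",
+//	})
+//
+// The boolean options are parsed with strconv.ParseBool, which accepts only
+// "1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false" and
+// "False"; that is why values such as "maybe" fail in the cases below.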
+func TestBackend_newFormatterConfig(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config map[string]string + want audit.FormatterConfig + wantErr bool + expectedMessage string + }{ + "happy-path-json": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "json", + }, wantErr: false, + }, + "happy-path-jsonx": { + config: map[string]string{ + "format": audit.JSONxFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "jsonx", + }, + wantErr: false, + }, + "invalid-format": { + config: map[string]string{ + "format": " squiggly ", + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedMessage: "unsupported 'format': invalid configuration", + }, + "invalid-hmac-accessor": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse 'hmac_accessor': invalid configuration", + }, + "invalid-log-raw": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse 'log_raw: invalid configuration", + }, + "invalid-elide-bool": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedMessage: "unable to parse 'elide_list_responses': invalid configuration", + }, + "prefix": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "prefix": "foo", + }, + want: audit.FormatterConfig{ + RequiredFormat: audit.JSONFormat, + Prefix: "foo", + HMACAccessor: true, + }, + }, } - if info.Mode() != os.FileMode(0o777) { - t.Fatalf("File mode does not match.") + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := newFormatterConfig(&corehelpers.NoopHeaderFormatter{}, tc.config) + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedMessage) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want.RequiredFormat, got.RequiredFormat) + require.Equal(t, tc.want.Raw, got.Raw) + require.Equal(t, tc.want.ElideListResponses, got.ElideListResponses) + require.Equal(t, tc.want.HMACAccessor, got.HMACAccessor) + require.Equal(t, tc.want.OmitTime, got.OmitTime) + require.Equal(t, tc.want.Prefix, got.Prefix) + }) } } -func BenchmarkAuditFile_request(b *testing.B) { - config := map[string]string{ - "path": "/dev/null", +// TestBackend_configureFormatterNode ensures that configureFormatterNode +// populates the nodeIDList and nodeMap on Backend when given valid formatConfig. 
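+// configureFormatterNode wraps its failures around the audit.ErrInternal
+// sentinel using Go 1.20 multiple-%w wrapping, so callers can match the
+// sentinel while keeping the underlying cause. A sketch of how such an error
+// can be inspected:
+//
+//	wrapped := fmt.Errorf("error generating random NodeID for formatter node: %w: %w", audit.ErrInternal, cause)
+//	errors.Is(wrapped, audit.ErrInternal) // true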
+func TestBackend_configureFormatterNode(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, } - sink, err := Factory(context.Background(), &audit.BackendConfig{ - Config: config, - SaltConfig: &salt.Config{}, - SaltView: &logical.InmemStorage{}, - }) - if err != nil { - b.Fatal(err) + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) +} + +// TestBackend_configureSinkNode ensures that we can correctly configure the sink +// node on the Backend, and any incorrect parameters result in the relevant errors. +func TestBackend_configureSinkNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + filePath string + mode string + format string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + name: "", + wantErr: true, + expectedErrMsg: "name is required: invalid configuration", + }, + "name-whitespace": { + name: " ", + wantErr: true, + expectedErrMsg: "name is required: invalid configuration", + }, + "filePath-empty": { + name: "foo", + filePath: "", + wantErr: true, + expectedErrMsg: "file path is required: invalid configuration", + }, + "filePath-whitespace": { + name: "foo", + filePath: " ", + wantErr: true, + expectedErrMsg: "file path is required: invalid configuration", + }, + "filePath-stdout-lower": { + name: "foo", + expectedName: "stdout", + filePath: "stdout", + format: "json", + }, + "filePath-stdout-upper": { + name: "foo", + expectedName: "stdout", + filePath: "STDOUT", + format: "json", + }, + "filePath-stdout-mixed": { + name: "foo", + expectedName: "stdout", + filePath: "StdOut", + format: "json", + }, + "filePath-discard-lower": { + name: "foo", + expectedName: "discard", + filePath: "discard", + format: "json", + }, + "filePath-discard-upper": { + name: "foo", + expectedName: "discard", + filePath: "DISCARD", + format: "json", + }, + "filePath-discard-mixed": { + name: "foo", + expectedName: "discard", + filePath: "DisCArd", + format: "json", + }, + "format-empty": { + name: "foo", + filePath: "/tmp/", + format: "", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "format-whitespace": { + name: "foo", + filePath: "/tmp/", + format: " ", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "filePath-weird-with-mode-zero": { + name: "foo", + filePath: "/tmp/qwerty", + format: "json", + mode: "0", + wantErr: true, + expectedErrMsg: "file sink creation failed for path \"/tmp/qwerty\": unable to determine existing file mode: stat /tmp/qwerty: no such file or directory", + }, + "happy": { + name: "foo", + filePath: "/tmp/audit.log", + mode: "", + format: "json", + wantErr: false, + expectedName: "foo", + }, } - in := &logical.LogInput{ - Auth: &logical.Auth{ - ClientToken: "foo", - Accessor: "bar", - EntityID: "foobarentity", - DisplayName: "testtoken", - NoDefaultPolicy: true, - Policies: []string{"root"}, - TokenType: logical.TokenTypeService, + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + 
nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureSinkNode(tc.name, tc.filePath, tc.mode, tc.format) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} + +// TestBackend_Factory_Conf is used to ensure that any configuration which is +// supplied, is validated and tested. +func TestBackend_Factory_Conf(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isErrorExpected bool + expectedErrorMessage string + }{ + "nil-salt-config": { + backendConfig: &audit.BackendConfig{ + SaltConfig: nil, + }, + isErrorExpected: true, + expectedErrorMessage: "nil salt config: invalid internal parameter", }, - Request: &logical.Request{ - Operation: logical.UpdateOperation, - Path: "/foo", - Connection: &logical.Connection{ - RemoteAddr: "127.0.0.1", + "nil-salt-view": { + backendConfig: &audit.BackendConfig{ + SaltConfig: &salt.Config{}, }, - WrapInfo: &logical.RequestWrapInfo{ - TTL: 60 * time.Second, + isErrorExpected: true, + expectedErrorMessage: "nil salt view: invalid internal parameter", + }, + "nil-logger": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: nil, }, - Headers: map[string][]string{ - "foo": {"bar"}, + isErrorExpected: true, + expectedErrorMessage: "nil logger: invalid internal parameter", + }, + "fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "file_path": discard, + "filter": "mount_type == kv", + }, }, + isErrorExpected: true, + expectedErrorMessage: "cannot configure a fallback device with a filter: invalid configuration", + }, + "non-fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + "file_path": discard, + "filter": "mount_type == kv", + }, + }, + isErrorExpected: false, }, } - ctx := namespace.RootContext(nil) - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - if err := sink.LogRequest(ctx, in); err != nil { - panic(err) + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + default: + require.NoError(t, err) + require.NotNil(t, be) } - } - }) + }) + } +} + +// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed +// and set correctly, then exposed via the interface method IsFallback(). 
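+// A fallback device is enabled purely through its config map; the cases below
+// use (values as in the tests):
+//
+//	Config: map[string]string{
+//		"fallback":  "true",
+//		"file_path": "discard",
+//	}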
+func TestBackend_IsFallback(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isFallbackExpected bool + }{ + "fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "file_path": discard, + }, + }, + isFallbackExpected: true, + }, + "no-fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + "file_path": discard, + }, + }, + isFallbackExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + require.NotNil(t, be) + require.Equal(t, tc.isFallbackExpected, be.IsFallback()) + }) + } } diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go index 7a000b2c7c66..96c788df60e2 100644 --- a/builtin/audit/socket/backend.go +++ b/builtin/audit/socket/backend.go @@ -1,32 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package socket import ( - "bytes" "context" "fmt" - "net" + "reflect" "strconv" + "strings" "sync" - "time" - multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/internal/observability/event" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" ) -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { +var _ audit.Backend = (*Backend)(nil) + +// Backend is the audit backend for the socket audit transport. 
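+// The backend is assembled as an eventlogger pipeline: nodeIDList holds node
+// IDs in processing order and nodeMap resolves each ID to its node, i.e.
+//
+//	filter (optional, Enterprise) -> formatter -> sink
+//
+// HasFiltering relies on this ordering by inspecting only the first node.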
+type Backend struct { + fallback bool + name string + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + salt *salt.Salt + saltConfig *salt.Config + saltMutex sync.RWMutex + saltView logical.Storage +} + +func Factory(_ context.Context, conf *audit.BackendConfig, headersConfig audit.HeaderFormatter) (audit.Backend, error) { if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") + return nil, fmt.Errorf("nil salt config: %w", audit.ErrInvalidParameter) } + if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") + return nil, fmt.Errorf("nil salt view: %w", audit.ErrInvalidParameter) + } + + if conf.Logger == nil || reflect.ValueOf(conf.Logger).IsNil() { + return nil, fmt.Errorf("nil logger: %w", audit.ErrInvalidParameter) + } + if conf.MountPath == "" { + return nil, fmt.Errorf("mount path cannot be empty: %w", audit.ErrInvalidParameter) } address, ok := conf.Config["address"] if !ok { - return nil, fmt.Errorf("address is required") + return nil, fmt.Errorf("address is required: %w", audit.ErrExternalOptions) } socketType, ok := conf.Config["socket_type"] @@ -38,240 +64,256 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err if !ok { writeDeadline = "2s" } - writeDuration, err := parseutil.ParseDurationSecond(writeDeadline) - if err != nil { - return nil, err - } - format, ok := conf.Config["format"] - if !ok { - format = "json" + sinkOpts := []event.Option{ + event.WithSocketType(socketType), + event.WithMaxDuration(writeDeadline), } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) + + err := event.ValidateOptions(sinkOpts...) + if err != nil { + return nil, err } - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) + // The config options 'fallback' and 'filter' are mutually exclusive, a fallback + // device catches everything, so it cannot be allowed to filter. 
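+	// For example, a config containing both options is rejected (values taken
+	// from the backend tests):
+	//
+	//	"fallback": "true",
+	//	"filter":   "mount_type == kv",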
+ var fallback bool + if fallbackRaw, ok := conf.Config["fallback"]; ok { + fallback, err = parseutil.ParseBool(fallbackRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to parse 'fallback': %w", audit.ErrExternalOptions) } - hmacAccessor = value } - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b + if _, ok := conf.Config["filter"]; ok && fallback { + return nil, fmt.Errorf("cannot configure a fallback device with a filter: %w", audit.ErrExternalOptions) + } + + cfg, err := newFormatterConfig(headersConfig, conf.Config) + if err != nil { + return nil, err } b := &Backend{ + fallback: fallback, + name: conf.MountPath, saltConfig: conf.SaltConfig, saltView: conf.SaltView, - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - }, + nodeIDList: []eventlogger.NodeID{}, + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node), + } - writeDuration: writeDuration, - address: address, - socketType: socketType, + err = b.configureFilterNode(conf.Config["filter"]) + if err != nil { + return nil, err } - switch format { - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } + err = b.configureFormatterNode(conf.MountPath, cfg, conf.Logger) + if err != nil { + return nil, err + } + + err = b.configureSinkNode(conf.MountPath, address, cfg.RequiredFormat.String(), sinkOpts...) + if err != nil { + return nil, err } return b, nil } -// Backend is the audit backend for the socket audit transport. 
-type Backend struct { - connection net.Conn - - formatter audit.AuditFormatter - formatConfig audit.FormatterConfig +func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput) error { + if len(b.nodeIDList) > 0 { + return audit.ProcessManual(ctx, in, b.nodeIDList, b.nodeMap) + } - writeDuration time.Duration - address string - socketType string + return nil +} - sync.Mutex +func (b *Backend) Reload(ctx context.Context) error { + for _, n := range b.nodeMap { + if n.Type() == eventlogger.NodeTypeSink { + return n.Reopen() + } + } - saltMutex sync.RWMutex - salt *salt.Salt - saltConfig *salt.Config - saltView logical.Storage + return nil } -var _ audit.Backend = (*Backend)(nil) - -func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { - salt, err := b.Salt(ctx) +func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) if err != nil { - return "", err + return nil, err } - return audit.HashString(salt, data), nil + b.salt = s + return s, nil } -func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err - } +func (b *Backend) Invalidate(_ context.Context) { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} - b.Lock() - defer b.Unlock() +// newFormatterConfig creates the configuration required by a formatter node using +// the config map supplied to the factory. +func newFormatterConfig(headerFormatter audit.HeaderFormatter, config map[string]string) (audit.FormatterConfig, error) { + var opts []audit.Option - err := b.write(ctx, buf.Bytes()) - if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) + if format, ok := config["format"]; ok { + if !audit.IsValidFormat(format) { + return audit.FormatterConfig{}, fmt.Errorf("unsupported 'format': %w", audit.ErrExternalOptions) } - } - return err -} + opts = append(opts, audit.WithFormat(format)) + } -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { - return err + // Check if hashing of accessor is disabled + if hmacAccessorRaw, ok := config["hmac_accessor"]; ok { + v, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'hmac_accessor': %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithHMACAccessor(v)) } - b.Lock() - defer b.Unlock() + // Check if raw logging is enabled + if raw, ok := config["log_raw"]; ok { + v, err := strconv.ParseBool(raw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'log_raw: %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithRaw(v)) + } - err := b.write(ctx, buf.Bytes()) - if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) + if elideListResponsesRaw, ok := config["elide_list_responses"]; ok { + v, err := 
strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'elide_list_responses': %w", audit.ErrExternalOptions) } + opts = append(opts, audit.WithElision(v)) + } + + if prefix, ok := config["prefix"]; ok { + opts = append(opts, audit.WithPrefix(prefix)) } - return err + return audit.NewFormatterConfig(headerFormatter, opts...) } -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err +// configureFormatterNode is used to configure a formatter node and associated ID on the Backend. +func (b *Backend) configureFormatterNode(name string, formatConfig audit.FormatterConfig, logger hclog.Logger) error { + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for formatter node: %w: %w", audit.ErrInternal, err) } - b.Lock() - defer b.Unlock() - - err := b.write(ctx, buf.Bytes()) + formatterNode, err := audit.NewEntryFormatter(name, formatConfig, b, logger) if err != nil { - rErr := b.reconnect(ctx) - if rErr != nil { - err = multierror.Append(err, rErr) - } else { - // Try once more after reconnecting - err = b.write(ctx, buf.Bytes()) - } + return fmt.Errorf("error creating formatter: %w", err) } - return err + b.nodeIDList = append(b.nodeIDList, formatterNodeID) + b.nodeMap[formatterNodeID] = formatterNode + + return nil } -func (b *Backend) write(ctx context.Context, buf []byte) error { - if b.connection == nil { - if err := b.reconnect(ctx); err != nil { - return err - } +// configureSinkNode is used to configure a sink node and associated ID on the Backend. +func (b *Backend) configureSinkNode(name string, address string, format string, opts ...event.Option) error { + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("name is required: %w", audit.ErrInvalidParameter) + } + + address = strings.TrimSpace(address) + if address == "" { + return fmt.Errorf("address is required: %w", audit.ErrInvalidParameter) } - err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration)) + format = strings.TrimSpace(format) + if format == "" { + return fmt.Errorf("format is required: %w", audit.ErrInvalidParameter) + } + + sinkNodeID, err := event.GenerateNodeID() if err != nil { - return err + return fmt.Errorf("error generating random NodeID for sink node: %w", err) } - _, err = b.connection.Write(buf) + n, err := event.NewSocketSink(address, format, opts...) if err != nil { return err } - return nil -} - -func (b *Backend) reconnect(ctx context.Context) error { - if b.connection != nil { - b.connection.Close() - b.connection = nil + // Wrap the sink node with metrics middleware + sinkMetricTimer, err := audit.NewSinkMetricTimer(name, n) + if err != nil { + return fmt.Errorf("unable to add timing metrics to sink for path %q: %w", name, err) } - timeoutContext, cancel := context.WithTimeout(ctx, b.writeDuration) - defer cancel() + // Decide what kind of labels we want and wrap the sink node inside a metrics counter. 
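+	// The socket sink is wrapped first in a SinkMetricTimer and then in a
+	// MetricsCounter, so the outermost (registered) node records both timing
+	// and count metrics; the labeler below picks fallback-specific labels when
+	// this device is a fallback.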
+ var metricLabeler event.Labeler + switch { + case b.fallback: + metricLabeler = &audit.MetricLabelerAuditFallback{} + default: + metricLabeler = &audit.MetricLabelerAuditSink{} + } - dialer := net.Dialer{} - conn, err := dialer.DialContext(timeoutContext, b.socketType, b.address) + sinkMetricCounter, err := event.NewMetricsCounter(name, sinkMetricTimer, metricLabeler) if err != nil { - return err + return fmt.Errorf("unable to add counting metrics to sink for path %q: %w", name, err) } - b.connection = conn + b.nodeIDList = append(b.nodeIDList, sinkNodeID) + b.nodeMap[sinkNodeID] = sinkMetricCounter return nil } -func (b *Backend) Reload(ctx context.Context) error { - b.Lock() - defer b.Unlock() +// Name for this backend, this would ideally correspond to the mount path for the audit device. +func (b *Backend) Name() string { + return b.name +} - err := b.reconnect(ctx) +// Nodes returns the nodes which should be used by the event framework to process audit entries. +func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return b.nodeMap +} - return err +// NodeIDs returns the IDs of the nodes, in the order they are required. +func (b *Backend) NodeIDs() []eventlogger.NodeID { + return b.nodeIDList } -func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { - b.saltMutex.RLock() - if b.salt != nil { - defer b.saltMutex.RUnlock() - return b.salt, nil - } - b.saltMutex.RUnlock() - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - if b.salt != nil { - return b.salt, nil - } - salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) - if err != nil { - return nil, err +// EventType returns the event type for the backend. +func (b *Backend) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter. +func (b *Backend) HasFiltering() bool { + if b.nodeMap == nil { + return false } - b.salt = salt - return salt, nil + + return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter } -func (b *Backend) Invalidate(_ context.Context) { - b.saltMutex.Lock() - defer b.saltMutex.Unlock() - b.salt = nil +// IsFallback can be used to determine if this audit backend device is intended to +// be used as a fallback to catch all events that are not written when only using +// filtered pipelines. +func (b *Backend) IsFallback() bool { + return b.fallback } diff --git a/builtin/audit/socket/backend_filter_node.go b/builtin/audit/socket/backend_filter_node.go new file mode 100644 index 000000000000..6d6f81e15b7c --- /dev/null +++ b/builtin/audit/socket/backend_filter_node.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package socket + +// configureFilterNode is used to configure a filter node and associated ID on the Backend. +func (b *Backend) configureFilterNode(_ string) error { + return nil +} diff --git a/builtin/audit/socket/backend_filter_node_test.go b/builtin/audit/socket/backend_filter_node_test.go new file mode 100644 index 000000000000..c3b9112e7c82 --- /dev/null +++ b/builtin/audit/socket/backend_filter_node_test.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package socket + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/stretchr/testify/require" +) + +// TestBackend_configureFilterNode ensures that configureFilterNode handles various +// filter values as expected. Empty (including whitespace) strings should return +// no error but skip configuration of the node. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. +func TestBackend_configureFilterNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + filter string + }{ + "happy": { + filter: "operation == update", + }, + "empty": { + filter: "", + }, + "spacey": { + filter: " ", + }, + "bad": { + filter: "___qwerty", + }, + "unsupported-field": { + filter: "foo == bar", + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureFilterNode(tc.filter) + require.NoError(t, err) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + }) + } +} + +// TestBackend_configureFilterFormatterSink ensures that configuring all three +// types of nodes on a Backend works as expected, i.e. we have only formatter and sink +// nodes at the end and nothing gets overwritten. The order of calls influences the +// slice of IDs on the Backend. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. +func TestBackend_configureFilterFormatterSink(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFilterNode("path == bar") + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + require.NoError(t, err) + + err = b.configureSinkNode("foo", "https://hashicorp.com", "json") + require.NoError(t, err) + + require.Len(t, b.nodeIDList, 2) + require.Len(t, b.nodeMap, 2) + + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) + + id = b.nodeIDList[1] + node = b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) +} diff --git a/builtin/audit/socket/backend_test.go b/builtin/audit/socket/backend_test.go new file mode 100644 index 000000000000..b800374febd0 --- /dev/null +++ b/builtin/audit/socket/backend_test.go @@ -0,0 +1,457 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package socket + +import ( + "context" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestBackend_newFormatterConfig ensures that all the configuration values are parsed correctly. 
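+// The expected messages below assert the fully rendered string via
+// require.EqualError, which compares err.Error() verbatim; since the errors
+// wrap sentinels with %w, they could equally be matched structurally, e.g.:
+//
+//	require.True(t, errors.Is(err, audit.ErrExternalOptions))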
+func TestBackend_newFormatterConfig(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config map[string]string + want audit.FormatterConfig + wantErr bool + expectedErrMsg string + }{ + "happy-path-json": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "json", + }, wantErr: false, + }, + "happy-path-jsonx": { + config: map[string]string{ + "format": audit.JSONxFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "jsonx", + }, + wantErr: false, + }, + "invalid-format": { + config: map[string]string{ + "format": " squiggly ", + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unsupported 'format': invalid configuration", + }, + "invalid-hmac-accessor": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'hmac_accessor': invalid configuration", + }, + "invalid-log-raw": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'log_raw: invalid configuration", + }, + "invalid-elide-bool": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'elide_list_responses': invalid configuration", + }, + "prefix": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "prefix": "foo", + }, + want: audit.FormatterConfig{ + RequiredFormat: audit.JSONFormat, + Prefix: "foo", + HMACAccessor: true, + }, + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := newFormatterConfig(&corehelpers.NoopHeaderFormatter{}, tc.config) + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want.RequiredFormat, got.RequiredFormat) + require.Equal(t, tc.want.Raw, got.Raw) + require.Equal(t, tc.want.ElideListResponses, got.ElideListResponses) + require.Equal(t, tc.want.HMACAccessor, got.HMACAccessor) + require.Equal(t, tc.want.OmitTime, got.OmitTime) + require.Equal(t, tc.want.Prefix, got.Prefix) + }) + } +} + +// TestBackend_configureFormatterNode ensures that configureFormatterNode +// populates the nodeIDList and nodeMap on Backend when given valid formatConfig. 
+func TestBackend_configureFormatterNode(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) +} + +// TestBackend_configureSinkNode ensures that we can correctly configure the sink +// node on the Backend, and any incorrect parameters result in the relevant errors. +func TestBackend_configureSinkNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + address string + format string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + name: "", + address: "wss://foo", + wantErr: true, + expectedErrMsg: "name is required: invalid internal parameter", + }, + "name-whitespace": { + name: " ", + address: "wss://foo", + wantErr: true, + expectedErrMsg: "name is required: invalid internal parameter", + }, + "address-empty": { + name: "foo", + address: "", + wantErr: true, + expectedErrMsg: "address is required: invalid internal parameter", + }, + "address-whitespace": { + name: "foo", + address: " ", + wantErr: true, + expectedErrMsg: "address is required: invalid internal parameter", + }, + "format-empty": { + name: "foo", + address: "wss://foo", + format: "", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "format-whitespace": { + name: "foo", + address: "wss://foo", + format: " ", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "happy": { + name: "foo", + address: "wss://foo", + format: "json", + wantErr: false, + expectedName: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureSinkNode(tc.name, tc.address, tc.format) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} + +// TestBackend_Factory_Conf is used to ensure that any configuration which is +// supplied, is validated and tested. 
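+// The write_timeout values below are parsed as Go duration strings, so forms
+// such as "2s", "500ms" or "1m" are accepted, while "qwerty" fails with the
+// time.ParseDuration error visible in the expected message:
+//
+//	time: invalid duration "qwerty"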
+func TestBackend_Factory_Conf(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isErrorExpected bool + expectedErrorMessage string + }{ + "nil-salt-config": { + backendConfig: &audit.BackendConfig{ + SaltConfig: nil, + }, + isErrorExpected: true, + expectedErrorMessage: "nil salt config: invalid internal parameter", + }, + "nil-salt-view": { + backendConfig: &audit.BackendConfig{ + SaltConfig: &salt.Config{}, + }, + isErrorExpected: true, + expectedErrorMessage: "nil salt view: invalid internal parameter", + }, + "nil-logger": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: nil, + }, + isErrorExpected: true, + expectedErrorMessage: "nil logger: invalid internal parameter", + }, + "no-address": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{}, + }, + isErrorExpected: true, + expectedErrorMessage: "address is required: invalid configuration", + }, + "empty-address": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": "", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "address is required: invalid internal parameter", + }, + "whitespace-address": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": " ", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "address is required: invalid internal parameter", + }, + "write-duration-valid": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": "hashicorp.com", + "write_timeout": "5s", + }, + }, + isErrorExpected: false, + }, + "write-duration-not-valid": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": "hashicorp.com", + "write_timeout": "qwerty", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "unable to parse max duration: invalid parameter: time: invalid duration \"qwerty\"", + }, + "non-fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": "hashicorp.com", + "write_timeout": "5s", + "fallback": "false", + "filter": "mount_type == kv", + }, + }, + isErrorExpected: false, + }, + "fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "address": "hashicorp.com", + "write_timeout": "2s", + "fallback": "true", + "filter": "mount_type == kv", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "cannot configure a fallback device with a filter: invalid configuration", + }, + } + + for name, tc := range tests { + name := name + tc := tc + 
t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + default: + require.NoError(t, err) + require.NotNil(t, be) + } + }) + } +} + +// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed +// and set correctly, then exposed via the interface method IsFallback(). +func TestBackend_IsFallback(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isFallbackExpected bool + }{ + "fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "qwerty", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "address": "hashicorp.com", + "write_timeout": "5s", + }, + }, + isFallbackExpected: true, + }, + "no-fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "qwerty", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + "address": "hashicorp.com", + "write_timeout": "5s", + }, + }, + isFallbackExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + require.NotNil(t, be) + require.Equal(t, tc.isFallbackExpected, be.IsFallback()) + }) + } +} diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go index 9c7b775b88c8..19f541e632af 100644 --- a/builtin/audit/syslog/backend.go +++ b/builtin/audit/syslog/backend.go @@ -1,24 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package syslog import ( - "bytes" "context" "fmt" + "reflect" "strconv" + "strings" "sync" - gsyslog "github.com/hashicorp/go-syslog" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/internal/observability/event" "github.com/hashicorp/vault/sdk/helper/salt" "github.com/hashicorp/vault/sdk/logical" ) -func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { +var _ audit.Backend = (*Backend)(nil) + +// Backend is the audit backend for the syslog-based audit store. 
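+// Both syslog-specific options are optional: "facility" defaults to AUTH and
+// "tag" defaults to "vault", so a mount needs no extra config. A sketch of a
+// fuller (hypothetical) config map:
+//
+//	"facility": "LOCAL0",
+//	"tag":      "vault-audit",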
+type Backend struct { + fallback bool + name string + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + salt *salt.Salt + saltConfig *salt.Config + saltMutex sync.RWMutex + saltView logical.Storage +} + +func Factory(_ context.Context, conf *audit.BackendConfig, headersConfig audit.HeaderFormatter) (audit.Backend, error) { if conf.SaltConfig == nil { - return nil, fmt.Errorf("nil salt config") + return nil, fmt.Errorf("nil salt config: %w", audit.ErrInvalidParameter) } + if conf.SaltView == nil { - return nil, fmt.Errorf("nil salt view") + return nil, fmt.Errorf("nil salt view: %w", audit.ErrInvalidParameter) + } + + if conf.Logger == nil || reflect.ValueOf(conf.Logger).IsNil() { + return nil, fmt.Errorf("nil logger: %w", audit.ErrInvalidParameter) + } + + if conf.MountPath == "" { + return nil, fmt.Errorf("mount path cannot be empty: %w", audit.ErrInvalidParameter) } // Get facility or default to AUTH @@ -33,123 +63,68 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err tag = "vault" } - format, ok := conf.Config["format"] - if !ok { - format = "json" + sinkOpts := []event.Option{ + event.WithFacility(facility), + event.WithTag(tag), } - switch format { - case "json", "jsonx": - default: - return nil, fmt.Errorf("unknown format type %q", format) + + err := event.ValidateOptions(sinkOpts...) + if err != nil { + return nil, err } - // Check if hashing of accessor is disabled - hmacAccessor := true - if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { - value, err := strconv.ParseBool(hmacAccessorRaw) + // The config options 'fallback' and 'filter' are mutually exclusive, a fallback + // device catches everything, so it cannot be allowed to filter. + var fallback bool + if fallbackRaw, ok := conf.Config["fallback"]; ok { + fallback, err = parseutil.ParseBool(fallbackRaw) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to parse 'fallback': %w", audit.ErrExternalOptions) } - hmacAccessor = value } - // Check if raw logging is enabled - logRaw := false - if raw, ok := conf.Config["log_raw"]; ok { - b, err := strconv.ParseBool(raw) - if err != nil { - return nil, err - } - logRaw = b + if _, ok := conf.Config["filter"]; ok && fallback { + return nil, fmt.Errorf("cannot configure a fallback device with a filter: %w", audit.ErrExternalOptions) } - // Get the logger - logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag) + cfg, err := newFormatterConfig(headersConfig, conf.Config) if err != nil { return nil, err } b := &Backend{ - logger: logger, + fallback: fallback, + name: conf.MountPath, saltConfig: conf.SaltConfig, saltView: conf.SaltView, - formatConfig: audit.FormatterConfig{ - Raw: logRaw, - HMACAccessor: hmacAccessor, - }, + nodeIDList: []eventlogger.NodeID{}, + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node), } - switch format { - case "json": - b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - case "jsonx": - b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ - Prefix: conf.Config["prefix"], - SaltFunc: b.Salt, - } - } - - return b, nil -} - -// Backend is the audit backend for the syslog-based audit store. 
-type Backend struct { - logger gsyslog.Syslogger - - formatter audit.AuditFormatter - formatConfig audit.FormatterConfig - - saltMutex sync.RWMutex - salt *salt.Salt - saltConfig *salt.Config - saltView logical.Storage -} - -var _ audit.Backend = (*Backend)(nil) - -func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { - salt, err := b.Salt(ctx) + err = b.configureFilterNode(conf.Config["filter"]) if err != nil { - return "", err + return nil, err } - return audit.HashString(salt, data), nil -} -func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err + err = b.configureFormatterNode(conf.MountPath, cfg, conf.Logger) + if err != nil { + return nil, err } - // Write out to syslog - _, err := b.logger.Write(buf.Bytes()) - return err -} - -func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { - var buf bytes.Buffer - if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { - return err + err = b.configureSinkNode(conf.MountPath, cfg.RequiredFormat.String(), sinkOpts...) + if err != nil { + return nil, err } - // Write out to syslog - _, err := b.logger.Write(buf.Bytes()) - return err + return b, nil } -func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { - var buf bytes.Buffer - temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { - return err +func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput) error { + if len(b.nodeIDList) > 0 { + return audit.ProcessManual(ctx, in, b.nodeIDList, b.nodeMap) } - // Send to syslog - _, err := b.logger.Write(buf.Bytes()) - return err + return nil } func (b *Backend) Reload(_ context.Context) error { @@ -168,12 +143,12 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { if b.salt != nil { return b.salt, nil } - salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) if err != nil { return nil, err } - b.salt = salt - return salt, nil + b.salt = s + return s, nil } func (b *Backend) Invalidate(_ context.Context) { @@ -181,3 +156,151 @@ func (b *Backend) Invalidate(_ context.Context) { defer b.saltMutex.Unlock() b.salt = nil } + +// newFormatterConfig creates the configuration required by a formatter node using +// the config map supplied to the factory. 
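+// For example (illustrative only, using keys handled below), a config such as +// +// map[string]string{"format": "jsonx", "log_raw": "true", "prefix": "vault:"} +// +// becomes the options WithFormat("jsonx"), WithRaw(true) and WithPrefix("vault:"), which +// NewFormatterConfig then folds into the resulting FormatterConfig.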
+func newFormatterConfig(headerFormatter audit.HeaderFormatter, config map[string]string) (audit.FormatterConfig, error) { + var opts []audit.Option + + if format, ok := config["format"]; ok { + if !audit.IsValidFormat(format) { + return audit.FormatterConfig{}, fmt.Errorf("unsupported 'format': %w", audit.ErrExternalOptions) + } + + opts = append(opts, audit.WithFormat(format)) + } + + // Check if hashing of accessor is disabled + if hmacAccessorRaw, ok := config["hmac_accessor"]; ok { + v, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'hmac_accessor': %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithHMACAccessor(v)) + } + + // Check if raw logging is enabled + if raw, ok := config["log_raw"]; ok { + v, err := strconv.ParseBool(raw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'log_raw': %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithRaw(v)) + } + + if elideListResponsesRaw, ok := config["elide_list_responses"]; ok { + v, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return audit.FormatterConfig{}, fmt.Errorf("unable to parse 'elide_list_responses': %w", audit.ErrExternalOptions) + } + opts = append(opts, audit.WithElision(v)) + } + + if prefix, ok := config["prefix"]; ok { + opts = append(opts, audit.WithPrefix(prefix)) + } + + return audit.NewFormatterConfig(headerFormatter, opts...) +} + +// configureFormatterNode is used to configure a formatter node and associated ID on the Backend. +func (b *Backend) configureFormatterNode(name string, formatConfig audit.FormatterConfig, logger hclog.Logger) error { + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for formatter node: %w: %w", audit.ErrInternal, err) + } + + formatterNode, err := audit.NewEntryFormatter(name, formatConfig, b, logger) + if err != nil { + return fmt.Errorf("error creating formatter: %w", err) + } + + b.nodeIDList = append(b.nodeIDList, formatterNodeID) + b.nodeMap[formatterNodeID] = formatterNode + + return nil +} + +// configureSinkNode is used to configure a sink node and associated ID on the Backend. +func (b *Backend) configureSinkNode(name string, format string, opts ...event.Option) error { + name = strings.TrimSpace(name) + if name == "" { + return fmt.Errorf("name is required: %w", audit.ErrInvalidParameter) + } + + format = strings.TrimSpace(format) + if format == "" { + return fmt.Errorf("format is required: %w", audit.ErrInvalidParameter) + } + + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return fmt.Errorf("error generating random NodeID for sink node: %w: %w", audit.ErrInternal, err) + } + + n, err := event.NewSyslogSink(format, opts...) + if err != nil { + return fmt.Errorf("error creating syslog sink node: %w", err) + } + + // Wrap the sink node with metrics middleware + sinkMetricTimer, err := audit.NewSinkMetricTimer(name, n) + if err != nil { + return fmt.Errorf("unable to add timing metrics to sink for path %q: %w", name, err) + } + + // Decide what kind of labels we want and wrap the sink node inside a metrics counter.
+ var metricLabeler event.Labeler + switch { + case b.fallback: + metricLabeler = &audit.MetricLabelerAuditFallback{} + default: + metricLabeler = &audit.MetricLabelerAuditSink{} + } + + sinkMetricCounter, err := event.NewMetricsCounter(name, sinkMetricTimer, metricLabeler) + if err != nil { + return fmt.Errorf("unable to add counting metrics to sink for path %q: %w", name, err) + } + + b.nodeIDList = append(b.nodeIDList, sinkNodeID) + b.nodeMap[sinkNodeID] = sinkMetricCounter + + return nil +} + +// Name for this backend; this would ideally correspond to the mount path of the audit device. +func (b *Backend) Name() string { + return b.name +} + +// Nodes returns the nodes which should be used by the event framework to process audit entries. +func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return b.nodeMap +} + +// NodeIDs returns the IDs of the nodes, in the order they are required. +func (b *Backend) NodeIDs() []eventlogger.NodeID { + return b.nodeIDList +} + +// EventType returns the event type for the backend. +func (b *Backend) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter. +func (b *Backend) HasFiltering() bool { + if b.nodeMap == nil { + return false + } + + return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter +} + +// IsFallback can be used to determine if this audit backend device is intended to +// be used as a fallback to catch all events that are not written when only using +// filtered pipelines. +func (b *Backend) IsFallback() bool { + return b.fallback +} diff --git a/builtin/audit/syslog/backend_filter_node.go b/builtin/audit/syslog/backend_filter_node.go new file mode 100644 index 000000000000..45798d48e672 --- /dev/null +++ b/builtin/audit/syslog/backend_filter_node.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package syslog + +// configureFilterNode is used to configure a filter node and associated ID on the Backend. +func (b *Backend) configureFilterNode(_ string) error { + return nil +} diff --git a/builtin/audit/syslog/backend_filter_node_test.go b/builtin/audit/syslog/backend_filter_node_test.go new file mode 100644 index 000000000000..8c4903734019 --- /dev/null +++ b/builtin/audit/syslog/backend_filter_node_test.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package syslog + +import ( + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/stretchr/testify/require" +) + +// TestBackend_configureFilterNode ensures that configureFilterNode handles various +// filter values as expected. Empty (including whitespace) strings should return +// no error but skip configuration of the node. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault.
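+// For example, in the community edition a call such as configureFilterNode("operation == update") +// returns nil without registering a node (per the stub in backend_filter_node.go), which is why +// every case below, valid or not, expects an empty nodeIDList and nodeMap.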
+func TestBackend_configureFilterNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + filter string + }{ + "happy": { + filter: "operation == update", + }, + "empty": { + filter: "", + }, + "spacey": { + filter: " ", + }, + "bad": { + filter: "___qwerty", + }, + "unsupported-field": { + filter: "foo == bar", + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureFilterNode(tc.filter) + require.NoError(t, err) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + }) + } +} + +// TestBackend_configureFilterFormatterSink ensures that configuring all three +// types of nodes on a Backend works as expected, i.e. we have only formatter and sink +// nodes at the end and nothing gets overwritten. The order of calls influences the +// slice of IDs on the Backend. +// NOTE: Audit filtering is an Enterprise feature and behaves differently in the +// community edition of Vault. +func TestBackend_configureFilterFormatterSink(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFilterNode("path == bar") + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + require.NoError(t, err) + + err = b.configureSinkNode("foo", "json") + require.NoError(t, err) + + require.Len(t, b.nodeIDList, 2) + require.Len(t, b.nodeMap, 2) + + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) + + id = b.nodeIDList[1] + node = b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) +} diff --git a/builtin/audit/syslog/backend_test.go b/builtin/audit/syslog/backend_test.go new file mode 100644 index 000000000000..1ee304a210b4 --- /dev/null +++ b/builtin/audit/syslog/backend_test.go @@ -0,0 +1,357 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package syslog + +import ( + "context" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestBackend_newFormatterConfig ensures that all the configuration values are parsed correctly. 
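+// For instance, the "happy-path-jsonx" case below feeds {"format": "jsonx", "hmac_accessor": "true", +// "log_raw": "true", "elide_list_responses": "true"} through newFormatterConfig and expects a +// FormatterConfig with RequiredFormat "jsonx" and Raw, HMACAccessor and ElideListResponses all true.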
+func TestBackend_newFormatterConfig(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + config map[string]string + want audit.FormatterConfig + wantErr bool + expectedErrMsg string + }{ + "happy-path-json": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "json", + }, wantErr: false, + }, + "happy-path-jsonx": { + config: map[string]string{ + "format": audit.JSONxFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{ + Raw: true, + HMACAccessor: true, + ElideListResponses: true, + RequiredFormat: "jsonx", + }, + wantErr: false, + }, + "invalid-format": { + config: map[string]string{ + "format": " squiggly ", + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "true", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unsupported 'format': invalid configuration", + }, + "invalid-hmac-accessor": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'hmac_accessor': invalid configuration", + }, + "invalid-log-raw": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'log_raw': invalid configuration", + }, + "invalid-elide-bool": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "hmac_accessor": "true", + "log_raw": "true", + "elide_list_responses": "maybe", + }, + want: audit.FormatterConfig{}, + wantErr: true, + expectedErrMsg: "unable to parse 'elide_list_responses': invalid configuration", + }, + "prefix": { + config: map[string]string{ + "format": audit.JSONFormat.String(), + "prefix": "foo", + }, + want: audit.FormatterConfig{ + RequiredFormat: audit.JSONFormat, + Prefix: "foo", + HMACAccessor: true, + }, + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := newFormatterConfig(&corehelpers.NoopHeaderFormatter{}, tc.config) + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want.RequiredFormat, got.RequiredFormat) + require.Equal(t, tc.want.Raw, got.Raw) + require.Equal(t, tc.want.ElideListResponses, got.ElideListResponses) + require.Equal(t, tc.want.HMACAccessor, got.HMACAccessor) + require.Equal(t, tc.want.OmitTime, got.OmitTime) + require.Equal(t, tc.want.Prefix, got.Prefix) + }) + } +} + +// TestBackend_configureFormatterNode ensures that configureFormatterNode +// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
+func TestBackend_configureFormatterNode(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + formatConfig, err := audit.NewFormatterConfig(&corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + + err = b.configureFormatterNode("juan", formatConfig, hclog.NewNullLogger()) + + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeFormatter, node.Type()) +} + +// TestBackend_configureSinkNode ensures that we can correctly configure the sink +// node on the Backend, and any incorrect parameters result in the relevant errors. +func TestBackend_configureSinkNode(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + format string + wantErr bool + expectedErrMsg string + expectedName string + }{ + "name-empty": { + name: "", + wantErr: true, + expectedErrMsg: "name is required: invalid internal parameter", + }, + "name-whitespace": { + name: " ", + wantErr: true, + expectedErrMsg: "name is required: invalid internal parameter", + }, + "format-empty": { + name: "foo", + format: "", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "format-whitespace": { + name: "foo", + format: " ", + wantErr: true, + expectedErrMsg: "format is required: invalid internal parameter", + }, + "happy": { + name: "foo", + format: "json", + wantErr: false, + expectedName: "foo", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + b := &Backend{ + nodeIDList: []eventlogger.NodeID{}, + nodeMap: map[eventlogger.NodeID]eventlogger.Node{}, + } + + err := b.configureSinkNode(tc.name, tc.format) + + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Len(t, b.nodeIDList, 0) + require.Len(t, b.nodeMap, 0) + } else { + require.NoError(t, err) + require.Len(t, b.nodeIDList, 1) + require.Len(t, b.nodeMap, 1) + id := b.nodeIDList[0] + node := b.nodeMap[id] + require.Equal(t, eventlogger.NodeTypeSink, node.Type()) + mc, ok := node.(*event.MetricsCounter) + require.True(t, ok) + require.Equal(t, tc.expectedName, mc.Name) + } + }) + } +} + +// TestBackend_Factory_Conf ensures that any configuration supplied to the +// factory is validated and handled as expected.
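+// For example, a device configured with both +// +// map[string]string{"fallback": "true", "filter": "mount_type == kv"} +// +// should be rejected outright, since a fallback device cannot also filter.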
+func TestBackend_Factory_Conf(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isErrorExpected bool + expectedErrorMessage string + }{ + "nil-salt-config": { + backendConfig: &audit.BackendConfig{ + SaltConfig: nil, + }, + isErrorExpected: true, + expectedErrorMessage: "nil salt config: invalid internal parameter", + }, + "nil-salt-view": { + backendConfig: &audit.BackendConfig{ + SaltConfig: &salt.Config{}, + }, + isErrorExpected: true, + expectedErrorMessage: "nil salt view: invalid internal parameter", + }, + "non-fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + "filter": "mount_type == kv", + }, + }, + isErrorExpected: false, + }, + "fallback-device-with-filter": { + backendConfig: &audit.BackendConfig{ + MountPath: "discard", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + "filter": "mount_type == kv", + }, + }, + isErrorExpected: true, + expectedErrorMessage: "cannot configure a fallback device with a filter: invalid configuration", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + default: + require.NoError(t, err) + require.NotNil(t, be) + } + }) + } +} + +// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed +// and set correctly, then exposed via the interface method IsFallback(). 
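+// For example, Config: map[string]string{"fallback": "true"} should yield a backend whose +// IsFallback() reports true, and "fallback": "false" the reverse.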
+func TestBackend_IsFallback(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := map[string]struct { + backendConfig *audit.BackendConfig + isFallbackExpected bool + }{ + "fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "qwerty", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "true", + }, + }, + isFallbackExpected: true, + }, + "no-fallback": { + backendConfig: &audit.BackendConfig{ + MountPath: "qwerty", + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Logger: hclog.NewNullLogger(), + Config: map[string]string{ + "fallback": "false", + }, + }, + isFallbackExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + be, err := Factory(ctx, tc.backendConfig, &corehelpers.NoopHeaderFormatter{}) + require.NoError(t, err) + require.NotNil(t, be) + require.Equal(t, tc.isFallbackExpected, be.IsFallback()) + }) + } +} diff --git a/builtin/credential/app-id/backend.go b/builtin/credential/app-id/backend.go deleted file mode 100644 index b77221d75ba6..000000000000 --- a/builtin/credential/app-id/backend.go +++ /dev/null @@ -1,184 +0,0 @@ -package appId - -import ( - "context" - "sync" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b, err := Backend(conf) - if err != nil { - return nil, err - } - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend(conf *logical.BackendConfig) (*backend, error) { - var b backend - b.MapAppId = &framework.PolicyMap{ - PathMap: framework.PathMap{ - Name: "app-id", - Schema: map[string]*framework.FieldSchema{ - "display_name": { - Type: framework.TypeString, - Description: "A name to map to this app ID for logs.", - }, - - "value": { - Type: framework.TypeString, - Description: "Policies for the app ID.", - }, - }, - }, - DefaultKey: "default", - } - - b.MapUserId = &framework.PathMap{ - Name: "user-id", - Schema: map[string]*framework.FieldSchema{ - "cidr_block": { - Type: framework.TypeString, - Description: "If not blank, restricts auth by this CIDR block", - }, - - "value": { - Type: framework.TypeString, - Description: "App IDs that this user associates with.", - }, - }, - } - - b.Backend = &framework.Backend{ - Help: backendHelp, - - PathsSpecial: &logical.Paths{ - Unauthenticated: []string{ - "login", - "login/*", - }, - }, - Paths: framework.PathAppend([]*framework.Path{ - pathLogin(&b), - pathLoginWithAppIDPath(&b), - }, - b.MapAppId.Paths(), - b.MapUserId.Paths(), - ), - AuthRenew: b.pathLoginRenew, - Invalidate: b.invalidate, - BackendType: logical.TypeCredential, - } - - b.view = conf.StorageView - b.MapAppId.SaltFunc = b.Salt - b.MapUserId.SaltFunc = b.Salt - - return &b, nil -} - -type backend struct { - *framework.Backend - - salt *salt.Salt - SaltMutex sync.RWMutex - view logical.Storage - MapAppId *framework.PolicyMap - MapUserId *framework.PathMap -} - -func (b *backend) Salt(ctx context.Context) (*salt.Salt, error) { - b.SaltMutex.RLock() - if b.salt != nil { - defer b.SaltMutex.RUnlock() - return b.salt, nil - } - b.SaltMutex.RUnlock() - b.SaltMutex.Lock() - defer b.SaltMutex.Unlock() - if b.salt != nil { - return b.salt, nil - } - salt, err := salt.NewSalt(ctx, b.view, &salt.Config{ - 
HashFunc: salt.SHA1Hash, - Location: salt.DefaultLocation, - }) - if err != nil { - return nil, err - } - b.salt = salt - return salt, nil -} - -func (b *backend) invalidate(_ context.Context, key string) { - switch key { - case salt.DefaultLocation: - b.SaltMutex.Lock() - defer b.SaltMutex.Unlock() - b.salt = nil - } -} - -const backendHelp = ` -The App ID credential provider is used to perform authentication from -within applications or machine by pairing together two hard-to-guess -unique pieces of information: a unique app ID, and a unique user ID. - -The goal of this credential provider is to allow elastic users -(dynamic machines, containers, etc.) to authenticate with Vault without -having to store passwords outside of Vault. It is a single method of -solving the chicken-and-egg problem of setting up Vault access on a machine. -With this provider, nobody except the machine itself has access to both -pieces of information necessary to authenticate. For example: -configuration management will have the app IDs, but the machine itself -will detect its user ID based on some unique machine property such as a -MAC address (or a hash of it with some salt). - -An example, real world process for using this provider: - - 1. Create unique app IDs (UUIDs work well) and map them to policies. - (Path: map/app-id/) - - 2. Store the app IDs within configuration management systems. - - 3. An out-of-band process run by security operators map unique user IDs - to these app IDs. Example: when an instance is launched, a cloud-init - system tells security operators a unique ID for this machine. This - process can be scripted, but the key is that it is out-of-band and - out of reach of configuration management. - (Path: map/user-id/) - - 4. A new server is provisioned. Configuration management configures the - app ID, the server itself detects its user ID. With both of these - pieces of information, Vault can be accessed according to the policy - set by the app ID. - -More details on this process follow: - -The app ID is a unique ID that maps to a set of policies. This ID is -generated by an operator and configured into the backend. The ID itself -is usually a UUID, but any hard-to-guess unique value can be used. - -After creating app IDs, an operator authorizes a fixed set of user IDs -with each app ID. When a valid {app ID, user ID} tuple is given to the -"login" path, then the user is authenticated with the configured app -ID policies. - -The user ID can be any value (just like the app ID), however it is -generally a value unique to a machine, such as a MAC address or instance ID, -or a value hashed from these unique values. - -It is possible to authorize multiple app IDs with each -user ID by writing them as comma-separated values to the map/user-id/ -path. - -It is also possible to renew the auth tokens with 'vault token-renew ' command. -Before the token is renewed, the validity of app ID, user ID and the associated -policies are checked again. 
-` diff --git a/builtin/credential/app-id/backend_test.go b/builtin/credential/app-id/backend_test.go deleted file mode 100644 index a8b7077ef002..000000000000 --- a/builtin/credential/app-id/backend_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package appId - -import ( - "context" - "fmt" - "testing" - - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - "github.com/hashicorp/vault/sdk/helper/salt" - "github.com/hashicorp/vault/sdk/logical" -) - -func TestBackend_basic(t *testing.T) { - var b *backend - var err error - var storage logical.Storage - factory := func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b, err = Backend(conf) - if err != nil { - t.Fatal(err) - } - storage = conf.StorageView - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil - } - logicaltest.Test(t, logicaltest.TestCase{ - CredentialFactory: factory, - Steps: []logicaltest.TestStep{ - testAccStepMapAppId(t), - testAccStepMapUserId(t), - testAccLogin(t, ""), - testAccLoginAppIDInPath(t, ""), - testAccLoginInvalid(t), - testAccStepDeleteUserId(t), - testAccLoginDeleted(t), - }, - }) - - req := &logical.Request{ - Path: "map/app-id", - Operation: logical.ListOperation, - Storage: storage, - } - resp, err := b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("nil response") - } - keys := resp.Data["keys"].([]string) - if len(keys) != 1 { - t.Fatalf("expected 1 key, got %d", len(keys)) - } - bSalt, err := b.Salt(context.Background()) - if err != nil { - t.Fatal(err) - } - if keys[0] != "s"+bSalt.SaltIDHashFunc("foo", salt.SHA256Hash) { - t.Fatal("value was improperly salted") - } -} - -func TestBackend_cidr(t *testing.T) { - logicaltest.Test(t, logicaltest.TestCase{ - CredentialFactory: Factory, - Steps: []logicaltest.TestStep{ - testAccStepMapAppIdDisplayName(t), - testAccStepMapUserIdCidr(t, "192.168.1.0/16"), - testAccLoginCidr(t, "192.168.1.5", false), - testAccLoginCidr(t, "10.0.1.5", true), - testAccLoginCidr(t, "", true), - }, - }) -} - -func TestBackend_displayName(t *testing.T) { - logicaltest.Test(t, logicaltest.TestCase{ - CredentialFactory: Factory, - Steps: []logicaltest.TestStep{ - testAccStepMapAppIdDisplayName(t), - testAccStepMapUserId(t), - testAccLogin(t, "tubbin"), - testAccLoginAppIDInPath(t, "tubbin"), - testAccLoginInvalid(t), - testAccStepDeleteUserId(t), - testAccLoginDeleted(t), - }, - }) -} - -func testAccStepMapAppId(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "map/app-id/foo", - Data: map[string]interface{}{ - "value": "foo,bar", - }, - } -} - -func testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "map/app-id/foo", - Data: map[string]interface{}{ - "display_name": "tubbin", - "value": "foo,bar", - }, - } -} - -func testAccStepMapUserId(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "map/user-id/42", - Data: map[string]interface{}{ - "value": "foo", - }, - } -} - -func testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: "map/user-id/42", - } -} - -func testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "map/user-id/42", - Data: 
map[string]interface{}{ - "value": "foo", - "cidr_block": cidr, - }, - } -} - -func testAccLogin(t *testing.T, display string) logicaltest.TestStep { - checkTTL := func(resp *logical.Response) error { - if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" { - return fmt.Errorf("invalid TTL: got %s", resp.Auth.LeaseOptions.TTL) - } - return nil - } - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "login", - Data: map[string]interface{}{ - "app_id": "foo", - "user_id": "42", - }, - Unauthenticated: true, - - Check: logicaltest.TestCheckMulti( - logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}), - logicaltest.TestCheckAuthDisplayName(display), - checkTTL, - ), - } -} - -func testAccLoginAppIDInPath(t *testing.T, display string) logicaltest.TestStep { - checkTTL := func(resp *logical.Response) error { - if resp.Auth.LeaseOptions.TTL.String() != "768h0m0s" { - return fmt.Errorf("invalid TTL: got %s", resp.Auth.LeaseOptions.TTL) - } - return nil - } - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "login/foo", - Data: map[string]interface{}{ - "user_id": "42", - }, - Unauthenticated: true, - - Check: logicaltest.TestCheckMulti( - logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}), - logicaltest.TestCheckAuthDisplayName(display), - checkTTL, - ), - } -} - -func testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep { - check := logicaltest.TestCheckError() - if !err { - check = logicaltest.TestCheckAuth([]string{"bar", "default", "foo"}) - } - - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "login", - Data: map[string]interface{}{ - "app_id": "foo", - "user_id": "42", - }, - ErrorOk: err, - Unauthenticated: true, - RemoteAddr: ip, - - Check: check, - } -} - -func testAccLoginInvalid(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "login", - Data: map[string]interface{}{ - "app_id": "foo", - "user_id": "48", - }, - ErrorOk: true, - Unauthenticated: true, - - Check: logicaltest.TestCheckError(), - } -} - -func testAccLoginDeleted(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "login", - Data: map[string]interface{}{ - "app_id": "foo", - "user_id": "42", - }, - ErrorOk: true, - Unauthenticated: true, - - Check: logicaltest.TestCheckError(), - } -} diff --git a/builtin/credential/app-id/cmd/app-id/main.go b/builtin/credential/app-id/cmd/app-id/main.go deleted file mode 100644 index ce482d630206..000000000000 --- a/builtin/credential/app-id/cmd/app-id/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - appId "github.com/hashicorp/vault/builtin/credential/app-id" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: appId.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/credential/app-id/path_login.go b/builtin/credential/app-id/path_login.go deleted file mode 100644 index 1c1198a19549..000000000000 --- 
a/builtin/credential/app-id/path_login.go +++ /dev/null @@ -1,229 +0,0 @@ -package appId - -import ( - "context" - "crypto/sha1" - "crypto/subtle" - "encoding/hex" - "fmt" - "net" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/policyutil" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathLoginWithAppIDPath(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "login/(?P.+)", - Fields: map[string]*framework.FieldSchema{ - "app_id": { - Type: framework.TypeString, - Description: "The unique app ID", - }, - - "user_id": { - Type: framework.TypeString, - Description: "The unique user ID", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathLogin, - }, - - HelpSynopsis: pathLoginSyn, - HelpDescription: pathLoginDesc, - } -} - -func pathLogin(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "login$", - Fields: map[string]*framework.FieldSchema{ - "app_id": { - Type: framework.TypeString, - Description: "The unique app ID", - }, - - "user_id": { - Type: framework.TypeString, - Description: "The unique user ID", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathLogin, - logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, - }, - - HelpSynopsis: pathLoginSyn, - HelpDescription: pathLoginDesc, - } -} - -func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - appId := data.Get("app_id").(string) - - if appId == "" { - return nil, fmt.Errorf("missing app_id") - } - - return &logical.Response{ - Auth: &logical.Auth{ - Alias: &logical.Alias{ - Name: appId, - }, - }, - }, nil -} - -func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - appId := data.Get("app_id").(string) - userId := data.Get("user_id").(string) - - var displayName string - if dispName, resp, err := b.verifyCredentials(ctx, req, appId, userId); err != nil { - return nil, err - } else if resp != nil { - return resp, nil - } else { - displayName = dispName - } - - // Get the policies associated with the app - policies, err := b.MapAppId.Policies(ctx, req.Storage, appId) - if err != nil { - return nil, err - } - - // Store hashes of the app ID and user ID for the metadata - appIdHash := sha1.Sum([]byte(appId)) - userIdHash := sha1.Sum([]byte(userId)) - metadata := map[string]string{ - "app-id": "sha1:" + hex.EncodeToString(appIdHash[:]), - "user-id": "sha1:" + hex.EncodeToString(userIdHash[:]), - } - - return &logical.Response{ - Auth: &logical.Auth{ - InternalData: map[string]interface{}{ - "app-id": appId, - "user-id": userId, - }, - DisplayName: displayName, - Policies: policies, - Metadata: metadata, - LeaseOptions: logical.LeaseOptions{ - Renewable: true, - }, - Alias: &logical.Alias{ - Name: appId, - }, - }, - }, nil -} - -func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - appId := req.Auth.InternalData["app-id"].(string) - userId := req.Auth.InternalData["user-id"].(string) - - // Skipping CIDR verification to enable renewal from machines other than - // the ones encompassed by CIDR block. 
- if _, resp, err := b.verifyCredentials(ctx, req, appId, userId); err != nil { - return nil, err - } else if resp != nil { - return resp, nil - } - - // Get the policies associated with the app - mapPolicies, err := b.MapAppId.Policies(ctx, req.Storage, appId) - if err != nil { - return nil, err - } - if !policyutil.EquivalentPolicies(mapPolicies, req.Auth.TokenPolicies) { - return nil, fmt.Errorf("policies do not match") - } - - return &logical.Response{Auth: req.Auth}, nil -} - -func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, appId, userId string) (string, *logical.Response, error) { - // Ensure both appId and userId are provided - if appId == "" || userId == "" { - return "", logical.ErrorResponse("missing 'app_id' or 'user_id'"), nil - } - - // Look up the apps that this user is allowed to access - appsMap, err := b.MapUserId.Get(ctx, req.Storage, userId) - if err != nil { - return "", nil, err - } - if appsMap == nil { - return "", logical.ErrorResponse("invalid user ID or app ID"), nil - } - - // If there is a CIDR block restriction, check that - if raw, ok := appsMap["cidr_block"]; ok { - _, cidr, err := net.ParseCIDR(raw.(string)) - if err != nil { - return "", nil, fmt.Errorf("invalid restriction cidr: %w", err) - } - - var addr string - if req.Connection != nil { - addr = req.Connection.RemoteAddr - } - if addr == "" || !cidr.Contains(net.ParseIP(addr)) { - return "", logical.ErrorResponse("unauthorized source address"), nil - } - } - - appsRaw, ok := appsMap["value"] - if !ok { - appsRaw = "" - } - - apps, ok := appsRaw.(string) - if !ok { - return "", nil, fmt.Errorf("mapping is not a string") - } - - // Verify that the app is in the list - found := false - appIdBytes := []byte(appId) - for _, app := range strings.Split(apps, ",") { - match := []byte(strings.TrimSpace(app)) - // Protect against a timing attack with the app_id comparison - if subtle.ConstantTimeCompare(match, appIdBytes) == 1 { - found = true - } - } - if !found { - return "", logical.ErrorResponse("invalid user ID or app ID"), nil - } - - // Get the raw data associated with the app - appRaw, err := b.MapAppId.Get(ctx, req.Storage, appId) - if err != nil { - return "", nil, err - } - if appRaw == nil { - return "", logical.ErrorResponse("invalid user ID or app ID"), nil - } - var displayName string - if raw, ok := appRaw["display_name"]; ok { - displayName = raw.(string) - } - - return displayName, nil, nil -} - -const pathLoginSyn = ` -Log in with an App ID and User ID. -` - -const pathLoginDesc = ` -This endpoint authenticates using an application ID, user ID and potential the IP address of the connecting client. -` diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index ebd8d3c06a80..4afdd596078c 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -12,6 +15,7 @@ import ( ) const ( + operationPrefixAppRole = "app-role" secretIDPrefix = "secret_id/" secretIDLocalPrefix = "secret_id_local/" secretIDAccessorPrefix = "accessor/" diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go index 212fe36f0f7c..a3dc68be0635 100644 --- a/builtin/credential/approle/backend_test.go +++ b/builtin/credential/approle/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package approle import ( diff --git a/builtin/credential/approle/cmd/approle/main.go b/builtin/credential/approle/cmd/approle/main.go index 22fa242fa623..d28cea383beb 100644 --- a/builtin/credential/approle/cmd/approle/main.go +++ b/builtin/credential/approle/cmd/approle/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: approle.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go index 2ad3924ba9e5..72b7ac352f06 100644 --- a/builtin/credential/approle/path_login.go +++ b/builtin/credential/approle/path_login.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( "context" "fmt" + "net/http" "strings" "time" @@ -15,6 +19,10 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "role_id": { Type: framework.TypeString, @@ -29,12 +37,33 @@ func pathLogin(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathLoginUpdate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + }}, + }, }, logical.AliasLookaheadOperation: &framework.PathOperation{ Callback: b.pathLoginUpdateAliasLookahead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + }}, + }, }, logical.ResolveRoleOperation: &framework.PathOperation{ Callback: b.pathLoginResolveRole, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Required: true, + }, + }, + }}, + }, }, }, HelpSynopsis: pathLoginHelpSys, @@ -70,7 +99,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request return nil, err } if roleIDIndex == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } roleName := roleIDIndex.Name @@ -84,7 +113,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request return nil, err } if role == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } return logical.ResolveRoleResponse(roleName) @@ -105,7 +134,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if roleIDIndex == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } roleName := roleIDIndex.Name @@ -119,7 +148,7 @@ func (b *backend) pathLoginUpdate(ctx 
context.Context, req *logical.Request, dat return nil, err } if role == nil { - return logical.ErrorResponse("invalid role ID"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } metadata := make(map[string]string) @@ -155,7 +184,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if entry == nil { - return logical.ErrorResponse("invalid secret id"), logical.ErrInvalidCredentials + return logical.ErrorResponse("invalid role or secret ID"), logical.ErrInvalidCredentials } // If a secret ID entry does not have a corresponding accessor @@ -175,7 +204,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, err } if entry == nil { - return logical.ErrorResponse("invalid secret id"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix) @@ -188,7 +217,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat return nil, fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err) } } - return logical.ErrorResponse("invalid secret id"), nil + return logical.ErrorResponse("invalid role or secret ID"), nil } switch { diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go index 542bf665b6e9..7dd8c7f0ff14 100644 --- a/builtin/credential/approle/path_login_test.go +++ b/builtin/credential/approle/path_login_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -16,7 +19,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { // Create a role with secret ID binding disabled and only bound cidr list // enabled - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Data: map[string]interface{}{ @@ -26,24 +29,18 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { }, Storage: s, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } // Read the role ID - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole/role-id", Operation: logical.ReadOperation, Storage: s, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } roleID := resp.Data["role_id"] // Fill in the connection information and login with just the role ID - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -52,9 +49,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { Storage: s, Connection: &logical.Connection{RemoteAddr: "127.0.0.1"}, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + if resp.Auth == nil { t.Fatal("expected login to succeed") } @@ -66,7 +61,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { } // Override with a secret-id value, verify it doesn't pass - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -74,9 +69,6 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { }, Storage: s, }) - if err != 
nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } roleSecretIDReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -92,13 +84,11 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { } roleSecretIDReq.Data["token_bound_cidrs"] = "10.0.0.0/24" - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) + secretID := resp.Data["secret_id"] - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Data: map[string]interface{}{ @@ -108,9 +98,7 @@ func TestAppRole_BoundCIDRLogin(t *testing.T) { Storage: s, Connection: &logical.Connection{RemoteAddr: "127.0.0.1"}, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + if resp.Auth == nil { t.Fatal("expected login to succeed") } @@ -133,10 +121,8 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role1/role-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleRoleIDReq) + roleID := resp.Data["role_id"] roleSecretIDReq := &logical.Request{ @@ -144,10 +130,8 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) + secretID := resp.Data["secret_id"] loginData := map[string]interface{}{ @@ -216,20 +200,15 @@ func TestAppRole_RoleLogin(t *testing.T) { Storage: storage, Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleRoleIDReq = &logical.Request{ Operation: logical.ReadOperation, Path: "role/role-period/role-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleRoleIDReq) + roleID = resp.Data["role_id"] roleSecretIDReq = &logical.Request{ @@ -237,10 +216,8 @@ func TestAppRole_RoleLogin(t *testing.T) { Path: "role/role-period/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) + secretID = resp.Data["secret_id"] loginData["role_id"] = roleID @@ -303,8 +280,6 @@ func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Reques } func TestAppRole_RoleResolve(t *testing.T) { - var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) role := "role1" @@ -314,10 +289,8 @@ func TestAppRole_RoleResolve(t *testing.T) { Path: "role/role1/role-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleRoleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp := b.requestNoErr(t, roleRoleIDReq) + roleID := resp.Data["role_id"] roleSecretIDReq := &logical.Request{ 
@@ -325,10 +298,8 @@ func TestAppRole_RoleResolve(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) + secretID := resp.Data["secret_id"] loginData := map[string]interface{}{ @@ -345,10 +316,7 @@ func TestAppRole_RoleResolve(t *testing.T) { }, } - resp, err = b.HandleRequest(context.Background(), loginReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, loginReq) if resp.Data["role"] != role { t.Fatalf("Role was not as expected. Expected %s, received %s", role, resp.Data["role"]) @@ -386,7 +354,7 @@ func TestAppRole_RoleDoesNotExist(t *testing.T) { t.Fatal("Error not part of response.") } - if !strings.Contains(errString, "invalid role ID") { + if !strings.Contains(errString, "invalid role or secret ID") { t.Fatalf("Error was not due to invalid role ID. Error: %s", errString) } } diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index 3759677e6164..2268427b548c 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -10,7 +13,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/parseip" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/cidrutil" @@ -121,6 +124,10 @@ func rolePaths(b *backend) []*framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role_name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -197,68 +204,84 @@ can only be set during role creation and once set, it can't be reset later.`, Fields: map[string]*framework.FieldSchema{ "bind_secret_id": { Type: framework.TypeBool, + Required: true, Description: "Impose secret ID to be presented when logging in using this role.", }, "secret_id_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.", }, "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, "local_secret_ids": { Type: framework.TypeBool, + Required: true, Description: "If true, the secret identifiers generated using this role will be cluster local. This can only be set during role creation and once set, it can't be reset later", }, "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: `Comma separated string or JSON list of CIDR blocks. 
If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, }, "token_explicit_max_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.", }, "token_max_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "The maximum lifetime of the generated token", }, "token_no_default_policy": { Type: framework.TypeBool, + Required: true, Description: "If true, the 'default' policy will not automatically be added to generated tokens", }, "token_period": { Type: framework.TypeDurationSecond, + Required: true, Description: "If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value.", }, "token_policies": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "Comma-separated list of policies", }, "token_type": { Type: framework.TypeString, + Required: true, Default: "default-service", Description: "The type of token to generate, service or batch", }, "token_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "The initial ttl of the token to generate", }, "token_num_uses": { Type: framework.TypeInt, + Required: true, Description: "The maximum number of times a token may be used, a value of zero means unlimited", }, "period": { Type: framework.TypeDurationSecond, + Required: false, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, "policies": { Type: framework.TypeCommaStringSlice, + Required: false, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, @@ -281,19 +304,13 @@ can only be set during role creation and once set, it can't be reset later.`, p, { Pattern: "role/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "roles", + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - }, - }, - }}, - }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), @@ -301,6 +318,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "local-secret-ids", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -316,6 +337,7 @@ can only be set during role creation and once set, it can't be reset later.`, Fields: map[string]*framework.FieldSchema{ "local_secret_ids": { Type: framework.TypeBool, + Required: true, Description: "If true, the secret identifiers generated using this role will be cluster local. 
This can only be set during role creation and once set, it can't be reset later", }, }, @@ -328,6 +350,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "policies", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -356,11 +382,13 @@ can only be set during role creation and once set, it can't be reset later.`, Fields: map[string]*framework.FieldSchema{ "policies": { Type: framework.TypeCommaStringSlice, + Required: false, Description: tokenutil.DeprecationText("token_policies"), Deprecated: true, }, "token_policies": { Type: framework.TypeCommaStringSlice, + Required: true, Description: defTokenFields["token_policies"].Description, }, }, @@ -377,6 +405,10 @@ can only be set during role creation and once set, it can't be reset later.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bound-cidr-list", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -401,8 +433,9 @@ of CIDR blocks. If set, specifies the blocks of IP addresses which can perform t Fields: map[string]*framework.FieldSchema{ "bound_cidr_list": { Type: framework.TypeCommaStringSlice, - Deprecated: true, + Required: true, Description: `Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, + Deprecated: true, }, }, }}, @@ -418,6 +451,10 @@ of CIDR blocks. If set, specifies the blocks of IP addresses which can perform t }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-bound-cidrs", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -442,6 +479,7 @@ IP addresses which can perform the login operation.`, Fields: map[string]*framework.FieldSchema{ "secret_id_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, }, }, @@ -458,6 +496,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-bound-cidrs", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -481,6 +523,7 @@ IP addresses which can perform the login operation.`, Fields: map[string]*framework.FieldSchema{ "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. 
Should be a subset of the token CIDR blocks listed on the role, if any.`, }, }, @@ -497,6 +540,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bind-secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -521,6 +568,7 @@ IP addresses which can perform the login operation.`, Fields: map[string]*framework.FieldSchema{ "bind_secret_id": { Type: framework.TypeBool, + Required: true, Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.", }, }, @@ -537,6 +585,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-num-uses", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -560,6 +612,7 @@ IP addresses which can perform the login operation.`, Fields: map[string]*framework.FieldSchema{ "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the SecretID will expire. Defaults to 0 meaning that the secret ID is of unlimited use.", }, }, @@ -576,6 +629,10 @@ IP addresses which can perform the login operation.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -600,6 +657,7 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID should expire. 
Defaults to 0, meaning no expiration.", }, }, @@ -616,6 +674,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "period", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -644,11 +706,13 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "period": { Type: framework.TypeDurationSecond, + Required: false, Description: tokenutil.DeprecationText("token_period"), Deprecated: true, }, "token_period": { Type: framework.TypeDurationSecond, + Required: true, Description: defTokenFields["token_period"].Description, }, }, @@ -665,6 +729,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-num-uses", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -688,6 +756,7 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "token_num_uses": { Type: framework.TypeInt, + Required: true, Description: defTokenFields["token_num_uses"].Description, }, }, @@ -704,6 +773,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -727,6 +800,7 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "token_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: defTokenFields["token_ttl"].Description, }, }, @@ -743,6 +817,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-max-ttl", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -766,6 +844,7 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "token_max_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: defTokenFields["token_max_ttl"].Description, }, }, @@ -782,6 +861,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -801,6 +884,7 @@ to 0, meaning no expiration.`, Fields: map[string]*framework.FieldSchema{ "role_id": { Type: framework.TypeString, + Required: false, Description: "Identifier of the role. Defaults to a UUID.", }, }, @@ -817,6 +901,10 @@ to 0, meaning no expiration.`, }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -858,18 +946,22 @@ Overrides secret_id_ttl role option when supplied. 
May not be longer than role's Fields: map[string]*framework.FieldSchema{ "secret_id": { Type: framework.TypeString, + Required: true, Description: "Secret ID attached to the role.", }, "secret_id_accessor": { Type: framework.TypeString, + Required: true, Description: "Accessor of the secret ID", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, }, @@ -878,15 +970,8 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDList, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "keys": { - Type: framework.TypeStringSlice, - }, - }, - }}, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-ids", }, }, }, @@ -895,6 +980,11 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "look-up", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -914,34 +1004,43 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Fields: map[string]*framework.FieldSchema{ "secret_id_accessor": { Type: framework.TypeString, + Required: true, Description: "Accessor of the secret ID", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, "creation_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "expiration_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "last_updated_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "metadata": { - Type: framework.TypeMap, + Type: framework.TypeKVPairs, + Required: true, }, "cidr_list": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", }, "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "List of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.", }, }, @@ -954,6 +1053,10 @@ Overrides secret_id_ttl role option when supplied. 
May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -962,16 +1065,23 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's "secret_id": { Type: framework.TypeString, Description: "SecretID attached to the role.", + Query: true, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id2", + }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]), @@ -979,6 +1089,11 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-by-accessor", + OperationVerb: "look-up", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -998,34 +1113,43 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Fields: map[string]*framework.FieldSchema{ "secret_id_accessor": { Type: framework.TypeString, + Required: true, Description: "Accessor of the secret ID", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, "creation_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "expiration_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "last_updated_time": { - Type: framework.TypeTime, + Type: framework.TypeTime, + Required: true, }, "metadata": { - Type: framework.TypeMap, + Type: framework.TypeKVPairs, + Required: true, }, "cidr_list": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", }, "token_bound_cidrs": { Type: framework.TypeCommaStringSlice, + Required: true, Description: "List of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.", }, }, @@ -1038,6 +1162,10 @@ Overrides secret_id_ttl role option when supplied. 
May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1046,16 +1174,23 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's "secret_id_accessor": { Type: framework.TypeString, Description: "Accessor of the SecretID", + Query: true, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor2", + }, }, }, HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), @@ -1063,6 +1198,10 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's }, { Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "custom-secret-id", + }, Fields: map[string]*framework.FieldSchema{ "role_name": { Type: framework.TypeString, @@ -1109,18 +1248,22 @@ Overrides secret_id_ttl role option when supplied. May not be longer than role's Fields: map[string]*framework.FieldSchema{ "secret_id": { Type: framework.TypeString, + Required: true, Description: "Secret ID attached to the role.", }, "secret_id_accessor": { Type: framework.TypeString, + Required: true, Description: "Accessor of the secret ID", }, "secret_id_ttl": { Type: framework.TypeDurationSecond, + Required: true, Description: "Duration in seconds after which the issued secret ID expires.", }, "secret_id_num_uses": { Type: framework.TypeInt, + Required: true, Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", }, }, @@ -1905,12 +2048,21 @@ func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(ctx context.Contex return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) } - entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) - lock := b.secretIDLock(accessorEntry.SecretIDHMAC) lock.Lock() defer lock.Unlock() + // Verify we have a valid SecretID Storage Entry + entry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("invalid secret id accessor"), logical.ErrPermissionDenied + } + + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + // Delete the accessor of the SecretID first if err := b.deleteSecretIDAccessorEntry(ctx, req.Storage, secretIDAccessor, role.SecretIDPrefix); err != nil { return nil, err diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index 885cb8e386e1..6b35afc725e6 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -12,14 +15,23 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) +func (b *backend) requestNoErr(t *testing.T, req *logical.Request) *logical.Response { + t.Helper() + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(req.Path), req.Operation), resp, true) + return resp +} + func TestAppRole_LocalSecretIDsRead(t *testing.T) { - var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -27,37 +39,29 @@ func TestAppRole_LocalSecretIDsRead(t *testing.T) { "bind_secret_id": true, } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + b.requestNoErr(t, &logical.Request{ Operation: logical.CreateOperation, Path: "role/testrole", Storage: storage, Data: roleData, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp := b.requestNoErr(t, &logical.Request{ Operation: logical.ReadOperation, Storage: storage, Path: "role/testrole/local-secret-ids", }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + if !resp.Data["local_secret_ids"].(bool) { t.Fatalf("expected local_secret_ids to be returned") } } func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { - var resp *logical.Response - var err error - b, storage := createBackendWithStorage(t) // Create a role with local_secret_ids set - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp := b.requestNoErr(t, &logical.Request{ Path: "role/testrole1", Operation: logical.CreateOperation, Storage: storage, @@ -67,12 +71,9 @@ func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { "local_secret_ids": true, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\n resp: %#v", err, resp) - } // Create another role without setting local_secret_ids - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole2", Operation: logical.CreateOperation, Storage: storage, @@ -81,56 +82,43 @@ func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) { "bind_secret_id": true, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\n resp: %#v", err, resp) - } count := 10 // Create secret IDs on testrole1 for i := 0; i < count; i++ { - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole1/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } } // Check the number of secret IDs generated - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole1/secret-id", Operation: logical.ListOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", 
resp, err) - } + if len(resp.Data["keys"].([]string)) != count { t.Fatalf("failed to list secret IDs") } // Create secret IDs on testrole1 for i := 0; i < count; i++ { - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole2/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole2/secret-id", Operation: logical.ListOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + if len(resp.Data["keys"].([]string)) != count { t.Fatalf("failed to list secret IDs") } @@ -164,14 +152,12 @@ func TestAppRole_UpgradeSecretIDPrefix(t *testing.T) { } // Ensure that the API response contains local_secret_ids - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\n resp: %#v", err, resp) - } + _, ok := resp.Data["local_secret_ids"] if !ok { t.Fatalf("expected local_secret_ids to be present in the response") @@ -192,15 +178,12 @@ func TestAppRole_LocalSecretIDImmutability(t *testing.T) { } // Create a role with local_secret_ids set - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Storage: storage, Data: roleData, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } // Attempt to modify local_secret_ids should fail resp, err = b.HandleRequest(context.Background(), &logical.Request{ @@ -230,25 +213,19 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { } // Create a role with bound_cidr_list set - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.CreateOperation, Storage: storage, Data: roleData, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } // Read the role and check that the bound_cidr_list is set properly - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } expected := []string{"127.0.0.1/18", "192.178.1.2/24"} actual := resp.Data["secret_id_bound_cidrs"].([]string) @@ -272,20 +249,18 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { } // Read the role. 
The upgrade code should have migrated the old type to the new type - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole", Operation: logical.ReadOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } + if !reflect.DeepEqual(expected, actual) { t.Fatalf("bad: bound_cidr_list; expected: %#v\nactual: %#v\n", expected, actual) } // Create a secret-id by supplying a subset of the role's CIDR blocks with the new type - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole/secret-id", Operation: logical.UpdateOperation, Storage: storage, @@ -293,15 +268,13 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { "cidr_list": []string{"127.0.0.1/24"}, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } + if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret-id") } // Check that the backwards compatibility for the string type is not broken - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrole/secret-id", Operation: logical.UpdateOperation, Storage: storage, @@ -309,9 +282,7 @@ func TestAppRole_UpgradeBoundCIDRList(t *testing.T) { "cidr_list": "127.0.0.1/24", }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: err: %v\nresp: %#v", err, resp) - } + if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret-id") } @@ -342,15 +313,13 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { Operation: logical.UpdateOperation, Storage: storage, } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + resp = b.requestNoErr(t, secretIDReq) + secretID = resp.Data["secret_id"].(string) roleID = "testroleid" // Regular login flow. This should succeed. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Storage: storage, @@ -359,16 +328,11 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } // Lower case the role name when generating the secret id secretIDReq.Path = "role/testrolename/secret-id" - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + resp = b.requestNoErr(t, secretIDReq) + secretID = resp.Data["secret_id"].(string) // Login should fail @@ -391,14 +355,11 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { // Delete the role and create it again. This time don't directly persist // it, but route the request to the creation handler so that it sets the // LowerCaseRoleName to true. 
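The removed HandleRequest/IsError stanzas here, and throughout the rest of this file, are exactly what the new requestNoErr helper collapses: it fails the test on any error and additionally validates each response against the response schema declared on the matched path. A minimal sketch of the resulting idiom, assuming the existing createBackendWithStorage fixture and a hypothetical role name:

    // Sketch only: the refactored test idiom used across this file.
    // requestNoErr wraps b.HandleRequest, fails the test on err or an
    // error response, and schema-validates the reply as a side effect.
    b, storage := createBackendWithStorage(t)
    b.requestNoErr(t, &logical.Request{
        Operation: logical.CreateOperation,
        Path:      "role/demo", // hypothetical role name
        Storage:   storage,
        Data:      map[string]interface{}{"token_policies": "a,b"},
    })
    resp := b.requestNoErr(t, &logical.Request{
        Operation: logical.ReadOperation,
        Path:      "role/demo",
        Storage:   storage,
    })
    _ = resp.Data["token_policies"] // response already schema-checked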
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testRoleName", Operation: logical.DeleteOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } roleReq := &logical.Request{ Path: "role/testRoleName", @@ -408,34 +369,27 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "bind_secret_id": true, }, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + resp = b.requestNoErr(t, roleReq) // Create secret id with lower cased role name - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrolename/secret-id", Operation: logical.UpdateOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + secretID = resp.Data["secret_id"].(string) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrolename/role-id", Operation: logical.ReadOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + roleID = resp.Data["role_id"].(string) // Login should pass - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "login", Operation: logical.UpdateOperation, Storage: storage, @@ -444,12 +398,9 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr:%v", resp, err) - } // Lookup of secret ID should work in case-insensitive manner - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrolename/secret-id/lookup", Operation: logical.UpdateOperation, Storage: storage, @@ -457,22 +408,17 @@ func TestAppRole_RoleNameLowerCasing(t *testing.T) { "secret_id": secretID, }, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } + if resp == nil { t.Fatalf("failed to lookup secret IDs") } // Listing of secret IDs should work in case-insensitive manner - resp, err = b.HandleRequest(context.Background(), &logical.Request{ + resp = b.requestNoErr(t, &logical.Request{ Path: "role/testrolename/secret-id", Operation: logical.ListOperation, Storage: storage, }) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) - } if len(resp.Data["keys"].([]string)) != 1 { t.Fatalf("failed to list secret IDs") @@ -495,10 +441,7 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { } // Create a role - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) - } + resp = b.requestNoErr(t, roleReq) roleIDReq := &logical.Request{ Path: "role/testrole/role-id", @@ -507,10 +450,8 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { } // Get the role ID - resp, err = b.HandleRequest(context.Background(), roleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) - } + resp = b.requestNoErr(t, roleIDReq) + roleID := resp.Data["role_id"].(string) // 
Delete the role ID index @@ -521,10 +462,7 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { // Read the role again. This should add the index and return a warning roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) - } + resp = b.requestNoErr(t, roleReq) // Check if the warning is being returned if !strings.Contains(resp.Warnings[0], "Role identifier was missing an index back to role name.") { @@ -549,15 +487,10 @@ func TestAppRole_RoleReadSetIndex(t *testing.T) { // Check if updating and reading of roles work and that there are no lock // contentions dangling due to previous operation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) - } + resp = b.requestNoErr(t, roleReq) + roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("bad: resp: %#v\n err: %v\n", resp, err) - } + resp = b.requestNoErr(t, roleReq) } func TestAppRole_CIDRSubset(t *testing.T) { @@ -579,10 +512,7 @@ func TestAppRole_CIDRSubset(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err: %v resp: %#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) secretIDData := map[string]interface{}{ "cidr_list": "127.0.0.1/16", @@ -604,19 +534,10 @@ func TestAppRole_CIDRSubset(t *testing.T) { roleData["bound_cidr_list"] = "192.168.27.29/16,172.245.30.40/24,10.20.30.40/30" roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err: %v resp: %#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32" - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil { - t.Fatal(err) - } - if resp != nil && resp.IsError() { - t.Fatalf("resp: %#v", resp) - } + resp = b.requestNoErr(t, secretIDReq) } func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { @@ -638,10 +559,7 @@ func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err: %v resp: %#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) secretIDData := map[string]interface{}{ "token_bound_cidrs": "127.0.0.1/32", @@ -653,10 +571,7 @@ func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { Data: secretIDData, } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil { - t.Fatalf("err: %v resp: %#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) secretIDData = map[string]interface{}{ "token_bound_cidrs": "127.0.0.1/24", @@ -696,19 +611,13 @@ func TestAppRole_RoleConstraints(t *testing.T) { } // Set bind_secret_id, which is enabled by default - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) // Set bound_cidr_list alone by explicitly disabling bind_secret_id roleReq.Operation = logical.UpdateOperation roleData["bind_secret_id"] = false 
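// Note: bind_secret_id and bound_cidr_list are the two authentication
// constraints exercised by this test; disabling bind_secret_id is accepted
// only because bound_cidr_list is set in the same update. The "Remove both
// constraints" step below stays on the explicit error-checking path, since a
// role stripped of every constraint is expected to be rejected.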
roleData["bound_cidr_list"] = "0.0.0.0/0" - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) // Remove both constraints roleReq.Operation = logical.UpdateOperation @@ -725,7 +634,6 @@ func TestAppRole_RoleConstraints(t *testing.T) { func TestAppRole_RoleIDUpdate(t *testing.T) { var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -742,10 +650,7 @@ func TestAppRole_RoleIDUpdate(t *testing.T) { Storage: storage, Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleIDUpdateReq := &logical.Request{ Operation: logical.UpdateOperation, @@ -755,20 +660,15 @@ func TestAppRole_RoleIDUpdate(t *testing.T) { "role_id": "customroleid", }, } - resp, err = b.HandleRequest(context.Background(), roleIDUpdateReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleIDUpdateReq) secretIDReq := &logical.Request{ Operation: logical.UpdateOperation, Storage: storage, Path: "role/testrole1/secret-id", } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) + secretID := resp.Data["secret_id"].(string) loginData := map[string]interface{}{ @@ -784,10 +684,7 @@ func TestAppRole_RoleIDUpdate(t *testing.T) { RemoteAddr: "127.0.0.1", }, } - resp, err = b.HandleRequest(context.Background(), loginReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, loginReq) if resp.Auth == nil { t.Fatalf("expected a non-nil auth object in the response") @@ -814,10 +711,7 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Path = "role/testrole2" resp, err = b.HandleRequest(context.Background(), roleReq) @@ -826,10 +720,7 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { } roleData["role_id"] = "role-id-456" - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.UpdateOperation roleData["role_id"] = "role-id-123" @@ -867,22 +758,15 @@ func TestAppRole_RoleIDUniqueness(t *testing.T) { } roleIDData["role_id"] = "role-id-2000" - resp, err = b.HandleRequest(context.Background(), roleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleIDReq) roleIDData["role_id"] = "role-id-1000" roleIDReq.Path = "role/testrole1/role-id" - resp, err = b.HandleRequest(context.Background(), roleIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleIDReq) } func TestAppRole_RoleDeleteSecretID(t *testing.T) { var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -892,28 
+776,17 @@ func TestAppRole_RoleDeleteSecretID(t *testing.T) { Path: "role/role1/secret-id", } // Create 3 secrets on the role - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id", } - resp, err = b.HandleRequest(context.Background(), listReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, listReq) + secretIDAccessors := resp.Data["keys"].([]string) if len(secretIDAccessors) != 3 { t.Fatalf("bad: len of secretIDAccessors: expected:3 actual:%d", len(secretIDAccessors)) @@ -924,11 +797,9 @@ func TestAppRole_RoleDeleteSecretID(t *testing.T) { Storage: storage, Path: "role/role1", } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), listReq) + resp = b.requestNoErr(t, roleReq) + + resp, err := b.HandleRequest(context.Background(), listReq) if err != nil || resp == nil || (resp != nil && !resp.IsError()) { t.Fatalf("expected an error. err:%v resp:%#v", err, resp) } @@ -945,10 +816,7 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { Storage: storage, Path: "role/role1/secret-id", } - resp, err = b.HandleRequest(context.Background(), secretIDCreateReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDCreateReq) secretID := resp.Data["secret_id"].(string) if secretID == "" { @@ -963,10 +831,8 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { "secret_id": secretID, }, } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) + if resp.Data == nil { t.Fatal(err) } @@ -979,11 +845,7 @@ func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { "secret_id": secretID, }, } - resp, err = b.HandleRequest(context.Background(), deleteSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - + resp = b.requestNoErr(t, deleteSecretIDReq) resp, err = b.HandleRequest(context.Background(), secretIDReq) if resp != nil && resp.IsError() { t.Fatalf("error response:%#v", resp) @@ -1004,20 +866,15 @@ func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) { Storage: storage, Path: "role/role1/secret-id", } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id", } - resp, err = b.HandleRequest(context.Background(), listReq) - if err != nil || (resp != nil && resp.IsError()) { 
- t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, listReq) + hmacSecretID := resp.Data["keys"].([]string)[0] hmacReq := &logical.Request{ @@ -1028,19 +885,14 @@ func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) { "secret_id_accessor": hmacSecretID, }, } - resp, err = b.HandleRequest(context.Background(), hmacReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, hmacReq) + if resp.Data == nil { t.Fatal(err) } hmacReq.Path = "role/role1/secret-id-accessor/destroy" - resp, err = b.HandleRequest(context.Background(), hmacReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, hmacReq) hmacReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), hmacReq) @@ -1071,7 +923,7 @@ func TestAppRoleSecretIDLookup(t *testing.T) { expected := &logical.Response{ Data: map[string]interface{}{ "http_content_type": "application/json", - "http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null}`, + "http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null,"mount_type":""}`, "http_status_code": 404, }, } @@ -1082,7 +934,6 @@ func TestAppRoleSecretIDLookup(t *testing.T) { func TestAppRoleRoleListSecretID(t *testing.T) { var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -1093,36 +944,19 @@ func TestAppRoleRoleListSecretID(t *testing.T) { Path: "role/role1/secret-id", } // Create 5 'secret_id's - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) listReq := &logical.Request{ Operation: logical.ListOperation, Storage: storage, Path: "role/role1/secret-id/", } - resp, err = b.HandleRequest(context.Background(), listReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, listReq) + secrets := resp.Data["keys"].([]string) if len(secrets) != 5 { t.Fatalf("bad: len of secrets: expected:5 actual:%d", len(secrets)) @@ -1131,7 +965,6 @@ func TestAppRoleRoleListSecretID(t *testing.T) { func TestAppRole_RoleList(t *testing.T) { var resp *logical.Response - var err error b, storage := 
createBackendWithStorage(t) createRole(t, b, storage, "role1", "a,b") @@ -1145,10 +978,7 @@ func TestAppRole_RoleList(t *testing.T) { Path: "role", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), listReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, listReq) actual := resp.Data["keys"].([]string) expected := []string{"role1", "role2", "role3", "role4", "role5"} @@ -1159,7 +989,6 @@ func TestAppRole_RoleList(t *testing.T) { func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -1176,20 +1005,14 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleSecretIDReq := &logical.Request{ Operation: logical.UpdateOperation, Path: "role/role1/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret_id") @@ -1206,10 +1029,7 @@ func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { "secret_id": "abcd123", } roleSecretIDReq.Data = roleCustomSecretIDData - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) if resp.Data["secret_id"] != "abcd123" { t.Fatalf("failed to set specific secret_id to role") @@ -1229,7 +1049,6 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { } var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) roleData := map[string]interface{}{ @@ -1246,10 +1065,7 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) testCases := []testCase{ { @@ -1280,10 +1096,7 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { roleCustomSecretIDData := tc.payload roleSecretIDReq.Data = roleCustomSecretIDData - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) if resp.Data["secret_id"].(string) == "" { t.Fatalf("failed to generate secret_id") @@ -1297,10 +1110,7 @@ func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { roleSecretIDReq.Path = "role/role1/custom-secret-id" roleSecretIDReq.Data = roleCustomSecretIDData - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleSecretIDReq) if resp.Data["secret_id"] != tc.payload["secret_id"] { t.Fatalf("failed to set specific secret_id to role") @@ -1416,10 +1226,7 @@ func TestAppRole_ErrorsRoleSecretIDWithInvalidFields(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if 
err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) for _, tc := range rc.cases { t.Run(fmt.Sprintf("%s/%s", rc.name, tc.name), func(t *testing.T) { @@ -1471,16 +1278,10 @@ func TestAppRole_RoleCRUD(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected := map[string]interface{}{ "bind_secret_id": true, @@ -1523,16 +1324,10 @@ func TestAppRole_RoleCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -1558,26 +1353,19 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RU for role_id field roleReq.Path = "role/role1/role-id" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if resp.Data["role_id"].(string) != "test_role_id" { t.Fatalf("bad: role_id: expected:test_role_id actual:%s\n", resp.Data["role_id"].(string)) } roleReq.Data = map[string]interface{}{"role_id": "custom_role_id"} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if resp.Data["role_id"].(string) != "custom_role_id" { t.Fatalf("bad: role_id: expected:custom_role_id actual:%s\n", resp.Data["role_id"].(string)) } @@ -1585,38 +1373,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for bind_secret_id field roleReq.Path = "role/role1/bind-secret-id" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"bind_secret_id": false} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = 
b.requestNoErr(t, roleReq) if resp.Data["bind_secret_id"].(bool) { t.Fatalf("bad: bind_secret_id: expected:false actual:%t\n", resp.Data["bind_secret_id"].(bool)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if !resp.Data["bind_secret_id"].(bool) { t.Fatalf("expected the default value of 'true' to be set") @@ -1625,23 +1398,14 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for policies field roleReq.Path = "role/role1/policies" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"policies": "a1,b1,c1,d1"} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) @@ -1650,16 +1414,10 @@ func TestAppRole_RoleCRUD(t *testing.T) { t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expectedPolicies := []string{} actualPolicies := resp.Data["token_policies"].([]string) @@ -1670,38 +1428,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret-id-num-uses field roleReq.Path = "role/role1/secret-id-num-uses" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"secret_id_num_uses": 200} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["secret_id_num_uses"].(int) != 200 { t.Fatalf("bad: secret_id_num_uses: expected:200 
actual:%d\n", resp.Data["secret_id_num_uses"].(int)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["secret_id_num_uses"].(int) != 0 { t.Fatalf("expected value to be reset") @@ -1710,38 +1453,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret_id_ttl field roleReq.Path = "role/role1/secret-id-ttl" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"secret_id_ttl": 3001} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["secret_id_ttl"].(time.Duration) != 3001 { t.Fatalf("bad: secret_id_ttl: expected:3001 actual:%d\n", resp.Data["secret_id_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["secret_id_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1750,42 +1478,28 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for secret-id-num-uses field roleReq.Path = "role/role1/token-num-uses" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if resp.Data["token_num_uses"].(int) != 600 { t.Fatalf("bad: token_num_uses: expected:600 actual:%d\n", resp.Data["token_num_uses"].(int)) } roleReq.Data = map[string]interface{}{"token_num_uses": 60} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_num_uses"].(int) != 60 { t.Fatalf("bad: token_num_uses: expected:60 actual:%d\n", resp.Data["token_num_uses"].(int)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), 
roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_num_uses"].(int) != 0 { t.Fatalf("expected value to be reset") @@ -1794,38 +1508,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for 'period' field roleReq.Path = "role/role1/period" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"period": 9001} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["period"].(time.Duration) != 9001 { t.Fatalf("bad: period: expected:9001 actual:%d\n", resp.Data["period"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_period"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1834,38 +1533,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for token_ttl field roleReq.Path = "role/role1/token-ttl" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"token_ttl": 4001} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_ttl"].(time.Duration) != 4001 { t.Fatalf("bad: token_ttl: expected:4001 actual:%d\n", resp.Data["token_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } +
resp = b.requestNoErr(t, roleReq) if resp.Data["token_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1874,38 +1558,23 @@ func TestAppRole_RoleCRUD(t *testing.T) { // RUD for token_max_ttl field roleReq.Path = "role/role1/token-max-ttl" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Data = map[string]interface{}{"token_max_ttl": 5001} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_max_ttl"].(time.Duration) != 5001 { t.Fatalf("bad: token_max_ttl: expected:5001 actual:%d\n", resp.Data["token_max_ttl"].(time.Duration)) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_max_ttl"].(time.Duration) != 0 { t.Fatalf("expected value to be reset") @@ -1914,10 +1583,7 @@ func TestAppRole_RoleCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) @@ -1952,16 +1618,10 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected := map[string]interface{}{ "bind_secret_id": true, @@ -2004,16 +1664,10 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -2039,10 +1693,8 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t 
*testing.T) { // RUD for secret-id-bound-cidrs field roleReq.Path = "role/role1/secret-id-bound-cidrs" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/32" || resp.Data["secret_id_bound_cidrs"].([]string)[1] != "127.0.0.1/16" { t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%v\n", resp.Data["secret_id_bound_cidrs"].([]string)) @@ -2050,32 +1702,20 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = map[string]interface{}{"secret_id_bound_cidrs": []string{"127.0.0.1/20"}} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/20" { t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["secret_id_bound_cidrs"].([]string)[0]) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if len(resp.Data["secret_id_bound_cidrs"].([]string)) != 0 { t.Fatalf("expected value to be reset") @@ -2084,10 +1724,8 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { // RUD for token-bound-cidrs field roleReq.Path = "role/role1/token-bound-cidrs" roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1" || resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[1].String() != "127.0.0.1/16" { m, err := json.Marshal(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) @@ -2099,32 +1737,20 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { roleReq.Data = map[string]interface{}{"token_bound_cidrs": []string{"127.0.0.1/20"}} roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1/20" { t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/20 actual:%s\n",
resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0]) } roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) if len(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) != 0 { t.Fatalf("expected value to be reset") @@ -2133,17 +1759,13 @@ func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } - if resp != nil { t.Fatalf("expected a nil response") } @@ -2170,19 +1792,14 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if 0 == len(resp.Warnings) { t.Fatalf("bad:\nexpected warning in resp:%#v\n", resp.Warnings) } roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected := map[string]interface{}{ "bind_secret_id": true, @@ -2224,19 +1841,14 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { roleReq.Data = roleData roleReq.Operation = logical.UpdateOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) + if 0 == len(resp.Warnings) { t.Fatalf("bad:\nexpected a warning in resp:%#v\n", resp.Warnings) } roleReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) expected = map[string]interface{}{ "policies": []string{"a", "b", "c", "d"}, @@ -2263,17 +1875,13 @@ func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { // Delete test for role roleReq.Path = "role/role1" roleReq.Operation = logical.DeleteOperation - resp, err = b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, roleReq) roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%v resp:%#v", err, resp) } - if resp != nil { t.Fatalf("expected a nil response") } @@ -2293,11 +1901,7 @@ func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies Storage: s, Data: roleData, } - - resp, err := b.HandleRequest(context.Background(), roleReq) - if err 
!= nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + _ = b.requestNoErr(t, roleReq) } // TestAppRole_TokenutilUpgrade ensures that when we read values out that are @@ -2433,10 +2037,7 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { Storage: storage, Data: roleData, } - resp, err := b.HandleRequest(context.Background(), roleReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp := b.requestNoErr(t, roleReq) // Generate secret ID secretIDReq := &logical.Request{ @@ -2444,10 +2045,7 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { Path: "role/" + tt.roleName + "/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), secretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + resp = b.requestNoErr(t, secretIDReq) // Extract the "ttl" value from the response data if it exists ttlRaw, okTTL := resp.Data["secret_id_ttl"] @@ -2461,7 +2059,7 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { ) respTTL, ok = ttlRaw.(int64) if !ok { - t.Fatalf("expected ttl to be an integer, got: %s", err) + t.Fatalf("expected ttl to be an integer, got: %T", ttlRaw) } // Verify secret ID response for different cases @@ -2478,3 +2076,59 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { }) } } + +// TestAppRole_RoleSecretIDAccessorCrossDelete tests deleting a secret id via +// secret id accessor belonging to a different role +func TestAppRole_RoleSecretIDAccessorCrossDelete(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + // Create First Role + createRole(t, b, storage, "role1", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + }) + + // Create Second Role + createRole(t, b, storage, "role2", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Get role2 secretID Accessor + resp = b.requestNoErr(t, &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Read back role2 secretID Accessor information + hmacSecretID := resp.Data["keys"].([]string)[0] + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id-accessor/lookup", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + // Attempt to destroy role2 secretID accessor using role1 path + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id-accessor/destroy", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + if err == nil { + t.Fatalf("expected error") + } +} diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go index b7c6fcc6b1ec..0367a0940f29 100644 --- a/builtin/credential/approle/path_tidy_user_id.go +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -17,8 +20,21 @@ func pathTidySecretID(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy/secret-id$", - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathTidySecretIDUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "tidy", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidySecretIDUpdate, + Responses: map[int][]framework.Response{ + http.StatusAccepted: {{ + Description: http.StatusText(http.StatusAccepted), + }}, + }, + }, }, HelpSynopsis: pathTidySecretIDSyn, diff --git a/builtin/credential/approle/path_tidy_user_id_test.go b/builtin/credential/approle/path_tidy_user_id_test.go index dd7dfdfd7d65..4b932cd11f5e 100644 --- a/builtin/credential/approle/path_tidy_user_id_test.go +++ b/builtin/credential/approle/path_tidy_user_id_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -8,12 +11,11 @@ import ( "testing" "time" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" ) func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { - var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) // Create a role @@ -25,10 +27,7 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + _ = b.requestNoErr(t, roleSecretIDReq) accessorHashes, err := storage.List(context.Background(), "accessor/") if err != nil { @@ -73,12 +72,18 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { t.Fatalf("bad: len(accessorHashes); expect 3, got %d", len(accessorHashes)) } - _, err = b.tidySecretID(context.Background(), &logical.Request{ + secret, err := b.tidySecretID(context.Background(), &logical.Request{ Storage: storage, }) if err != nil { t.Fatal(err) } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), + secret, + true, + ) // It runs async so we give it a bit of time to run time.Sleep(10 * time.Second) @@ -93,8 +98,6 @@ func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { } func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { - var resp *logical.Response - var err error b, storage := createBackendWithStorage(t) // Create a role @@ -106,22 +109,26 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + _ = b.requestNoErr(t, roleSecretIDReq) + count := 1 wg := &sync.WaitGroup{} start := time.Now() for time.Now().Sub(start) < 10*time.Second { if time.Now().Sub(start) > 100*time.Millisecond && atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { - _, err = b.tidySecretID(context.Background(), &logical.Request{ + secret, err := b.tidySecretID(context.Background(), &logical.Request{ Storage: storage, }) if err != nil { t.Fatal(err) } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), 
logical.UpdateOperation), + secret, + true, + ) } wg.Add(1) go func() { @@ -131,10 +138,7 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { Path: "role/role1/secret-id", Storage: storage, } - resp, err := b.HandleRequest(context.Background(), roleSecretIDReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%v resp:%#v", err, resp) - } + _ = b.requestNoErr(t, roleSecretIDReq) }() entry, err := logical.StorageEntryJSON( @@ -173,6 +177,12 @@ func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { if err != nil || len(secret.Warnings) > 0 { t.Fatal(err, secret.Warnings) } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), + secret, + true, + ) // Wait for tidy to start for atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { diff --git a/builtin/credential/approle/validation.go b/builtin/credential/approle/validation.go index 9b3f87827013..b99af755d818 100644 --- a/builtin/credential/approle/validation.go +++ b/builtin/credential/approle/validation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( diff --git a/builtin/credential/approle/validation_test.go b/builtin/credential/approle/validation_test.go index ff325f4b1be9..d3386aa35615 100644 --- a/builtin/credential/approle/validation_test.go +++ b/builtin/credential/approle/validation_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package approle import ( diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 543608968396..df5f7ec23325 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -17,7 +20,11 @@ import ( cache "github.com/patrickmn/go-cache" ) -const amzHeaderPrefix = "X-Amz-" +const ( + amzHeaderPrefix = "X-Amz-" + amzSignedHeaders = "X-Amz-SignedHeaders" + operationPrefixAWS = "aws" +) var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Algorithm", @@ -26,7 +33,8 @@ var defaultAllowedSTSRequestHeaders = []string{ "X-Amz-Date", "X-Amz-Security-Token", "X-Amz-Signature", - "X-Amz-SignedHeaders", + amzSignedHeaders, + "X-Amz-User-Agent", } func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { @@ -123,7 +131,9 @@ func Backend(_ *logical.BackendConfig) (*backend, error) { deprecatedTerms: strings.NewReplacer( "accesslist", "whitelist", + "access-list", "whitelist", "denylist", "blacklist", + "deny-list", "blacklist", ), } @@ -309,7 +319,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag switch entity.Type { case "user": - userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName}) + userInfo, err := iamClient.GetUserWithContext(ctx, &iam.GetUserInput{UserName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -318,7 +328,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *userInfo.User.UserId, nil case "role": - roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName}) + roleInfo, err := iamClient.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -327,7 +337,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } return *roleInfo.Role.RoleId, nil case "instance-profile": - profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) + profileInfo, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName}) if err != nil { return "", awsutil.AppendAWSError(err) } @@ -340,13 +350,33 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag } } -// genDeprecatedPath will return a deprecated version of a framework.Path. The will include -// using deprecated terms in the path pattern, and marking the path as deprecated. +// genDeprecatedPath will return a deprecated version of a framework.Path. The +// path pattern and display attributes (if any) will contain deprecated terms, +// and the path will be marked as deprecated. 
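// Editor's note — an illustrative usage sketch, not part of this change. Given the
// deprecatedTerms replacer built in Backend() above ("accesslist" -> "whitelist",
// "access-list" -> "whitelist", "denylist" -> "blacklist", "deny-list" -> "blacklist"),
// a caller can register each modern path alongside its legacy twin. The exact wiring
// inside Backend() is an assumption here:
//
//	p := b.pathIdentityAccessList()  // serves identity-accesslist/<instance_id>
//	dep := b.genDeprecatedPath(p)    // serves identity-whitelist/<instance_id>, with Deprecated: true
//	b.Backend.Paths = append(b.Backend.Paths, p, dep)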
func (b *backend) genDeprecatedPath(path *framework.Path) *framework.Path { pathDeprecated := *path pathDeprecated.Pattern = b.deprecatedTerms.Replace(path.Pattern) pathDeprecated.Deprecated = true + if path.DisplayAttrs != nil { + deprecatedDisplayAttrs := *path.DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationSuffix) + pathDeprecated.DisplayAttrs = &deprecatedDisplayAttrs + } + + for i, op := range path.Operations { + if op.Properties().DisplayAttrs != nil { + deprecatedDisplayAttrs := *op.Properties().DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationSuffix) + deprecatedProperties := pathDeprecated.Operations[i].(*framework.PathOperation) + deprecatedProperties.DisplayAttrs = &deprecatedDisplayAttrs + } + } + return &pathDeprecated } diff --git a/builtin/credential/aws/backend_e2e_test.go b/builtin/credential/aws/backend_e2e_test.go index ac2bb22f129a..ea8076b8761c 100644 --- a/builtin/credential/aws/backend_e2e_test.go +++ b/builtin/credential/aws/backend_e2e_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -5,10 +8,8 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -101,9 +102,7 @@ func TestBackend_E2E_Initialize(t *testing.T) { func setupAwsTestCluster(t *testing.T, _ context.Context) *vault.TestCluster { // create a cluster with the aws auth backend built-in - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "aws": Factory, }, diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go index 5b435d3e3a5c..d56478266d9a 100644 --- a/builtin/credential/aws/backend_test.go +++ b/builtin/credential/aws/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -1501,7 +1504,7 @@ func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[s "iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())), "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson), "iam_request_body": base64.StdEncoding.EncodeToString(requestBody), - "request_role": roleName, + "role": roleName, }, nil } @@ -1521,6 +1524,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { return } + ctx := context.Background() storage := &logical.InmemStorage{} config := logical.TestBackendConfig() config.StorageView = storage @@ -1599,7 +1603,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: clientConfigData, } - _, err = b.HandleRequest(context.Background(), clientRequest) + _, err = b.HandleRequest(ctx, clientRequest) if err != nil { t.Fatal(err) } @@ -1613,7 +1617,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: configIdentityData, } - resp, err := b.HandleRequest(context.Background(), configIdentityRequest) + resp, err := b.HandleRequest(ctx, configIdentityRequest) if err != nil { t.Fatal(err) } @@ -1633,7 +1637,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: roleData, } - resp, err = b.HandleRequest(context.Background(), roleRequest) + resp, err = b.HandleRequest(ctx, roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) } @@ -1650,7 +1654,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: roleDataEc2, } - resp, err = b.HandleRequest(context.Background(), roleRequestEc2) + resp, err = b.HandleRequest(ctx, roleRequestEc2) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err) } @@ -1688,7 +1692,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to missing header: resp:%#v\nerr:%v", resp, err) } @@ -1711,7 +1715,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to invalid header: resp:%#v\nerr:%v", resp, err) } @@ -1730,13 +1734,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Storage: storage, Data: loginData, } - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to invalid role: resp:%#v\nerr:%v", resp, err) } loginData["role"] = "ec2only" - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err) } @@ -1744,7 +1748,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // finally, the happy path test :) loginData["role"] = testValidRoleName - resp, err = 
b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil { t.Fatal(err) } @@ -1767,7 +1771,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { Schema: b.pathLogin().Fields, } // ensure we can renew - resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) if err != nil { t.Fatal(err) } @@ -1785,17 +1789,17 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { // pick up the fake user ID roleData["bound_iam_principal_arn"] = entity.canonicalArn() roleRequest.Path = "role/" + testValidRoleName - resp, err = b.HandleRequest(context.Background(), roleRequest) + resp, err = b.HandleRequest(ctx, roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err) } - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil || resp == nil || !resp.IsError() { t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err) } // and ensure a renew no longer works - resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) if err == nil || (resp != nil && !resp.IsError()) { t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp) } @@ -1808,13 +1812,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { wildcardEntity.FriendlyName = "*" roleData["bound_iam_principal_arn"] = []string{wildcardEntity.canonicalArn(), "arn:aws:iam::123456789012:role/DoesNotExist/Vault_Fake_Role*"} roleRequest.Path = "role/" + wildcardRoleName - resp, err = b.HandleRequest(context.Background(), roleRequest) + resp, err = b.HandleRequest(ctx, roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed to create wildcard roles: resp:%#v\nerr:%v", resp, err) } loginData["role"] = wildcardRoleName - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil { t.Fatal(err) } @@ -1823,7 +1827,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { } // and ensure we can renew renewReq = generateRenewRequest(storage, resp.Auth) - resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) if err != nil { t.Fatal(err) } @@ -1834,7 +1838,17 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { t.Fatalf("got error when renewing: %#v", *resp) } // ensure the cache is populated - cachedArn := b.getCachedUserId(resp.Auth.Metadata["client_user_id"]) + + clientUserIDRaw, ok := resp.Auth.InternalData["client_user_id"] + if !ok { + t.Errorf("client_user_id not found in response") + } + clientUserID, ok := clientUserIDRaw.(string) + if !ok { + t.Errorf("client_user_id is not a string: %#v", clientUserIDRaw) + } + + cachedArn := b.getCachedUserId(clientUserID) if cachedArn == "" { t.Errorf("got empty ARN back from user ID cache; expected full arn") } @@ -1843,13 +1857,13 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { period := 600 * time.Second roleData["period"] = period.String() roleRequest.Path = "role/" + testValidRoleName - resp, err = b.HandleRequest(context.Background(), roleRequest) + resp, err = b.HandleRequest(ctx, roleRequest) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: failed 
to update role with period: resp:%#v\nerr:%v", resp, err) } loginData["role"] = testValidRoleName - resp, err = b.HandleRequest(context.Background(), loginRequest) + resp, err = b.HandleRequest(ctx, loginRequest) if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/certificates.go b/builtin/credential/aws/certificates.go index c745ad2b3f24..a56b757521f5 100644 --- a/builtin/credential/aws/certificates.go +++ b/builtin/credential/aws/certificates.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go index 7b063fa5f42a..2cc228ac54b0 100644 --- a/builtin/credential/aws/cli.go +++ b/builtin/credential/aws/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( diff --git a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go index ff8ff5c837f9..bed15bb6a491 100644 --- a/builtin/credential/aws/client.go +++ b/builtin/credential/aws/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -119,7 +122,7 @@ func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region return nil, fmt.Errorf("could not obtain sts client: %w", err) } inputParams := &sts.GetCallerIdentityInput{} - identity, err := client.GetCallerIdentity(inputParams) + identity, err := client.GetCallerIdentityWithContext(ctx, inputParams) if err != nil { return nil, fmt.Errorf("unable to fetch current caller: %w", err) } @@ -216,7 +219,6 @@ func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, acco // Create an AWS config object using a chain of providers var awsConfig *aws.Config awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "ec2") - if err != nil { return nil, err } @@ -276,7 +278,6 @@ func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, acco // Create an AWS config object using a chain of providers var awsConfig *aws.Config awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "iam") - if err != nil { return nil, err } diff --git a/builtin/credential/aws/cmd/aws/main.go b/builtin/credential/aws/cmd/aws/main.go index 6de96d02d196..8a1ecff0bee9 100644 --- a/builtin/credential/aws/cmd/aws/main.go +++ b/builtin/credential/aws/cmd/aws/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: awsauth.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don't support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/aws/path_config_certificate.go b/builtin/credential/aws/path_config_certificate.go index f734694781b3..1b15fcf3b574 100644 --- a/builtin/credential/aws/path_config_certificate.go +++ b/builtin/credential/aws/path_config_certificate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -18,6 +21,11 @@ func (b *backend) pathListCertificates() *framework.Path { return &framework.Path{ Pattern: "config/certificates/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "certificate-configurations", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathCertificatesList, @@ -32,6 +40,11 @@ func (b *backend) pathListCertificates() *framework.Path { func (b *backend) pathConfigCertificate() *framework.Path { return &framework.Path{ Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "cert_name": { Type: framework.TypeString, @@ -58,15 +71,29 @@ vary. Defaults to "pkcs7".`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigCertificateDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, }, }, diff --git a/builtin/credential/aws/path_config_client.go b/builtin/credential/aws/path_config_client.go index c609e1acd608..ed6e5d89819b 100644 --- a/builtin/credential/aws/path_config_client.go +++ b/builtin/credential/aws/path_config_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -5,10 +8,12 @@ import ( "errors" "net/http" "net/textproto" + "net/url" "strings" "github.com/aws/aws-sdk-go/aws" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -16,6 +21,11 @@ import ( func (b *backend) pathConfigClient() *framework.Path { return &framework.Path{ Pattern: "config/client$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -53,6 +63,12 @@ func (b *backend) pathConfigClient() *framework.Path { Description: "The region ID for the sts_endpoint, if set.", }, + "use_sts_region_from_client": { + Type: framework.TypeBool, + Default: false, + Description: "Uses the STS region from client requests for making AWS STS API calls.", + }, + "iam_server_id_header_value": { Type: framework.TypeString, Default: "", @@ -77,15 +93,29 @@ func (b *backend) pathConfigClient() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "client", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "client", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigClientDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigClientRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, }, }, @@ -146,6 +176,7 @@ func (b *backend) pathConfigClientRead(ctx context.Context, req *logical.Request "iam_endpoint": clientConfig.IAMEndpoint, "sts_endpoint": clientConfig.STSEndpoint, "sts_region": clientConfig.STSRegion, + "use_sts_region_from_client": clientConfig.UseSTSRegionFromClient, "iam_server_id_header_value": clientConfig.IAMServerIdHeaderValue, "max_retries": clientConfig.MaxRetries, "allowed_sts_header_values": clientConfig.AllowedSTSHeaderValues, @@ -259,6 +290,14 @@ func (b *backend) pathConfigClientCreateUpdate(ctx context.Context, req *logical } } + useSTSRegionFromClientRaw, ok := data.GetOk("use_sts_region_from_client") + if ok { + if configEntry.UseSTSRegionFromClient != useSTSRegionFromClientRaw.(bool) { + changedCreds = true + configEntry.UseSTSRegionFromClient = useSTSRegionFromClientRaw.(bool) + } + } + headerValStr, ok := data.GetOk("iam_server_id_header_value") if ok { if configEntry.IAMServerIdHeaderValue != headerValStr.(string) { @@ -341,6 +380,7 @@ type clientConfig struct { IAMEndpoint string `json:"iam_endpoint"` STSEndpoint string `json:"sts_endpoint"` STSRegion string `json:"sts_region"` + UseSTSRegionFromClient bool `json:"use_sts_region_from_client"` IAMServerIdHeaderValue string `json:"iam_server_id_header_value"` AllowedSTSHeaderValues []string `json:"allowed_sts_header_values"` MaxRetries int `json:"max_retries"` @@ -349,6 +389,9 @@ type clientConfig struct { func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error { for k := range headers { h := textproto.CanonicalMIMEHeaderKey(k) + if h == "X-Amz-Signedheaders" { + h = 
amzSignedHeaders + } if strings.HasPrefix(h, amzHeaderPrefix) && !strutil.StrListContains(defaultAllowedSTSRequestHeaders, h) && !strutil.StrListContains(c.AllowedSTSHeaderValues, h) { @@ -358,6 +401,21 @@ func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error return nil } +func (c *clientConfig) validateAllowedSTSQueryValues(params url.Values) error { + for k := range params { + h := textproto.CanonicalMIMEHeaderKey(k) + if h == "X-Amz-Signedheaders" { + h = amzSignedHeaders + } + if strings.HasPrefix(h, amzHeaderPrefix) && + !strutil.StrListContains(defaultAllowedSTSRequestHeaders, k) && + !strutil.StrListContains(c.AllowedSTSHeaderValues, k) { + return errors.New("invalid request query param: " + k) + } + } + return nil +} + const pathConfigClientHelpSyn = ` Configure AWS IAM credentials that are used to query instance and role details from the AWS API. ` diff --git a/builtin/credential/aws/path_config_client_test.go b/builtin/credential/aws/path_config_client_test.go index 493d20d9df00..ed9b98ec5dfd 100644 --- a/builtin/credential/aws/path_config_client_test.go +++ b/builtin/credential/aws/path_config_client_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -95,7 +98,6 @@ func TestBackend_pathConfigClient(t *testing.T) { Data: data, Storage: storage, }) - if err != nil { t.Fatal(err) } diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go index 282d277fab54..0c6f8c3398ec 100644 --- a/builtin/credential/aws/path_config_identity.go +++ b/builtin/credential/aws/path_config_identity.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -54,6 +57,11 @@ var ( func (b *backend) pathConfigIdentity() *framework.Path { return &framework.Path{ Pattern: "config/identity$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "iam_alias": { Type: framework.TypeString, @@ -72,9 +80,16 @@ func (b *backend) pathConfigIdentity() *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: pathConfigIdentityRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-integration-configuration", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: pathConfigIdentityUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-integration", + }, }, }, diff --git a/builtin/credential/aws/path_config_identity_test.go b/builtin/credential/aws/path_config_identity_test.go index 19e919fb1179..085cf18b4f7b 100644 --- a/builtin/credential/aws/path_config_identity_test.go +++ b/builtin/credential/aws/path_config_identity_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( diff --git a/builtin/credential/aws/path_config_rotate_root.go b/builtin/credential/aws/path_config_rotate_root.go index 125056234312..0a0e64fcb000 100644 --- a/builtin/credential/aws/path_config_rotate_root.go +++ b/builtin/credential/aws/path_config_rotate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -21,6 +24,12 @@ func (b *backend) pathConfigRotateRoot() *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -97,7 +106,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R // Get the current user's name since it's required to create an access key. // Empty input means get the current user. var getUserInput iam.GetUserInput - getUserRes, err := iamClient.GetUser(&getUserInput) + getUserRes, err := iamClient.GetUserWithContext(ctx, &getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -115,7 +124,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := iamClient.CreateAccessKey(&createAccessKeyInput) + createAccessKeyRes, err := iamClient.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -139,7 +148,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: createAccessKeyRes.AccessKey.AccessKeyId, UserName: getUserRes.User.UserName, } - if _, err := iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { + if _, err := iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { // Include this error in the errs returned by this method. errs = multierror.Append(errs, fmt.Errorf("error deleting newly created but unstored access key ID %s: %s", *createAccessKeyRes.AccessKey.AccessKeyId, err)) } @@ -176,7 +185,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - if _, err = iamClient.DeleteAccessKey(&deleteAccessKeyInput); err != nil { + if _, err = iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { errs = multierror.Append(errs, fmt.Errorf("error deleting old access key ID %s: %w", oldAccessKey, err)) return nil, errs } diff --git a/builtin/credential/aws/path_config_rotate_root_test.go b/builtin/credential/aws/path_config_rotate_root_test.go index 940c6d102270..d457f9787faf 100644 --- a/builtin/credential/aws/path_config_rotate_root_test.go +++ b/builtin/credential/aws/path_config_rotate_root_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -5,6 +8,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" @@ -12,9 +16,23 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +type mockIAMClient awsutil.MockIAM + +func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) { + return (*awsutil.MockIAM)(m).GetUser(input) +} + +func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).CreateAccessKey(input) +} + +func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).DeleteAccessKey(input) +} + func TestPathConfigRotateRoot(t *testing.T) { getIAMClient = func(sess *session.Session) iamiface.IAMAPI { - return &awsutil.MockIAM{ + return &mockIAMClient{ CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{ AccessKey: &iam.AccessKey{ AccessKeyId: aws.String("fizz2"), diff --git a/builtin/credential/aws/path_config_sts.go b/builtin/credential/aws/path_config_sts.go index 3666a9004179..21034f69feff 100644 --- a/builtin/credential/aws/path_config_sts.go +++ b/builtin/credential/aws/path_config_sts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -17,6 +20,11 @@ func (b *backend) pathListSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role-relationships", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathStsList, @@ -31,6 +39,12 @@ func (b *backend) pathListSts() *framework.Path { func (b *backend) pathConfigSts() *framework.Path { return &framework.Path{ Pattern: "config/sts/" + framework.GenericNameRegex("account_id"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role", + }, + Fields: map[string]*framework.FieldSchema{ "account_id": { Type: framework.TypeString, diff --git a/builtin/credential/aws/path_config_tidy_identity_accesslist.go b/builtin/credential/aws/path_config_tidy_identity_accesslist.go index f89c5ab21597..b9d194b5197d 100644 --- a/builtin/credential/aws/path_config_tidy_identity_accesslist.go +++ b/builtin/credential/aws/path_config_tidy_identity_accesslist.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -15,6 +18,11 @@ const ( func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("%s$", "config/tidy/identity-accesslist"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -34,15 +42,29 @@ expiration, before it is removed from the backend storage.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyIdentityAccessListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, }, }, diff --git a/builtin/credential/aws/path_config_tidy_roletag_denylist.go b/builtin/credential/aws/path_config_tidy_roletag_denylist.go index e00404d7ec64..7707ff7c1f85 100644 --- a/builtin/credential/aws/path_config_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_config_tidy_roletag_denylist.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -14,6 +17,11 @@ const ( func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "config/tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, @@ -35,15 +43,29 @@ Defaults to 4320h (180 days).`, Operations: map[logical.Operation]framework.OperationHandler{ logical.CreateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathConfigTidyRoletagDenyListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, }, }, diff --git a/builtin/credential/aws/path_identity_accesslist.go b/builtin/credential/aws/path_identity_accesslist.go index a622b7d8f962..8c7462bd5f6b 100644 --- a/builtin/credential/aws/path_identity_accesslist.go +++ b/builtin/credential/aws/path_identity_accesslist.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -13,6 +16,12 @@ const identityAccessListStorage = "whitelist/identity/" func (b *backend) pathIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/" + framework.GenericNameRegex("instance_id"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + }, + Fields: map[string]*framework.FieldSchema{ "instance_id": { Type: framework.TypeString, @@ -39,6 +48,11 @@ func (b *backend) pathListIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "identity-accesslist/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathAccessListIdentitiesList, diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go index fb8ab4f47492..b66146d1ee67 100644 --- a/builtin/credential/aws/path_login.go +++ b/builtin/credential/aws/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -18,15 +21,19 @@ import ( "github.com/aws/aws-sdk-go/aws" awsClient "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" "github.com/hashicorp/errwrap" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/builtin/credential/aws/pkcs7" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/vault/helper/pkcs7" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/cidrutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -52,6 +59,10 @@ var ( func (b *backend) pathLogin() *framework.Path { return &framework.Path{ Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -86,7 +97,7 @@ significance.`, Type: framework.TypeString, Description: `HTTP method to use for the AWS request when auth_type is iam. This must match what has been signed in the -presigned request. Currently, POST is the only supported value`, +presigned request.`, }, "iam_request_url": { @@ -103,8 +114,8 @@ This must match the request body included in the signature.`, "iam_request_headers": { Type: framework.TypeHeader, Description: `Key/value pairs of headers for use in the -sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either -a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. +sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either +a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. This must at a minimum include the headers over which AWS has included a signature.`, }, "identity": { @@ -242,9 +253,8 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, return "", nil, nil, logical.ErrorResponse("missing iam_http_request_method"), nil } - // In the future, might consider supporting GET - if method != "POST" { - return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil + if method != http.MethodGet && method != http.MethodPost { + return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'GET' and 'POST' are supported"), nil } rawUrlB64 := data.Get("iam_request_url").(string) @@ -259,16 +269,12 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, if err != nil { return "", nil, nil, logical.ErrorResponse("error parsing iam_request_url"), nil } - if parsedUrl.RawQuery != "" { - // Should be no query parameters - return "", nil, nil, logical.ErrorResponse(logical.ErrInvalidRequest.Error()), nil + if err = validateLoginIamRequestUrl(method, parsedUrl); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil } - // TODO: There are two potentially valid cases we're not yet supporting that would - // necessitate this check being changed. First, if we support GET requests. 
- // Second if we support presigned POST requests bodyB64 := data.Get("iam_request_body").(string) - if bodyB64 == "" { - return "", nil, nil, logical.ErrorResponse("missing iam_request_body"), nil + if bodyB64 == "" && method != http.MethodGet { + return "", nil, nil, logical.ErrorResponse("missing iam_request_body which is required for POST requests"), nil } bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64) if err != nil { @@ -286,7 +292,7 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, config, err := b.lockedClientConfigEntry(ctx, req.Storage) if err != nil { - return "", nil, nil, logical.ErrorResponse("error getting configuration"), nil + return "", nil, nil, nil, fmt.Errorf("error getting configuration: %w", err) } endpoint := "https://sts.amazonaws.com" @@ -294,7 +300,7 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, maxRetries := awsClient.DefaultRetryerMaxNumRetries if config != nil { if config.IAMServerIdHeaderValue != "" { - err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue) + err = validateVaultHeaderValue(method, headers, parsedUrl, config.IAMServerIdHeaderValue) if err != nil { return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil } @@ -302,14 +308,37 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, if err = config.validateAllowedSTSHeaderValues(headers); err != nil { return "", nil, nil, logical.ErrorResponse(err.Error()), nil } + if method == http.MethodGet { + if err = config.validateAllowedSTSQueryValues(parsedUrl.Query()); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + } if config.STSEndpoint != "" { endpoint = config.STSEndpoint } if config.MaxRetries >= 0 { maxRetries = config.MaxRetries } + + // Extract and use a regional STS endpoint + // based on the region set in the Authorization header. 
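+ // For example, a credential scope of
+ // "AKIDEXAMPLE/20230719/eu-west-1/sts/aws4_request" in the Authorization
+ // header resolves to region "eu-west-1" and therefore to the endpoint
+ // https://sts.eu-west-1.amazonaws.com (an illustrative region; any region
+ // known to the SDK's endpoint resolver works the same way).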
+ if config.UseSTSRegionFromClient { + clientSpecifiedRegion, err := awsRegionFromHeader(headers.Get("Authorization")) + if err != nil { + return "", nil, nil, logical.ErrorResponse("region missing from Authorization header"), nil + } + + url, err := stsRegionalEndpoint(clientSpecifiedRegion) + if err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + + b.Logger().Debug("use_sts_region_from_client set; using region specified from header", "region", clientSpecifiedRegion) + endpoint = url + } } + b.Logger().Debug("submitting caller identity request", "endpoint", endpoint) callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers) if err != nil { return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil @@ -337,7 +366,7 @@ func (b *backend) pathLoginResolveRoleIam(ctx context.Context, req *logical.Requ // instanceIamRoleARN fetches the IAM role ARN associated with the given // instance profile name -func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName string) (string, error) { +func (b *backend) instanceIamRoleARN(ctx context.Context, iamClient *iam.IAM, instanceProfileName string) (string, error) { if iamClient == nil { return "", fmt.Errorf("nil iamClient") } @@ -345,7 +374,7 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str return "", fmt.Errorf("missing instance profile name") } - profile, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{ + profile, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{ InstanceProfileName: aws.String(instanceProfileName), }) if err != nil { @@ -379,7 +408,7 @@ func (b *backend) validateInstance(ctx context.Context, s logical.Storage, insta return nil, err } - status, err := ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{ + status, err := ec2Client.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{ InstanceIds: []*string{ aws.String(instanceID), }, @@ -721,7 +750,7 @@ func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, } else if iamClient == nil { return nil, fmt.Errorf("received a nil iamClient") } - iamRoleARN, err := b.instanceIamRoleARN(iamClient, iamInstanceProfileEntity.FriendlyName) + iamRoleARN, err := b.instanceIamRoleARN(ctx, iamClient, iamInstanceProfileEntity.FriendlyName) if err != nil { return nil, fmt.Errorf("IAM role ARN could not be fetched: %w", err) } @@ -1284,7 +1313,7 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, _ // If the login was made using the role tag, then max_ttl from tag // is cached in internal data during login and used here to cap the // max_ttl of renewal. 
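+ // parseutil.ParseDurationSecond, unlike time.ParseDuration, also accepts
+ // bare integer values and treats them as seconds, so a stored max TTL such
+ // as "300" parses as 300s rather than failing.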
- rTagMaxTTL, err := time.ParseDuration(req.Auth.Metadata["role_tag_max_ttl"]) + rTagMaxTTL, err := parseutil.ParseDurationSecond(req.Auth.Metadata["role_tag_max_ttl"]) if err != nil { return nil, err } @@ -1505,6 +1534,31 @@ func hasWildcardBind(boundIamPrincipalARNs []string) bool { return false } +// Validate that the iam_request_url passed is valid for the STS request +func validateLoginIamRequestUrl(method string, parsedUrl *url.URL) error { + switch method { + case http.MethodGet: + actions := map[string][]string(parsedUrl.Query())["Action"] + if len(actions) == 0 { + return fmt.Errorf("no action found in request") + } + if len(actions) != 1 { + return fmt.Errorf("found multiple actions") + } + if actions[0] != "GetCallerIdentity" { + return fmt.Errorf("unexpected action parameter, %s", actions[0]) + } + return nil + case http.MethodPost: + if parsedUrl.RawQuery != "" { + return logical.ErrInvalidRequest + } + return nil + default: + return fmt.Errorf("unsupported method, %s", method) + } +} + // Validate that the iam_request_body passed is valid for the STS request func validateLoginIamRequestBody(body string) error { qs, err := url.ParseQuery(body) @@ -1541,11 +1595,11 @@ func hasValuesForEc2Auth(data *framework.FieldData) (bool, bool) { } func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) { - _, hasRequestMethod := data.GetOk("iam_http_request_method") + method, hasRequestMethod := data.GetOk("iam_http_request_method") _, hasRequestURL := data.GetOk("iam_request_url") _, hasRequestBody := data.GetOk("iam_request_body") _, hasRequestHeaders := data.GetOk("iam_request_headers") - return (hasRequestMethod && hasRequestURL && hasRequestBody && hasRequestHeaders), + return (hasRequestMethod && hasRequestURL && (method == http.MethodGet || hasRequestBody) && hasRequestHeaders), (hasRequestMethod || hasRequestURL || hasRequestBody || hasRequestHeaders) } @@ -1599,7 +1653,7 @@ func parseIamArn(iamArn string) (*iamEntity, error) { return &entity, nil } -func validateVaultHeaderValue(headers http.Header, _ *url.URL, requiredHeaderValue string) error { +func validateVaultHeaderValue(method string, headers http.Header, parsedUrl *url.URL, requiredHeaderValue string) error { providedValue := "" for k, v := range headers { if strings.EqualFold(iamServerIdHeader, k) { @@ -1615,25 +1669,29 @@ func validateVaultHeaderValue(headers http.Header, _ *url.URL, requiredHeaderVal if providedValue != requiredHeaderValue { return fmt.Errorf("expected %q but got %q", requiredHeaderValue, providedValue) } - - if authzHeaders, ok := headers["Authorization"]; ok { - // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=... - // We need to extract out the SignedHeaders - re := regexp.MustCompile(".*SignedHeaders=([^,]+)") - authzHeader := strings.Join(authzHeaders, ",") - matches := re.FindSubmatch([]byte(authzHeader)) - if len(matches) < 1 { - return fmt.Errorf("vault header wasn't signed") - } - if len(matches) > 2 { - return fmt.Errorf("found multiple SignedHeaders components") + switch method { + case http.MethodPost: + if authzHeaders, ok := headers["Authorization"]; ok { + // authzHeader looks like AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-awsiam-id, Signature=... 
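+ // (SignedHeaders names every header covered by the request signature, so
+ // finding iamServerIdHeader in that list proves the client actually signed it.)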
+ // We need to extract out the SignedHeaders + re := regexp.MustCompile(".*SignedHeaders=([^,]+)") + authzHeader := strings.Join(authzHeaders, ",") + matches := re.FindSubmatch([]byte(authzHeader)) + if len(matches) < 1 { + return fmt.Errorf("vault header wasn't signed") + } + if len(matches) > 2 { + return fmt.Errorf("found multiple SignedHeaders components") + } + signedHeaders := string(matches[1]) + return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader) } - signedHeaders := string(matches[1]) - return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader) + return fmt.Errorf("missing Authorization header") + case http.MethodGet: + return ensureHeaderIsSigned(parsedUrl.Query().Get(amzSignedHeaders), iamServerIdHeader) + default: + return fmt.Errorf("unsupported method, %s", method) } - // TODO: If we support GET requests, then we need to parse the X-Amz-SignedHeaders - // argument out of the query string and search in there for the header value - return fmt.Errorf("missing Authorization header") } func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) *http.Request { @@ -1832,7 +1890,7 @@ func (b *backend) fullArn(ctx context.Context, e *iamEntity, s logical.Storage) input := iam.GetUserInput{ UserName: aws.String(e.FriendlyName), } - resp, err := client.GetUser(&input) + resp, err := client.GetUserWithContext(ctx, &input) if err != nil { return "", fmt.Errorf("error fetching user %q: %w", e.FriendlyName, err) } @@ -1846,7 +1904,7 @@ func (b *backend) fullArn(ctx context.Context, e *iamEntity, s logical.Storage) input := iam.GetRoleInput{ RoleName: aws.String(e.FriendlyName), } - resp, err := client.GetRole(&input) + resp, err := client.GetRoleWithContext(ctx, &input) if err != nil { return "", fmt.Errorf("error fetching role %q: %w", e.FriendlyName, err) } @@ -1876,6 +1934,43 @@ func getMetadataValue(fromAuth *logical.Auth, forKey string) (string, error) { return "", fmt.Errorf("%q not found in auth metadata", forKey) } +func awsRegionFromHeader(authorizationHeader string) (string, error) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html + // The Authorization header takes the following form. 
+ // Authorization: AWS4-HMAC-SHA256
+ // Credential=AKIAIOSFODNN7EXAMPLE/20230719/us-east-1/sts/aws4_request,
+ // SignedHeaders=content-length;content-type;host;x-amz-date,
+ // Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024
+ //
+ // The credential is in the form of "<your-access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request"
+ fields := strings.Split(authorizationHeader, " ")
+ for _, field := range fields {
+ if strings.HasPrefix(field, "Credential=") {
+ fields := strings.Split(field, "/")
+ if len(fields) < 3 {
+ return "", fmt.Errorf("invalid header format")
+ }
+
+ region := fields[2]
+ return region, nil
+ }
+ }
+
+ return "", fmt.Errorf("invalid header format")
+}
+
+func stsRegionalEndpoint(region string) (string, error) {
+ stsService := sts.EndpointsID
+ resolver := endpoints.DefaultResolver()
+ resolvedEndpoint, err := resolver.EndpointFor(stsService, region,
+ endpoints.STSRegionalEndpointOption,
+ endpoints.StrictMatchingOption)
+ if err != nil {
+ return "", fmt.Errorf("unable to get regional STS endpoint for region: %v", region)
+ }
+ return resolvedEndpoint.URL, nil
+}
+
 const iamServerIdHeader = "X-Vault-AWS-IAM-Server-ID"

 const pathLoginSyn = `
diff --git a/builtin/credential/aws/path_login_test.go b/builtin/credential/aws/path_login_test.go
index 6ffd60ed1494..b3a9c63913c1 100644
--- a/builtin/credential/aws/path_login_test.go
+++ b/builtin/credential/aws/path_login_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package awsauth

 import (
@@ -13,6 +16,8 @@ import (
 "github.com/aws/aws-sdk-go/aws/session"
 "github.com/aws/aws-sdk-go/service/sts"

+ "github.com/stretchr/testify/assert"
+
 "github.com/hashicorp/vault/sdk/logical"
 )

@@ -121,9 +126,129 @@ func TestBackend_pathLogin_parseIamArn(t *testing.T) {
 }
 }

-func TestBackend_validateVaultHeaderValue(t *testing.T) {
+func TestBackend_validateVaultGetRequestValues(t *testing.T) {
+ const canaryHeaderValue = "Vault-Server"
+
+ getHeadersMissing := http.Header{
+ "Host": []string{"Foo"},
+ }
+ getHeadersInvalid := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{"InvalidValue"},
+ }
+ getHeadersValid := http.Header{
+ "Host": []string{"Foo"},
+ iamServerIdHeader: []string{canaryHeaderValue},
+ }
+ getQueryValid := url.Values(map[string][]string{
+ "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"},
+ "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"},
+ amzSignedHeaders: {"host;x-vault-aws-iam-server-id"},
+ "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"},
+ "Action": {"GetCallerIdentity"},
+ "Version": {"2011-06-15"},
+ })
+ getQueryUnsigned := url.Values(map[string][]string{
+ "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"},
+ "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"},
+ amzSignedHeaders: {"host"},
+ "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"},
+ "Action": {"GetCallerIdentity"},
+ "Version": {"2011-06-15"},
+ })
+ getQueryNoAction := url.Values(map[string][]string{
+ "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"},
+ "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"},
+ amzSignedHeaders: {"host;x-vault-aws-iam-server-id"},
+ "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
+ "X-Amz-User-Agent":
{"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Version": {"2011-06-15"}, + }) + getQueryInvalidAction := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetSessionToken"}, + "Version": {"2011-06-15"}, + }) + getQueryMultipleActions := url.Values(map[string][]string{ + "X-Amz-Algorithm": {"AWS4-HMAC-SHA256"}, + "X-Amz-Credential": {"AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request"}, + amzSignedHeaders: {"host;x-vault-aws-iam-server-id"}, + "X-Amz-Signature": {"5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + "X-Amz-User-Agent": {"aws-sdk-go-v2/1.2.0 os/linux lang/go/1.16 md/GOOS/linux md/GOARCH/amd64"}, + "Action": {"GetCallerIdentity;GetSessionToken"}, + "Version": {"2011-06-15"}, + }) + validGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryValid.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + unsignedGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryUnsigned.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + noActionGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryNoAction.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + invalidActionGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" + getQueryInvalidAction.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + multipleActionsGetRequestURL, err := url.Parse("https://sts.amazonaws.com/?" 
+ getQueryMultipleActions.Encode()) + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersMissing, validGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with missing Vault header") + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersInvalid, validGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with invalid Vault header value") + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersValid, unsignedGetRequestURL, canaryHeaderValue) + if err == nil { + t.Error("validated GET request with unsigned Vault header") + } + + err = validateLoginIamRequestUrl(http.MethodGet, noActionGetRequestURL) + if err == nil { + t.Error("validated GET request with no Action parameter") + } + + err = validateLoginIamRequestUrl(http.MethodGet, multipleActionsGetRequestURL) + if err == nil { + t.Error("validated GET request with multiple Action parameters") + } + + err = validateLoginIamRequestUrl(http.MethodGet, invalidActionGetRequestURL) + if err == nil { + t.Error("validated GET request with an invalid Action parameter") + } + + err = validateLoginIamRequestUrl(http.MethodGet, validGetRequestURL) + if err != nil { + t.Errorf("did NOT validate valid GET request: %v", err) + } + + err = validateVaultHeaderValue(http.MethodGet, getHeadersValid, validGetRequestURL, canaryHeaderValue) + if err != nil { + t.Errorf("did NOT validate valid GET request: %v", err) + } +} + +func TestBackend_validateVaultPostRequestValues(t *testing.T) { const canaryHeaderValue = "Vault-Server" - requestURL, err := url.Parse("https://sts.amazonaws.com/") + postRequestURL, err := url.Parse("https://sts.amazonaws.com/") if err != nil { t.Fatalf("error parsing test URL: %v", err) } @@ -146,39 +271,93 @@ func TestBackend_validateVaultHeaderValue(t *testing.T) { iamServerIdHeader: []string{canaryHeaderValue}, "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, } - postHeadersSplit := http.Header{ "Host": []string{"Foo"}, iamServerIdHeader: []string{canaryHeaderValue}, "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, } - err = validateVaultHeaderValue(postHeadersMissing, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersMissing, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with missing Vault header") } - err = validateVaultHeaderValue(postHeadersInvalid, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersInvalid, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with invalid Vault header value") } - err = validateVaultHeaderValue(postHeadersUnsigned, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersUnsigned, postRequestURL, canaryHeaderValue) if err == nil { t.Error("validated POST request with unsigned Vault header") } - err = validateVaultHeaderValue(postHeadersValid, requestURL, canaryHeaderValue) + err = validateVaultHeaderValue(http.MethodPost, postHeadersValid, postRequestURL, 
canaryHeaderValue) if err != nil { t.Errorf("did NOT validate valid POST request: %v", err) } - err = validateVaultHeaderValue(postHeadersSplit, requestURL, canaryHeaderValue) + err = validateLoginIamRequestUrl(http.MethodPost, postRequestURL) + if err != nil { + t.Errorf("did NOT validate valid POST request: %v", err) + } + + err = validateVaultHeaderValue(http.MethodPost, postHeadersSplit, postRequestURL, canaryHeaderValue) if err != nil { t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err) } } +// TestBackend_pathLogin_NoClientConfig covers logging in via IAM auth when the +// client config does not exist. This is a regression test to cover potential +// panics when referencing the potentially-nil config in the login handler. For +// details see https://github.com/hashicorp/vault/issues/23361. +func TestBackend_pathLogin_NoClientConfig(t *testing.T) { + storage := new(logical.InmemStorage) + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Intentionally left out the client configuration + + roleEntry := &awsRoleEntry{ + RoleID: "foo", + Version: currentRoleStorageVersion, + AuthType: iamAuthType, + } + err = b.setRole(context.Background(), storage, testValidRoleName, roleEntry) + if err != nil { + t.Fatal(err) + } + + loginData, err := defaultLoginData() + if err != nil { + t.Fatal(err) + } + loginRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{}, + } + resp, err := b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatalf("expected nil error, got: %v", err) + } + if !resp.IsError() { + t.Fatalf("expected error response, got: %+v", resp) + } +} + // TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers, // supporting both base64 encoded string and JSON headers func TestBackend_pathLogin_IAMHeaders(t *testing.T) { @@ -622,6 +801,58 @@ func TestBackend_defaultAliasMetadata(t *testing.T) { } } +func TestRegionFromHeader(t *testing.T) { + tcs := map[string]struct { + header string + expectedRegion string + expectedSTSEndpoint string + }{ + "us-east-1": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-east-1", + expectedSTSEndpoint: "https://sts.us-east-1.amazonaws.com", + }, + "us-west-2": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-west-2/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-west-2", + expectedSTSEndpoint: "https://sts.us-west-2.amazonaws.com", + }, + "ap-northeast-3": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/ap-northeast-3/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "ap-northeast-3", + expectedSTSEndpoint: "https://sts.ap-northeast-3.amazonaws.com", + }, + "us-gov-east-1": { + header: "AWS4-HMAC-SHA256 Credential=AAAAAAAAAAAAAAAAAAAA/20230719/us-gov-east-1/sts/aws4_request, 
SignedHeaders=content-length;content-type;host;x-amz-date, Signature=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + expectedRegion: "us-gov-east-1", + expectedSTSEndpoint: "https://sts.us-gov-east-1.amazonaws.com", + }, + } + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + region, err := awsRegionFromHeader(tc.header) + assert.NoError(t, err) + assert.Equal(t, tc.expectedRegion, region) + + stsEndpoint, err := stsRegionalEndpoint(region) + assert.NoError(t, err) + assert.Equal(t, tc.expectedSTSEndpoint, stsEndpoint) + }) + } + + t.Run("invalid-header", func(t *testing.T) { + region, err := awsRegionFromHeader("this-is-an-invalid-header/foobar") + assert.EqualError(t, err, "invalid header format") + assert.Empty(t, region) + }) + + t.Run("invalid-region", func(t *testing.T) { + endpoint, err := stsRegionalEndpoint("fake-region-1") + assert.EqualError(t, err, "unable to get regional STS endpoint for region: fake-region-1") + assert.Empty(t, endpoint) + }) +} + func defaultLoginData() (map[string]interface{}, error) { awsSession, err := session.NewSession() if err != nil { diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go index 12a4c7d0f2d9..ae725e571832 100644 --- a/builtin/credential/aws/path_role.go +++ b/builtin/credential/aws/path_role.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -20,6 +23,12 @@ var currentRoleStorageVersion = 3 func (b *backend) pathRole() *framework.Path { p := &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-role", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -78,6 +87,9 @@ auth_type is ec2 or inferred_entity_type is ec2_instance.`, given instance IDs. Can be a list or comma-separated string of EC2 instance IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "If set, defines a constraint on the EC2 instances to have one of the given instance IDs. A list of EC2 instance IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.", + }, }, "resolve_aws_unique_ids": { Type: framework.TypeBool, @@ -199,6 +211,11 @@ func (b *backend) pathListRole() *framework.Path { return &framework.Path{ Pattern: "role/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-roles", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -214,6 +231,11 @@ func (b *backend) pathListRoles() *framework.Path { return &framework.Path{ Pattern: "roles/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "auth-roles2", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -373,7 +395,7 @@ func (b *backend) initialize(ctx context.Context, req *logical.InitializationReq return nil } -// awsVersion stores info about the the latest aws version that we have +// awsVersion stores info about the latest aws version that we have // upgraded to. 
type awsVersion struct {
 Version int `json:"version"`
}
diff --git a/builtin/credential/aws/path_role_tag.go b/builtin/credential/aws/path_role_tag.go
index 15927a82a2bb..3584f08f26a4 100644
--- a/builtin/credential/aws/path_role_tag.go
+++ b/builtin/credential/aws/path_role_tag.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package awsauth

 import (
@@ -11,6 +14,7 @@ import (
 "strings"
 "time"

+ "github.com/hashicorp/go-secure-stdlib/parseutil"
 "github.com/hashicorp/go-secure-stdlib/strutil"
 uuid "github.com/hashicorp/go-uuid"
 "github.com/hashicorp/vault/sdk/framework"
@@ -23,6 +27,12 @@ const roleTagVersion = "v1"
 func (b *backend) pathRoleTag() *framework.Path {
 return &framework.Path{
 Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixAWS,
+ OperationSuffix: "role-tag",
+ },
+
 Fields: map[string]*framework.FieldSchema{
 "role": {
 Type: framework.TypeString,
@@ -338,7 +348,7 @@ func (b *backend) parseAndVerifyRoleTagValue(ctx context.Context, s logical.Stor
 return nil, err
 }
 case strings.HasPrefix(tagItem, "t="):
- rTag.MaxTTL, err = time.ParseDuration(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t=")))
+ rTag.MaxTTL, err = parseutil.ParseDurationSecond(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t=")))
 if err != nil {
 return nil, err
 }
diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go
index b8e824a9d27c..3d3fbc3c6f71 100644
--- a/builtin/credential/aws/path_role_test.go
+++ b/builtin/credential/aws/path_role_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package awsauth

 import (
@@ -301,7 +304,6 @@ func TestBackend_pathIam(t *testing.T) {
 Data: data,
 Storage: storage,
 })
-
 if err != nil {
 t.Fatal(err)
 }
diff --git a/builtin/credential/aws/path_roletag_denylist.go b/builtin/credential/aws/path_roletag_denylist.go
index 19520aab2f59..131ea717058e 100644
--- a/builtin/credential/aws/path_roletag_denylist.go
+++ b/builtin/credential/aws/path_roletag_denylist.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package awsauth

 import (
@@ -12,6 +15,12 @@ import (
 func (b *backend) pathRoletagDenyList() *framework.Path {
 return &framework.Path{
 Pattern: "roletag-denylist/(?P<role_tag>.*)",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixAWS,
+ OperationSuffix: "role-tag-deny-list",
+ },
+
 Fields: map[string]*framework.FieldSchema{
 "role_tag": {
 Type: framework.TypeString,
@@ -42,6 +51,11 @@ func (b *backend) pathListRoletagDenyList() *framework.Path {
 return &framework.Path{
 Pattern: "roletag-denylist/?",

+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixAWS,
+ OperationSuffix: "role-tag-deny-lists",
+ },
+
 Operations: map[logical.Operation]framework.OperationHandler{
 logical.ListOperation: &framework.PathOperation{
 Callback: b.pathRoletagDenyListsList,
diff --git a/builtin/credential/aws/path_tidy_identity_accesslist.go b/builtin/credential/aws/path_tidy_identity_accesslist.go
index 9455cc0d3df1..acfff00b1a90 100644
--- a/builtin/credential/aws/path_tidy_identity_accesslist.go
+++ b/builtin/credential/aws/path_tidy_identity_accesslist.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -15,6 +18,13 @@ import ( func (b *backend) pathTidyIdentityAccessList() *framework.Path { return &framework.Path{ Pattern: "tidy/identity-accesslist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + OperationVerb: "tidy", + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/path_tidy_roletag_denylist.go b/builtin/credential/aws/path_tidy_roletag_denylist.go index 80c9dd8afea7..665cb0319f1e 100644 --- a/builtin/credential/aws/path_tidy_roletag_denylist.go +++ b/builtin/credential/aws/path_tidy_roletag_denylist.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package awsauth import ( @@ -19,6 +22,13 @@ const ( func (b *backend) pathTidyRoletagDenyList() *framework.Path { return &framework.Path{ Pattern: "tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag-deny-list", + OperationVerb: "tidy", + }, + Fields: map[string]*framework.FieldSchema{ "safety_buffer": { Type: framework.TypeDurationSecond, diff --git a/builtin/credential/aws/pkcs7/verify_dsa_test.go b/builtin/credential/aws/pkcs7/verify_dsa_test.go deleted file mode 100644 index 857ea4dbf181..000000000000 --- a/builtin/credential/aws/pkcs7/verify_dsa_test.go +++ /dev/null @@ -1,181 +0,0 @@ -//go:build go1.11 || go1.12 || go1.13 || go1.14 || go1.15 - -package pkcs7 - -import ( - "crypto/x509" - "encoding/pem" - "io/ioutil" - "os" - "os/exec" - "testing" -) - -func TestVerifyEC2(t *testing.T) { - fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) - p7, err := Parse(fixture.Input) - if err != nil { - t.Errorf("Parse encountered unexpected error: %v", err) - } - p7.Certificates = []*x509.Certificate{fixture.Certificate} - if err := p7.Verify(); err != nil { - t.Errorf("Verify failed with error: %v", err) - } -} - -var EC2IdentityDocumentFixture = ` ------BEGIN PKCS7----- -MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA -JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh -eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 -cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh -bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs -bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg -OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK -ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj -aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy -YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA -AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n -dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi -IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B -CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG -CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w -LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA -AAAAAA== ------END PKCS7----- ------BEGIN CERTIFICATE----- -MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw -FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD -VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z -ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u 
-IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl -cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e -ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 -VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P -hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j -k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U -hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF -lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf -MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW -MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw -vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw -7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K ------END CERTIFICATE-----` - -func TestDSASignWithOpenSSLAndVerify(t *testing.T) { - content := []byte(` -A ship in port is safe, -but that's not what ships are built for. --- Grace Hopper`) - // write the content to a temp file - tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpContentFile.Name(), content, 0o755) - - // write the signer cert to a temp file - tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755) - - // write the signer key to a temp file - tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0o755) - - tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") - if err != nil { - t.Fatal(err) - } - // call openssl to sign the content - opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", - "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), - "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), - "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") - out, err := opensslCMD.CombinedOutput() - if err != nil { - t.Fatalf("openssl command failed with %s: %s", err, out) - } - - // verify the signed content - pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) - if err != nil { - t.Fatal(err) - } - t.Logf("%s\n", pemSignature) - derBlock, _ := pem.Decode(pemSignature) - if derBlock == nil { - t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) - } - p7, err := Parse(derBlock.Bytes) - if err != nil { - t.Fatalf("Parse encountered unexpected error: %v", err) - } - if err := p7.Verify(); err != nil { - t.Fatalf("Verify failed with error: %v", err) - } - os.Remove(tmpSignerCertFile.Name()) // clean up - os.Remove(tmpSignerKeyFile.Name()) // clean up - os.Remove(tmpContentFile.Name()) // clean up -} - -var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- -MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS -PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl -pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith -1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L -vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 -zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo -g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= ------END PRIVATE KEY-----`) - -var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- 
-MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV -bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD -VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du -MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r -bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE -ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC -AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD -Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE -exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii -Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 -V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI -puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl -nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp -rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt -1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT -ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G -CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 -qzy/7yePTlhlpj+ahMM= ------END CERTIFICATE-----`) - -type DSATestFixture struct { - Input []byte - Certificate *x509.Certificate -} - -func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { - var result DSATestFixture - var derBlock *pem.Block - pemBlock := []byte(testPEMBlock) - for { - derBlock, pemBlock = pem.Decode(pemBlock) - if derBlock == nil { - break - } - switch derBlock.Type { - case "PKCS7": - result.Input = derBlock.Bytes - case "CERTIFICATE": - result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) - } - } - - return result -} diff --git a/builtin/credential/cert/backend.go b/builtin/credential/cert/backend.go index 81dba0a80fd5..d40a8de6026f 100644 --- a/builtin/credential/cert/backend.go +++ b/builtin/credential/cert/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -13,31 +16,35 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" + lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/logical" ) +const ( + operationPrefixCert = "cert" + trustedCertPath = "cert/" + + defaultRoleCacheSize = 200 + defaultOcspMaxRetries = 4 + maxRoleCacheSize = 10000 +) + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { return nil, err } - bConf, err := b.Config(ctx, conf.StorageView) - if err != nil { - return nil, err - } - if bConf != nil { - b.updatedConfig(bConf) - } - if err := b.lockThenpopulateCRLs(ctx, conf.StorageView); err != nil { - return nil, err - } return b, nil } func Backend() *backend { - var b backend + // ignoring the error as it only can occur with <= 0 size + cache, _ := lru.New[string, *trusted](defaultRoleCacheSize) + b := backend{ + trustedCache: cache, + } b.Backend = &framework.Backend{ Help: backendHelp, PathsSpecial: &logical.Paths{ @@ -53,16 +60,24 @@ func Backend() *backend { pathListCRLs(&b), pathCRLs(&b), }, - AuthRenew: b.pathLoginRenew, - Invalidate: b.invalidate, - BackendType: logical.TypeCredential, - PeriodicFunc: b.updateCRLs, + AuthRenew: b.loginPathWrapper(b.pathLoginRenew), + Invalidate: b.invalidate, + BackendType: logical.TypeCredential, + InitializeFunc: b.initialize, + PeriodicFunc: b.updateCRLs, } b.crlUpdateMutex = &sync.RWMutex{} return &b } +type trusted struct { + pool *x509.CertPool + trusted []*ParsedCert + trustedNonCAs []*ParsedCert + ocspConf *ocsp.VerifyConfig +} + type backend struct { *framework.Backend MapCertId *framework.PathMap @@ -72,6 +87,28 @@ type backend struct { ocspClientMutex sync.RWMutex ocspClient *ocsp.Client configUpdated atomic.Bool + + trustedCache *lru.Cache[string, *trusted] + trustedCacheDisabled atomic.Bool +} + +func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error { + bConf, err := b.Config(ctx, req.Storage) + if err != nil { + b.Logger().Error(fmt.Sprintf("failed to load backend configuration: %v", err)) + return err + } + + if bConf != nil { + b.updatedConfig(bConf) + } + + if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { + b.Logger().Error(fmt.Sprintf("failed to populate CRLs: %v", err)) + return err + } + + return nil } func (b *backend) invalidate(_ context.Context, key string) { @@ -83,6 +120,7 @@ func (b *backend) invalidate(_ context.Context, key string) { case key == "config": b.configUpdated.Store(true) } + b.flushTrustedCache() } func (b *backend) initOCSPClient(cacheSize int) { @@ -91,12 +129,24 @@ func (b *backend) initOCSPClient(cacheSize int) { }, cacheSize) } -func (b *backend) updatedConfig(config *config) error { +func (b *backend) updatedConfig(config *config) { b.ocspClientMutex.Lock() defer b.ocspClientMutex.Unlock() + + switch { + case config.RoleCacheSize < 0: + // Just to clean up memory + b.trustedCacheDisabled.Store(true) + b.trustedCache.Purge() + case config.RoleCacheSize == 0: + config.RoleCacheSize = defaultRoleCacheSize + fallthrough + default: + b.trustedCache.Resize(config.RoleCacheSize) + b.trustedCacheDisabled.Store(false) + } b.initOCSPClient(config.OcspCacheSize) b.configUpdated.Store(false) - return nil } func (b *backend) fetchCRL(ctx context.Context, storage logical.Storage, name string, crl *CRLInfo) 
error { @@ -146,6 +196,12 @@ func (b *backend) storeConfig(ctx context.Context, storage logical.Storage, conf return nil } +func (b *backend) flushTrustedCache() { + if b.trustedCache != nil { // defensive + b.trustedCache.Purge() + } +} + const backendHelp = ` The "cert" credential provider allows authentication using TLS client certificates. A client connects to Vault and uses diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index c56ecefadf39..6260c368e8c0 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cert import ( "context" + "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -17,6 +21,7 @@ import ( mathrand "math/rand" "net" "net/http" + "net/http/httptest" "net/url" "os" "path/filepath" @@ -25,24 +30,22 @@ import ( "time" "github.com/go-test/deep" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/go-sockaddr" - - "golang.org/x/net/http2" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - - rootcerts "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/vault/builtin/logical/pki" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/tokenutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" + "golang.org/x/net/http2" ) const ( @@ -250,9 +253,6 @@ func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { // Enable PKI secret engine and Cert auth method coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cert": Factory, }, @@ -445,7 +445,7 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { } // Create a new api client with the desired TLS configuration - newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig) + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{ "name": "myvault-dot-com", @@ -476,9 +476,6 @@ func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { func TestBackend_MetadataBasedACLPolicy(t *testing.T) { // Start cluster with cert auth method enabled coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cert": Factory, }, @@ -595,7 +592,7 @@ path "kv/ext/{{identity.entity.aliases.%s.metadata.2-1-1-1}}" { } // Create a new api client with the desired TLS configuration - newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig) + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) var secret *api.Secret @@ -1103,6 +1100,11 @@ func testFactory(t *testing.T) logical.Backend { if err != nil { t.Fatalf("error: %s", err) } + if err := b.Initialize(context.Background(), 
&logical.InitializationRequest{ + Storage: storage, + }); err != nil { + t.Fatalf("error: %s", err) + } return b } @@ -1298,6 +1300,12 @@ func TestBackend_ext_singleCert(t *testing.T) { testAccStepLoginInvalid(t, connState), testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "hex:2.5.29.17:*87047F000002*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "hex:2.5.29.17:*87047F000001*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.5.29.17:"}, false), + testAccStepLogin(t, connState), testAccStepReadConfig(t, config{EnableIdentityAliasMetadata: false}, connState), testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, false), @@ -1960,6 +1968,27 @@ func testAccStepCertWithExtraParams(t *testing.T, name string, cert []byte, poli } } +func testAccStepReadCertPolicy(t *testing.T, name string, expectError bool, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "certs/" + name, + ErrorOk: expectError, + Data: nil, + Check: func(resp *logical.Response) error { + if (resp == nil || len(resp.Data) == 0) && expectError { + return fmt.Errorf("expected error but received nil") + } + for key, expectedValue := range expected { + actualValue := resp.Data[key] + if expectedValue != actualValue { + return fmt.Errorf("Expected to get [%v]=[%v] but read [%v]=[%v] from server for certs/%v: %v", key, expectedValue, key, actualValue, name, resp) + } + } + return nil + }, + } +} + func testAccStepCertLease( t *testing.T, name string, cert []byte, policies string, ) logicaltest.TestStep { @@ -2032,6 +2061,11 @@ func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, if err != nil { return tls.ConnectionState{}, err } + + return testConnStateWithCert(cert, rootCAs) +} + +func testConnStateWithCert(cert tls.Certificate, rootCAs *x509.CertPool) (tls.ConnectionState, error) { listenConf := &tls.Config{ Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequestClientCert, @@ -2311,3 +2345,596 @@ func TestBackend_CertUpgrade(t *testing.T) { t.Fatal(diff) } } + +// TestOCSPFailOpenWithBadIssuer validates we fail all different types of cert auth +// login scenarios if we encounter an OCSP verification error +func TestOCSPFailOpenWithBadIssuer(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + badCa, badCaKey := createCa(t) + + // Setup an OCSP handler + ocspHandler := func(ca *x509.Certificate, caKey crypto.Signer) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + 
SerialNumber: leafTLS.Leaf.SerialNumber,
+ ThisUpdate: now.Add(-1 * time.Hour),
+ NextUpdate: now.Add(30 * time.Minute),
+ Status: ocsp.Good,
+ }
+ response, err := ocsp.CreateResponse(ca, ca, ocspRes, caKey)
+ if err != nil {
+ t.Fatalf("failed generating OCSP response: %v", err)
+ }
+ _, _ = w.Write(response)
+ })
+ }
+ goodTs := httptest.NewServer(ocspHandler(caTLS.Leaf, caTLS.PrivateKey.(crypto.Signer)))
+ badTs := httptest.NewServer(ocspHandler(badCa, badCaKey))
+ defer goodTs.Close()
+ defer badTs.Close()
+
+ steps := []logicaltest.TestStep{
+ // step 1/2: This should fail as we get a response from a bad root, even with ocsp_fail_open set to true
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{badTs.URL},
+ "ocsp_query_all_servers": false,
+ "ocsp_fail_open": true,
+ }),
+ testAccStepLoginInvalid(t, connState),
+ // step 3/4: This should fail as we query all the servers, one of which returns a response with an invalid signature
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{goodTs.URL, badTs.URL},
+ "ocsp_query_all_servers": true,
+ "ocsp_fail_open": true,
+ }),
+ testAccStepLoginInvalid(t, connState),
+ // step 5/6: This should fail as we will query the OCSP server with the bad root key first.
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{badTs.URL, goodTs.URL},
+ "ocsp_query_all_servers": false,
+ "ocsp_fail_open": true,
+ }),
+ testAccStepLoginInvalid(t, connState),
+ // step 7/8: This should pass as we will only query the first server, which has the valid root signature
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{goodTs.URL, badTs.URL},
+ "ocsp_query_all_servers": false,
+ "ocsp_fail_open": true,
+ }),
+ testAccStepLogin(t, connState),
+ }
+
+ // Set up a new factory every time to prevent OCSP caching from influencing the test
+ for i := 0; i < len(steps); i += 2 {
+ setup := i
+ execute := i + 1
+ t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ CredentialBackend: testFactory(t),
+ Steps: []logicaltest.TestStep{steps[setup], steps[execute]},
+ })
+ })
+ }
+}
+
+// TestOCSPWithMixedValidResponses validates the expected behavior of multiple OCSP servers configured,
+// with and without ocsp_query_all_servers enabled or disabled.
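+// (With ocsp_query_all_servers disabled, the first responder consulted
+// decides the outcome; with it enabled, every responder is queried and a
+// Revoked answer takes precedence over a Good one, as the steps below show.)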
+func TestOCSPWithMixedValidResponses(t *testing.T) {
+ caFile := "test-fixtures/root/rootcacert.pem"
+ pemCa, err := os.ReadFile(caFile)
+ require.NoError(t, err, "failed reading in file %s", caFile)
+ caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem")
+ leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem")
+
+ rootConfig := &rootcerts.Config{
+ CAFile: caFile,
+ }
+ rootCAs, err := rootcerts.LoadCACerts(rootConfig)
+ connState, err := testConnStateWithCert(leafTLS, rootCAs)
+ require.NoError(t, err, "error testing connection state: %v", err)
+
+ // Setup an OCSP handler
+ ocspHandler := func(status int) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ ocspRes := ocsp.Response{
+ SerialNumber: leafTLS.Leaf.SerialNumber,
+ ThisUpdate: now.Add(-1 * time.Hour),
+ NextUpdate: now.Add(30 * time.Minute),
+ Status: status,
+ }
+ response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer))
+ if err != nil {
+ t.Fatalf("failed generating OCSP response: %v", err)
+ }
+ _, _ = w.Write(response)
+ })
+ }
+ goodTs := httptest.NewServer(ocspHandler(ocsp.Good))
+ revokeTs := httptest.NewServer(ocspHandler(ocsp.Revoked))
+ defer goodTs.Close()
+ defer revokeTs.Close()
+
+ steps := []logicaltest.TestStep{
+ // step 1/2: This should pass as we will query the first server and get a valid good response, not testing
+ // the second configured server
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{goodTs.URL, revokeTs.URL},
+ "ocsp_query_all_servers": false,
+ }),
+ testAccStepLogin(t, connState),
+ // step 3/4: This should fail as we will query the revoking OCSP server first and get a revoke response
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false,
+ map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{revokeTs.URL, goodTs.URL},
+ "ocsp_query_all_servers": false,
+ }),
+ testAccStepLoginInvalid(t, connState),
+ // step 5/6: This should fail as we will query all the OCSP servers and prefer the revoke response
+ testAccStepCertWithExtraParams(t, "web", pemCa, "foo",
+ allowed{names: "cert.example.com"}, false, map[string]interface{}{
+ "ocsp_enabled": true,
+ "ocsp_servers_override": []string{goodTs.URL, revokeTs.URL},
+ "ocsp_query_all_servers": true,
+ }),
+ testAccStepLoginInvalid(t, connState),
+ }
+
+ // Set up a new factory every time to prevent OCSP caching from influencing the test
+ for i := 0; i < len(steps); i += 2 {
+ setup := i
+ execute := i + 1
+ t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) {
+ logicaltest.Test(t, logicaltest.TestCase{
+ CredentialBackend: testFactory(t),
+ Steps: []logicaltest.TestStep{steps[setup], steps[execute]},
+ })
+ })
+ }
+}
+
+// TestOCSPFailOpenWithGoodResponse validates the expected behavior with multiple OCSP servers configured,
+// one that returns a Good response while the other is not available, along with ocsp_fail_open in multiple modes
+func TestOCSPFailOpenWithGoodResponse(t *testing.T) {
+ caFile := "test-fixtures/root/rootcacert.pem"
+ pemCa, err := os.ReadFile(caFile)
+ require.NoError(t, err, "failed reading in file %s", caFile)
+ caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem")
+ leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem",
"test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Setup an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + steps := []logicaltest.TestStep{ + // Step 1/2 With no proper responses from any OCSP server and fail_open to true, we should pass validation + // as fail_open is true + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{"http://127.0.0.1:30000", "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 3/4 With no proper responses from any OCSP server and fail_open to false we should fail validation + // as fail_open is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{"http://127.0.0.1:30000", "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single positive response, query all servers set to false and fail open true, pass validation + // as query all servers is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 7/8 With a single positive response, query all servers set to false and fail open false, pass validation + // as query all servers is false + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 9/10 With a single positive response, query all servers set to true and fail open true, pass validation + // as fail open is true + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 11/12 With a single positive response, query all servers set to true and fail open false, fail validation + // as not all servers agree + 
testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Setup a new factory everytime to avoid OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPFailOpenWithRevokeResponse validates the expected behavior with multiple OCSP servers configured +// one that returns a Revoke response the other is not available, along with the ocsp_fail_open in multiple modes +func TestOCSPFailOpenWithRevokeResponse(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Setup an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Revoked, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + // With no OCSP servers available, make sure that we behave as we expect + steps := []logicaltest.TestStep{ + // Step 1/2 With a single revoke response, query all servers set to false and fail open true, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 3/4 With a single revoke response, query all servers set to false and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single revoke response, query all servers set to true and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + 
"ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 7/8 With a single revoke response, query all servers set to true and fail open true, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Setup a new factory everytime to avoid OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOCSPFailOpenWithUnknownResponse validates the expected behavior with multiple OCSP servers configured +// one that returns an Unknown response the other is not available, along with the ocsp_fail_open in multiple modes +func TestOCSPFailOpenWithUnknownResponse(t *testing.T) { + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + caTLS := loadCerts(t, caFile, "test-fixtures/root/rootcakey.pem") + leafTLS := loadCerts(t, "test-fixtures/keys/cert.pem", "test-fixtures/keys/key.pem") + + rootConfig := &rootcerts.Config{ + CAFile: caFile, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + connState, err := testConnStateWithCert(leafTLS, rootCAs) + require.NoError(t, err, "error testing connection state: %v", err) + + // Setup an OCSP handler + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafTLS.Leaf.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Unknown, + } + response, err := ocsp.CreateResponse(caTLS.Leaf, caTLS.Leaf, ocspRes, caTLS.PrivateKey.(crypto.Signer)) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + // With no OCSP servers available, make sure that we behave as we expect + steps := []logicaltest.TestStep{ + // Step 1/2 With a single unknown response, query all servers set to false and fail open true, pass validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 3/4 With a single unknown response, query all servers set to false and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": false, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + // Step 5/6 With a single unknown response, query all servers set to true and fail open true, fail 
validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", allowed{names: "cert.example.com"}, false, + map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": true, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLogin(t, connState), + // Step 7/8 With a single unknown response, query all servers set to true and fail open false, fail validation + testAccStepCertWithExtraParams(t, "web", pemCa, "foo", + allowed{names: "cert.example.com"}, false, map[string]interface{}{ + "ocsp_enabled": true, + "ocsp_servers_override": []string{ts.URL, "http://127.0.0.1:30001"}, + "ocsp_fail_open": false, + "ocsp_query_all_servers": true, + "ocsp_max_retries": 0, + }), + testAccStepLoginInvalid(t, connState), + } + + // Setup a new factory everytime to avoid OCSP caching from influencing the test + for i := 0; i < len(steps); i += 2 { + setup := i + execute := i + 1 + t.Run(fmt.Sprintf("steps-%d-%d", setup+1, execute+1), func(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{steps[setup], steps[execute]}, + }) + }) + } +} + +// TestOcspMaxRetriesUpdate verifies that the ocsp_max_retries field is properly initialized +// with our default value of 4, legacy roles have it initialized automatically to 4 and we +// can properly store and retrieve updates to the field. +func TestOcspMaxRetriesUpdate(t *testing.T) { + storage := &logical.InmemStorage{} + ctx := context.Background() + + lb, err := Factory(context.Background(), &logical.BackendConfig{ + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 300 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: storage, + }) + require.NoError(t, err, "failed creating backend") + + caFile := "test-fixtures/root/rootcacert.pem" + pemCa, err := os.ReadFile(caFile) + require.NoError(t, err, "failed reading in file %s", caFile) + + data := map[string]interface{}{ + "certificate": string(pemCa), + "display_name": "test", + } + + // Test initial creation of role sets ocsp_max_retries to a default of 4 + _, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/test", + Data: data, + Storage: storage, + }) + require.NoError(t, err, "failed initial role creation request") + + resp, err := lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/test", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 4, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations") + + // Test we can update the field and read it back + data["ocsp_max_retries"] = 1 + _, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/test", + Data: data, + Storage: storage, + }) + require.NoError(t, err, "failed updating role request") + + resp, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/test", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 1, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations on update") + + // Verify existing storage entries get updated with a value of 4 + entry := &logical.StorageEntry{ + Key: "cert/legacy", + Value: []byte(`{"token_bound_cidrs":null,"token_explicit_max_ttl":0,"token_max_ttl":0, + 
"token_no_default_policy":false,"token_num_uses":0,"token_period":0, + "token_policies":null,"token_type":0,"token_ttl":0,"Name":"test", + "Certificate":"-----BEGIN CERTIFICATE-----\nMIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL\nBQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw\nMjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7\nQ7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0\nz2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x\nAHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb\n6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH\nSWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G\nA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx\n7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc\nBgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA\nwHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2\nU946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa\ncNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N\nScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ\nt2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk\nzehNe5dFTjFpylg1o6b8Ow==\n-----END CERTIFICATE-----\n", + "DisplayName":"test","Policies":null,"TTL":0,"MaxTTL":0,"Period":0, + "AllowedNames":null,"AllowedCommonNames":null,"AllowedDNSSANs":null, + "AllowedEmailSANs":null,"AllowedURISANs":null,"AllowedOrganizationalUnits":null, + "RequiredExtensions":null,"AllowedMetadataExtensions":null,"BoundCIDRs":null, + "OcspCaCertificates":"","OcspEnabled":false,"OcspServersOverride":null, + "OcspFailOpen":false,"OcspQueryAllServers":false}`), + } + err = storage.Put(ctx, entry) + require.NoError(t, err, "failed putting legacy storage entry") + + resp, err = lb.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/legacy", + Storage: storage, + }) + require.NoError(t, err, "failed reading role request") + require.NotNil(t, resp) + require.Equal(t, 4, resp.Data["ocsp_max_retries"], "ocsp config didn't match expectations on legacy entry") +} + +func loadCerts(t *testing.T, certFile, certKey string) tls.Certificate { + caTLS, err := tls.LoadX509KeyPair(certFile, certKey) + require.NoError(t, err, "failed reading ca/key files") + + caTLS.Leaf, err = x509.ParseCertificate(caTLS.Certificate[0]) + require.NoError(t, err, "failed parsing certificate from file %s", certFile) + + return caTLS +} + +func createCa(t *testing.T) (*x509.Certificate, *ecdsa.PrivateKey) { + rootCaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated root key for CA") + + // Validate we reject CSRs that contain CN that aren't in the original order + cr := &x509.Certificate{ + Subject: pkix.Name{CommonName: "Root Cert"}, + SerialNumber: big.NewInt(1), + IsCA: true, + BasicConstraintsValid: true, + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}, + } + rootCaBytes, err := x509.CreateCertificate(rand.Reader, cr, cr, &rootCaKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating root ca") + + rootCa, err := x509.ParseCertificate(rootCaBytes) + require.NoError(t, err, "failed parsing root ca") + + return rootCa, 
rootCaKey +} diff --git a/builtin/credential/cert/cli.go b/builtin/credential/cert/cli.go index 4a470c89616f..2e7a8b8e22e3 100644 --- a/builtin/credential/cert/cli.go +++ b/builtin/credential/cert/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cert import ( diff --git a/builtin/credential/cert/cmd/cert/main.go b/builtin/credential/cert/cmd/cert/main.go index 09018ec3f040..45eb75d36caf 100644 --- a/builtin/credential/cert/cmd/cert/main.go +++ b/builtin/credential/cert/cmd/cert/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: cert.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index 13f6da78c495..0e056917874a 100644 --- a/builtin/credential/cert/path_certs.go +++ b/builtin/credential/cert/path_certs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -7,6 +10,7 @@ import ( "strings" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/sdk/framework" @@ -18,22 +22,33 @@ func pathListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificates", + Navigation: true, + ItemType: "Certificate", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathCertList, }, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "Certificate", - }, } } func pathCerts(b *backend) *framework.Path { p := &framework.Path{ Pattern: "certs/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificate", + Action: "Create", + ItemType: "Certificate", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -63,6 +78,9 @@ Must be x509 PEM encoded.`, Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of OCSP server addresses. 
If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.", + }, }, "ocsp_fail_open": { Type: framework.TypeBool, @@ -74,6 +92,16 @@ from the AuthorityInformationAccess extension on the certificate being inspected Default: false, Description: "If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.", }, + "ocsp_this_update_max_age": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: "If greater than 0, specifies the maximum age of an OCSP thisUpdate field to avoid accepting old responses without a nextUpdate field.", + }, + "ocsp_max_retries": { + Type: framework.TypeInt, + Default: 4, + Description: "The number of retries the OCSP client should attempt per query.", + }, "allowed_names": { Type: framework.TypeCommaStringSlice, Description: `A comma-separated list of names. @@ -81,7 +109,8 @@ At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.", }, }, @@ -90,7 +119,8 @@ allowed_email_sans, allowed_uri_sans.`, Description: `A comma-separated list of names. At least one must exist in the Common Name. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of names. At least one must exist in the Common Name. Supports globbing.", }, }, @@ -99,8 +129,9 @@ At least one must exist in the Common Name. Supports globbing.`, Description: `A comma-separated list of DNS names. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed DNS SANs", - Group: "Constraints", + Name: "Allowed DNS SANs", + Group: "Constraints", + Description: "A list of DNS names. At least one must exist in the SANs. Supports globbing.", }, }, @@ -109,8 +140,9 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Email Addresses. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed Email SANs", - Group: "Constraints", + Name: "Allowed Email SANs", + Group: "Constraints", + Description: "A list of Email Addresses. At least one must exist in the SANs. Supports globbing.", }, }, @@ -119,8 +151,9 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of URIs. At least one must exist in the SANs. Supports globbing.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Allowed URI SANs", - Group: "Constraints", + Name: "Allowed URI SANs", + Group: "Constraints", + Description: "A list of URIs. At least one must exist in the SANs. Supports globbing.", }, }, @@ -129,7 +162,8 @@ At least one must exist in the SANs. Supports globbing.`, Description: `A comma-separated list of Organizational Units names. At least one must exist in the OU field.`, DisplayAttrs: &framework.DisplayAttributes{ - Group: "Constraints", + Group: "Constraints", + Description: "A list of Organizational Units names. 
At least one must exist in the OU field.", }, }, @@ -138,6 +172,9 @@ At least one must exist in the OU field.`, Description: `A comma-separated string or array of extensions formatted as "oid:value". Expects the extension value to be some type of ASN1 encoded string. All values must match. Supports globbing on "value".`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values must match. Supports globbing on 'value'.", + }, }, "allowed_metadata_extensions": { @@ -146,6 +183,9 @@ All values must match. Supports globbing on "value".`, Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the oid numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.", + }, }, "display_name": { @@ -199,10 +239,6 @@ certificate.`, HelpSynopsis: pathCertHelpSyn, HelpDescription: pathCertHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "Certificate", - }, } tokenutil.AddTokenFields(p.Fields) @@ -210,7 +246,7 @@ certificate.`, } func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertEntry, error) { - entry, err := s.Get(ctx, "cert/"+strings.ToLower(n)) + entry, err := s.Get(ctx, trustedCertPath+strings.ToLower(n)) if err != nil { return nil, err } @@ -218,7 +254,7 @@ func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertE return nil, nil } - var result CertEntry + result := CertEntry{OcspMaxRetries: defaultOcspMaxRetries} // Specify our defaults if the key is missing if err := entry.DecodeJSON(&result); err != nil { return nil, err } @@ -243,7 +279,8 @@ func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertE } func (b *backend) pathCertDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "cert/"+strings.ToLower(d.Get("name").(string))) + defer b.flushTrustedCache() + err := req.Storage.Delete(ctx, trustedCertPath+strings.ToLower(d.Get("name").(string))) if err != nil { return nil, err } @@ -251,7 +288,7 @@ func (b *backend) pathCertDelete(ctx context.Context, req *logical.Request, d *f } func (b *backend) pathCertList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - certs, err := req.Storage.List(ctx, "cert/") + certs, err := req.Storage.List(ctx, trustedCertPath) if err != nil { return nil, err } @@ -278,6 +315,13 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra "allowed_organizational_units": cert.AllowedOrganizationalUnits, "required_extensions": cert.RequiredExtensions, "allowed_metadata_extensions": cert.AllowedMetadataExtensions, + "ocsp_ca_certificates": cert.OcspCaCertificates, + "ocsp_enabled": cert.OcspEnabled, + "ocsp_servers_override": cert.OcspServersOverride, + "ocsp_fail_open": cert.OcspFailOpen, + "ocsp_query_all_servers": cert.OcspQueryAllServers,
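// (Editorial aside, a sketch, not part of this change.) The new
// ocsp_this_update_max_age field bounds how stale an accepted OCSP
// response may be: conceptually, for a parsed *ocsp.Response res and a
// configured maxAge, a response is treated as unusable when
//
//	maxAge > 0 && time.Since(res.ThisUpdate) > maxAge
//
// which guards against replaying old responses that carry no NextUpdate.
+ "ocsp_this_update_max_age":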
int64(cert.OcspThisUpdateMaxAge.Seconds()), + "ocsp_max_retries": cert.OcspMaxRetries, } cert.PopulateTokenData(data) @@ -303,6 +347,7 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra } func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + defer b.flushTrustedCache() name := strings.ToLower(d.Get("name").(string)) cert, err := b.Cert(ctx, req.Storage, name) @@ -312,7 +357,8 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr if cert == nil { cert = &CertEntry{ - Name: name, + Name: name, + OcspMaxRetries: defaultOcspMaxRetries, } } @@ -335,6 +381,19 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr if ocspQueryAll, ok := d.GetOk("ocsp_query_all_servers"); ok { cert.OcspQueryAllServers = ocspQueryAll.(bool) } + if ocspThisUpdateMaxAge, ok := d.GetOk("ocsp_this_update_max_age"); ok { + maxAgeDuration, err := parseutil.ParseDurationSecond(ocspThisUpdateMaxAge) + if err != nil { + return nil, fmt.Errorf("failed to parse ocsp_this_update_max_age: %w", err) + } + cert.OcspThisUpdateMaxAge = maxAgeDuration + } + if ocspMaxRetries, ok := d.GetOk("ocsp_max_retries"); ok { + cert.OcspMaxRetries = ocspMaxRetries.(int) + if cert.OcspMaxRetries < 0 { + return nil, fmt.Errorf("ocsp_max_retries can not be a negative number") + } + } if displayNameRaw, ok := d.GetOk("display_name"); ok { cert.DisplayName = displayNameRaw.(string) } @@ -445,7 +504,7 @@ func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *fr } // Store it - entry, err := logical.StorageEntryJSON("cert/"+name, cert) + entry, err := logical.StorageEntryJSON(trustedCertPath+name, cert) if err != nil { return nil, err } @@ -480,11 +539,13 @@ type CertEntry struct { AllowedMetadataExtensions []string BoundCIDRs []*sockaddr.SockAddrMarshaler - OcspCaCertificates string - OcspEnabled bool - OcspServersOverride []string - OcspFailOpen bool - OcspQueryAllServers bool + OcspCaCertificates string + OcspEnabled bool + OcspServersOverride []string + OcspFailOpen bool + OcspQueryAllServers bool + OcspThisUpdateMaxAge time.Duration + OcspMaxRetries int } const pathCertHelpSyn = ` diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go index c08992af15c4..1183775f6bc4 100644 --- a/builtin/credential/cert/path_config.go +++ b/builtin/credential/cert/path_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -8,11 +11,16 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -const maxCacheSize = 100000 +const maxOcspCacheSize = 100000 func pathConfig(b *backend) *framework.Path { return &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + }, + Fields: map[string]*framework.FieldSchema{ "disable_binding": { Type: framework.TypeBool, @@ -29,11 +37,26 @@ func pathConfig(b *backend) *framework.Path { Default: 100, Description: `The size of the in memory OCSP response cache, shared by all configured certs`, }, + "role_cache_size": { + Type: framework.TypeInt, + Default: defaultRoleCacheSize, + Description: `The size of the in memory role cache`, + }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, }, } } @@ -52,11 +75,18 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, dat } if cacheSizeRaw, ok := data.GetOk("ocsp_cache_size"); ok { cacheSize := cacheSizeRaw.(int) - if cacheSize < 2 || cacheSize > maxCacheSize { - return logical.ErrorResponse("invalid cache size, must be >= 2 and <= %d", maxCacheSize), nil + if cacheSize < 2 || cacheSize > maxOcspCacheSize { + return logical.ErrorResponse("invalid ocsp cache size, must be >= 2 and <= %d", maxOcspCacheSize), nil } config.OcspCacheSize = cacheSize } + if cacheSizeRaw, ok := data.GetOk("role_cache_size"); ok { + cacheSize := cacheSizeRaw.(int) + if (cacheSize < 0 && cacheSize != -1) || cacheSize > maxRoleCacheSize { + return logical.ErrorResponse("invalid role cache size, must be <= %d or -1 to disable role caching", maxRoleCacheSize), nil + } + config.RoleCacheSize = cacheSize + } if err := b.storeConfig(ctx, req.Storage, config); err != nil { return nil, err } @@ -73,6 +103,7 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f "disable_binding": cfg.DisableBinding, "enable_identity_alias_metadata": cfg.EnableIdentityAliasMetadata, "ocsp_cache_size": cfg.OcspCacheSize, + "role_cache_size": cfg.RoleCacheSize, } return &logical.Response{ @@ -101,4 +132,5 @@ type config struct { DisableBinding bool `json:"disable_binding"` EnableIdentityAliasMetadata bool `json:"enable_identity_alias_metadata"` OcspCacheSize int `json:"ocsp_cache_size"` + RoleCacheSize int `json:"role_cache_size"` } diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go index 787c5572dccd..f38654869d73 100644 --- a/builtin/credential/cert/path_crls.go +++ b/builtin/credential/cert/path_crls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -19,6 +22,10 @@ import ( func pathListCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crls", + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathCRLsList, @@ -41,6 +48,12 @@ func (b *backend) pathCRLsList(ctx context.Context, req *logical.Request, d *fra func pathCRLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "crls/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crl", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -71,6 +84,16 @@ using the same name as specified here.`, } } +func (b *backend) populateCrlsIfNil(ctx context.Context, storage logical.Storage) error { + b.crlUpdateMutex.RLock() + if b.crls == nil { + b.crlUpdateMutex.RUnlock() + return b.lockThenpopulateCRLs(ctx, storage) + } + b.crlUpdateMutex.RUnlock() + return nil +} + func (b *backend) lockThenpopulateCRLs(ctx context.Context, storage logical.Storage) error { b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() @@ -167,6 +190,7 @@ func (b *backend) pathCRLDelete(ctx context.Context, req *logical.Request, d *fr b.crlUpdateMutex.Lock() defer b.crlUpdateMutex.Unlock() + defer b.flushTrustedCache() _, ok := b.crls[name] if !ok { @@ -290,6 +314,8 @@ func (b *backend) setCRL(ctx context.Context, storage logical.Storage, certList } b.crls[name] = crlInfo + b.flushTrustedCache() + return err } diff --git a/builtin/credential/cert/path_crls_test.go b/builtin/credential/cert/path_crls_test.go index aa293a52cd6d..fff11d588aea 100644 --- a/builtin/credential/cert/path_crls_test.go +++ b/builtin/credential/cert/path_crls_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -5,6 +8,7 @@ import ( "crypto/rand" "crypto/x509" "crypto/x509/pkix" + "fmt" "io/ioutil" "math/big" "net/http" @@ -14,6 +18,8 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -162,7 +168,7 @@ func TestCRLFetch(t *testing.T) { b.crlUpdateMutex.Lock() if len(b.crls["testcrl"].Serials) != 1 { - t.Fatalf("wrong number of certs in CRL") + t.Fatalf("wrong number of certs in CRL got %d, expected 1", len(b.crls["testcrl"].Serials)) } b.crlUpdateMutex.Unlock() @@ -188,11 +194,14 @@ func TestCRLFetch(t *testing.T) { // Give ourselves a little extra room on slower CI systems to ensure we // can fetch the new CRL. 
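(Editorial aside, a minimal sketch, not part of this change.) The RetryUntil change below replaces a fixed sleep with polling; the usual shape of such a helper is roughly the following, where window and check are hypothetical placeholders for the timeout and the condition being retried:

	deadline := time.Now().Add(window)
	for {
		err := check()
		if err == nil {
			break // condition met
		}
		if time.Now().After(deadline) {
			t.Fatalf("condition not met within %s: %v", window, err)
		}
		time.Sleep(50 * time.Millisecond)
	}

Polling keeps slow CI machines from flaking without forcing fast machines to wait out the worst case.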
- time.Sleep(150 * time.Millisecond) + corehelpers.RetryUntil(t, 2*time.Second, func() error { + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() - b.crlUpdateMutex.Lock() - if len(b.crls["testcrl"].Serials) != 2 { - t.Fatalf("wrong number of certs in CRL") - } - b.crlUpdateMutex.Unlock() + serialCount := len(b.crls["testcrl"].Serials) + if serialCount != 2 { + return fmt.Errorf("CRL refresh did not occur serial count %d", serialCount) + } + return nil + }) } diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go index 8547e9209cdc..8f33d11ac64d 100644 --- a/builtin/credential/cert/path_login.go +++ b/builtin/credential/cert/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -7,20 +10,23 @@ import ( "crypto/x509" "encoding/asn1" "encoding/base64" + "encoding/hex" "encoding/pem" "errors" "fmt" + "net/url" "strings" - "github.com/hashicorp/vault/sdk/helper/ocsp" - "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/ocsp" "github.com/hashicorp/vault/sdk/helper/policyutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/helper/cidrutil" - glob "github.com/ryanuber/go-glob" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/ryanuber/go-glob" ) // ParsedCert is a certificate that has been configured as trusted @@ -32,6 +38,10 @@ type ParsedCert struct { func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationVerb: "login", + }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -39,15 +49,27 @@ func pathLogin(b *backend) *framework.Path { }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathLogin, + logical.UpdateOperation: b.loginPathWrapper(b.pathLogin), logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, - logical.ResolveRoleOperation: b.pathLoginResolveRole, + logical.ResolveRoleOperation: b.loginPathWrapper(b.pathLoginResolveRole), }, } } +func (b *backend) loginPathWrapper(wrappedOp func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error)) framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Make sure that the CRLs have been loaded before processing a login request, + // they might have been nil'd by an invalidate func call. 
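// (Editorial aside, not part of this change.) loginPathWrapper is a small
// decorator: it takes an OperationFunc and returns this closure, which
// re-establishes the CRL precondition before delegating, so the login and
// resolve-role entry points share the check without duplicating it at the
// top of each handler.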
+ if err := b.populateCrlsIfNil(ctx, req.Storage); err != nil { + return nil, err + } + return wrappedOp(ctx, req, data) + } +} + func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { var matched *ParsedCert + if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { return nil, err } else if resp != nil { @@ -90,13 +112,6 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *fra b.updatedConfig(config) } - if b.crls == nil { - // Probably invalidated due to replication, but we need these to proceed - if err := b.populateCRLs(ctx, req.Storage); err != nil { - return nil, err - } - } - var matched *ParsedCert if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { return nil, err @@ -173,12 +188,6 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f b.updatedConfig(config) } - if b.crls == nil { - if err := b.populateCRLs(ctx, req.Storage); err != nil { - return nil, err - } - } - if !config.DisableBinding { var matched *ParsedCert if verifyResp, resp, err := b.verifyCredentials(ctx, req, d); err != nil { @@ -249,7 +258,7 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d } // Load the trusted certificates and other details - roots, trusted, trustedNonCAs, verifyConf := b.loadTrustedCerts(ctx, req.Storage, certName) + roots, trusted, trustedNonCAs, verifyConf := b.getTrustedCerts(ctx, req.Storage, certName) // Get the list of full chains matching the connection and validates the // certificate itself @@ -265,16 +274,35 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d // If trustedNonCAs is not empty it means that client had registered a non-CA cert // with the backend. + var retErr error if len(trustedNonCAs) != 0 { for _, trustedNonCA := range trustedNonCAs { tCert := trustedNonCA.Certificates[0] // Check for client cert being explicitly listed in the config (and matching other constraints) if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 && bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) { - matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf) + pkMatch, err := certutil.ComparePublicKeysAndType(tCert.PublicKey, clientCert.PublicKey) if err != nil { return nil, nil, err } + if !pkMatch { + // Someone may be trying to pass off a forged certificate as the trusted non-CA cert. Reject early. + return nil, logical.ErrorResponse("public key mismatch of a trusted leaf certificate"), nil + } + matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf) + + // matchesConstraints returns an error when OCSP verification fails, + // but some other path might still give us success. Add to the + // retErr multierror, but avoid duplicates. This way, if we reach a + // failure later, we can give additional context. + // + // XXX: If matchesConstraints is updated to generate additional, + // immediately fatal errors, we likely need to extend it to return + // another boolean (fatality) or other detection scheme. 
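// (Editorial aside, not part of this change.) The guard below appends err
// to the accumulated multierror only when it is not already present, using
// the error text as the dedup key via errwrap.Contains; with several
// trusted entries sharing one unreachable OCSP responder, this keeps the
// final login error from repeating the same failure once per candidate.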
+ if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { + retErr = multierror.Append(retErr, err) + } + + if matches { + return trustedNonCA, nil, nil + } @@ -285,23 +313,36 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d // If no trusted chain was found, client is not authenticated // This check happens after checking for a matching configured non-CA certs if len(trustedChains) == 0 { + if retErr != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("invalid certificate or no client certificate supplied; additionally got errors during verification: %v", retErr)), nil + } return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil } // Search for a ParsedCert that intersects with the validated chains and any additional constraints - matches := make([]*ParsedCert, 0) for _, trust := range trusted { // For each ParsedCert in the config for _, tCert := range trust.Certificates { // For each certificate in the entry for _, chain := range trustedChains { // For each root chain that we matched for _, cCert := range chain { // For each cert in the matched chain if tCert.Equal(cCert) { // ParsedCert intersects with matched chain match, err := b.matchesConstraints(ctx, clientCert, chain, trust, verifyConf) // validate client cert + matched chain against the config - if err != nil { - return nil, nil, err + + // See note above. + if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) { + retErr = multierror.Append(retErr, err) } - if match { - // Add the match to the list - matches = append(matches, trust) + + // Return the first matching entry (for backwards + // compatibility, we continue to just pick the first + // one if we have multiple matches). + // + // Here, we return directly: this means that any + // future OCSP errors would be ignored; in the future, + // if these become fatal, we could revisit this + // choice and choose the first match after evaluating + // all possible candidates. + if match && err == nil { + return trust, nil, nil } } } @@ -309,13 +350,11 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d } } - // Fail on no matches - if len(matches) == 0 { - return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil + if retErr != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("no chain matching all constraints could be found for this login certificate; additionally got errors during verification: %v", retErr)), nil } - // Return the first matching entry (for backwards compatibility, we continue to just pick one if multiple match) - return matches[0], nil, nil + return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil } func (b *backend) matchesConstraints(ctx context.Context, clientCert *x509.Certificate, trustedChain []*x509.Certificate, @@ -478,18 +517,43 @@ func (b *backend) matchesCertificateExtensions(clientCert *x509.Certificate, con
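// (Editorial aside, not part of this change.) The hunk below adds a second,
// hex-encoded form for required_extensions entries. An entry such as the
// following, with a hypothetical OID and value,
//
//	required_extensions="hex:1.2.3.4:*0461626364*"
//
// is split on its first two colons and its pattern, lowercased, is globbed
// against hex.EncodeToString(ext.Value), i.e. against the raw DER bytes,
// which helps when the extension value is not an ASN.1 string type.
// including its ASN.1 type tag bytes. For the sake of simplicity, assume string type // and drop the tag bytes. And get the number of bytes from the tag.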
clientExtMap := make(map[string]string, len(clientCert.Extensions)) + hexExtMap := make(map[string]string, len(clientCert.Extensions)) + for _, ext := range clientCert.Extensions { var parsedValue string - asn1.Unmarshal(ext.Value, &parsedValue) - clientExtMap[ext.Id.String()] = parsedValue + _, err := asn1.Unmarshal(ext.Value, &parsedValue) + if err != nil { + clientExtMap[ext.Id.String()] = "" + } else { + clientExtMap[ext.Id.String()] = parsedValue + } + + hexExtMap[ext.Id.String()] = hex.EncodeToString(ext.Value) } - // If any of the required extensions don'log match the constraint fails + + // If any of the required extensions don't match the constraint fails for _, requiredExt := range config.Entry.RequiredExtensions { reqExt := strings.SplitN(requiredExt, ":", 2) - clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]] - if !clientExtValueOk || !glob.Glob(reqExt[1], clientExtValue) { + if len(reqExt) != 2 { return false } + + if reqExt[0] == "hex" { + reqHexExt := strings.SplitN(reqExt[1], ":", 2) + if len(reqHexExt) != 2 { + return false + } + + clientExtValue, clientExtValueOk := hexExtMap[reqHexExt[0]] + if !clientExtValueOk || !glob.Glob(strings.ToLower(reqHexExt[1]), clientExtValue) { + return false + } + } else { + clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]] + if !clientExtValueOk || !glob.Glob(reqExt[1], clientExtValue) { + return false + } + } } return true } @@ -526,10 +590,21 @@ func (b *backend) certificateExtensionsMetadata(clientCert *x509.Certificate, co return metadata } +// getTrustedCerts is used to load all the trusted certificates from the backend, cached + +func (b *backend) getTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { + if !b.trustedCacheDisabled.Load() { + if trusted, found := b.trustedCache.Get(certName); found { + return trusted.pool, trusted.trusted, trusted.trustedNonCAs, trusted.ocspConf + } + } + return b.loadTrustedCerts(ctx, storage, certName) +} + // loadTrustedCerts is used to load all the trusted certificates from the backend -func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { +func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trustedCerts []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) { pool = x509.NewCertPool() - trusted = make([]*ParsedCert, 0) + trustedCerts = make([]*ParsedCert, 0) trustedNonCAs = make([]*ParsedCert, 0) var names []string @@ -537,7 +612,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, names = append(names, certName) } else { var err error - names, err = storage.List(ctx, "cert/") + names, err = storage.List(ctx, trustedCertPath) if err != nil { b.Logger().Error("failed to list trusted certs", "error", err) return @@ -546,7 +621,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, conf = &ocsp.VerifyConfig{} for _, name := range names { - entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, "cert/")) + entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, trustedCertPath)) if err != nil { b.Logger().Error("failed to load trusted cert", "name", name, "error", err) continue @@ -575,7 +650,7 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage 
logical.Storage, } // Create a ParsedCert entry - trusted = append(trusted, &ParsedCert{ + trustedCerts = append(trustedCerts, &ParsedCert{ Entry: entry, Certificates: parsed, }) @@ -589,8 +664,19 @@ func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, conf.OcspFailureMode = ocsp.FailOpenFalse } conf.QueryAllServers = conf.QueryAllServers || entry.OcspQueryAllServers + conf.OcspThisUpdateMaxAge = entry.OcspThisUpdateMaxAge + conf.OcspMaxRetries = entry.OcspMaxRetries } } + + if !b.trustedCacheDisabled.Load() { + b.trustedCache.Add(certName, &trusted{ + pool: pool, + trusted: trustedCerts, + trustedNonCAs: trustedNonCAs, + ocspConf: conf, + }) + } return } @@ -602,11 +688,49 @@ func (b *backend) checkForCertInOCSP(ctx context.Context, clientCert *x509.Certi defer b.ocspClientMutex.RUnlock() err := b.ocspClient.VerifyLeafCertificate(ctx, clientCert, chain[1], conf) if err != nil { + if ocsp.IsOcspVerificationError(err) { + // We don't want anything to override an OCSP verification error + return false, err + } + if conf.OcspFailureMode == ocsp.FailOpenTrue { + onlyNetworkErrors := b.handleOcspErrorInFailOpen(err) + if onlyNetworkErrors { + return true, nil + } + } + // We want to preserve error messages when they have additional, + // potentially useful information. Just having a revoked cert + // isn't additionally useful. + if !strings.Contains(err.Error(), "has been revoked") { + return false, err + } return false, nil } return true, nil } +func (b *backend) handleOcspErrorInFailOpen(err error) bool { + urlError := &url.Error{} + allNetworkErrors := true + if multiError, ok := err.(*multierror.Error); ok { + for _, myErr := range multiError.Errors { + if !errors.As(myErr, &urlError) { + allNetworkErrors = false + } + } + } else if !errors.As(err, &urlError) { + allNetworkErrors = false + } + + if allNetworkErrors { + b.Logger().Warn("OCSP is set to fail-open, and could not retrieve "+ + "OCSP based revocation but proceeding.", "detail", err) + return true + } + + return false +} + func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool { badChain := false for _, cert := range chain { diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go index f69444270f39..fcc7b37c51a6 100644 --- a/builtin/credential/cert/path_login_test.go +++ b/builtin/credential/cert/path_login_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cert import ( @@ -91,6 +94,10 @@ func TestCert_RoleResolve(t *testing.T) { testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), testAccStepLoginWithName(t, connState, "web"), testAccStepResolveRoleWithName(t, connState, "web"), + // Test with caching disabled + testAccStepSetRoleCacheSize(t, -1), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleWithName(t, connState, "web"), }, }) } @@ -148,10 +155,23 @@ func TestCert_RoleResolveWithoutProvidingCertName(t *testing.T) { testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), testAccStepLoginWithName(t, connState, "web"), testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"), + testAccStepSetRoleCacheSize(t, -1), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"), }, }) } +func testAccStepSetRoleCacheSize(t *testing.T, size int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "role_cache_size": size, + }, + } +} + func testAccStepResolveRoleWithEmptyDataMap(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ResolveRoleOperation, @@ -345,6 +365,7 @@ func TestCert_RoleResolveOCSP(t *testing.T) { Steps: []logicaltest.TestStep{ testAccStepCertWithExtraParams(t, "web", ca, "foo", allowed{dns: "example.com"}, false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), + testAccStepReadCertPolicy(t, "web", false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), loginStep, resolveStep, }, diff --git a/builtin/credential/cert/test_responder.go b/builtin/credential/cert/test_responder.go index 1c7c75b2ff33..2052736d33cf 100644 --- a/builtin/credential/cert/test_responder.go +++ b/builtin/credential/cert/test_responder.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Package ocsp implements an OCSP responder based on a generic storage backend. // It provides a couple of sample implementations. // Because OCSP responders handle high query volumes, we have to be careful diff --git a/builtin/credential/github/backend.go b/builtin/credential/github/backend.go index 89ce37c7cd6d..6e80e7b313d6 100644 --- a/builtin/credential/github/backend.go +++ b/builtin/credential/github/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package github import ( @@ -5,12 +8,14 @@ import ( "net/url" "github.com/google/go-github/github" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" "golang.org/x/oauth2" ) +const operationPrefixGithub = "github" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { @@ -28,6 +33,32 @@ func Backend() *backend { DefaultKey: "default", } + teamMapPaths := b.TeamMap.Paths() + + teamMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "teams", + } + teamMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "team-mapping", + } + teamMapPaths[0].Operations = map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: teamMapPaths[0].Callbacks[logical.ListOperation], + Summary: teamMapPaths[0].HelpSynopsis, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: teamMapPaths[0].Callbacks[logical.ReadOperation], + Summary: teamMapPaths[0].HelpSynopsis, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "list", + OperationSuffix: "teams2", // The ReadOperation is redundant with the ListOperation + }, + }, + } + teamMapPaths[0].Callbacks = nil + b.UserMap = &framework.PolicyMap{ PathMap: framework.PathMap{ Name: "users", @@ -35,7 +66,33 @@ func Backend() *backend { DefaultKey: "default", } - allPaths := append(b.TeamMap.Paths(), b.UserMap.Paths()...) + userMapPaths := b.UserMap.Paths() + + userMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "users", + } + userMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "user-mapping", + } + userMapPaths[0].Operations = map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: userMapPaths[0].Callbacks[logical.ListOperation], + Summary: userMapPaths[0].HelpSynopsis, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: userMapPaths[0].Callbacks[logical.ReadOperation], + Summary: userMapPaths[0].HelpSynopsis, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "list", + OperationSuffix: "users2", // The ReadOperation is redundant with the ListOperation + }, + }, + } + userMapPaths[0].Callbacks = nil + + allPaths := append(teamMapPaths, userMapPaths...) b.Backend = &framework.Backend{ Help: backendHelp, diff --git a/builtin/credential/github/backend_test.go b/builtin/credential/github/backend_test.go index f3360f52cfb5..4f3dee078131 100644 --- a/builtin/credential/github/backend_test.go +++ b/builtin/credential/github/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package github import ( diff --git a/builtin/credential/github/cli.go b/builtin/credential/github/cli.go index bccc6fa516e2..177433bde49b 100644 --- a/builtin/credential/github/cli.go +++ b/builtin/credential/github/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package github import ( diff --git a/builtin/credential/github/cmd/github/main.go b/builtin/credential/github/cmd/github/main.go index be4fbb64ca65..40a3a0002bfb 100644 --- a/builtin/credential/github/cmd/github/main.go +++ b/builtin/credential/github/cmd/github/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: github.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 84c03d3dbb79..abe78760fc1d 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package github import ( "context" "fmt" "net/url" + "os" "strings" "time" @@ -16,6 +20,11 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + }, + Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -48,9 +57,20 @@ API-compatible authentication server.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigWrite, - logical.ReadOperation: b.pathConfigRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, }, } @@ -94,7 +114,8 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, dat } if c.OrganizationID == 0 { - client, err := b.Client("") + githubToken := os.Getenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN") + client, err := b.Client(githubToken) if err != nil { return nil, err } diff --git a/builtin/credential/github/path_config_test.go b/builtin/credential/github/path_config_test.go index e8d0cf5fdb39..19338ff44d23 100644 --- a/builtin/credential/github/path_config_test.go +++ b/builtin/credential/github/path_config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
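Note: the `plugin.Serve` to `plugin.ServeMultiplex` swap in main.go above recurs verbatim in the ldap, okta, radius, and userpass entrypoints later in this diff. Multiplexing lets one plugin process serve multiple mounts, and keeping `TLSProviderFunc` set preserves the bootstrap path for Vault servers that predate plugin AutoMTLS. The whole pattern, reduced to its essentials (a sketch; the error branch is assumed to log and exit as the surrounding file does):

    if err := plugin.ServeMultiplex(&plugin.ServeOpts{
        BackendFactoryFunc: github.Factory,
        // kept for Vault servers without AutoMTLS support
        TLSProviderFunc: tlsProviderFunc,
    }); err != nil {
        logger := hclog.New(&hclog.LoggerOptions{})
        logger.Error("plugin shutting down", "error", err)
        os.Exit(1)
    }
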
+// SPDX-License-Identifier: BUSL-1.1 + package github import ( @@ -6,6 +9,7 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" "strings" "testing" @@ -120,6 +124,43 @@ func TestGitHub_WriteReadConfig_OrgID(t *testing.T) { assert.Equal(t, "foo-org", resp.Data["organization"]) } +// TestGitHub_WriteReadConfig_Token tests that we can successfully read and +// write the github auth config with a token environment variable +func TestGitHub_WriteReadConfig_Token(t *testing.T) { + b, s := createBackendWithStorage(t) + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + err := os.Setenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN", "foobar") + assert.NoError(t, err) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": ts.URL, // base_url will call the test server + }, + Storage: s, + }) + assert.NoError(t, err) + assert.Nil(t, resp) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the token should not be returned in the read config response. + assert.Nil(t, resp.Data["token"]) +} + // TestGitHub_ErrorNoOrgID tests that an error is returned when we cannot fetch // the org ID for the given org name func TestGitHub_ErrorNoOrgID(t *testing.T) { diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go index 252b5641cd4f..181076a6587f 100644 --- a/builtin/credential/github/path_login.go +++ b/builtin/credential/github/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package github import ( @@ -16,6 +19,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "token": { Type: framework.TypeString, diff --git a/builtin/credential/github/path_login_test.go b/builtin/credential/github/path_login_test.go index 25baf7f811e8..cfc47a984345 100644 --- a/builtin/credential/github/path_login_test.go +++ b/builtin/credential/github/path_login_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package github import ( diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index 35e0f102c396..3f203fb13bb0 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -1,17 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
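Note: `TestGitHub_WriteReadConfig_Token` above sets `VAULT_AUTH_CONFIG_GITHUB_TOKEN` with `os.Setenv` and never restores it, so the value leaks into any test that runs later in the package. A possible tightening (Go 1.17 and newer), shown as a sketch:

    // t.Setenv registers an automatic cleanup that restores the previous
    // value when the test ends, so the token cannot bleed into other tests.
    t.Setenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN", "foobar")
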
+// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( "context" "fmt" "strings" + "sync" + "github.com/hashicorp/cap/ldap" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/ldaputil" "github.com/hashicorp/vault/sdk/logical" ) -const errUserBindFailed = `ldap operation failed: failed to bind as user` +const ( + operationPrefixLDAP = "ldap" + errUserBindFailed = "ldap operation failed: failed to bind as user" + defaultPasswordLength = 64 // length to use for configured root password on rotations by default +) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() @@ -43,6 +53,7 @@ func Backend() *backend { pathUsers(&b), pathUsersList(&b), pathLogin(&b), + pathConfigRotateRoot(&b), }, AuthRenew: b.pathLoginRenew, @@ -54,6 +65,8 @@ func Backend() *backend { type backend struct { *framework.Backend + + mu sync.RWMutex } func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string, usernameAsAlias bool) (string, []string, *logical.Response, []string, error) { @@ -69,82 +82,25 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil } - ldapClient := ldaputil.Client{ - Logger: b.Logger(), - LDAP: ldaputil.NewLDAP(), - } - - c, err := ldapClient.DialLDAP(cfg.ConfigEntry) + ldapClient, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry)) if err != nil { return "", nil, logical.ErrorResponse(err.Error()), nil, nil } - if c == nil { - return "", nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil, nil - } // Clean connection - defer c.Close() - - userBindDN, err := ldapClient.GetUserBindDN(cfg.ConfigEntry, c, username) - if err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("error getting user bind DN", "error", err) - } - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, nil - } - - if b.Logger().IsDebug() { - b.Logger().Debug("user binddn fetched", "username", username, "binddn", userBindDN) - } - - // Try to bind as the login user. This is where the actual authentication takes place. - if len(password) > 0 { - err = c.Bind(userBindDN, password) - } else { - err = c.UnauthenticatedBind(userBindDN) - } - if err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("ldap bind failed", "error", err) - } - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials - } + defer ldapClient.Close(ctx) - // We re-bind to the BindDN if it's defined because we assume - // the BindDN should be the one to search, not the user logging in. 
- if cfg.BindDN != "" && cfg.BindPassword != "" { - if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err) - } - return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, logical.ErrInvalidCredentials - } - if b.Logger().IsDebug() { - b.Logger().Debug("re-bound to original binddn") - } - } - - userDN, err := ldapClient.GetUserDN(cfg.ConfigEntry, c, userBindDN, username) + c, err := ldapClient.Authenticate(ctx, username, password, ldap.WithGroups(), ldap.WithUserAttributes()) if err != nil { - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - - if cfg.AnonymousGroupSearch { - c, err = ldapClient.DialLDAP(cfg.ConfigEntry) - if err != nil { - return "", nil, logical.ErrorResponse("ldap operation failed: failed to connect to LDAP server"), nil, nil + if strings.Contains(err.Error(), "discovery of user bind DN failed") || + strings.Contains(err.Error(), "unable to bind user") { + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials } - defer c.Close() // Defer closing of this connection as the deferal above closes the other defined connection - } - ldapGroups, err := ldapClient.GetLdapGroups(cfg.ConfigEntry, c, userDN, username) - if err != nil { return "", nil, logical.ErrorResponse(err.Error()), nil, nil } - if b.Logger().IsDebug() { - b.Logger().Debug("groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups) - } + ldapGroups := c.Groups ldapResponse := &logical.Response{ Data: map[string]interface{}{}, } @@ -155,6 +111,10 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri ldapResponse.AddWarning(errString) } + for _, warning := range c.Warnings { + ldapResponse.AddWarning(string(warning)) + } + var allGroups []string canonicalUsername := username cs := *cfg.CaseSensitiveNames @@ -199,13 +159,11 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri return username, policies, ldapResponse, allGroups, nil } - entityAliasAttribute, err := ldapClient.GetUserAliasAttributeValue(cfg.ConfigEntry, c, username) - if err != nil { - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - if entityAliasAttribute == "" { + userAttrValues := c.UserAttributes[cfg.UserAttr] + if len(userAttrValues) == 0 { return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil } + entityAliasAttribute := userAttrValues[0] return entityAliasAttribute, policies, ldapResponse, allGroups, nil } diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index 74b4e18a17e3..0e4a0d1a3153 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
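Note: the Login rewrite above trades roughly sixty lines of hand-rolled dial/bind/re-bind/group-search logic for a single `cap/ldap` call. One wrinkle worth flagging: bind failures are now recognized by matching substrings of the library's error text, which is brittle if cap/ldap ever rewords those messages. The new flow, condensed into a sketch (names follow the diff; error handling trimmed to the essentials):

    // Authenticate performs user-DN discovery, the user bind, group
    // resolution, and user-attribute retrieval in one call.
    client, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry))
    if err != nil {
        return nil, err
    }
    defer client.Close(ctx)

    res, err := client.Authenticate(ctx, username, password,
        ldap.WithGroups(), ldap.WithUserAttributes())
    if err != nil {
        return nil, err
    }
    // res.Groups and res.UserAttributes[cfg.UserAttr] replace the separate
    // group search and alias-attribute lookup from the removed code.
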
+// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( @@ -829,6 +832,7 @@ func testAccStepConfigUrl(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.T "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, "username_as_alias": cfg.UsernameAsAlias, }, } @@ -851,6 +855,7 @@ func testAccStepConfigUrlWithAuthBind(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -871,6 +876,7 @@ func testAccStepConfigUrlWithDiscover(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -888,6 +894,7 @@ func testAccStepConfigUrlNoGroupDN(t *testing.T, cfg *ldaputil.ConfigEntry) logi "discoverdn": true, "case_sensitive_names": true, "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -908,6 +915,7 @@ func testAccStepConfigUrlWarningCheck(t *testing.T, cfg *ldaputil.ConfigEntry, o "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, Check: func(response *logical.Response) error { if len(response.Warnings) == 0 { @@ -1189,6 +1197,8 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { "token_period": "5m", "token_explicit_max_ttl": "24h", "request_timeout": cfg.RequestTimeout, + "max_page_size": cfg.MaximumPageSize, + "connection_timeout": cfg.ConnectionTimeout, }, Storage: storage, Connection: &logical.Connection{}, @@ -1230,7 +1240,10 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { CaseSensitiveNames: falseBool, UsePre111GroupCNBehavior: new(bool), RequestTimeout: cfg.RequestTimeout, + ConnectionTimeout: cfg.ConnectionTimeout, UsernameAsAlias: false, + DerefAliases: "never", + MaximumPageSize: 1000, }, } diff --git a/builtin/credential/ldap/cli.go b/builtin/credential/ldap/cli.go index e0d744b4caad..f7f4a63156d3 100644 --- a/builtin/credential/ldap/cli.go +++ b/builtin/credential/ldap/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( @@ -26,12 +29,15 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro } password, ok := m["password"] if !ok { - fmt.Fprintf(os.Stderr, "Password (will be hidden): ") - var err error - password, err = pwd.Read(os.Stdin) - fmt.Fprintf(os.Stderr, "\n") - if err != nil { - return nil, err + password = passwordFromEnv() + if password == "" { + fmt.Fprintf(os.Stderr, "Password (will be hidden): ") + var err error + password, err = pwd.Read(os.Stdin) + fmt.Fprintf(os.Stderr, "\n") + if err != nil { + return nil, err + } } } @@ -70,8 +76,9 @@ Usage: vault login -method=ldap [CONFIG K=V...] Configuration: password= - LDAP password to use for authentication. If not provided, the CLI will - prompt for this on stdin. + LDAP password to use for authentication. If not provided, it will use + the VAULT_LDAP_PASSWORD environment variable. If this is not set, the + CLI will prompt for this on stdin. username= LDAP username to use for authentication. 
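Note: with the cli.go change above, the ldap login helper resolves the password from three places, in order. Condensed, this mirrors the new `Auth` body (`m` is the CLI's key/value map):

    // 1. explicit password=<value> argument
    // 2. VAULT_LDAP_PASSWORD environment variable
    // 3. interactive hidden prompt on stdin
    password, ok := m["password"]
    if !ok {
        if password = passwordFromEnv(); password == "" {
            fmt.Fprintf(os.Stderr, "Password (will be hidden): ")
            var err error
            password, err = pwd.Read(os.Stdin)
            fmt.Fprintf(os.Stderr, "\n")
            if err != nil {
                return nil, err
            }
        }
    }
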
@@ -89,3 +96,7 @@ func usernameFromEnv() string { } return "" } + +func passwordFromEnv() string { + return os.Getenv("VAULT_LDAP_PASSWORD") +} diff --git a/builtin/credential/ldap/cmd/ldap/main.go b/builtin/credential/ldap/cmd/ldap/main.go index b632c011ce13..8594cc527e76 100644 --- a/builtin/credential/ldap/cmd/ldap/main.go +++ b/builtin/credential/ldap/cmd/ldap/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: ldap.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index 45e5294c79d9..e24d04b295c7 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( @@ -16,22 +19,41 @@ const userFilterWarning = "userfilter configured does not consider userattr and func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, - Fields: ldaputil.ConfigFields(), - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.UpdateOperation: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + Action: "Configure", + }, + + Fields: ldaputil.ConfigFields(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "auth-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure-auth", + }, + }, }, HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." 
+ + p.Fields["password_policy"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "Password policy to use to rotate the root password", + } + return p } @@ -102,6 +124,9 @@ func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldapConfig } func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + b.mu.RLock() + defer b.mu.RUnlock() + cfg, err := b.Config(ctx, req) if err != nil { return nil, err @@ -112,6 +137,7 @@ func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *f data := cfg.PasswordlessMap() cfg.PopulateTokenData(data) + data["password_policy"] = cfg.PasswordPolicy resp := &logical.Response{ Data: data, @@ -148,6 +174,9 @@ func (b *backend) checkConfigUserFilter(cfg *ldapConfigEntry) []string { } func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + b.mu.Lock() + defer b.mu.Unlock() + cfg, err := b.Config(ctx, req) if err != nil { return nil, err @@ -178,6 +207,10 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } + if passwordPolicy, ok := d.GetOk("password_policy"); ok { + cfg.PasswordPolicy = passwordPolicy.(string) + } + entry, err := logical.StorageEntryJSON("config", cfg) if err != nil { return nil, err @@ -218,6 +251,8 @@ func (b *backend) getConfigFieldData() (*framework.FieldData, error) { type ldapConfigEntry struct { tokenutil.TokenParams *ldaputil.ConfigEntry + + PasswordPolicy string `json:"password_policy"` } const pathConfigHelpSyn = ` diff --git a/builtin/credential/ldap/path_config_rotate_root.go b/builtin/credential/ldap/path_config_rotate_root.go new file mode 100644 index 000000000000..1aa008f4ddc3 --- /dev/null +++ b/builtin/credential/ldap/path_config_rotate_root.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+	"context"
+
+	"github.com/go-ldap/ldap/v3"
+
+	"github.com/hashicorp/vault/sdk/helper/base62"
+	"github.com/hashicorp/vault/sdk/helper/ldaputil"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathConfigRotateRoot(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/rotate-root",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationVerb:   "rotate",
+			OperationSuffix: "root-credentials",
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback:                    b.pathConfigRotateRootUpdate,
+				ForwardPerformanceSecondary: true,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathConfigRotateRootHelpSyn,
+		HelpDescription: pathConfigRotateRootHelpDesc,
+	}
+}
+
+func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	// lock the backend's state - really just the config state - for mutating
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return logical.ErrorResponse("attempted to rotate root on an undefined config"), nil
+	}
+
+	u, p := cfg.BindDN, cfg.BindPassword
+	if u == "" || p == "" {
+		return logical.ErrorResponse("auth is not using authenticated search, no root to rotate"), nil
+	}
+
+	// grab our ldap client
+	client := ldaputil.Client{
+		Logger: b.Logger(),
+		LDAP:   ldaputil.NewLDAP(),
+	}
+
+	conn, err := client.DialLDAP(cfg.ConfigEntry)
+	if err != nil {
+		return nil, err
+	}
+
+	err = conn.Bind(u, p)
+	if err != nil {
+		return nil, err
+	}
+
+	lreq := &ldap.ModifyRequest{
+		DN: cfg.BindDN,
+	}
+
+	var newPassword string
+	if cfg.PasswordPolicy != "" {
+		newPassword, err = b.System().GeneratePasswordFromPolicy(ctx, cfg.PasswordPolicy)
+	} else {
+		newPassword, err = base62.Random(defaultPasswordLength)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	lreq.Replace("userPassword", []string{newPassword})
+
+	err = conn.Modify(lreq)
+	if err != nil {
+		return nil, err
+	}
+	// update config with new password
+	cfg.BindPassword = newPassword
+	entry, err := logical.StorageEntryJSON("config", cfg)
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		// we might have to roll-back the password here?
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+const pathConfigRotateRootHelpSyn = `
+Request to rotate the LDAP credentials used by Vault
+`
+
+const pathConfigRotateRootHelpDesc = `
+This path attempts to rotate the LDAP bindpass used by Vault for this mount.
+`
diff --git a/builtin/credential/ldap/path_config_rotate_root_test.go b/builtin/credential/ldap/path_config_rotate_root_test.go
new file mode 100644
index 000000000000..65073472ca00
--- /dev/null
+++ b/builtin/credential/ldap/path_config_rotate_root_test.go
@@ -0,0 +1,69 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package ldap
+
+import (
+	"context"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/vault/helper/testhelpers/ldap"
+	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// This test relies on a docker ldap server with a suitable person object (cn=admin,dc=planetexpress,dc=com)
+// with bindpassword "admin". `PrepareTestContainer` does this for us; see the backend_test for more details.
+func TestRotateRoot(t *testing.T) {
+	if os.Getenv(logicaltest.TestEnvVar) == "" {
+		t.Skip("skipping rotate root tests because VAULT_ACC is unset")
+	}
+	ctx := context.Background()
+
+	b, store := createBackendWithStorage(t)
+	cleanup, cfg := ldap.PrepareTestContainer(t, "latest")
+	defer cleanup()
+	// set up auth config
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config",
+		Storage:   store,
+		Data: map[string]interface{}{
+			"url":      cfg.Url,
+			"binddn":   cfg.BindDN,
+			"bindpass": cfg.BindPassword,
+			"userdn":   cfg.UserDN,
+		},
+	}
+
+	resp, err := b.HandleRequest(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to initialize ldap auth config: %s", err)
+	}
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to initialize ldap auth config: %s", resp.Data["error"])
+	}
+
+	req = &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/rotate-root",
+		Storage:   store,
+	}
+
+	_, err = b.HandleRequest(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to rotate password: %s", err)
+	}
+
+	newCFG, err := b.Config(ctx, req)
+	if err != nil {
+		t.Fatalf("failed to read back the rotated config: %s", err)
+	}
+	if newCFG.BindDN != cfg.BindDN {
+		t.Fatalf("a value in config that should have stayed the same changed: %s", cfg.BindDN)
+	}
+	if newCFG.BindPassword == cfg.BindPassword {
+		t.Fatalf("the password should have changed, but it didn't")
+	}
+}
diff --git a/builtin/credential/ldap/path_groups.go b/builtin/credential/ldap/path_groups.go
index b39691cf8174..645b6428fd8e 100644
--- a/builtin/credential/ldap/path_groups.go
+++ b/builtin/credential/ldap/path_groups.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package ldap
 
 import (
@@ -13,22 +16,33 @@ func pathGroupsList(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: "groups/?$",
 
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationSuffix: "groups",
+			Navigation:      true,
+			ItemType:        "Group",
+		},
+
 		Callbacks: map[logical.Operation]framework.OperationFunc{
 			logical.ListOperation: b.pathGroupList,
 		},
 
 		HelpSynopsis:    pathGroupHelpSyn,
 		HelpDescription: pathGroupHelpDesc,
-
-		DisplayAttrs: &framework.DisplayAttributes{
-			Navigation: true,
-			ItemType:   "Group",
-		},
 	}
 }
 
 func pathGroups(b *backend) *framework.Path {
 	return &framework.Path{
 		Pattern: `groups/(?P<name>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationSuffix: "group",
+			Action:          "Create",
+			ItemType:        "Group",
+		},
+
 		Fields: map[string]*framework.FieldSchema{
 			"name": {
 				Type: framework.TypeString,
@@ -38,6 +52,9 @@ func pathGroups(b *backend) *framework.Path {
 			"policies": {
 				Type:        framework.TypeCommaStringSlice,
 				Description: "Comma-separated list of policies associated to the group.",
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "A list of policies associated to the group.",
+				},
 			},
 		},
 
@@ -49,10 +66,6 @@ func pathGroups(b *backend) *framework.Path {
 
 		HelpSynopsis:    pathGroupHelpSyn,
 		HelpDescription: pathGroupHelpDesc,
-
-		DisplayAttrs: &framework.DisplayAttributes{
-			Action:   "Create",
-			ItemType: "Group",
-		},
 	}
 }
diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go
index 67303911e5a1..a1c28dea02b8 100644
--- a/builtin/credential/ldap/path_login.go
+++ b/builtin/credential/ldap/path_login.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
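Note: two operational caveats are visible in the rotate-root handler above: the dialed LDAP connection is never explicitly closed, and if the LDAP `Modify` succeeds but the storage write fails, the directory already has the new password while Vault still has the old one (the in-code comment acknowledges this). With that in mind, a typical rollout pins a password policy first and then rotates, roughly like this (a sketch using the Go API client; the policy name is hypothetical):

    // Point the mount at a password policy, then rotate the bind password.
    _, err := client.Logical().Write("auth/ldap/config", map[string]interface{}{
        "password_policy": "ldap-root", // hypothetical policy name
    })
    if err != nil {
        log.Fatal(err)
    }
    // Rotation takes no arguments; the new password comes from the policy,
    // or from base62.Random(defaultPasswordLength) when no policy is set.
    if _, err := client.Logical().Write("auth/ldap/config/rotate-root", nil); err != nil {
        log.Fatal(err)
    }
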
+// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( @@ -13,6 +16,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -74,17 +83,8 @@ func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framew password := d.Get("password").(string) effectiveUsername, policies, resp, groupNames, err := b.Login(ctx, req, username, password, cfg.UsernameAsAlias) - // Handle an internal error - if err != nil { - return nil, err - } - if resp != nil { - // Handle a logical error - if resp.IsError() { - return resp, nil - } - } else { - resp = &logical.Response{} + if err != nil || (resp != nil && resp.IsError()) { + return resp, err } auth := &logical.Auth{ diff --git a/builtin/credential/ldap/path_users.go b/builtin/credential/ldap/path_users.go index a4e18d30eb6d..55326f640862 100644 --- a/builtin/credential/ldap/path_users.go +++ b/builtin/credential/ldap/path_users.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( @@ -14,22 +17,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -39,11 +53,17 @@ func pathUsers(b *backend) *framework.Path { "groups": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of additional groups associated with the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of additional groups associated with the user.", + }, }, "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated with the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated with the user.", + }, }, }, @@ -55,10 +75,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go index d7ac1d8cbcf4..96507f7879c1 100644 --- a/builtin/credential/okta/backend.go +++ b/builtin/credential/okta/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -15,8 +18,9 @@ import ( ) const ( - mfaPushMethod = "push" - mfaTOTPMethod = "token:software:totp" + operationPrefixOkta = "okta" + mfaPushMethod = "push" + mfaTOTPMethod = "token:software:totp" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { @@ -267,10 +271,12 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username, pas return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil, nil } + timer := time.NewTimer(1 * time.Second) select { - case <-time.After(1 * time.Second): + case <-timer.C: // Continue case <-ctx.Done(): + timer.Stop() return nil, logical.ErrorResponse("exiting pending mfa challenge"), nil, nil } case "REJECTED": diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go index 749b511eb0f9..b347524656a8 100644 --- a/builtin/credential/okta/backend_test.go +++ b/builtin/credential/okta/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package okta import ( diff --git a/builtin/credential/okta/cli.go b/builtin/credential/okta/cli.go index f6e3d13b73c2..faa7f86f2faa 100644 --- a/builtin/credential/okta/cli.go +++ b/builtin/credential/okta/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -61,10 +64,12 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro go func() { for { + timer := time.NewTimer(time.Second) select { case <-doneCh: + timer.Stop() return - case <-time.After(time.Second): + case <-timer.C: } resp, _ := c.Logical().Read(fmt.Sprintf("auth/%s/verify/%s", mount, nonce)) diff --git a/builtin/credential/okta/cmd/okta/main.go b/builtin/credential/okta/cmd/okta/main.go index e2452ba4b8ad..2b6c3e9496d8 100644 --- a/builtin/credential/okta/cmd/okta/main.go +++ b/builtin/credential/okta/cmd/okta/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: okta.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go index 7fc93efb87c7..6bdb241b2d2e 100644 --- a/builtin/credential/okta/path_config.go +++ b/builtin/credential/okta/path_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
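Note: the two okta timer changes above (backend.go and cli.go) fix the same Go pitfall rather than change behavior: `time.After` starts a timer that cannot be stopped, so every loop iteration that exits through the other select case leaves a live timer behind until it fires, and in a polling loop that adds up. The general shape of the fix, in isolation:

    // Before (leaks one timer per early exit):
    //   select {
    //   case <-doneCh:
    //       return
    //   case <-time.After(time.Second):
    //   }
    //
    // After: an explicit timer that the early-exit path stops.
    timer := time.NewTimer(time.Second)
    select {
    case <-doneCh:
        timer.Stop()
        return
    case <-timer.C:
        // the timer has already fired; nothing left to stop
    }
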
+// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -24,6 +27,12 @@ const ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: `config`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + Action: "Configure", + }, + Fields: map[string]*framework.FieldSchema{ "organization": { Type: framework.TypeString, @@ -80,18 +89,30 @@ func pathConfig(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.CreateOperation: b.pathConfigWrite, - logical.UpdateOperation: b.pathConfigWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, }, ExistenceCheck: b.pathConfigExistenceCheck, HelpSynopsis: pathConfigHelp, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/okta/path_groups.go b/builtin/credential/okta/path_groups.go index f9ff0225ac98..9ae9826d309f 100644 --- a/builtin/credential/okta/path_groups.go +++ b/builtin/credential/okta/path_groups.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -13,22 +16,33 @@ func pathGroupsList(b *backend) *framework.Path { return &framework.Path{ Pattern: "groups/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "groups", + Navigation: true, + ItemType: "Group", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathGroupList, }, HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "Group", - }, } } func pathGroups(b *backend) *framework.Path { return &framework.Path{ Pattern: `groups/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "group", + Action: "Create", + ItemType: "Group", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -38,6 +52,9 @@ func pathGroups(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the group.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the group.", + }, }, }, @@ -49,10 +66,6 @@ func pathGroups(b *backend) *framework.Path { HelpSynopsis: pathGroupHelpSyn, HelpDescription: pathGroupHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "Group", - }, } } diff --git a/builtin/credential/okta/path_groups_test.go b/builtin/credential/okta/path_groups_test.go index 84253f379fd8..7ca8a9415101 100644 --- a/builtin/credential/okta/path_groups_test.go +++ b/builtin/credential/okta/path_groups_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package okta import ( diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go index 0f8967576bb7..5b86545d20ed 100644 --- a/builtin/credential/okta/path_login.go +++ b/builtin/credential/okta/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -20,6 +23,12 @@ const ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: `login/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -189,6 +198,10 @@ func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *f func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: `verify/(?P.+)`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationVerb: "verify", + }, Fields: map[string]*framework.FieldSchema{ "nonce": { Type: framework.TypeString, diff --git a/builtin/credential/okta/path_users.go b/builtin/credential/okta/path_users.go index bd5fdc0ebbe0..d66a5ed463e3 100644 --- a/builtin/credential/okta/path_users.go +++ b/builtin/credential/okta/path_users.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package okta import ( @@ -11,22 +14,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -52,10 +66,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/radius/backend.go b/builtin/credential/radius/backend.go index 03da06efd9bb..40e680ebcc27 100644 --- a/builtin/credential/radius/backend.go +++ b/builtin/credential/radius/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package radius import ( @@ -7,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixRadius = "radius" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go index 17cf54367c25..3c885008422a 100644 --- a/builtin/credential/radius/backend_test.go +++ b/builtin/credential/radius/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package radius import ( @@ -5,13 +8,14 @@ import ( "fmt" "os" "reflect" + "runtime" "strconv" "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/logical" ) @@ -27,6 +31,10 @@ const ( ) func prepareRadiusTestContainer(t *testing.T) (func(), string, int) { + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv(envRadiusRadiusHost) != "" { port, _ := strconv.Atoi(os.Getenv(envRadiusPort)) return func() {}, os.Getenv(envRadiusRadiusHost), port diff --git a/builtin/credential/radius/cmd/radius/main.go b/builtin/credential/radius/cmd/radius/main.go index 9ab5a636948c..9adc5bfc78d2 100644 --- a/builtin/credential/radius/cmd/radius/main.go +++ b/builtin/credential/radius/cmd/radius/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: radius.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go index 33d4d0d99175..1ed33ede6c10 100644 --- a/builtin/credential/radius/path_config.go +++ b/builtin/credential/radius/path_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package radius import ( @@ -12,6 +15,12 @@ import ( func pathConfig(b *backend) *framework.Path { p := &framework.Path{ Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + Action: "Configure", + }, + Fields: map[string]*framework.FieldSchema{ "host": { Type: framework.TypeString, @@ -35,9 +44,10 @@ func pathConfig(b *backend) *framework.Path { "unregistered_user_policies": { Type: framework.TypeString, Default: "", - Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregisted user (default: empty)", + Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Policies for unregistered users", + Name: "Policies for unregistered users", + Description: "List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", }, }, "dial_timeout": { @@ -77,17 +87,29 @@ func pathConfig(b *backend) *framework.Path { ExistenceCheck: b.configExistenceCheck, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRead, - logical.CreateOperation: b.pathConfigCreateUpdate, - logical.UpdateOperation: b.pathConfigCreateUpdate, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, }, HelpSynopsis: pathConfigHelpSyn, HelpDescription: pathConfigHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Configure", - }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go index c8a1ab8f43ed..995b6a8177a0 100644 --- a/builtin/credential/radius/path_login.go +++ b/builtin/credential/radius/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package radius import ( @@ -20,6 +23,13 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login" + framework.OptionalParamRegex("urlusername"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationVerb: "login", + OperationSuffix: "|with-username", + }, + Fields: map[string]*framework.FieldSchema{ "urlusername": { Type: framework.TypeString, diff --git a/builtin/credential/radius/path_users.go b/builtin/credential/radius/path_users.go index de7b5d4690e6..21ebd262f0d7 100644 --- a/builtin/credential/radius/path_users.go +++ b/builtin/credential/radius/path_users.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
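Note: the `unregistered_user_policies` hunk above (and the users/groups paths elsewhere in this diff) shows a small convention worth naming: the schema's `Description` keeps the wire-level wording, "Comma-separated list ...", because that is what the HTTP API accepts, while `DisplayAttrs.Description` carries the UI-facing wording, since the UI edits the value as a list. In isolation, as a sketch:

    // API docs describe the accepted string form; DisplayAttrs describes
    // the rendered form the UI presents for the same field.
    field := &framework.FieldSchema{
        Type:        framework.TypeCommaStringSlice,
        Description: "Comma-separated list of policies associated to the user.",
        DisplayAttrs: &framework.DisplayAttributes{
            Description: "A list of policies associated to the user.",
        },
    }
    _ = field
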
+// SPDX-License-Identifier: BUSL-1.1 + package radius import ( @@ -14,22 +17,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { return &framework.Path{ Pattern: `users/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -39,6 +53,9 @@ func pathUsers(b *backend) *framework.Path { "policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies associated to the user.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the user.", + }, }, }, @@ -53,10 +70,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } } diff --git a/builtin/credential/token/cli.go b/builtin/credential/token/cli.go index 64a88169cbe7..f3ecd97058cb 100644 --- a/builtin/credential/token/cli.go +++ b/builtin/credential/token/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/builtin/credential/userpass/backend.go b/builtin/credential/userpass/backend.go index aa45dc3766db..e361f08ca48b 100644 --- a/builtin/credential/userpass/backend.go +++ b/builtin/credential/userpass/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -7,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixUserpass = "userpass" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go index 83f79db9a4e1..7280667c8a7b 100644 --- a/builtin/credential/userpass/backend_test.go +++ b/builtin/credential/userpass/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( diff --git a/builtin/credential/userpass/cli.go b/builtin/credential/userpass/cli.go index 092d0927ef1f..ab1f138e7806 100644 --- a/builtin/credential/userpass/cli.go +++ b/builtin/credential/userpass/cli.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( diff --git a/builtin/credential/userpass/cmd/userpass/main.go b/builtin/credential/userpass/cmd/userpass/main.go index 5ea1894d219e..d8dfed7f5e10 100644 --- a/builtin/credential/userpass/cmd/userpass/main.go +++ b/builtin/credential/userpass/cmd/userpass/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -16,9 +19,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: userpass.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go index 8e3f42ea4f42..b53953ee837a 100644 --- a/builtin/credential/userpass/path_login.go +++ b/builtin/credential/userpass/path_login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -16,6 +19,12 @@ import ( func pathLogin(b *backend) *framework.Path { return &framework.Path{ Pattern: "login/" + framework.GenericNameRegex("username"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -39,7 +48,7 @@ func pathLogin(b *backend) *framework.Path { } func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - username := strings.ToLower(d.Get("username").(string)) + username := d.Get("username").(string) if username == "" { return nil, fmt.Errorf("missing username") } diff --git a/builtin/credential/userpass/path_user_password.go b/builtin/credential/userpass/path_user_password.go index 500749730478..48b127507901 100644 --- a/builtin/credential/userpass/path_user_password.go +++ b/builtin/credential/userpass/path_user_password.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -13,6 +16,13 @@ import ( func pathUserPassword(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/password$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "reset", + OperationSuffix: "password", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go index 3c017253869e..1dd9b9675de4 100644 --- a/builtin/credential/userpass/path_user_policies.go +++ b/builtin/credential/userpass/path_user_policies.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -13,6 +16,13 @@ import ( func pathUserPolicies(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "update", + OperationSuffix: "policies", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -26,6 +36,9 @@ func pathUserPolicies(b *backend) *framework.Path { "token_policies": { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies that will apply to the generated token for this user.", + }, }, }, diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go index 7ec22c5fbd45..66687f6b02a1 100644 --- a/builtin/credential/userpass/path_users.go +++ b/builtin/credential/userpass/path_users.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -16,22 +19,33 @@ func pathUsersList(b *backend) *framework.Path { return &framework.Path{ Pattern: "users/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathUserList, }, HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Navigation: true, - ItemType: "User", - }, } } func pathUsers(b *backend) *framework.Path { p := &framework.Path{ Pattern: "users/" + framework.GenericNameRegex("username"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + Fields: map[string]*framework.FieldSchema{ "username": { Type: framework.TypeString, @@ -82,10 +96,6 @@ func pathUsers(b *backend) *framework.Path { HelpSynopsis: pathUserHelpSyn, HelpDescription: pathUserHelpDesc, - DisplayAttrs: &framework.DisplayAttributes{ - Action: "Create", - ItemType: "User", - }, } tokenutil.AddTokenFields(p.Fields) diff --git a/builtin/credential/userpass/stepwise_test.go b/builtin/credential/userpass/stepwise_test.go index 90820b883d27..241d7707b73f 100644 --- a/builtin/credential/userpass/stepwise_test.go +++ b/builtin/credential/userpass/stepwise_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package userpass import ( @@ -16,7 +19,7 @@ func TestAccBackend_stepwise_UserCrud(t *testing.T) { customPluginName := "my-userpass" envOptions := &stepwise.MountOptions{ RegistryName: customPluginName, - PluginType: stepwise.PluginTypeCredential, + PluginType: api.PluginTypeCredential, PluginName: "userpass", MountPathPrefix: customPluginName, } diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go index 9c5abe1e82e8..b33fb1b4d693 100644 --- a/builtin/logical/aws/backend.go +++ b/builtin/logical/aws/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -10,23 +13,27 @@ import ( "github.com/aws/aws-sdk-go/service/sts/stsiface" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" ) const ( rootConfigPath = "config/root" minAwsUserRollbackAge = 5 * time.Minute + operationPrefixAWS = "aws" + operationPrefixAWSASD = "aws-config" ) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() + b := Backend(conf) if err := b.Setup(ctx, conf); err != nil { return nil, err } return b, nil } -func Backend() *backend { +func Backend(_ *logical.BackendConfig) *backend { var b backend + b.credRotationQueue = queue.New() b.Backend = &framework.Backend{ Help: strings.TrimSpace(backendHelp), @@ -35,7 +42,8 @@ func Backend() *backend { framework.WALPrefix, }, SealWrapStorage: []string{ - "config/root", + rootConfigPath, + pathStaticCreds + "/", }, }, @@ -45,6 +53,8 @@ func Backend() *backend { pathConfigLease(&b), pathRoles(&b), pathListRoles(&b), + pathStaticRoles(&b), + pathStaticCredentials(&b), pathUser(&b), }, @@ -55,7 +65,13 @@ func Backend() *backend { Invalidate: b.invalidate, WALRollback: b.walRollback, WALRollbackMinAge: minAwsUserRollbackAge, - BackendType: logical.TypeLogical, + PeriodicFunc: func(ctx context.Context, req *logical.Request) error { + if b.WriteSafeReplicationState() { + return b.rotateExpiredStaticCreds(ctx, req) + } + return nil + }, + BackendType: logical.TypeLogical, } return &b @@ -74,6 +90,10 @@ type backend struct { // to enable mocking with AWS iface for tests iamClient iamiface.IAMAPI stsClient stsiface.STSAPI + + // the age of a static role's credential is tracked by a priority queue and handled + // by the PeriodicFunc + credRotationQueue *queue.PriorityQueue } const backendHelp = ` @@ -121,7 +141,7 @@ func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IA return b.iamClient, nil } - iamClient, err := nonCachedClientIAM(ctx, s, b.Logger()) + iamClient, err := b.nonCachedClientIAM(ctx, s, b.Logger()) if err != nil { return nil, err } @@ -148,7 +168,7 @@ func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.ST return b.stsClient, nil } - stsClient, err := nonCachedClientSTS(ctx, s, b.Logger()) + stsClient, err := b.nonCachedClientSTS(ctx, s, b.Logger()) if err != nil { return nil, err } diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go index 5831dfea772c..b5376f64687e 100644 --- a/builtin/logical/aws/backend_test.go +++ b/builtin/logical/aws/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
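Note: on the new AWS static-credential plumbing above, `sdk/queue.PriorityQueue` is a min-heap ordered by `Item.Priority`, so storing the next rotation time as a Unix timestamp makes "what is due?" a question about the head of the queue, and the `PeriodicFunc` only runs rotation when `WriteSafeReplicationState` says this node may write (not a performance secondary or standby). A sketch of how the pieces could fit together (the role value and `rotationPeriod` are assumptions; the real popping logic lives in `rotateExpiredStaticCreds`):

    // Enqueue a static role keyed by name, due one rotation period from now.
    err := b.credRotationQueue.Push(&queue.Item{
        Key:      roleName,
        Value:    role, // whatever the backend stores per static role
        Priority: time.Now().Add(rotationPeriod).Unix(),
    })

    // In the PeriodicFunc: pop the soonest item; if it is not due yet,
    // push it back and stop, since everything behind it is due even later.
    item, err := b.credRotationQueue.Pop()
    if err == nil && item.Priority > time.Now().Unix() {
        _ = b.credRotationQueue.Push(item)
    }
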
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -16,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" @@ -36,7 +40,7 @@ type mockIAMClient struct { iamiface.IAMAPI } -func (m *mockIAMClient) CreateUser(input *iam.CreateUserInput) (*iam.CreateUserOutput, error) { +func (m *mockIAMClient) CreateUserWithContext(_ aws.Context, input *iam.CreateUserInput, _ ...request.Option) (*iam.CreateUserOutput, error) { return nil, awserr.New("Throttling", "", nil) } @@ -144,7 +148,7 @@ func TestBackend_throttled(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -293,7 +297,6 @@ func createRole(t *testing.T, roleName, awsAccountID string, policyARNs []string RoleName: aws.String(roleName), // Required } _, err = svc.AttachRolePolicy(attachment) - if err != nil { t.Fatalf("AWS AttachRolePolicy failed: %v", err) } @@ -461,7 +464,6 @@ func deleteTestRole(roleName string) error { log.Printf("[INFO] AWS DeleteRole: %s", roleName) _, err = svc.DeleteRole(params) - if err != nil { log.Printf("[WARN] AWS DeleteRole failed: %v", err) return err @@ -663,7 +665,7 @@ func testAccStepRead(t *testing.T, path, name string, credentialTests []credenti var d struct { AccessKey string `mapstructure:"access_key"` SecretKey string `mapstructure:"secret_key"` - STSToken string `mapstructure:"security_token"` + STSToken string `mapstructure:"session_token"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err @@ -680,26 +682,28 @@ func testAccStepRead(t *testing.T, path, name string, credentialTests []credenti } } -func testAccStepReadSTSResponse(name string, maximumTTL uint64) logicaltest.TestStep { +func testAccStepReadWithMFA(t *testing.T, path, name, mfaCode string, credentialTests []credentialTestFunc) logicaltest.TestStep { + step := testAccStepRead(t, path, name, credentialTests) + step.Data = map[string]interface{}{ + "mfa_code": mfaCode, + } + + return step +} + +func testAccStepReadSTSResponse(name string, maximumTTL time.Duration) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.ReadOperation, Path: "creds/" + name, Check: func(resp *logical.Response) error { - if resp.Secret != nil { - return fmt.Errorf("bad: STS tokens should return a nil secret, received: %+v", resp.Secret) + if resp.Secret == nil { + return fmt.Errorf("bad: nil Secret returned") } - - if ttl, exists := resp.Data["ttl"]; exists { - ttlVal := ttl.(uint64) - - if ttlVal > maximumTTL { - return fmt.Errorf("bad: ttl of %d greater than maximum of %d", ttl, maximumTTL) - } - - return nil + ttl := resp.Secret.TTL + if ttl > maximumTTL { + return fmt.Errorf("bad: ttl of %d greater than maximum of %d", ttl/time.Second, maximumTTL/time.Second) } - - return fmt.Errorf("response data missing ttl, received: %+v", resp.Data) + return nil }, } } @@ -904,6 +908,7 @@ func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest. 
"permissions_boundary_arn": "", "iam_groups": []string(nil), "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) @@ -1027,6 +1032,7 @@ func TestAcceptanceBackend_iamUserManagedInlinePoliciesGroups(t *testing.T) { "permissions_boundary_arn": "", "iam_groups": []string{groupName}, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } logicaltest.Test(t, logicaltest.TestCase{ @@ -1071,6 +1077,7 @@ func TestAcceptanceBackend_iamUserGroups(t *testing.T) { "permissions_boundary_arn": "", "iam_groups": []string{group1Name, group2Name}, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } logicaltest.Test(t, logicaltest.TestCase{ @@ -1321,6 +1328,86 @@ func TestAcceptanceBackend_FederationTokenWithGroups(t *testing.T) { }) } +func TestAcceptanceBackend_SessionToken(t *testing.T) { + t.Parallel() + userName := generateUniqueUserName(t.Name()) + accessKey := &awsAccessKey{} + + roleData := map[string]interface{}{ + "credential_type": sessionTokenCred, + } + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: true, + PreCheck: func() { + testAccPreCheck(t) + createUser(t, userName, accessKey) + // Sleep sometime because AWS is eventually consistent + log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...") + time.Sleep(10 * time.Second) + }, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfigWithCreds(t, accessKey), + testAccStepWriteRole(t, "test", roleData), + testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest}), + testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest}), + }, + Teardown: func() error { + return deleteTestUser(accessKey, userName) + }, + }) +} + +// Running this test requires a pre-made IAM user that has the necessary access permissions set +// and a set MFA device. This device serial number along with the other associated values must +// be set to the environment variables in the function below. +// For this reason, the test is currently a manually run-only acceptance test. 
+func TestAcceptanceBackend_SessionTokenWithMFA(t *testing.T) { + t.Parallel() + + serial, found := os.LookupEnv("AWS_TEST_MFA_SERIAL_NUMBER") + if !found { + t.Skipf("AWS_TEST_MFA_SERIAL_NUMBER not set, skipping") + } + code, found := os.LookupEnv("AWS_TEST_MFA_CODE") + if !found { + t.Skipf("AWS_TEST_MFA_CODE not set, skipping") + } + accessKeyID, found := os.LookupEnv("AWS_TEST_MFA_USER_ACCESS_KEY") + if !found { + t.Skipf("AWS_TEST_MFA_USER_ACCESS_KEY not set, skipping") + } + secretKey, found := os.LookupEnv("AWS_TEST_MFA_USER_SECRET_KEY") + if !found { + t.Skipf("AWS_TEST_MFA_USER_SECRET_KEY not set, skipping") + } + + accessKey := &awsAccessKey{} + accessKey.AccessKeyID = accessKeyID + accessKey.SecretAccessKey = secretKey + + roleData := map[string]interface{}{ + "credential_type": sessionTokenCred, + "mfa_serial_number": serial, + } + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: true, + PreCheck: func() { + testAccPreCheck(t) + // Sleep for a while because AWS is eventually consistent + log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...") + time.Sleep(10 * time.Second) + }, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfigWithCreds(t, accessKey), + testAccStepWriteRole(t, "test", roleData), + testAccStepReadWithMFA(t, "sts", "test", code, []credentialTestFunc{listDynamoTablesTest}), + testAccStepReadWithMFA(t, "creds", "test", code, []credentialTestFunc{listDynamoTablesTest}), + }, + }) +} + func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) { t.Parallel() roleName := generateUniqueRoleName(t.Name()) @@ -1348,7 +1435,7 @@ func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) { Steps: []logicaltest.TestStep{ testAccStepConfig(t), testAccStepWriteRole(t, "test", roleData), - testAccStepReadSTSResponse("test", uint64(minAwsAssumeRoleDuration)), // allow a little slack + testAccStepReadSTSResponse("test", time.Duration(minAwsAssumeRoleDuration)*time.Second), // allow a little slack }, Teardown: func() error { return deleteTestRole(roleName) @@ -1395,6 +1482,7 @@ func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicalte "permissions_boundary_arn": "", "iam_groups": []string(nil), "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) } @@ -1465,6 +1553,7 @@ func testAccStepReadIamGroups(t *testing.T, name string, groups []string) logica "permissions_boundary_arn": "", "iam_groups": groups, "iam_tags": map[string]string(nil), + "mfa_serial_number": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) } @@ -1524,6 +1613,7 @@ func testAccStepReadIamTags(t *testing.T, name string, tags map[string]string) l "permissions_boundary_arn": "", "iam_groups": []string(nil), "iam_tags": tags, + "mfa_serial_number": "", } if !reflect.DeepEqual(resp.Data, expected) { return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) } diff --git a/builtin/logical/aws/client.go b/builtin/logical/aws/client.go index 80d839ed55ed..c65b2469eaf2 100644 --- a/builtin/logical/aws/client.go +++ b/builtin/logical/aws/client.go @@ -1,22 +1,31 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( "context" "fmt" "os" + "strconv" + "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/awsutil" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) // NOTE: The caller is required to ensure that b.clientMutex is at least read locked -func getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) { +func (b *backend) getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) { credsConfig := &awsutil.CredentialsConfig{} var endpoint string var maxRetries int = aws.UseServiceDefaultRetries @@ -41,6 +50,26 @@ func getRootConfig(ctx context.Context, s logical.Storage, clientType string, lo case clientType == "sts" && config.STSEndpoint != "": endpoint = *aws.String(config.STSEndpoint) } + + if config.IdentityTokenAudience != "" { + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get namespace from context: %w", err) + } + + fetcher := &PluginIdentityTokenFetcher{ + sys: b.System(), + logger: b.Logger(), + ns: ns, + audience: config.IdentityTokenAudience, + ttl: config.IdentityTokenTTL, + } + + sessionSuffix := strconv.FormatInt(time.Now().UnixNano(), 10) + credsConfig.RoleSessionName = fmt.Sprintf("vault-aws-secrets-%s", sessionSuffix) + credsConfig.WebIdentityTokenFetcher = fetcher + credsConfig.RoleARN = config.RoleARN + } } if credsConfig.Region == "" { @@ -71,8 +100,8 @@ func getRootConfig(ctx context.Context, s logical.Storage, clientType string, lo }, nil } -func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) { - awsConfig, err := getRootConfig(ctx, s, "iam", logger) +func (b *backend) nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) { + awsConfig, err := b.getRootConfig(ctx, s, "iam", logger) if err != nil { return nil, err } @@ -87,8 +116,8 @@ func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Log return client, nil } -func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) { - awsConfig, err := getRootConfig(ctx, s, "sts", logger) +func (b *backend) nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) { + awsConfig, err := b.getRootConfig(ctx, s, "sts", logger) if err != nil { return nil, err } @@ -102,3 +131,36 @@ func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Log } return client, nil } + +// PluginIdentityTokenFetcher fetches plugin identity tokens from Vault. It is provided +// to the AWS SDK client to keep assumed role credentials refreshed through expiration. +// When the client's STS credentials expire, it will use this interface to fetch a new +// plugin identity token and exchange it for new STS credentials. 
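+//
+// For orientation, the v1 AWS SDK consumes a stscreds.TokenFetcher roughly as
+// sketched below; the actual wiring in this backend goes through
+// awsutil.CredentialsConfig in getRootConfig above, so sess, roleARN, and
+// sessionName here are illustrative only:
+//
+//	provider := stscreds.NewWebIdentityRoleProviderWithOptions(
+//		sts.New(sess), roleARN, sessionName, fetcher)
+//	cfg := aws.NewConfig().WithCredentials(credentials.NewCredentials(provider))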
+type PluginIdentityTokenFetcher struct { + sys logical.SystemView + logger hclog.Logger + audience string + ns *namespace.Namespace + ttl time.Duration +} + +var _ stscreds.TokenFetcher = (*PluginIdentityTokenFetcher)(nil) + +func (f PluginIdentityTokenFetcher) FetchToken(ctx aws.Context) ([]byte, error) { + nsCtx := namespace.ContextWithNamespace(ctx, f.ns) + resp, err := f.sys.GenerateIdentityToken(nsCtx, &pluginutil.IdentityTokenRequest{ + Audience: f.audience, + TTL: f.ttl, + }) + if err != nil { + return nil, fmt.Errorf("failed to generate plugin identity token: %w", err) + } + f.logger.Info("fetched new plugin identity token") + + if resp.TTL < f.ttl { + f.logger.Debug("generated plugin identity token has shorter TTL than requested", + "requested", f.ttl, "actual", resp.TTL) + } + + return []byte(resp.Token.Token()), nil +} diff --git a/builtin/logical/aws/cmd/aws/main.go b/builtin/logical/aws/cmd/aws/main.go index 74f7d97a7b86..62c7efe6cf3a 100644 --- a/builtin/logical/aws/cmd/aws/main.go +++ b/builtin/logical/aws/cmd/aws/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: aws.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go index caf79e33d310..9735a2af81a6 100644 --- a/builtin/logical/aws/iam_policies.go +++ b/builtin/logical/aws/iam_policies.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -70,7 +73,7 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr for _, g := range iamGroups { // Collect managed policy ARNs from the IAM Group - agp, err = iamClient.ListAttachedGroupPolicies(&iam.ListAttachedGroupPoliciesInput{ + agp, err = iamClient.ListAttachedGroupPoliciesWithContext(ctx, &iam.ListAttachedGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { @@ -81,14 +84,14 @@ func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGr } // Collect inline policy names from the IAM Group - inlinePolicies, err = iamClient.ListGroupPolicies(&iam.ListGroupPoliciesInput{ + inlinePolicies, err = iamClient.ListGroupPoliciesWithContext(ctx, &iam.ListGroupPoliciesInput{ GroupName: aws.String(g), }) if err != nil { return nil, nil, err } for _, iP := range inlinePolicies.PolicyNames { - inlinePolicyDoc, err = iamClient.GetGroupPolicy(&iam.GetGroupPolicyInput{ + inlinePolicyDoc, err = iamClient.GetGroupPolicyWithContext(ctx, &iam.GetGroupPolicyInput{ GroupName: &g, PolicyName: iP, }) diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go index ddba67f6b8bd..15d0ab801649 100644 --- a/builtin/logical/aws/iam_policies_test.go +++ b/builtin/logical/aws/iam_policies_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -5,6 +8,7 @@ import ( "testing" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" "github.com/hashicorp/vault/sdk/logical" @@ -26,15 +30,15 @@ type mockGroupIAMClient struct { GetGroupPolicyResp iam.GetGroupPolicyOutput } -func (m mockGroupIAMClient) ListAttachedGroupPolicies(in *iam.ListAttachedGroupPoliciesInput) (*iam.ListAttachedGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListAttachedGroupPoliciesWithContext(_ aws.Context, in *iam.ListAttachedGroupPoliciesInput, _ ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) { return &m.ListAttachedGroupPoliciesResp, nil } -func (m mockGroupIAMClient) ListGroupPolicies(in *iam.ListGroupPoliciesInput) (*iam.ListGroupPoliciesOutput, error) { +func (m mockGroupIAMClient) ListGroupPoliciesWithContext(_ aws.Context, in *iam.ListGroupPoliciesInput, _ ...request.Option) (*iam.ListGroupPoliciesOutput, error) { return &m.ListGroupPoliciesResp, nil } -func (m mockGroupIAMClient) GetGroupPolicy(in *iam.GetGroupPolicyInput) (*iam.GetGroupPolicyOutput, error) { +func (m mockGroupIAMClient) GetGroupPolicyWithContext(_ aws.Context, in *iam.GetGroupPolicyInput, _ ...request.Option) (*iam.GetGroupPolicyOutput, error) { return &m.GetGroupPolicyResp, nil } @@ -137,7 +141,7 @@ func Test_getGroupPolicies(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_config_lease.go b/builtin/logical/aws/path_config_lease.go index b953b2305e3c..0e2ad43afe80 100644 --- a/builtin/logical/aws/path_config_lease.go +++ b/builtin/logical/aws/path_config_lease.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -5,6 +8,7 @@ import ( "fmt" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -12,6 +16,11 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "lease": { Type: framework.TypeString, @@ -24,9 +33,20 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, @@ -63,12 +83,12 @@ func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *f return logical.ErrorResponse("'lease_max' is a required parameter"), nil } - lease, err := time.ParseDuration(leaseRaw) + lease, err := parseutil.ParseDurationSecond(leaseRaw) if err != nil { return logical.ErrorResponse(fmt.Sprintf( "Invalid lease: %s", err)), nil } - leaseMax, err := time.ParseDuration(leaseMaxRaw) + leaseMax, err := parseutil.ParseDurationSecond(leaseMaxRaw) if err != nil { return logical.ErrorResponse(fmt.Sprintf( "Invalid lease_max: %s", err)), nil diff --git a/builtin/logical/aws/path_config_root.go b/builtin/logical/aws/path_config_root.go index 1262980fa806..d7a16524a5d3 100644 --- a/builtin/logical/aws/path_config_root.go +++ b/builtin/logical/aws/path_config_root.go @@ -1,10 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( "context" + "errors" "github.com/aws/aws-sdk-go/aws" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -12,8 +19,13 @@ import ( const defaultUserNameTemplate = `{{ if (eq .Type "STS") }}{{ printf "vault-%s-%s" (unix_time) (random 20) | truncate 32 }}{{ else }}{{ printf "vault-%s-%s-%s" (printf "%s-%s" (.DisplayName) (.PolicyName) | truncate 42) (unix_time) (random 20) | truncate 64 }}{{ end }}` func pathConfigRoot(b *backend) *framework.Path { - return &framework.Path{ + p := &framework.Path{ Pattern: "config/root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + Fields: map[string]*framework.FieldSchema{ "access_key": { Type: framework.TypeString, @@ -46,16 +58,34 @@ func pathConfigRoot(b *backend) *framework.Path { Type: framework.TypeString, Description: "Template to generate custom IAM usernames", }, + "role_arn": { + Type: framework.TypeString, + Description: "Role ARN to assume for plugin identity token federation", + }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigRootRead, - logical.UpdateOperation: b.pathConfigRootWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRootRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "root-iam-credentials-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigRootWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "root-iam-credentials", + }, + }, }, HelpSynopsis: pathConfigRootHelpSyn, HelpDescription: pathConfigRootHelpDesc, } + pluginidentityutil.AddPluginIdentityTokenFields(p.Fields) + + return p } func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -83,7 +113,10 @@ func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, "sts_endpoint": config.STSEndpoint, "max_retries": config.MaxRetries, "username_template": config.UsernameTemplate, + "role_arn": config.RoleARN, } + + config.PopulatePluginIdentityTokenData(configData) return &logical.Response{ Data: configData, }, nil @@ -94,6 +127,7 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, iamendpoint := data.Get("iam_endpoint").(string) stsendpoint := data.Get("sts_endpoint").(string) maxretries := data.Get("max_retries").(int) + roleARN := data.Get("role_arn").(string) usernameTemplate := data.Get("username_template").(string) if usernameTemplate == "" { usernameTemplate = defaultUserNameTemplate @@ -102,7 +136,7 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, b.clientMutex.Lock() defer b.clientMutex.Unlock() - entry, err := logical.StorageEntryJSON("config/root", rootConfig{ + rc := rootConfig{ AccessKey: data.Get("access_key").(string), SecretKey: data.Get("secret_key").(string), IAMEndpoint: iamendpoint, @@ -110,7 +144,33 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, Region: region, MaxRetries: maxretries, UsernameTemplate: usernameTemplate, - }) + RoleARN: roleARN, + } + if err := rc.ParsePluginIdentityTokenFields(data); err != nil { + return 
logical.ErrorResponse(err.Error()), nil + } + + if rc.IdentityTokenAudience != "" && rc.AccessKey != "" { + return logical.ErrorResponse("only one of 'access_key' or 'identity_token_audience' can be set"), nil + } + + if rc.IdentityTokenAudience != "" && rc.RoleARN == "" { + return logical.ErrorResponse("missing required 'role_arn' when 'identity_token_audience' is set"), nil + } + + if rc.IdentityTokenAudience != "" { + _, err := b.System().GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{ + Audience: rc.IdentityTokenAudience, + }) + if err != nil { + if errors.Is(err, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported) { + return logical.ErrorResponse(err.Error()), nil + } + return nil, err + } + } + + entry, err := logical.StorageEntryJSON("config/root", rc) if err != nil { return nil, err } @@ -128,6 +188,8 @@ func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, } type rootConfig struct { + pluginidentityutil.PluginIdentityTokenParams + AccessKey string `json:"access_key"` SecretKey string `json:"secret_key"` IAMEndpoint string `json:"iam_endpoint"` @@ -135,6 +197,7 @@ type rootConfig struct { Region string `json:"region"` MaxRetries int `json:"max_retries"` UsernameTemplate string `json:"username_template"` + RoleARN string `json:"role_arn"` } const pathConfigRootHelpSyn = ` diff --git a/builtin/logical/aws/path_config_root_test.go b/builtin/logical/aws/path_config_root_test.go index d90ee6cacb38..783745ac0ed8 100644 --- a/builtin/logical/aws/path_config_root_test.go +++ b/builtin/logical/aws/path_config_root_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -5,26 +8,33 @@ import ( "reflect" "testing" + "github.com/hashicorp/vault/sdk/helper/pluginidentityutil" + "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestBackend_PathConfigRoot(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } configData := map[string]interface{}{ - "access_key": "AKIAEXAMPLE", - "secret_key": "RandomData", - "region": "us-west-2", - "iam_endpoint": "https://iam.amazonaws.com", - "sts_endpoint": "https://sts.us-west-2.amazonaws.com", - "max_retries": 10, - "username_template": defaultUserNameTemplate, + "access_key": "AKIAEXAMPLE", + "secret_key": "RandomData", + "region": "us-west-2", + "iam_endpoint": "https://iam.amazonaws.com", + "sts_endpoint": "https://sts.us-west-2.amazonaws.com", + "max_retries": 10, + "username_template": defaultUserNameTemplate, + "role_arn": "", + "identity_token_audience": "", + "identity_token_ttl": int64(0), } configReq := &logical.Request{ @@ -49,7 +59,47 @@ func TestBackend_PathConfigRoot(t *testing.T) { } delete(configData, "secret_key") + require.Equal(t, configData, resp.Data) if !reflect.DeepEqual(resp.Data, configData) { t.Errorf("bad: expected to read config root as %#v, got %#v instead", configData, resp.Data) } } + +// TestBackend_PathConfigRoot_PluginIdentityToken tests that configuration +// of plugin WIF returns an immediate error. 
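+//
+// For reference, the request this test simulates corresponds roughly to the
+// following CLI call (mount path and values are hypothetical):
+//
+//	vault write aws/config/root \
+//	    identity_token_audience=test-aud identity_token_ttl=10 role_arn=test-role-arn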
+func TestBackend_PathConfigRoot_PluginIdentityToken(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = &testSystemView{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "identity_token_ttl": int64(10), + "identity_token_audience": "test-aud", + "role_arn": "test-role-arn", + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Path: "config/root", + Data: configData, + } + + resp, err := b.HandleRequest(context.Background(), configReq) + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.ErrorContains(t, resp.Error(), pluginidentityutil.ErrPluginWorkloadIdentityUnsupported.Error()) +} + +type testSystemView struct { + logical.StaticSystemView +} + +func (d testSystemView) GenerateIdentityToken(_ context.Context, _ *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + return nil, pluginidentityutil.ErrPluginWorkloadIdentityUnsupported +} diff --git a/builtin/logical/aws/path_config_rotate_root.go b/builtin/logical/aws/path_config_rotate_root.go index 1f7ca3113366..72f9c82e4d0f 100644 --- a/builtin/logical/aws/path_config_rotate_root.go +++ b/builtin/logical/aws/path_config_rotate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -13,6 +16,13 @@ import ( func pathConfigRotateRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/rotate-root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "root-iam-credentials", + OperationVerb: "rotate", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigRotateRootUpdate, @@ -56,7 +66,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R } var getUserInput iam.GetUserInput // empty input means get current user - getUserRes, err := client.GetUser(&getUserInput) + getUserRes, err := client.GetUserWithContext(ctx, &getUserInput) if err != nil { return nil, fmt.Errorf("error calling GetUser: %w", err) } @@ -73,7 +83,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R createAccessKeyInput := iam.CreateAccessKeyInput{ UserName: getUserRes.User.UserName, } - createAccessKeyRes, err := client.CreateAccessKey(&createAccessKeyInput) + createAccessKeyRes, err := client.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) if err != nil { return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) } @@ -104,7 +114,7 @@ func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.R AccessKeyId: aws.String(oldAccessKey), UserName: getUserRes.User.UserName, } - _, err = client.DeleteAccessKey(&deleteAccessKeyInput) + _, err = client.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput) if err != nil { return nil, fmt.Errorf("error deleting old access key: %w", err) } diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go index a7c3dd84a896..abf24a072efa 100644 --- a/builtin/logical/aws/path_roles.go +++ b/builtin/logical/aws/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -24,6 +27,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -36,18 +44,24 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, - Description: "Name of the policy", + Description: "Name of the role", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Policy Name", + Name: "Role Name", }, }, "credential_type": { Type: framework.TypeString, - Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred), + Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred, sessionTokenCred), }, "role_arns": { @@ -104,7 +118,7 @@ delimited key pairs.`, "default_sts_ttl": { Type: framework.TypeDurationSecond, - Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred), + Description: fmt.Sprintf("Default TTL for %s, %s, and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred, sessionTokenCred), DisplayAttrs: &framework.DisplayAttributes{ Name: "Default STS TTL", }, @@ -112,7 +126,7 @@ delimited key pairs.`, "max_sts_ttl": { Type: framework.TypeDurationSecond, - Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred), + Description: fmt.Sprintf("Max allowed TTL for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred), DisplayAttrs: &framework.DisplayAttributes{ Name: "Max STS TTL", }, @@ -147,6 +161,15 @@ delimited key pairs.`, }, Default: "/", }, + + "mfa_serial_number": { + Type: framework.TypeString, + Description: fmt.Sprintf(`Identification number or ARN of the MFA device associated with the root config user. Only valid +when credential_type is %s. 
This is only required when the IAM user has an MFA device configured.`, sessionTokenCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "MFA Device Serial Number", + }, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -314,6 +337,10 @@ func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *f roleEntry.IAMTags = iamTags.(map[string]string) } + if serialNumber, ok := d.GetOk("mfa_serial_number"); ok { + roleEntry.SerialNumber = serialNumber.(string) + } + if legacyRole != "" { roleEntry = upgradeLegacyPolicyEntry(legacyRole) if roleEntry.InvalidData != "" { @@ -507,6 +534,7 @@ type awsRoleEntry struct { MaxSTSTTL time.Duration `json:"max_sts_ttl"` // Max allowed TTL for STS credentials UserPath string `json:"user_path"` // The path for the IAM user when using "iam_user" credential type PermissionsBoundaryARN string `json:"permissions_boundary_arn"` // ARN of an IAM policy to attach as a permissions boundary + SerialNumber string `json:"mfa_serial_number"` // Serial number or ARN of the MFA device } func (r *awsRoleEntry) toResponseData() map[string]interface{} { @@ -521,6 +549,7 @@ func (r *awsRoleEntry) toResponseData() map[string]interface{} { "max_sts_ttl": int64(r.MaxSTSTTL.Seconds()), "user_path": r.UserPath, "permissions_boundary_arn": r.PermissionsBoundaryARN, + "mfa_serial_number": r.SerialNumber, } if r.InvalidData != "" { @@ -536,19 +565,19 @@ func (r *awsRoleEntry) validate() error { errors = multierror.Append(errors, fmt.Errorf("did not supply credential_type")) } - allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred} + allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred, sessionTokenCred} for _, credType := range r.CredentialTypes { if !strutil.StrListContains(allowedCredentialTypes, credType) { errors = multierror.Append(errors, fmt.Errorf("unrecognized credential type: %s", credType)) } } - if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) { - errors = multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred)) + if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) && !strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred)) } - if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) { - errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred)) + if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) && !strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s, %s, and %s credential types", assumedRoleCred, federationTokenCred, sessionTokenCred)) } if r.MaxSTSTTL > 0 && @@ -562,7 +591,7 @@ func (r *awsRoleEntry) validate() error { errors = multierror.Append(errors, fmt.Errorf("user_path parameter only valid for 
%s credential type", iamUserCred)) } if !userPathRegex.MatchString(r.UserPath) { - errors = multierror.Append(errors, fmt.Errorf("The specified value for user_path is invalid. It must match %q regexp", userPathRegex.String())) + errors = multierror.Append(errors, fmt.Errorf("the specified value for user_path is invalid. It must match %q regexp", userPathRegex.String())) } } @@ -575,6 +604,10 @@ func (r *awsRoleEntry) validate() error { } } + if (r.PolicyDocument != "" || len(r.PolicyArns) != 0) && strutil.StrListContains(r.CredentialTypes, sessionTokenCred) { + errors = multierror.Append(errors, fmt.Errorf("cannot supply a policy or role when using credential_type %s", sessionTokenCred)) + } + if len(r.RoleArns) > 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) { errors = multierror.Append(errors, fmt.Errorf("cannot supply role_arns when credential_type isn't %s", assumedRoleCred)) } @@ -592,6 +625,7 @@ const ( assumedRoleCred = "assumed_role" iamUserCred = "iam_user" federationTokenCred = "federation_token" + sessionTokenCred = "session_token" ) const pathListRolesHelpSyn = `List the existing roles in this backend` diff --git a/builtin/logical/aws/path_roles_test.go b/builtin/logical/aws/path_roles_test.go index 39c9d90811fc..32d65da7bb81 100644 --- a/builtin/logical/aws/path_roles_test.go +++ b/builtin/logical/aws/path_roles_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -18,7 +21,7 @@ func TestBackend_PathListRoles(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -221,7 +224,7 @@ func TestRoleCRUDWithPermissionsBoundary(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -265,7 +268,7 @@ func TestRoleWithPermissionsBoundaryValidation(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/path_static_creds.go b/builtin/logical/aws/path_static_creds.go new file mode 100644 index 000000000000..14fca7cd6de4 --- /dev/null +++ b/builtin/logical/aws/path_static_creds.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "fmt" + "net/http" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + pathStaticCreds = "static-creds" + + paramAccessKeyID = "access_key" + paramSecretsAccessKey = "secret_key" +) + +type awsCredentials struct { + AccessKeyID string `json:"access_key" structs:"access_key" mapstructure:"access_key"` + SecretAccessKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"` +} + +func pathStaticCredentials(b *backend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticCreds, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticCredsRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramAccessKeyID: { + Type: framework.TypeString, + Description: descAccessKeyID, + }, + paramSecretsAccessKey: { + Type: framework.TypeString, + Description: descSecretAccessKey, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathStaticCredsHelpSyn, + HelpDescription: pathStaticCredsHelpDesc, + } +} + +func (b *backend) pathStaticCredsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + entry, err := req.Storage.Get(ctx, formatCredsStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("failed to read credentials for role %q: %w", roleName, err) + } + if entry == nil { + return nil, nil + } + + var credentials awsCredentials + if err := entry.DecodeJSON(&credentials); err != nil { + return nil, fmt.Errorf("failed to decode credentials: %w", err) + } + + return &logical.Response{ + Data: structs.New(credentials).Map(), + }, nil +} + +func formatCredsStoragePath(roleName string) string { + return fmt.Sprintf("%s/%s", pathStaticCreds, roleName) +} + +const pathStaticCredsHelpSyn = `Retrieve static credentials from the named role.` + +const pathStaticCredsHelpDesc = ` +This path reads AWS credentials for a given static role. The keys are rotated +periodically according to their configuration, and the same credentials will be +returned until they are rotated.` + +const ( + descAccessKeyID = "The access key of the AWS credential" + descSecretAccessKey = "The secret key of the AWS credential" +) diff --git a/builtin/logical/aws/path_static_creds_test.go b/builtin/logical/aws/path_static_creds_test.go new file mode 100644 index 000000000000..e4ef5b2636c1 --- /dev/null +++ b/builtin/logical/aws/path_static_creds_test.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "reflect" + "testing" + + "github.com/fatih/structs" + + "github.com/hashicorp/vault/sdk/framework" + + "github.com/hashicorp/vault/sdk/logical" +) + +// TestStaticCredsRead verifies that we can correctly read a cred that exists, and correctly _not read_ +// a cred that does not exist.
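+//
+// On a live mount the same handler would serve a read such as (mount path and
+// role name are hypothetical):
+//
+//	vault read aws/static-creds/my-role
+//
+// returning the access_key and secret_key fields exercised below.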
+func TestStaticCredsRead(t *testing.T) { + // setup + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() // for brevity later + + // insert a cred to get + creds := &awsCredentials{ + AccessKeyID: "foo", + SecretAccessKey: "bar", + } + entry, err := logical.StorageEntryJSON(formatCredsStoragePath("test"), creds) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + // cases + cases := []struct { + name string + roleName string + expectedError error + expectedResponse *logical.Response + }{ + { + name: "get existing creds", + roleName: "test", + expectedResponse: &logical.Response{ + Data: structs.New(creds).Map(), + }, + }, + { + name: "get non-existent creds", + roleName: "this-doesnt-exist", + // returns nil, nil + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := Backend(config) + + req := &logical.Request{ + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.roleName, + }, + } + resp, err := b.pathStaticCredsRead(bgCTX, req, staticCredsFieldData(req.Data)) + + if err != c.expectedError { + t.Fatalf("got error %q, but expected %q", err, c.expectedError) + } + if !reflect.DeepEqual(resp, c.expectedResponse) { + t.Fatalf("got response %v, but expected %v", resp, c.expectedResponse) + } + }) + } +} + +func staticCredsFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_static_roles.go b/builtin/logical/aws/path_static_roles.go new file mode 100644 index 000000000000..f07eab54ab18 --- /dev/null +++ b/builtin/logical/aws/path_static_roles.go @@ -0,0 +1,348 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + pathStaticRole = "static-roles" + + paramRoleName = "name" + paramUsername = "username" + paramRotationPeriod = "rotation_period" +) + +type staticRoleEntry struct { + Name string `json:"name" structs:"name" mapstructure:"name"` + ID string `json:"id" structs:"id" mapstructure:"id"` + Username string `json:"username" structs:"username" mapstructure:"username"` + RotationPeriod time.Duration `json:"rotation_period" structs:"rotation_period" mapstructure:"rotation_period"` +} + +func pathStaticRoles(b *backend) *framework.Path { + roleResponse := map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + }}, + } + + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticRole, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesRead, + Responses: roleResponse, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesWrite, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: roleResponse, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesDelete, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: http.StatusText(http.StatusNoContent), + }}, + }, + }, + }, + + HelpSynopsis: pathStaticRolesHelpSyn, + HelpDescription: pathStaticRolesHelpDesc, + } +} + +func (b *backend) pathStaticRolesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + b.roleMutex.RLock() + defer b.roleMutex.RUnlock() + + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("failed to read configuration for static role %q: %w", roleName, err) + } + if entry == nil { + return nil, nil + } + + var config staticRoleEntry + if err := entry.DecodeJSON(&config); err != nil { + return nil, fmt.Errorf("failed to decode configuration for static role %q: %w", roleName, err) + } + + return &logical.Response{ + Data: formatResponse(config), + }, nil +} + +func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Create & validate config 
from request parameters + config := staticRoleEntry{} + isCreate := req.Operation == logical.CreateOperation + + if rawRoleName, ok := data.GetOk(paramRoleName); ok { + config.Name = rawRoleName.(string) + + if err := b.validateRoleName(config.Name); err != nil { + return nil, err + } + } else { + return logical.ErrorResponse("missing %q parameter", paramRoleName), nil + } + + // retrieve old role value + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(config.Name)) + if err != nil { + return nil, fmt.Errorf("couldn't check storage for pre-existing role: %w", err) + } + + if entry != nil { + err = entry.DecodeJSON(&config) + if err != nil { + return nil, fmt.Errorf("couldn't convert existing role into config struct: %w", err) + } + } else { + // if we couldn't find an entry, this is a create event + isCreate = true + } + + // other params are optional if we're not creating + + if rawUsername, ok := data.GetOk(paramUsername); ok { + config.Username = rawUsername.(string) + + if err := b.validateIAMUserExists(ctx, req.Storage, &config, isCreate); err != nil { + return nil, err + } + } else if isCreate { + return logical.ErrorResponse("missing %q parameter", paramUsername), nil + } + + if rawRotationPeriod, ok := data.GetOk(paramRotationPeriod); ok { + config.RotationPeriod = time.Duration(rawRotationPeriod.(int)) * time.Second + + if err := b.validateRotationPeriod(config.RotationPeriod); err != nil { + return nil, err + } + } else if isCreate { + return logical.ErrorResponse("missing %q parameter", paramRotationPeriod), nil + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + // Upsert role config + newRole, err := logical.StorageEntryJSON(formatRoleStoragePath(config.Name), config) + if err != nil { + return nil, fmt.Errorf("failed to marshal object to JSON: %w", err) + } + err = req.Storage.Put(ctx, newRole) + if err != nil { + return nil, fmt.Errorf("failed to save object in storage: %w", err) + } + + // Bootstrap initial set of keys if they did not exist before. AWS Secret Access Keys can only be obtained on creation, + // so we need to bootstrap new roles with a new initial set of keys to be able to serve valid credentials to Vault clients. + existingCreds, err := req.Storage.Get(ctx, formatCredsStoragePath(config.Name)) + if err != nil { + return nil, fmt.Errorf("unable to verify if credentials already exist for role %q: %w", config.Name, err) + } + if existingCreds == nil { + err := b.createCredential(ctx, req.Storage, config, false) + if err != nil { + return nil, fmt.Errorf("failed to create new credentials for role %q: %w", config.Name, err) + } + + err = b.credRotationQueue.Push(&queue.Item{ + Key: config.Name, + Value: config, + Priority: time.Now().Add(config.RotationPeriod).Unix(), + }) + if err != nil { + return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err) + } + } else { + // creds already exist, so all we need to do is update the rotation schedule: + // pop the existing queue item and re-push it below with a priority based on the new rotation period
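+		// (Queue priority is a Unix timestamp: the PeriodicFunc rotates the
+		// credentials of any item whose priority has passed, so re-pushing with
+		// now + rotation_period schedules the next rotation.)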
+ i, err := b.credRotationQueue.PopByKey(config.Name) + if err != nil { + return nil, fmt.Errorf("expected an item with name %q, but got an error: %w", config.Name, err) + } + i.Value = config + // update the next rotation to occur at now + the new rotation period + i.Priority = time.Now().Add(config.RotationPeriod).Unix() + err = b.credRotationQueue.Push(i) + if err != nil { + return nil, fmt.Errorf("failed to add updated item into the rotation queue for role %q: %w", config.Name, err) + } + } + + return &logical.Response{ + Data: formatResponse(config), + }, nil +} + +func (b *backend) pathStaticRolesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("couldn't locate role in storage due to error: %w", err) + } + // no entry in storage, but no error either, congrats, it's deleted! + if entry == nil { + return nil, nil + } + var cfg staticRoleEntry + err = entry.DecodeJSON(&cfg) + if err != nil { + return nil, fmt.Errorf("couldn't convert storage entry to role config") + } + + err = b.deleteCredential(ctx, req.Storage, cfg, false) + if err != nil { + return nil, fmt.Errorf("failed to clean credentials while deleting role %q: %w", roleName.(string), err) + } + + // delete from the queue + _, err = b.credRotationQueue.PopByKey(cfg.Name) + if err != nil { + return nil, fmt.Errorf("couldn't delete key from queue: %w", err) + } + + return nil, req.Storage.Delete(ctx, formatRoleStoragePath(roleName.(string))) +} + +func (b *backend) validateRoleName(name string) error { + if name == "" { + return errors.New("empty role name attribute given") + } + return nil +} + +// validateIAMUserExists checks the user information we have for the role against the information on AWS. On a create, it uses the username +// to retrieve the user information and _sets_ the userID. On update, it validates the userID and username. +func (b *backend) validateIAMUserExists(ctx context.Context, storage logical.Storage, entry *staticRoleEntry, isCreate bool) error { + c, err := b.clientIAM(ctx, storage) + if err != nil { + return fmt.Errorf("unable to validate username %q: %w", entry.Username, err) + } + + // we don't really care about the content of the result, just that it's not an error + out, err := c.GetUser(&iam.GetUserInput{ + UserName: aws.String(entry.Username), + }) + if err != nil || out.User == nil { + return fmt.Errorf("unable to validate username %q: %w", entry.Username, err) + } + if *out.User.UserName != entry.Username { + return fmt.Errorf("AWS GetUser returned a username, but it didn't match: %q was requested, but %q was returned", entry.Username, *out.User.UserName) + } + + if !isCreate && *out.User.UserId != entry.ID { + return fmt.Errorf("AWS GetUser returned a user, but the ID did not match: %q was requested, but %q was returned", entry.ID, *out.User.UserId) + } else { + // if this is an insert, store the userID. This is the immutable part of an IAM user, but it's not exactly user-friendly. + // So, we allow users to specify usernames, but on updates we'll use the ID as a verification cross-check.
+ entry.ID = *out.User.UserId + } + + return nil +} + +const ( + minAllowableRotationPeriod = 1 * time.Minute +) + +func (b *backend) validateRotationPeriod(period time.Duration) error { + if period < minAllowableRotationPeriod { + return fmt.Errorf("role rotation period out of range: must be at least %.2f seconds", minAllowableRotationPeriod.Seconds()) + } + return nil +} + +func formatResponse(cfg staticRoleEntry) map[string]interface{} { + response := structs.New(cfg).Map() + response[paramRotationPeriod] = int64(cfg.RotationPeriod.Seconds()) + + return response +} + +func formatRoleStoragePath(roleName string) string { + return fmt.Sprintf("%s/%s", pathStaticRole, roleName) +} + +const pathStaticRolesHelpSyn = ` +Manage static roles for AWS. +` + +const pathStaticRolesHelpDesc = ` +This path lets you manage static roles (users) for the AWS secret backend. +A static role is associated with a single IAM user, and manages the access +keys based on a rotation period, automatically rotating the credential. If +the IAM user has multiple access keys, the oldest key will be rotated. +` + +const ( + descRoleName = "The name of this role." + descUsername = "The IAM user to adopt as a static role." + descRotationPeriod = `Period by which to rotate the backing credential of the adopted user. +This can be a Go duration (e.g., '1m', '24h'), or an integer number of seconds.` +) diff --git a/builtin/logical/aws/path_static_roles_test.go b/builtin/logical/aws/path_static_roles_test.go new file mode 100644 index 000000000000..a56225b57580 --- /dev/null +++ b/builtin/logical/aws/path_static_roles_test.go @@ -0,0 +1,546 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/queue" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// TestStaticRolesValidation verifies that valid requests pass validation and that invalid requests fail validation. +// This includes the user already existing in IAM, and the rotation period being sufficiently long.
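+//
+// For reference, creating such a role from the CLI would look roughly like
+// (mount path and values are hypothetical):
+//
+//	vault write aws/static-roles/test username=jane-doe rotation_period=24h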
+func TestStaticRolesValidation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() // for brevity + + cases := []struct { + name string + opts []awsutil.MockIAMOption + requestData map[string]interface{} + isError bool + }{ + { + name: "all good", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("jane-doe"), + }, + }), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d", + }, + }, + { + name: "bad user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserError(errors.New("oh no")), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "24h", + }, + isError: true, + }, + { + name: "user mismatch", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("ms-impostor"), UserId: aws.String("fake-id")}}), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d2h", + }, + isError: true, + }, + { + name: "bad rotation period", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "45s", + }, + isError: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := Backend(config) + miam, err := awsutil.NewMockIAM(c.opts...)(nil) + if err != nil { + t.Fatal(err) + } + b.iamClient = miam + if err := b.Setup(bgCTX, config); err != nil { + t.Fatal(err) + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: c.requestData, + Path: "static-roles/test", + } + _, err = b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) + if c.isError && err == nil { + t.Fatal("expected an error but didn't get one") + } else if !c.isError && err != nil { + t.Fatalf("got an unexpected error: %s", err) + } + }) + } +} + +// TestStaticRolesWrite validates that we can write a new entry for a new static role, and that we correctly +// do not write if the request is invalid in some way. +func TestStaticRolesWrite(t *testing.T) { + bgCTX := context.Background() + + cases := []struct { + name string + // objects to return from the mock IAM: you'll need a GetUserOutput (to validate the existence + // of the user being written), the keys the user has already been assigned, + // and the new key Vault requests. + opts []awsutil.MockIAMOption + // the name, username if updating, and rotation_period of the user. This is the inbound request the code would get. + data map[string]interface{} + expectedError bool + findUser bool + // if the data sent uses the name "johnny", then we'll match an existing user with a rotation period of 24 hours.
+ isUpdate bool + newPriority int64 // update time of new item in queue, skip if isUpdate false. There is wiggle room of 5 seconds, + // so the deltas between the old and the new update time should be larger than that to ensure the difference + // can be detected. + }{ + { + name: "happy path", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("jane-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d", + }, + // writes role, writes cred + findUser: true, + }, + { + name: "no aws user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserError(errors.New("no such user, etc etc")), + }, + data: map[string]interface{}{ + "name": "test", + "username": "a-nony-mous", + "rotation_period": "15s", + }, + expectedError: true, + }, + { + name: "update existing user, decreased rotation duration", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("john-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "johnny", + "rotation_period": "19m", + }, + findUser: true, + isUpdate: true, + newPriority: time.Now().Add(19 * time.Minute).Unix(), + }, + { + name: "update existing user, increased rotation duration", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("john-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "johnny", + "rotation_period": "40h", + }, + findUser: true, + isUpdate: true, + newPriority: time.Now().Add(40 * time.Hour).Unix(), + }, + } + + // if a user exists (the user-doesn't-exist case is tested in validation above), + // we check how many keys the user has; if it's two, we delete one.
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			config := logical.TestBackendConfig()
+			config.StorageView = &logical.InmemStorage{}
+
+			miam, err := awsutil.NewMockIAM(
+				c.opts...,
+			)(nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			b := Backend(config)
+			b.iamClient = miam
+			if err := b.Setup(bgCTX, config); err != nil {
+				t.Fatal(err)
+			}
+
+			// put a role in storage for update tests
+			staticRole := staticRoleEntry{
+				Name:           "johnny",
+				Username:       "john-doe",
+				ID:             "unique-id",
+				RotationPeriod: 24 * time.Hour,
+			}
+			entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
+			if err != nil {
+				t.Fatal(err)
+			}
+			err = config.StorageView.Put(bgCTX, entry)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			req := &logical.Request{
+				Operation: logical.UpdateOperation,
+				Storage:   config.StorageView,
+				Data:      c.data,
+				Path:      "static-roles/" + c.data["name"].(string),
+			}
+
+			r, err := b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data))
+			if c.expectedError && err == nil {
+				t.Fatal("expected an error, but got none")
+			} else if c.expectedError {
+				return // save us some if statements
+			}
+
+			if err != nil {
+				t.Fatalf("got an error back unexpectedly: %s", err)
+			}
+
+			if c.findUser && r == nil {
+				t.Fatal("response was nil, but it shouldn't have been")
+			}
+
+			role, err := config.StorageView.Get(bgCTX, req.Path)
+			if c.findUser && (err != nil || role == nil) {
+				t.Fatalf("couldn't find the role we should have stored: %s", err)
+			}
+			var actualData staticRoleEntry
+			err = role.DecodeJSON(&actualData)
+			if err != nil {
+				t.Fatalf("couldn't convert storage data to role entry: %s", err)
+			}
+
+			// construct expected data
+			var expectedData staticRoleEntry
+			fieldData := staticRoleFieldData(c.data)
+			if c.isUpdate {
+				// data is johnny + c.data
+				expectedData = staticRole
+			}
+
+			var actualItem *queue.Item
+			if c.isUpdate {
+				actualItem, _ = b.credRotationQueue.PopByKey(expectedData.Name)
+			}
+
+			if u, ok := fieldData.GetOk("username"); ok {
+				expectedData.Username = u.(string)
+			}
+			if r, ok := fieldData.GetOk("rotation_period"); ok {
+				expectedData.RotationPeriod = time.Duration(r.(int)) * time.Second
+			}
+			if n, ok := fieldData.GetOk("name"); ok {
+				expectedData.Name = n.(string)
+			}
+
+			// validate fields
+			if eu, au := expectedData.Username, actualData.Username; eu != au {
+				t.Fatalf("mismatched username, expected %q but got %q", eu, au)
+			}
+			if er, ar := expectedData.RotationPeriod, actualData.RotationPeriod; er != ar {
+				t.Fatalf("mismatched rotation period, expected %q but got %q", er, ar)
+			}
+			if en, an := expectedData.Name, actualData.Name; en != an {
+				t.Fatalf("mismatched role name, expected %q, but got %q", en, an)
+			}
+
+			// one-off to avoid importing/casting
+			abs := func(x int64) int64 {
+				if x < 0 {
+					return -x
+				}
+				return x
+			}
+
+			if c.isUpdate {
+				if ep, ap := c.newPriority, actualItem.Priority; abs(ep-ap) > 5 { // 5 second wiggle room for how long the test takes
+					t.Fatalf("mismatched updated priority, expected %d but got %d", ep, ap)
+				}
+			}
+		})
+	}
+}
+
+// TestStaticRoleRead validates that we can read a configured role and correctly do not read anything if we
+// request something that doesn't exist.
+func TestStaticRoleRead(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	bgCTX := context.Background()
+
+	// test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe"
+	cases := []struct {
+		name     string
+		roleName string
+		found    bool
+	}{
+		{
+			name:     "role name exists",
+			roleName: "test",
+			found:    true,
+		},
+		{
+			name:     "role name not found",
+			roleName: "toast",
+			found:    false, // implied, but set for clarity
+		},
+	}
+
+	staticRole := staticRoleEntry{
+		Name:           "test",
+		Username:       "jane-doe",
+		RotationPeriod: 24 * time.Hour,
+	}
+	entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = config.StorageView.Put(bgCTX, entry)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			req := &logical.Request{
+				Operation: logical.ReadOperation,
+				Storage:   config.StorageView,
+				Data: map[string]interface{}{
+					"name": c.roleName,
+				},
+				Path: formatRoleStoragePath(c.roleName),
+			}
+
+			b := Backend(config)
+
+			r, err := b.pathStaticRolesRead(bgCTX, req, staticRoleFieldData(req.Data))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if c.found {
+				if r == nil {
+					t.Fatal("response was nil, but it shouldn't have been")
+				}
+			} else {
+				if r != nil {
+					t.Fatal("response should have been nil on a non-existent role")
+				}
+			}
+		})
+	}
+}
+
+// TestStaticRoleDelete validates that we correctly remove a role on a delete request, and that we correctly do not
+// remove anything if a role does not exist with that name.
+func TestStaticRoleDelete(t *testing.T) {
+	bgCTX := context.Background()
+
+	// test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe"
+	cases := []struct {
+		name  string
+		role  string
+		found bool
+	}{
+		{
+			name:  "role found",
+			role:  "test",
+			found: true,
+		},
+		{
+			name:  "role not found",
+			role:  "tossed",
+			found: false,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			config := logical.TestBackendConfig()
+			config.StorageView = &logical.InmemStorage{}
+
+			// fake an IAM
+			var iamfunc awsutil.IAMAPIFunc
+			if !c.found {
+				iamfunc = awsutil.NewMockIAM(awsutil.WithDeleteAccessKeyError(errors.New("shouldn't have called delete")))
+			} else {
+				iamfunc = awsutil.NewMockIAM()
+			}
+			miam, err := iamfunc(nil)
+			if err != nil {
+				t.Fatalf("couldn't initialize mockiam: %s", err)
+			}
+
+			b := Backend(config)
+			b.iamClient = miam
+
+			// put in storage
+			staticRole := staticRoleEntry{
+				Name:           "test",
+				Username:       "jane-doe",
+				RotationPeriod: 24 * time.Hour,
+			}
+			entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole)
+			if err != nil {
+				t.Fatal(err)
+			}
+			err = config.StorageView.Put(bgCTX, entry)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			l, err := config.StorageView.List(bgCTX, "")
+			if err != nil || len(l) != 1 {
+				t.Fatalf("couldn't add an entry to storage during test setup: %s", err)
+			}
+
+			// put in queue
+			err = b.credRotationQueue.Push(&queue.Item{
+				Key:      staticRole.Name,
+				Value:    staticRole,
+				Priority: time.Now().Add(90 * time.Hour).Unix(),
+			})
+			if err != nil {
+				t.Fatalf("couldn't add item to the priority queue: %s", err)
+			}
+
+			req := &logical.Request{
+				Operation: logical.DeleteOperation,
+				Storage:   config.StorageView,
+				Data: map[string]interface{}{
+					"name": c.role,
+				},
+				Path: formatRoleStoragePath(c.role),
+			}
+
+			r, err := b.pathStaticRolesDelete(bgCTX,
req, staticRoleFieldData(req.Data)) + if err != nil { + t.Fatal(err) + } + if r != nil { + t.Fatal("response wasn't nil, but it should have been") + } + + l, err = config.StorageView.List(bgCTX, "") + if err != nil { + t.Fatal(err) + } + if c.found && len(l) != 0 { + t.Fatal("size of role storage is non zero after delete") + } else if !c.found && len(l) != 1 { + t.Fatal("size of role storage changed after what should have been no deletion") + } + + if c.found && b.credRotationQueue.Len() != 0 { + t.Fatal("size of queue is non-zero after delete") + } else if !c.found && b.credRotationQueue.Len() != 1 { + t.Fatal("size of queue changed after what should have been no deletion") + } + }) + } +} + +func staticRoleFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go index 035350cdbfb1..46b9c3e928a9 100644 --- a/builtin/logical/aws/path_user.go +++ b/builtin/logical/aws/path_user.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -18,6 +21,12 @@ import ( func pathUser(b *backend) *framework.Path { return &framework.Path{ Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "generate", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -26,21 +35,38 @@ func pathUser(b *backend) *framework.Path { "role_arn": { Type: framework.TypeString, Description: "ARN of role to assume when credential_type is " + assumedRoleCred, + Query: true, }, "ttl": { Type: framework.TypeDurationSecond, Description: "Lifetime of the returned credentials in seconds", Default: 3600, + Query: true, }, "role_session_name": { Type: framework.TypeString, Description: "Session name to use when assuming role. 
Max chars: 64", + Query: true, + }, + "mfa_code": { + Type: framework.TypeString, + Description: "MFA code to provide for session tokens", }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsRead, - logical.UpdateOperation: b.pathCredsRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials|sts-credentials", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials-with-parameters|sts-credentials-with-parameters", + }, + }, }, HelpSynopsis: pathUserHelpSyn, @@ -85,6 +111,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr roleArn := d.Get("role_arn").(string) roleSessionName := d.Get("role_session_name").(string) + mfaCode := d.Get("mfa_code").(string) var credentialType string switch { @@ -133,6 +160,8 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName) case federationTokenCred: return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl) + case sessionTokenCred: + return b.getSessionToken(ctx, req.Storage, role.SerialNumber, mfaCode, ttl) default: return logical.ErrorResponse(fmt.Sprintf("unknown credential_type: %q", credentialType)), nil } @@ -152,7 +181,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Get information about this user - groupsResp, err := client.ListGroupsForUser(&iam.ListGroupsForUserInput{ + groupsResp, err := client.ListGroupsForUserWithContext(ctx, &iam.ListGroupsForUserInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -191,7 +220,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k groups := groupsResp.Groups // Inline (user) policies - policiesResp, err := client.ListUserPolicies(&iam.ListUserPoliciesInput{ + policiesResp, err := client.ListUserPoliciesWithContext(ctx, &iam.ListUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -201,7 +230,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k policies := policiesResp.PolicyNames // Attached managed policies - manPoliciesResp, err := client.ListAttachedUserPolicies(&iam.ListAttachedUserPoliciesInput{ + manPoliciesResp, err := client.ListAttachedUserPoliciesWithContext(ctx, &iam.ListAttachedUserPoliciesInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -210,7 +239,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } manPolicies := manPoliciesResp.AttachedPolicies - keysResp, err := client.ListAccessKeys(&iam.ListAccessKeysInput{ + keysResp, err := client.ListAccessKeysWithContext(ctx, &iam.ListAccessKeysInput{ UserName: aws.String(username), MaxItems: aws.Int64(1000), }) @@ -221,7 +250,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Revoke all keys for _, k := range keys { - _, err = client.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + _, err = client.DeleteAccessKeyWithContext(ctx, &iam.DeleteAccessKeyInput{ AccessKeyId: k.AccessKeyId, UserName: 
aws.String(username), }) @@ -232,7 +261,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Detach managed policies for _, p := range manPolicies { - _, err = client.DetachUserPolicy(&iam.DetachUserPolicyInput{ + _, err = client.DetachUserPolicyWithContext(ctx, &iam.DetachUserPolicyInput{ UserName: aws.String(username), PolicyArn: p.PolicyArn, }) @@ -243,7 +272,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Delete any inline (user) policies for _, p := range policies { - _, err = client.DeleteUserPolicy(&iam.DeleteUserPolicyInput{ + _, err = client.DeleteUserPolicyWithContext(ctx, &iam.DeleteUserPolicyInput{ UserName: aws.String(username), PolicyName: p, }) @@ -254,7 +283,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k // Remove the user from all their groups for _, g := range groups { - _, err = client.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{ + _, err = client.RemoveUserFromGroupWithContext(ctx, &iam.RemoveUserFromGroupInput{ GroupName: g.GroupName, UserName: aws.String(username), }) @@ -264,7 +293,7 @@ func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _k } // Delete the user - _, err = client.DeleteUser(&iam.DeleteUserInput{ + _, err = client.DeleteUserWithContext(ctx, &iam.DeleteUserInput{ UserName: aws.String(username), }) if err != nil { diff --git a/builtin/logical/aws/rollback.go b/builtin/logical/aws/rollback.go index e498fc6b2baf..6136db9baae6 100644 --- a/builtin/logical/aws/rollback.go +++ b/builtin/logical/aws/rollback.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( diff --git a/builtin/logical/aws/rotation.go b/builtin/logical/aws/rotation.go new file mode 100644 index 000000000000..0e9e22fc8273 --- /dev/null +++ b/builtin/logical/aws/rotation.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package aws + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +// rotateExpiredStaticCreds will pop expired credentials (credentials whose priority +// represents a time before the present), rotate the associated credential, and push +// them back onto the queue with the new priority. +func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Request) error { + var errs *multierror.Error + + for { + keepGoing, err := b.rotateCredential(ctx, req.Storage) + if err != nil { + errs = multierror.Append(errs, err) + } + if !keepGoing { + if errs.ErrorOrNil() != nil { + return fmt.Errorf("error(s) occurred while rotating expired static credentials: %w", errs) + } else { + return nil + } + } + } +} + +// rotateCredential pops an element from the priority queue, and if it is expired, rotate and re-push. +// If a cred was ready for rotation, return true, otherwise return false. +func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (wasReady bool, err error) { + // If queue is empty or first item does not need a rotation (priority is next rotation timestamp) there is nothing to do + item, err := b.credRotationQueue.Pop() + if err != nil { + // the queue is just empty, which is fine. 
+		if errors.Is(err, queue.ErrEmpty) {
+			return false, nil
+		}
+		return false, fmt.Errorf("failed to pop from the credential rotation queue: %w", err)
+	}
+	if item.Priority > time.Now().Unix() {
+		// no rotation required
+		// push the item back into priority queue
+		err = b.credRotationQueue.Push(item)
+		if err != nil {
+			return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", item.Key, err)
+		}
+		return false, nil
+	}
+
+	cfg := item.Value.(staticRoleEntry)
+
+	err = b.createCredential(ctx, storage, cfg, true)
+	if err != nil {
+		// put it back in the queue with a backoff
+		item.Priority = time.Now().Add(10 * time.Second).Unix()
+		innerErr := b.credRotationQueue.Push(item)
+		if innerErr != nil {
+			return true, fmt.Errorf("failed to add item into the rotation queue for role %q (%w), while attempting to recover from failure to create credential: %w", cfg.Name, innerErr, err)
+		}
+		// there was one that "should have" rotated, so we want to keep looking further down the queue
+		return true, err
+	}
+
+	// set new priority and re-queue
+	item.Priority = time.Now().Add(cfg.RotationPeriod).Unix()
+	err = b.credRotationQueue.Push(item)
+	if err != nil {
+		return true, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
+	}
+
+	return true, nil
+}
+
+// createCredential will create a new IAM credential, deleting the oldest one if necessary.
+func (b *backend) createCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
+	iamClient, err := b.clientIAM(ctx, storage)
+	if err != nil {
+		return fmt.Errorf("unable to get the AWS IAM client: %w", err)
+	}
+
+	// IAM users can have at most 2 sets of keys at a time.
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html)
+	// Ideally we would get this value through an API check, but I'm not sure one exists.
+	const maxAllowedKeys = 2
+
+	err = b.validateIAMUserExists(ctx, storage, &cfg, false)
+	if err != nil {
+		return fmt.Errorf("iam user didn't exist, or username/userid didn't match: %w", err)
+	}
+
+	accessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{
+		UserName: aws.String(cfg.Username),
+	})
+	if err != nil {
+		return fmt.Errorf("unable to list existing access keys for IAM user %q: %w", cfg.Username, err)
+	}
+
+	// If we have the maximum number of keys, we have to delete one to make another (so we can get the credentials).
+	// We'll delete the oldest one.
+	//
+	// Since this check relies on a pre-coded maximum, it's a bit fragile. If the number goes up, we risk deleting
+	// a key when we didn't need to. If this number goes down, we'll start throwing errors because we think we're
+	// allowed to create a key and aren't. In either case, adjusting the constant should be sufficient to fix things.
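+	//
+	// For illustration (hypothetical dates): with maxAllowedKeys = 2 and two
+	// existing keys whose CreateDates are 2023-01-01 and 2023-06-01, the scan
+	// below selects and deletes the 2023-01-01 key before creating a
+	// replacement; with zero or one existing key, no DeleteAccessKey call is
+	// made at all.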
+	if len(accessKeys.AccessKeyMetadata) >= maxAllowedKeys {
+		oldestKey := accessKeys.AccessKeyMetadata[0]
+
+		for i := 1; i < len(accessKeys.AccessKeyMetadata); i++ {
+			if accessKeys.AccessKeyMetadata[i].CreateDate.Before(*oldestKey.CreateDate) {
+				oldestKey = accessKeys.AccessKeyMetadata[i]
+			}
+		}
+
+		_, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
+			AccessKeyId: oldestKey.AccessKeyId,
+			UserName:    oldestKey.UserName,
+		})
+		if err != nil {
+			return fmt.Errorf("unable to delete oldest access key for user %q: %w", cfg.Username, err)
+		}
+	}
+
+	// Create a new set of keys
+	out, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{
+		UserName: aws.String(cfg.Username),
+	})
+	if err != nil {
+		return fmt.Errorf("unable to create new access keys for user %q: %w", cfg.Username, err)
+	}
+
+	// Persist new keys
+	entry, err := logical.StorageEntryJSON(formatCredsStoragePath(cfg.Name), &awsCredentials{
+		AccessKeyID:     *out.AccessKey.AccessKeyId,
+		SecretAccessKey: *out.AccessKey.SecretAccessKey,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to marshal object to JSON: %w", err)
+	}
+	if shouldLockStorage {
+		b.roleMutex.Lock()
+		defer b.roleMutex.Unlock()
+	}
+	err = storage.Put(ctx, entry)
+	if err != nil {
+		return fmt.Errorf("failed to save object in storage: %w", err)
+	}
+
+	return nil
+}
+
+// deleteCredential removes the credential associated with the role from storage.
+func (b *backend) deleteCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
+	// synchronize storage access if we didn't in the caller.
+	if shouldLockStorage {
+		b.roleMutex.Lock()
+		defer b.roleMutex.Unlock()
+	}
+
+	key, err := storage.Get(ctx, formatCredsStoragePath(cfg.Name))
+	if err != nil {
+		return fmt.Errorf("couldn't find key in storage: %w", err)
+	}
+	// no entry, so it must have been deleted already
+	if key == nil {
+		return nil
+	}
+	var creds awsCredentials
+	err = key.DecodeJSON(&creds)
+	if err != nil {
+		return fmt.Errorf("couldn't decode storage entry to a valid credential: %w", err)
+	}
+
+	err = storage.Delete(ctx, formatCredsStoragePath(cfg.Name))
+	if err != nil {
+		return fmt.Errorf("couldn't delete from storage: %w", err)
+	}
+
+	// since we stored this access key ID ourselves, we know it's the one we created, so it's safe to delete it from IAM.
+	_, err = b.iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
+		AccessKeyId: aws.String(creds.AccessKeyID),
+		UserName:    aws.String(cfg.Username),
+	})
+	if err != nil {
+		return fmt.Errorf("couldn't delete from IAM: %w", err)
+	}
+
+	return nil
+}
diff --git a/builtin/logical/aws/rotation_test.go b/builtin/logical/aws/rotation_test.go
new file mode 100644
index 000000000000..5b85d456c340
--- /dev/null
+++ b/builtin/logical/aws/rotation_test.go
@@ -0,0 +1,440 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package aws
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/queue"
+)
+
+// TestRotation verifies that the rotation code and priority queue correctly select and rotate credentials
+// for static secrets.
+func TestRotation(t *testing.T) { + bgCTX := context.Background() + + type credToInsert struct { + config staticRoleEntry // role configuration from a normal createRole request + age time.Duration // how old the cred should be - if this is longer than the config.RotationPeriod, + // the cred is 'pre-expired' + + changed bool // whether we expect the cred to change - this is technically redundant to a comparison between + // rotationPeriod and age. + } + + // due to a limitation with the mockIAM implementation, any cred you want to rotate must have + // username jane-doe and userid unique-id, since we can only pre-can one exact response to GetUser + cases := []struct { + name string + creds []credToInsert + }{ + { + name: "refresh one", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 2 * time.Second, + }, + age: 5 * time.Second, + changed: true, + }, + }, + }, + { + name: "refresh none", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 1 * time.Minute, + }, + age: 5 * time.Second, + changed: false, + }, + }, + }, + { + name: "refresh one of two", + creds: []credToInsert{ + { + config: staticRoleEntry{ + Name: "toast", + Username: "john-doe", + ID: "other-id", + RotationPeriod: 1 * time.Minute, + }, + age: 5 * time.Second, + changed: false, + }, + { + config: staticRoleEntry{ + Name: "test", + Username: "jane-doe", + ID: "unique-id", + RotationPeriod: 1 * time.Second, + }, + age: 5 * time.Second, + changed: true, + }, + }, + }, + { + name: "no creds to rotate", + creds: []credToInsert{}, + }, + } + + ak := "long-access-key-id" + oldSecret := "abcdefghijklmnopqrstuvwxyz" + newSecret := "zyxwvutsrqponmlkjihgfedcba" + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + + // insert all our creds + for i, cred := range c.creds { + + // all the creds will be the same for every user, but that's okay + // since what we care about is whether they changed on a single-user basis. 
+				miam, err := awsutil.NewMockIAM(
+					// blank list for existing user
+					awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+						AccessKeyMetadata: []*iam.AccessKeyMetadata{
+							{},
+						},
+					}),
+					// initial key to store
+					awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+						AccessKey: &iam.AccessKey{
+							AccessKeyId:     aws.String(ak),
+							SecretAccessKey: aws.String(oldSecret),
+						},
+					}),
+					awsutil.WithGetUserOutput(&iam.GetUserOutput{
+						User: &iam.User{
+							UserId:   aws.String(cred.config.ID),
+							UserName: aws.String(cred.config.Username),
+						},
+					}),
+				)(nil)
+				if err != nil {
+					t.Fatalf("couldn't initialize mock IAM handler: %s", err)
+				}
+				b.iamClient = miam
+
+				err = b.createCredential(bgCTX, config.StorageView, cred.config, true)
+				if err != nil {
+					t.Fatalf("couldn't insert credential %d: %s", i, err)
+				}
+
+				item := &queue.Item{
+					Key:      cred.config.Name,
+					Value:    cred.config,
+					Priority: time.Now().Add(-1 * cred.age).Add(cred.config.RotationPeriod).Unix(),
+				}
+				err = b.credRotationQueue.Push(item)
+				if err != nil {
+					t.Fatalf("couldn't push item onto queue: %s", err)
+				}
+			}
+
+			// update aws responses, same argument for why it's okay every cred will be the same
+			miam, err := awsutil.NewMockIAM(
+				// old key
+				awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+					AccessKeyMetadata: []*iam.AccessKeyMetadata{
+						{
+							AccessKeyId: aws.String(ak),
+						},
+					},
+				}),
+				// new key
+				awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+					AccessKey: &iam.AccessKey{
+						AccessKeyId:     aws.String(ak),
+						SecretAccessKey: aws.String(newSecret),
+					},
+				}),
+				awsutil.WithGetUserOutput(&iam.GetUserOutput{
+					User: &iam.User{
+						UserId:   aws.String("unique-id"),
+						UserName: aws.String("jane-doe"),
+					},
+				}),
+			)(nil)
+			if err != nil {
+				t.Fatalf("couldn't initialize mock IAM handler: %s", err)
+			}
+			b.iamClient = miam
+
+			req := &logical.Request{
+				Storage: config.StorageView,
+			}
+			err = b.rotateExpiredStaticCreds(bgCTX, req)
+			if err != nil {
+				t.Fatalf("got an error rotating credentials: %s", err)
+			}
+
+			// check our credentials
+			for i, cred := range c.creds {
+				entry, err := config.StorageView.Get(bgCTX, formatCredsStoragePath(cred.config.Name))
+				if err != nil {
+					t.Fatalf("got an error retrieving credentials %d", i)
+				}
+				var out awsCredentials
+				err = entry.DecodeJSON(&out)
+				if err != nil {
+					t.Fatalf("could not unmarshal storage view entry for cred %d to an aws credential: %s", i, err)
+				}
+
+				if cred.changed && out.SecretAccessKey != newSecret {
+					t.Fatalf("expected the key for cred %d to have changed, but it hasn't", i)
+				} else if !cred.changed && out.SecretAccessKey != oldSecret {
+					t.Fatalf("expected the key for cred %d to have stayed the same, but it changed", i)
+				}
+			}
+		})
+	}
+}
+
+type fakeIAM struct {
+	iamiface.IAMAPI
+	delReqs []*iam.DeleteAccessKeyInput
+}
+
+func (f *fakeIAM) DeleteAccessKey(r *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) {
+	f.delReqs = append(f.delReqs, r)
+	return f.IAMAPI.DeleteAccessKey(r)
+}
+
+// TestCreateCredential verifies that credential creation only deletes existing credentials when it needs to
+// (i.e., when the user already has two or more keys on IAM), and that it deletes the oldest one.
+func TestCreateCredential(t *testing.T) { + cases := []struct { + name string + username string + id string + deletedKey string + opts []awsutil.MockIAMOption + }{ + { + name: "zero keys", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "one key", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Now())}, + }, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "two keys", + username: "jane-doe", + id: "unique-id", + deletedKey: "foo", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Time{})}, + {AccessKeyId: aws.String("bar"), CreateDate: aws.Time(time.Now())}, + }, + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + miam, err := awsutil.NewMockIAM( + c.opts..., + )(nil) + if err != nil { + t.Fatal(err) + } + fiam := &fakeIAM{ + IAMAPI: miam, + } + + b := Backend(config) + b.iamClient = fiam + + err = b.createCredential(context.Background(), config.StorageView, staticRoleEntry{Username: c.username, ID: c.id}, true) + if err != nil { + t.Fatalf("got an error we didn't expect: %q", err) + } + + if c.deletedKey != "" { + if len(fiam.delReqs) != 1 { + t.Fatalf("called the wrong number of deletes (called %d deletes)", len(fiam.delReqs)) + } + actualKey := *fiam.delReqs[0].AccessKeyId + if c.deletedKey != actualKey { + t.Fatalf("we deleted the wrong key: %q instead of %q", actualKey, c.deletedKey) + } + } + }) + } +} + +// TestRequeueOnError verifies that in the case of an error, the entry will still be in the queue for later rotation +func TestRequeueOnError(t *testing.T) { + bgCTX := context.Background() + + cred := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 30 * time.Minute, + } + + ak := "long-access-key-id" + oldSecret := "abcdefghijklmnopqrstuvwxyz" 
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+
+	b := Backend(config)
+
+	// go through the process of adding a key
+	miam, err := awsutil.NewMockIAM(
+		awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+			AccessKeyMetadata: []*iam.AccessKeyMetadata{
+				{},
+			},
+		}),
+		// initial key to store
+		awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+			AccessKey: &iam.AccessKey{
+				AccessKeyId:     aws.String(ak),
+				SecretAccessKey: aws.String(oldSecret),
+			},
+		}),
+		awsutil.WithGetUserOutput(&iam.GetUserOutput{
+			User: &iam.User{
+				UserId:   aws.String(cred.ID),
+				UserName: aws.String(cred.Username),
+			},
+		}),
+	)(nil)
+	if err != nil {
+		t.Fatalf("couldn't initialize the mock iam: %s", err)
+	}
+
+	b.iamClient = miam
+
+	err = b.createCredential(bgCTX, config.StorageView, cred, true)
+	if err != nil {
+		t.Fatalf("couldn't insert credential: %s", err)
+	}
+
+	// put the cred in the queue but age it out
+	item := &queue.Item{
+		Key:      cred.Name,
+		Value:    cred,
+		Priority: time.Now().Add(-10 * time.Minute).Unix(),
+	}
+	err = b.credRotationQueue.Push(item)
+	if err != nil {
+		t.Fatalf("couldn't push item onto queue: %s", err)
+	}
+
+	// update the mock iam with the next requests
+	miam, err = awsutil.NewMockIAM(
+		awsutil.WithGetUserError(errors.New("oh no")),
+	)(nil)
+	if err != nil {
+		t.Fatalf("couldn't initialize the mock iam: %s", err)
+	}
+	b.iamClient = miam
+
+	// now rotate, but it will fail
+	r, e := b.rotateCredential(bgCTX, config.StorageView)
+	if !r {
+		t.Fatalf("rotate credential should return true in this case, but it didn't")
+	}
+	if e == nil {
+		t.Fatalf("we expected an error when rotating a credential, but didn't get one")
+	}
+	// the queue should be updated though
+	i, e := b.credRotationQueue.PopByKey(cred.Name)
+	if e != nil {
+		t.Fatalf("queue error: %s", e)
+	}
+	delta := time.Now().Add(10*time.Second).Unix() - i.Priority
+	if delta < -5 || delta > 5 {
+		t.Fatalf("priority should be within 5 seconds of our backoff interval, but was off by %d", delta)
+	}
+}
diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go
index eb83ed5fa16e..60fe8015f445 100644
--- a/builtin/logical/aws/secret_access_keys.go
+++ b/builtin/logical/aws/secret_access_keys.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package aws
 
 import (
@@ -35,9 +38,14 @@ func secretAccessKeys(b *backend) *framework.Secret {
 				Type:        framework.TypeString,
 				Description: "Secret Key",
 			},
+			"session_token": {
+				Type:        framework.TypeString,
+				Description: "Session Token",
+			},
 			"security_token": {
 				Type:        framework.TypeString,
 				Description: "Security Token",
+				Deprecated:  true,
 			},
 		},
 
@@ -150,20 +158,83 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage,
 		return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil
 	}
 
-	tokenResp, err := stsClient.GetFederationToken(getTokenInput)
+	tokenResp, err := stsClient.GetFederationTokenWithContext(ctx, getTokenInput)
 	if err != nil {
 		return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err)
 	}
 
-	// STS credentials cannot be revoked so do not create a lease
-	return &logical.Response{
-		Data: map[string]interface{}{
-			"access_key":     *tokenResp.Credentials.AccessKeyId,
-			"secret_key":     *tokenResp.Credentials.SecretAccessKey,
-			"security_token": *tokenResp.Credentials.SessionToken,
-			"ttl":            uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()),
-		},
-	}, nil
+	// While STS credentials cannot be revoked/renewed, we will still create a lease since users are
+	// relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually.
+	//
+	ttl := time.Until(*tokenResp.Credentials.Expiration)
+	resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
+		"access_key":     *tokenResp.Credentials.AccessKeyId,
+		"secret_key":     *tokenResp.Credentials.SecretAccessKey,
+		"security_token": *tokenResp.Credentials.SessionToken,
+		"session_token":  *tokenResp.Credentials.SessionToken,
+		"ttl":            uint64(ttl.Seconds()),
+	}, map[string]interface{}{
+		"username": username,
+		"policy":   policy,
+		"is_sts":   true,
+	})
+
+	// Set the secret TTL to appropriately match the expiration of the token
+	resp.Secret.TTL = ttl
+
+	// STS are purposefully short-lived and aren't renewable
+	resp.Secret.Renewable = false
+
+	return resp, nil
+}
+
+// NOTE: Getting session tokens with or without MFA/TOTP has behavior that can cause confusion.
+// When an AWS IAM user has a policy attached that requires an MFA code via "aws:MultiFactorAuthPresent": "true",
+// credentials may still be returned even when no MFA code is provided.
+// If the Vault role associated with the IAM user is configured without an mfa_serial_number and no
+// mfa_code is given, the API call succeeds and returns credentials. Those credentials
+// are scoped to the resources in the policy that do NOT have "aws:MultiFactorAuthPresent": "true" set;
+// access to resources that do have it set will be denied.
+// This is expected behavior, as the policy may have a mix of permissions, some requiring MFA and others not.
+// If an mfa_serial_number is set on the Vault role, then a valid mfa_code MUST be provided to succeed.
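+//
+// Illustrative usage (the mount path, role name, serial number, and the
+// credential_type value here are hypothetical, not taken from this change):
+//
+//	vault write aws/roles/mfa-role credential_type=session_token mfa_serial_number=arn:aws:iam::123456789012:mfa/jane-doe
+//	vault read aws/sts/mfa-role mfa_code=123456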
+func (b *backend) getSessionToken(ctx context.Context, s logical.Storage, serialNumber, mfaCode string, lifeTimeInSeconds int64) (*logical.Response, error) {
+	stsClient, err := b.clientSTS(ctx, s)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+
+	getTokenInput := &sts.GetSessionTokenInput{
+		DurationSeconds: &lifeTimeInSeconds,
+	}
+	if serialNumber != "" {
+		getTokenInput.SerialNumber = &serialNumber
+	}
+	if mfaCode != "" {
+		getTokenInput.TokenCode = &mfaCode
+	}
+
+	tokenResp, err := stsClient.GetSessionToken(getTokenInput)
+	if err != nil {
+		return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err)
+	}
+
+	ttl := time.Until(*tokenResp.Credentials.Expiration)
+	resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
+		"access_key":    *tokenResp.Credentials.AccessKeyId,
+		"secret_key":    *tokenResp.Credentials.SecretAccessKey,
+		"session_token": *tokenResp.Credentials.SessionToken,
+		"ttl":           uint64(ttl.Seconds()),
+	}, map[string]interface{}{
+		"is_sts": true,
+	})
+
+	// Set the secret TTL to appropriately match the expiration of the token
+	resp.Secret.TTL = ttl
+
+	// STS are purposefully short-lived and aren't renewable
+	resp.Secret.Renewable = false
+
+	return resp, nil
+}
 
 func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
@@ -225,21 +296,35 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
 	if len(policyARNs) > 0 {
 		assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs))
 	}
-	tokenResp, err := stsClient.AssumeRole(assumeRoleInput)
+	tokenResp, err := stsClient.AssumeRoleWithContext(ctx, assumeRoleInput)
 	if err != nil {
 		return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err)
 	}
 
-	// STS credentials cannot be revoked so do not create a lease
-	return &logical.Response{
-		Data: map[string]interface{}{
-			"access_key":     *tokenResp.Credentials.AccessKeyId,
-			"secret_key":     *tokenResp.Credentials.SecretAccessKey,
-			"security_token": *tokenResp.Credentials.SessionToken,
-			"arn":            *tokenResp.AssumedRoleUser.Arn,
-			"ttl":            uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()),
-		},
-	}, nil
+	// While STS credentials cannot be revoked/renewed, we will still create a lease since users are
+	// relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually.
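+	// For example (illustrative numbers only): if AWS reports an Expiration
+	// one hour from now, the ttl computed below is roughly 3600s, and the
+	// response's lease_duration mirrors it, even though the lease cannot
+	// actually renew or revoke the underlying STS credentials.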
+ // + ttl := time.Until(*tokenResp.Credentials.Expiration) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "session_token": *tokenResp.Credentials.SessionToken, + "arn": *tokenResp.AssumedRoleUser.Arn, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": roleSessionName, + "policy": roleArn, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil } func readConfig(ctx context.Context, storage logical.Storage) (rootConfig, error) { @@ -311,7 +396,7 @@ func (b *backend) secretAccessKeysCreate( } // Create the user - _, err = iamClient.CreateUser(createUserRequest) + _, err = iamClient.CreateUserWithContext(ctx, createUserRequest) if err != nil { if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil { iamErr := fmt.Errorf("error creating IAM user: %w", err) @@ -322,7 +407,7 @@ func (b *backend) secretAccessKeysCreate( for _, arn := range role.PolicyArns { // Attach existing policy against user - _, err = iamClient.AttachUserPolicy(&iam.AttachUserPolicyInput{ + _, err = iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{ UserName: aws.String(username), PolicyArn: aws.String(arn), }) @@ -333,7 +418,7 @@ func (b *backend) secretAccessKeysCreate( } if role.PolicyDocument != "" { // Add new inline user policy against user - _, err = iamClient.PutUserPolicy(&iam.PutUserPolicyInput{ + _, err = iamClient.PutUserPolicyWithContext(ctx, &iam.PutUserPolicyInput{ UserName: aws.String(username), PolicyName: aws.String(policyName), PolicyDocument: aws.String(role.PolicyDocument), @@ -345,7 +430,7 @@ func (b *backend) secretAccessKeysCreate( for _, group := range role.IAMGroups { // Add user to IAM groups - _, err = iamClient.AddUserToGroup(&iam.AddUserToGroupInput{ + _, err = iamClient.AddUserToGroupWithContext(ctx, &iam.AddUserToGroupInput{ UserName: aws.String(username), GroupName: aws.String(group), }) @@ -364,18 +449,17 @@ func (b *backend) secretAccessKeysCreate( } if len(tags) > 0 { - _, err = iamClient.TagUser(&iam.TagUserInput{ + _, err = iamClient.TagUserWithContext(ctx, &iam.TagUserInput{ Tags: tags, UserName: &username, }) - if err != nil { return logical.ErrorResponse("Error adding tags to user: %s", err), awsutil.CheckAWSError(err) } } // Create the keys - keyResp, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{ + keyResp, err := iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{ UserName: aws.String(username), }) if err != nil { @@ -391,9 +475,9 @@ func (b *backend) secretAccessKeysCreate( // Return the info! 
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ - "access_key": *keyResp.AccessKey.AccessKeyId, - "secret_key": *keyResp.AccessKey.SecretAccessKey, - "security_token": nil, + "access_key": *keyResp.AccessKey.AccessKeyId, + "secret_key": *keyResp.AccessKey.SecretAccessKey, + "session_token": nil, }, map[string]interface{}{ "username": username, "policy": role, diff --git a/builtin/logical/aws/secret_access_keys_test.go b/builtin/logical/aws/secret_access_keys_test.go index 7ee9d33b8027..8c6804d94641 100644 --- a/builtin/logical/aws/secret_access_keys_test.go +++ b/builtin/logical/aws/secret_access_keys_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -117,7 +120,7 @@ func TestGenUsername(t *testing.T) { func TestReadConfig_DefaultTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } @@ -161,7 +164,7 @@ func TestReadConfig_DefaultTemplate(t *testing.T) { func TestReadConfig_CustomTemplate(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - b := Backend() + b := Backend(config) if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } diff --git a/builtin/logical/aws/stepwise_test.go b/builtin/logical/aws/stepwise_test.go index c62975b9474e..dff852859f90 100644 --- a/builtin/logical/aws/stepwise_test.go +++ b/builtin/logical/aws/stepwise_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -17,7 +20,7 @@ func TestAccBackend_Stepwise_basic(t *testing.T) { t.Parallel() envOptions := &stepwise.MountOptions{ RegistryName: "aws-sec", - PluginType: stepwise.PluginTypeSecrets, + PluginType: api.PluginTypeSecrets, PluginName: "aws", MountPathPrefix: "aws-sec", } @@ -67,7 +70,7 @@ func testAccStepwiseRead(t *testing.T, path, name string, credentialTests []cred var d struct { AccessKey string `mapstructure:"access_key"` SecretKey string `mapstructure:"secret_key"` - STSToken string `mapstructure:"security_token"` + STSToken string `mapstructure:"session_token"` } if err := mapstructure.Decode(resp.Data, &d); err != nil { return err diff --git a/builtin/logical/cassandra/backend.go b/builtin/logical/cassandra/backend.go deleted file mode 100644 index e7087448d15a..000000000000 --- a/builtin/logical/cassandra/backend.go +++ /dev/null @@ -1,134 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "strings" - "sync" - - "github.com/gocql/gocql" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -// Factory creates a new backend -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -// Backend contains the base information for the backend's functionality -func Backend() *backend { - var b backend - b.Backend = &framework.Backend{ - Help: strings.TrimSpace(backendHelp), - - PathsSpecial: &logical.Paths{ - SealWrapStorage: []string{ - "config/connection", - }, - }, - - Paths: []*framework.Path{ - pathConfigConnection(&b), - pathRoles(&b), - pathCredsCreate(&b), - }, - - Secrets: []*framework.Secret{ - secretCreds(&b), - }, - - Invalidate: b.invalidate, - - Clean: func(_ context.Context) { - b.ResetDB(nil) - }, - BackendType: 
logical.TypeLogical, - } - - return &b -} - -type backend struct { - *framework.Backend - - // Session is goroutine safe, however, since we reinitialize - // it when connection info changes, we want to make sure we - // can close it and use a new connection; hence the lock - session *gocql.Session - lock sync.Mutex -} - -type sessionConfig struct { - Hosts string `json:"hosts" structs:"hosts" mapstructure:"hosts"` - Username string `json:"username" structs:"username" mapstructure:"username"` - Password string `json:"password" structs:"password" mapstructure:"password"` - TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` - InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` - Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` - PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` - IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` - ProtocolVersion int `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"` - ConnectTimeout int `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` - TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"` -} - -// DB returns the database connection. -func (b *backend) DB(ctx context.Context, s logical.Storage) (*gocql.Session, error) { - b.lock.Lock() - defer b.lock.Unlock() - - // If we already have a DB, we got it! - if b.session != nil { - return b.session, nil - } - - entry, err := s.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if entry == nil { - return nil, fmt.Errorf("configure the DB connection with config/connection first") - } - - config := &sessionConfig{} - if err := entry.DecodeJSON(config); err != nil { - return nil, err - } - - session, err := createSession(config, s) - // Store the session in backend for reuse - b.session = session - - return session, err -} - -// ResetDB forces a connection next time DB() is called. -func (b *backend) ResetDB(newSession *gocql.Session) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.session != nil { - b.session.Close() - } - - b.session = newSession -} - -func (b *backend) invalidate(_ context.Context, key string) { - switch key { - case "config/connection": - b.ResetDB(nil) - } -} - -const backendHelp = ` -The Cassandra backend dynamically generates database users. - -After mounting this backend, configure it using the endpoints within -the "config/" path. 
-` diff --git a/builtin/logical/cassandra/backend_test.go b/builtin/logical/cassandra/backend_test.go deleted file mode 100644 index 1b76dfe6c2a4..000000000000 --- a/builtin/logical/cassandra/backend_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "log" - "testing" - - "github.com/hashicorp/vault/helper/testhelpers/cassandra" - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -func TestBackend_basic(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - copyFromTo := map[string]string{ - "test-fixtures/cassandra.yaml": "/etc/cassandra/cassandra.yaml", - } - host, cleanup := cassandra.PrepareTestContainer(t, - cassandra.CopyFromTo(copyFromTo), - ) - defer cleanup() - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, host.ConnectionURL()), - testAccStepRole(t), - testAccStepReadCreds(t, "test"), - }, - }) -} - -func TestBackend_roleCrud(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - copyFromTo := map[string]string{ - "test-fixtures/cassandra.yaml": "/etc/cassandra/cassandra.yaml", - } - host, cleanup := cassandra.PrepareTestContainer(t, - cassandra.CopyFromTo(copyFromTo)) - defer cleanup() - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, host.ConnectionURL()), - testAccStepRole(t), - testAccStepRoleWithOptions(t), - testAccStepReadRole(t, "test", testRole), - testAccStepReadRole(t, "test2", testRole), - testAccStepDeleteRole(t, "test"), - testAccStepDeleteRole(t, "test2"), - testAccStepReadRole(t, "test", ""), - testAccStepReadRole(t, "test2", ""), - }, - }) -} - -func testAccStepConfig(t *testing.T, hostname string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Data: map[string]interface{}{ - "hosts": hostname, - "username": "cassandra", - "password": "cassandra", - "protocol_version": 3, - }, - } -} - -func testAccStepRole(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/test", - Data: map[string]interface{}{ - "creation_cql": testRole, - }, - } -} - -func testAccStepRoleWithOptions(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/test2", - Data: map[string]interface{}{ - "creation_cql": testRole, - "lease": "30s", - "consistency": "All", - }, - } -} - -func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: "roles/" + n, - } -} - -func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "creds/" + name, - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[WARN] Generated credentials: %v", d) - - return nil - }, - } -} - -func 
testAccStepReadRole(t *testing.T, name string, cql string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "roles/" + name, - Check: func(resp *logical.Response) error { - if resp == nil { - if cql == "" { - return nil - } - - return fmt.Errorf("response is nil") - } - - var d struct { - CreationCQL string `mapstructure:"creation_cql"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.CreationCQL != cql { - return fmt.Errorf("bad: %#v\n%#v\n%#v\n", resp, cql, d.CreationCQL) - } - - return nil - }, - } -} - -const testRole = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; -GRANT ALL PERMISSIONS ON ALL KEYSPACES TO {{username}};` diff --git a/builtin/logical/cassandra/cmd/cassandra/main.go b/builtin/logical/cassandra/cmd/cassandra/main.go deleted file mode 100644 index 0ab900aa112a..000000000000 --- a/builtin/logical/cassandra/cmd/cassandra/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/cassandra" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: cassandra.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/logical/cassandra/path_config_connection.go b/builtin/logical/cassandra/path_config_connection.go deleted file mode 100644 index afa1816880d8..000000000000 --- a/builtin/logical/cassandra/path_config_connection.go +++ /dev/null @@ -1,245 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - - "github.com/hashicorp/go-secure-stdlib/tlsutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigConnection(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/connection", - Fields: map[string]*framework.FieldSchema{ - "hosts": { - Type: framework.TypeString, - Description: "Comma-separated list of hosts", - }, - - "username": { - Type: framework.TypeString, - Description: "The username to use for connecting to the cluster", - }, - - "password": { - Type: framework.TypeString, - Description: "The password to use for connecting to the cluster", - }, - - "tls": { - Type: framework.TypeBool, - Description: `Whether to use TLS. If pem_bundle or pem_json are -set, this is automatically set to true`, - }, - - "insecure_tls": { - Type: framework.TypeBool, - Description: `Whether to use TLS but skip verification; has no -effect if a CA certificate is provided`, - }, - - // TLS 1.3 is not supported as this engine is deprecated. Please switch to the Cassandra database secrets engine - "tls_min_version": { - Type: framework.TypeString, - Default: "tls12", - Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11' or 'tls12'. 
Defaults to 'tls12'", - }, - - "pem_bundle": { - Type: framework.TypeString, - Description: `PEM-format, concatenated unencrypted secret key -and certificate, with optional CA certificate`, - }, - - "pem_json": { - Type: framework.TypeString, - Description: `JSON containing a PEM-format, unencrypted secret -key and certificate, with optional CA certificate. -The JSON output of a certificate issued with the PKI -backend can be directly passed into this parameter. -If both this and "pem_bundle" are specified, this will -take precedence.`, - }, - - "protocol_version": { - Type: framework.TypeInt, - Description: `The protocol version to use. Defaults to 2.`, - }, - - "connect_timeout": { - Type: framework.TypeDurationSecond, - Default: 5, - Description: `The connection timeout to use. Defaults to 5.`, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConnectionRead, - logical.UpdateOperation: b.pathConnectionWrite, - }, - - HelpSynopsis: pathConfigConnectionHelpSyn, - HelpDescription: pathConfigConnectionHelpDesc, - } -} - -func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entry, err := req.Storage.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if entry == nil { - return logical.ErrorResponse(fmt.Sprintf("Configure the DB connection with config/connection first")), nil - } - - config := &sessionConfig{} - if err := entry.DecodeJSON(config); err != nil { - return nil, err - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "hosts": config.Hosts, - "username": config.Username, - "tls": config.TLS, - "insecure_tls": config.InsecureTLS, - "certificate": config.Certificate, - "issuing_ca": config.IssuingCA, - "protocol_version": config.ProtocolVersion, - "connect_timeout": config.ConnectTimeout, - "tls_min_version": config.TLSMinVersion, - }, - } - return resp, nil -} - -func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - hosts := data.Get("hosts").(string) - username := data.Get("username").(string) - password := data.Get("password").(string) - - switch { - case len(hosts) == 0: - return logical.ErrorResponse("Hosts cannot be empty"), nil - case len(username) == 0: - return logical.ErrorResponse("Username cannot be empty"), nil - case len(password) == 0: - return logical.ErrorResponse("Password cannot be empty"), nil - } - - config := &sessionConfig{ - Hosts: hosts, - Username: username, - Password: password, - TLS: data.Get("tls").(bool), - InsecureTLS: data.Get("insecure_tls").(bool), - ProtocolVersion: data.Get("protocol_version").(int), - ConnectTimeout: data.Get("connect_timeout").(int), - } - - config.TLSMinVersion = data.Get("tls_min_version").(string) - if config.TLSMinVersion == "" { - return logical.ErrorResponse("failed to get 'tls_min_version' value"), nil - } - - var ok bool - _, ok = tlsutil.TLSLookup[config.TLSMinVersion] - if !ok { - return logical.ErrorResponse("invalid 'tls_min_version'"), nil - } - - if config.InsecureTLS { - config.TLS = true - } - - pemBundle := data.Get("pem_bundle").(string) - pemJSON := data.Get("pem_json").(string) - - var certBundle *certutil.CertBundle - var parsedCertBundle *certutil.ParsedCertBundle - var err error - - switch { - case len(pemJSON) != 0: - parsedCertBundle, err = certutil.ParsePKIJSON([]byte(pemJSON)) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Could not 
parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %s", err)), nil - } - certBundle, err = parsedCertBundle.ToCertBundle() - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil - } - config.Certificate = certBundle.Certificate - config.PrivateKey = certBundle.PrivateKey - config.IssuingCA = certBundle.IssuingCA - config.TLS = true - - case len(pemBundle) != 0: - parsedCertBundle, err = certutil.ParsePEMBundle(pemBundle) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error parsing the given PEM information: %s", err)), nil - } - certBundle, err = parsedCertBundle.ToCertBundle() - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Error marshaling PEM information: %s", err)), nil - } - config.Certificate = certBundle.Certificate - config.PrivateKey = certBundle.PrivateKey - config.IssuingCA = certBundle.IssuingCA - config.TLS = true - } - - session, err := createSession(config, req.Storage) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - // Store it - entry, err := logical.StorageEntryJSON("config/connection", config) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - // Reset the DB connection - b.ResetDB(session) - - return nil, nil -} - -const pathConfigConnectionHelpSyn = ` -Configure the connection information to talk to Cassandra. -` - -const pathConfigConnectionHelpDesc = ` -This path configures the connection information used to connect to Cassandra. - -"hosts" is a comma-delimited list of hostnames in the Cassandra cluster. - -"username" and "password" are self-explanatory, although the given user -must have superuser access within Cassandra. Note that since this backend -issues username/password credentials, Cassandra must be configured to use -PasswordAuthenticator or a similar backend for its authentication. If you wish -to have no authorization in Cassandra and want to use TLS client certificates, -see the PKI backend. - -TLS works as follows: - -* If "tls" is set to true, the connection will use TLS; this happens automatically if "pem_bundle", "pem_json", or "insecure_tls" is set - -* If "insecure_tls" is set to true, the connection will not perform verification of the server certificate; this also sets "tls" to true - -* If only "issuing_ca" is set in "pem_json", or the only certificate in "pem_bundle" is a CA certificate, the given CA certificate will be used for server certificate verification; otherwise the system CA certificates will be used - -* If "certificate" and "private_key" are set in "pem_bundle" or "pem_json", client auth will be turned on for the connection - -"pem_bundle" should be a PEM-concatenated bundle of a private key + client certificate, an issuing CA certificate, or both. "pem_json" should contain the same information; for convenience, the JSON format is the same as that output by the issue command from the PKI backend. - -When configuring the connection information, the backend will verify its -validity. 
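The TLS rules described in this help text boil down to a small amount of `crypto/tls` plumbing. The following is a minimal illustrative sketch, not the backend's actual `createSession` implementation (which is defined elsewhere in this package): it reuses the `sessionConfig` fields shown above, and `tlsConfigFromSession` is a hypothetical helper name.

```go
package cassandra

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// tlsConfigFromSession is an illustrative sketch of how the documented TLS
// rules could map onto a *tls.Config. It is not the backend's actual code.
func tlsConfigFromSession(cfg *sessionConfig) (*tls.Config, error) {
	if !cfg.TLS {
		return nil, nil // plaintext connection
	}
	out := &tls.Config{
		// "insecure_tls" skips server certificate verification.
		InsecureSkipVerify: cfg.InsecureTLS,
	}
	if cfg.IssuingCA != "" {
		// A configured issuing CA is used instead of the system roots.
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM([]byte(cfg.IssuingCA)) {
			return nil, fmt.Errorf("could not parse issuing CA certificate")
		}
		out.RootCAs = pool
	}
	if cfg.Certificate != "" && cfg.PrivateKey != "" {
		// A client certificate/key pair turns on client authentication.
		cert, err := tls.X509KeyPair([]byte(cfg.Certificate), []byte(cfg.PrivateKey))
		if err != nil {
			return nil, err
		}
		out.Certificates = []tls.Certificate{cert}
	}
	return out, nil
}
```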
-` diff --git a/builtin/logical/cassandra/path_creds_create.go b/builtin/logical/cassandra/path_creds_create.go deleted file mode 100644 index ec100b961317..000000000000 --- a/builtin/logical/cassandra/path_creds_create.go +++ /dev/null @@ -1,123 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/gocql/gocql" - "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathCredsCreate(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsCreateRead, - }, - - HelpSynopsis: pathCredsCreateReadHelpSyn, - HelpDescription: pathCredsCreateReadHelpDesc, - } -} - -func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get the role - role, err := getRole(ctx, req.Storage, name) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", name)), nil - } - - displayName := req.DisplayName - userUUID, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - username := fmt.Sprintf("vault_%s_%s_%s_%d", name, displayName, userUUID, time.Now().Unix()) - username = strings.ReplaceAll(username, "-", "_") - password, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - // Get our connection - session, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Set consistency - if role.Consistency != "" { - consistencyValue, err := gocql.ParseConsistencyWrapper(role.Consistency) - if err != nil { - return nil, err - } - - session.SetConsistency(consistencyValue) - } - - // Execute each query - for _, query := range strutil.ParseArbitraryStringSlice(role.CreationCQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - err = session.Query(substQuery(query, map[string]string{ - "username": username, - "password": password, - })).Exec() - if err != nil { - // Creation failed partway through: run the rollback CQL as a best - // effort, ignoring individual statement errors, then return the - // original error. - for _, query := range strutil.ParseArbitraryStringSlice(role.RollbackCQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - _ = session.Query(substQuery(query, map[string]string{ - "username": username, - "password": password, - })).Exec() - } - return nil, err - } - } - - // Return the secret - resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ - "username": username, - "password": password, - }, map[string]interface{}{ - "username": username, - "role": name, - }) - resp.Secret.TTL = role.Lease - - return resp, nil -} - -const pathCredsCreateReadHelpSyn = ` -Request database credentials for a certain role. ` - -const pathCredsCreateReadHelpDesc = ` -This path creates database credentials for a certain role. The -database credentials will be generated on demand and will be automatically -revoked when the lease is up.
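Both the creation and rollback paths above rely on `substQuery`, which lives elsewhere in this package (util.go, not part of this diff). A minimal sketch consistent with the call sites above might look like the following; the actual helper may differ in detail.

```go
package cassandra

import (
	"fmt"
	"strings"
)

// substQuery (hypothetical sketch): replace each '{{key}}' placeholder in the
// CQL template with its value. The real helper is defined in util.go, which
// is not shown in this diff.
func substQuery(tpl string, data map[string]string) string {
	for k, v := range data {
		tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v)
	}
	return tpl
}
```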
-` diff --git a/builtin/logical/cassandra/path_roles.go b/builtin/logical/cassandra/path_roles.go deleted file mode 100644 index df7671e47e7c..000000000000 --- a/builtin/logical/cassandra/path_roles.go +++ /dev/null @@ -1,196 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "time" - - "github.com/fatih/structs" - "github.com/gocql/gocql" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - defaultCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;` - defaultRollbackCQL = `DROP USER '{{username}}';` -) - -func pathRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role", - }, - - "creation_cql": { - Type: framework.TypeString, - Default: defaultCreationCQL, - Description: `CQL to create a user and optionally grant -authorization. If not supplied, a default that -creates non-superuser accounts with the built-in -password authenticator will be used; no -authorization grants will be configured. Separate -statements by semicolons; use @file to load from a -file. Valid template values are '{{username}}' and -'{{password}}' -- the single quotes are important!`, - }, - - "rollback_cql": { - Type: framework.TypeString, - Default: defaultRollbackCQL, - Description: `CQL to roll back an account operation. This will -be used if there is an error during execution of a -statement passed in via the "creation_cql" -parameter. The default simply drops the user, which -should generally be sufficient. Separate statements -by semicolons; use @file to load from a file. Valid -template values are '{{username}}' and -'{{password}}' -- the single quotes are important!`, - }, - - "lease": { - Type: framework.TypeString, - Default: "4h", - Description: "The lease length; defaults to 4 hours", - }, - - "consistency": { - Type: framework.TypeString, - Default: "Quorum", - Description: "The consistency level for the operations; defaults to Quorum.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead, - logical.UpdateOperation: b.pathRoleCreate, - logical.DeleteOperation: b.pathRoleDelete, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func getRole(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, err := getRole(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - return nil, nil - } - - return &logical.Response{ - Data: structs.New(role).Map(), - }, nil -} - -func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name :=
data.Get("name").(string) - - creationCQL := data.Get("creation_cql").(string) - - rollbackCQL := data.Get("rollback_cql").(string) - - leaseRaw := data.Get("lease").(string) - lease, err := time.ParseDuration(leaseRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error parsing lease value of %q: %s", leaseRaw, err)), nil - } - - consistencyStr := data.Get("consistency").(string) - _, err = gocql.ParseConsistencyWrapper(consistencyStr) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error parsing consistency value of %q: %v", consistencyStr, err)), nil - } - - entry := &roleEntry{ - Lease: lease, - CreationCQL: creationCQL, - RollbackCQL: rollbackCQL, - Consistency: consistencyStr, - } - - // Store it - entryJSON, err := logical.StorageEntryJSON("role/"+name, entry) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entryJSON); err != nil { - return nil, err - } - - return nil, nil -} - -type roleEntry struct { - CreationCQL string `json:"creation_cql" structs:"creation_cql"` - Lease time.Duration `json:"lease" structs:"lease"` - RollbackCQL string `json:"rollback_cql" structs:"rollback_cql"` - Consistency string `json:"consistency" structs:"consistency"` -} - -const pathRoleHelpSyn = ` -Manage the roles that can be created with this backend. ` - -const pathRoleHelpDesc = ` -This path lets you manage the roles that can be created with this backend. - -The "creation_cql" parameter customizes the CQL string used to create users -and assign them grants. This can be a sequence of CQL queries separated by -semicolons. Some substitution will be done to the CQL string for certain keys. -The names of the variables must be surrounded by '{{' and '}}' to be replaced. -Note that it is important that single quotes are used, not double quotes. - - * "username" - The random username generated for the DB user. - - * "password" - The random password generated for the DB user. - -If no "creation_cql" parameter is given, a default will be used: - -` + defaultCreationCQL + ` - -This default should be suitable for Cassandra installations using the password -authenticator but not configured to use authorization. - -Similarly, since Cassandra has no transactions, the "rollback_cql" statements -are run if user creation fails. The default should be suitable for almost any -instance of Cassandra: - -` + defaultRollbackCQL + ` - -The "lease" parameter sets the credential lease duration; if not set, it -defaults to 4 hours.
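To make the role workflow concrete, here is a hedged usage sketch using the Vault Go API client. The mount point ("cassandra/"), role name, and grant CQL below are illustrative assumptions, not values prescribed by this backend.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR/VAULT_TOKEN are set in the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Define a role whose creation CQL grants read-only access.
	// The backend is assumed to be mounted at "cassandra/".
	_, err = client.Logical().Write("cassandra/roles/readonly", map[string]interface{}{
		"creation_cql": "CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER; " +
			"GRANT SELECT ON ALL KEYSPACES TO '{{username}}';",
		"lease":       "1h",
		"consistency": "Quorum",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reading creds/<role> triggers pathCredsCreateRead above.
	secret, err := client.Logical().Read("cassandra/creds/readonly")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["username"], secret.Data["password"])
}
```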
-` diff --git a/builtin/logical/cassandra/secret_creds.go b/builtin/logical/cassandra/secret_creds.go deleted file mode 100644 index 3ca06c927903..000000000000 --- a/builtin/logical/cassandra/secret_creds.go +++ /dev/null @@ -1,77 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -// SecretCredsType is the type of creds issued from this backend -const SecretCredsType = "cassandra" - -func secretCreds(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretCredsType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username", - }, - - "password": { - Type: framework.TypeString, - Description: "Password", - }, - }, - - Renew: b.secretCredsRenew, - Revoke: b.secretCredsRevoke, - } -} - -func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the lease information - roleRaw, ok := req.Secret.InternalData["role"] - if !ok { - return nil, fmt.Errorf("secret is missing role internal data") - } - roleName, ok := roleRaw.(string) - if !ok { - return nil, fmt.Errorf("error converting role internal data to string") - } - - role, err := getRole(ctx, req.Storage, roleName) - if err != nil { - return nil, fmt.Errorf("unable to load role: %w", err) - } - if role == nil { - // Guard against a nil dereference if the role was deleted - // after the credentials were issued. - return nil, fmt.Errorf("role %q not found during renewal", roleName) - } - - resp := &logical.Response{Secret: req.Secret} - resp.Secret.TTL = role.Lease - return resp, nil -} - -func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - if !ok { - return nil, fmt.Errorf("error converting username internal data to string") - } - - session, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, fmt.Errorf("error getting session: %w", err) - } - - // The username was generated by this backend (UUID-derived, with "-" - // replaced by "_"), so it is safe to interpolate into the statement. - err = session.Query(fmt.Sprintf("DROP USER '%s'", username)).Exec() - if err != nil { - return nil, fmt.Errorf("error removing user %q: %w", username, err) - } - - return nil, nil -} diff --git a/builtin/logical/cassandra/test-fixtures/cassandra.yaml b/builtin/logical/cassandra/test-fixtures/cassandra.yaml deleted file mode 100644 index fe1ec8b07bbd..000000000000 --- a/builtin/logical/cassandra/test-fixtures/cassandra.yaml +++ /dev/null @@ -1,1146 +0,0 @@ -# Cassandra storage config YAML - -# NOTE: -# See http://wiki.apache.org/cassandra/StorageConfiguration for -# full explanations of configuration directives -# /NOTE - -# The name of the cluster. This is mainly used to prevent machines in -# one logical cluster from joining another. -cluster_name: 'Test Cluster' - -# This defines the number of tokens randomly assigned to this node on the ring -# The more tokens, relative to other nodes, the larger the proportion of data -# that this node will store. You probably want all nodes to have the same number -# of tokens assuming they have equal hardware capability. -# -# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, -# and will use the initial_token as described below. -# -# Specifying initial_token will override this setting on the node's initial start; -# on subsequent starts, this setting will apply even if initial token is set.
-# -# If you already have a cluster with 1 token per node, and wish to migrate to -# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations -num_tokens: 256 - -# Triggers automatic allocation of num_tokens tokens for this node. The allocation -# algorithm attempts to choose tokens in a way that optimizes replicated load over -# the nodes in the datacenter for the replication strategy used by the specified -# keyspace. -# -# The load assigned to each node will be close to proportional to its number of -# vnodes. -# -# Only supported with the Murmur3Partitioner. -# allocate_tokens_for_keyspace: KEYSPACE - -# initial_token allows you to specify tokens manually. While you can use it with -# vnodes (num_tokens > 1, above) -- in which case you should provide a -# comma-separated list -- it's primarily used when adding nodes to legacy clusters -# that do not have vnodes enabled. -# initial_token: - -# See http://wiki.apache.org/cassandra/HintedHandoff -# May either be "true" or "false" to enable globally -hinted_handoff_enabled: true - -# When hinted_handoff_enabled is true, a black list of data centers that will not -# perform hinted handoff -# hinted_handoff_disabled_datacenters: -# - DC1 -# - DC2 - -# this defines the maximum amount of time a dead host will have hints -# generated. After it has been dead this long, new hints for it will not be -# created until it has been seen alive and gone down again. -max_hint_window_in_ms: 10800000 # 3 hours - -# Maximum throttle in KBs per second, per delivery thread. This will be -# reduced proportionally to the number of nodes in the cluster. (If there -# are two nodes in the cluster, each delivery thread will use the maximum -# rate; if there are three, each will throttle to half of the maximum, -# since we expect two nodes to be delivering hints simultaneously.) -hinted_handoff_throttle_in_kb: 1024 - -# Number of threads with which to deliver hints; -# Consider increasing this number when you have multi-dc deployments, since -# cross-dc handoff tends to be slower -max_hints_delivery_threads: 2 - -# Directory where Cassandra should store hints. -# If not set, the default directory is $CASSANDRA_HOME/data/hints. -# hints_directory: /var/lib/cassandra/hints - -# How often hints should be flushed from the internal buffers to disk. -# Will *not* trigger fsync. -hints_flush_period_in_ms: 10000 - -# Maximum size for a single hints file, in megabytes. -max_hints_file_size_in_mb: 128 - -# Compression to apply to the hint files. If omitted, hints files -# will be written uncompressed. LZ4, Snappy, and Deflate compressors -# are supported. -#hints_compression: -# - class_name: LZ4Compressor -# parameters: -# - - -# Maximum throttle in KBs per second, total. This will be -# reduced proportionally to the number of nodes in the cluster. -batchlog_replay_throttle_in_kb: 1024 - -# Authentication backend, implementing IAuthenticator; used to identify users -# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, -# PasswordAuthenticator}. -# -# - AllowAllAuthenticator performs no checks - set it to disable authentication. -# - PasswordAuthenticator relies on username/password pairs to authenticate -# users. It keeps usernames and hashed passwords in system_auth.credentials table. -# Please increase system_auth keyspace replication factor if you use this authenticator. 
-# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) -authenticator: PasswordAuthenticator - -# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, -# CassandraAuthorizer}. -# -# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. -# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please -# increase system_auth keyspace replication factor if you use this authorizer. -authorizer: CassandraAuthorizer - -# Part of the Authentication & Authorization backend, implementing IRoleManager; used -# to maintain grants and memberships between roles. -# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, -# which stores role information in the system_auth keyspace. Most functions of the -# IRoleManager require an authenticated login, so unless the configured IAuthenticator -# actually implements authentication, most of this functionality will be unavailable. -# -# - CassandraRoleManager stores role data in the system_auth keyspace. Please -# increase system_auth keyspace replication factor if you use this role manager. -role_manager: CassandraRoleManager - -# Validity period for roles cache (fetching granted roles can be an expensive -# operation depending on the role manager, CassandraRoleManager is one example) -# Granted roles are cached for authenticated sessions in AuthenticatedUser and -# after the period specified here, become eligible for (async) reload. -# Defaults to 2000, set to 0 to disable caching entirely. -# Will be disabled automatically for AllowAllAuthenticator. -roles_validity_in_ms: 2000 - -# Refresh interval for roles cache (if enabled). -# After this interval, cache entries become eligible for refresh. Upon next -# access, an async reload is scheduled and the old value returned until it -# completes. If roles_validity_in_ms is non-zero, then this must be -# also. -# Defaults to the same value as roles_validity_in_ms. -# roles_update_interval_in_ms: 2000 - -# Validity period for permissions cache (fetching permissions can be an -# expensive operation depending on the authorizer, CassandraAuthorizer is -# one example). Defaults to 2000, set to 0 to disable. -# Will be disabled automatically for AllowAllAuthorizer. -permissions_validity_in_ms: 2000 - -# Refresh interval for permissions cache (if enabled). -# After this interval, cache entries become eligible for refresh. Upon next -# access, an async reload is scheduled and the old value returned until it -# completes. If permissions_validity_in_ms is non-zero, then this must be -# also. -# Defaults to the same value as permissions_validity_in_ms. -# permissions_update_interval_in_ms: 2000 - -# Validity period for credentials cache. This cache is tightly coupled to -# the provided PasswordAuthenticator implementation of IAuthenticator. If -# another IAuthenticator implementation is configured, this cache will not -# be automatically used and so the following settings will have no effect. -# Please note, credentials are cached in their encrypted form, so while -# activating this cache may reduce the number of queries made to the -# underlying table, it may not bring a significant reduction in the -# latency of individual authentication attempts. -# Defaults to 2000, set to 0 to disable credentials caching. 
-credentials_validity_in_ms: 2000 - -# Refresh interval for credentials cache (if enabled). -# After this interval, cache entries become eligible for refresh. Upon next -# access, an async reload is scheduled and the old value returned until it -# completes. If credentials_validity_in_ms is non-zero, then this must be -# also. -# Defaults to the same value as credentials_validity_in_ms. -# credentials_update_interval_in_ms: 2000 - -# The partitioner is responsible for distributing groups of rows (by -# partition key) across nodes in the cluster. You should leave this -# alone for new clusters. The partitioner can NOT be changed without -# reloading all data, so when upgrading you should set this to the -# same partitioner you were already using. -# -# Besides Murmur3Partitioner, partitioners included for backwards -# compatibility include RandomPartitioner, ByteOrderedPartitioner, and -# OrderPreservingPartitioner. -# -partitioner: org.apache.cassandra.dht.Murmur3Partitioner - -# Directories where Cassandra should store data on disk. Cassandra -# will spread data evenly across them, subject to the granularity of -# the configured compaction strategy. -# If not set, the default directory is $CASSANDRA_HOME/data/data. -data_file_directories: - - /var/lib/cassandra/data - -# Commit log. When running on magnetic HDD, this should be a -# separate spindle from the data directories. -# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. -commitlog_directory: /var/lib/cassandra/commitlog - -# Enable / disable CDC functionality on a per-node basis. This modifies the logic used -# for write path allocation rejection (standard: never reject. cdc: reject Mutation -# containing a CDC-enabled table if at space limit in cdc_raw_directory). -cdc_enabled: false - -# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the -# segment contains mutations for a CDC-enabled table. This should be placed on a -# separate spindle from the data directories. If not set, the default directory is -# $CASSANDRA_HOME/data/cdc_raw. -# cdc_raw_directory: /var/lib/cassandra/cdc_raw - -# Policy for data disk failures: -# -# die -# shut down gossip and client transports and kill the JVM for any fs errors or -# single-sstable errors, so the node can be replaced. -# -# stop_paranoid -# shut down gossip and client transports even for single-sstable errors, -# kill the JVM for errors during startup. -# -# stop -# shut down gossip and client transports, leaving the node effectively dead, but -# can still be inspected via JMX, kill the JVM for errors during startup. -# -# best_effort -# stop using the failed disk and respond to requests based on -# remaining available sstables. This means you WILL see obsolete -# data at CL.ONE! -# -# ignore -# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra -disk_failure_policy: stop - -# Policy for commit disk failures: -# -# die -# shut down gossip and Thrift and kill the JVM, so the node can be replaced. -# -# stop -# shut down gossip and Thrift, leaving the node effectively dead, but -# can still be inspected via JMX. -# -# stop_commit -# shutdown the commit log, letting writes collect but -# continuing to service reads, as in pre-2.0.5 Cassandra -# -# ignore -# ignore fatal errors and let the batches fail -commit_failure_policy: stop - -# Maximum size of the native protocol prepared statement cache -# -# Valid values are either "auto" (omitting the value) or a value greater than 0.
-# -# Note that specifying too large a value will result in long running GCs and possibly -# out-of-memory errors. Keep the value at a small fraction of the heap. -# -# If you constantly see "prepared statements discarded in the last minute because -# cache limit reached" messages, the first step is to investigate the root cause -# of these messages and check whether prepared statements are used correctly - -# i.e. use bind markers for variable parts. -# -# Only change the default value if you really have more prepared statements than -# fit in the cache. In most cases it is not necessary to change this value. -# Constantly re-preparing statements is a performance penalty. -# -# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater -prepared_statements_cache_size_mb: - -# Maximum size of the Thrift prepared statement cache -# -# If you do not use Thrift at all, it is safe to leave this value at "auto". -# -# See description of 'prepared_statements_cache_size_mb' above for more information. -# -# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater -thrift_prepared_statements_cache_size_mb: - -# Maximum size of the key cache in memory. -# -# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the -# minimum, sometimes more. The key cache is fairly tiny for the amount of -# time it saves, so it's worthwhile to use it at large numbers. -# The row cache saves even more time, but must contain the entire row, -# so it is extremely space-intensive. It's best to only use the -# row cache if you have hot rows or static rows. -# -# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. -# -# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. -key_cache_size_in_mb: - -# Duration in seconds after which Cassandra should -# save the key cache. Caches are saved to saved_caches_directory as -# specified in this configuration file. -# -# Saved caches greatly improve cold-start speeds, and are relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 14400 or 4 hours. -key_cache_save_period: 14400 - -# Number of keys from the key cache to save -# Disabled by default, meaning all keys are going to be saved -# key_cache_keys_to_save: 100 - -# Row cache implementation class name. Available implementations: -# -# org.apache.cassandra.cache.OHCProvider -# Fully off-heap row cache implementation (default). -# -# org.apache.cassandra.cache.SerializingCacheProvider -# This is the row cache implementation available -# in previous releases of Cassandra. -# row_cache_class_name: org.apache.cassandra.cache.OHCProvider - -# Maximum size of the row cache in memory. -# Please note that OHC cache implementation requires some additional off-heap memory to manage -# the map structures and some in-flight memory during operations before/after cache entries can be -# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. -# Do not specify more memory than the system can afford in the worst usual situation and leave some -# headroom for OS block level cache. Never allow your system to swap. -# -# Default value is 0, to disable row caching. -row_cache_size_in_mb: 0 - -# Duration in seconds after which Cassandra should save the row cache. -# Caches are saved to saved_caches_directory as specified in this configuration file.
-# -# Saved caches greatly improve cold-start speeds, and are relatively cheap in -# terms of I/O for the key cache. Row cache saving is much more expensive and -# has limited use. -# -# Default is 0 to disable saving the row cache. -row_cache_save_period: 0 - -# Number of keys from the row cache to save. -# Specify 0 (which is the default), meaning all keys are going to be saved -# row_cache_keys_to_save: 100 - -# Maximum size of the counter cache in memory. -# -# Counter cache helps to reduce counter locks' contention for hot counter cells. -# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before -# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration -# of the lock hold, helping with hot counter cell updates, but will not allow skipping -# the read entirely. Only the local (clock, count) tuple of a counter cell is kept -# in memory, not the whole counter, so it's relatively cheap. -# -# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. -# -# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. -# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. -counter_cache_size_in_mb: - -# Duration in seconds after which Cassandra should -# save the counter cache (keys only). Caches are saved to saved_caches_directory as -# specified in this configuration file. -# -# Default is 7200 or 2 hours. -counter_cache_save_period: 7200 - -# Number of keys from the counter cache to save -# Disabled by default, meaning all keys are going to be saved -# counter_cache_keys_to_save: 100 - -# saved caches -# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. -saved_caches_directory: /var/lib/cassandra/saved_caches - -# commitlog_sync may be either "periodic" or "batch." -# -# When in batch mode, Cassandra won't ack writes until the commit log -# has been fsynced to disk. It will wait -# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -# This window should be kept short because the writer threads will -# be unable to do extra work while waiting. (You may need to increase -# concurrent_writes for the same reason.) -# -# commitlog_sync: batch -# commitlog_sync_batch_window_in_ms: 2 -# -# the other option is "periodic" where writes may be acked immediately -# and the CommitLog is simply synced every commitlog_sync_period_in_ms -# milliseconds. -commitlog_sync: periodic -commitlog_sync_period_in_ms: 10000 - -# The size of the individual commitlog file segments. A commitlog -# segment may be archived, deleted, or recycled once all the data -# in it (potentially from each columnfamily in the system) has been -# flushed to sstables. -# -# The default size is 32, which is almost always fine, but if you are -# archiving commitlog segments (see commitlog_archiving.properties), -# then you probably want a finer granularity of archiving; 8 or 16 MB -# is reasonable. -# Max mutation size is also configurable via max_mutation_size_in_kb setting in -# cassandra.yaml. The default is half of commitlog_segment_size_in_mb * 1024. -# -# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must -# be set to at least twice the size of max_mutation_size_in_kb / 1024 -# -commitlog_segment_size_in_mb: 32 - -# Compression to apply to the commit log. If omitted, the commit log -# will be written uncompressed. LZ4, Snappy, and Deflate compressors -# are supported.
-# commitlog_compression: -# - class_name: LZ4Compressor -# parameters: -# - - -# any class that implements the SeedProvider interface and has a -# constructor that takes a Map of parameters will do. -seed_provider: - # Addresses of hosts that are deemed contact points. - # Cassandra nodes use this list of hosts to find each other and learn - # the topology of the ring. You must change this if you are running - # multiple nodes! - - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # seeds is actually a comma-delimited list of addresses. - # Ex: "<ip1>,<ip2>,<ip3>" - - seeds: "127.0.0.1" - -# For workloads with more data than can fit in memory, Cassandra's -# bottleneck will be reads that need to fetch data from -# disk. "concurrent_reads" should be set to (16 * number_of_drives) in -# order to allow the operations to enqueue low enough in the stack -# that the OS and drives can reorder them. Same applies to -# "concurrent_counter_writes", since counter writes read the current -# values before incrementing and writing them back. -# -# On the other hand, since writes are almost never IO bound, the ideal -# number of "concurrent_writes" is dependent on the number of cores in -# your system; (8 * number_of_cores) is a good rule of thumb. -concurrent_reads: 32 -concurrent_writes: 32 -concurrent_counter_writes: 32 - -# For materialized view writes, as there is a read involved, this should -# be limited by the lesser of concurrent reads or concurrent writes. -concurrent_materialized_view_writes: 32 - -# Maximum memory to use for sstable chunk cache and buffer pooling. -# 32MB of this are reserved for pooling buffers, the rest is used as a -# cache that holds uncompressed sstable chunks. -# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, -# so is in addition to the memory allocated for heap. The cache also has on-heap -# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size -# if the default 64k chunk size is used). -# Memory is only allocated when needed. -# file_cache_size_in_mb: 512 - -# Flag indicating whether to allocate on or off heap when the sstable buffer -# pool is exhausted, that is when it has exceeded the maximum memory -# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. - -# buffer_pool_use_heap_if_exhausted: true - -# The strategy for optimizing disk read -# Possible values are: -# ssd (for solid state disks, the default) -# spinning (for spinning disks) -# disk_optimization_strategy: ssd - -# Total permitted memory to use for memtables. Cassandra will stop -# accepting writes when the limit is exceeded until a flush completes, -# and will trigger a flush based on memtable_cleanup_threshold -# If omitted, Cassandra will set both to 1/4 the size of the heap. -# memtable_heap_space_in_mb: 2048 -# memtable_offheap_space_in_mb: 2048 - -# Ratio of occupied non-flushing memtable size to total permitted size -# that will trigger a flush of the largest memtable. Larger mct will -# mean larger flushes and hence less compaction, but also less concurrent -# flush activity which can make it difficult to keep your disks fed -# under heavy write load. -# -# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) -# memtable_cleanup_threshold: 0.11 - -# Specify the way Cassandra allocates and manages memtable memory.
-# Options are: -# -# heap_buffers -# on heap nio buffers -# -# offheap_buffers -# off heap (direct) nio buffers -# -# offheap_objects -# off heap objects -memtable_allocation_type: heap_buffers - -# Total space to use for commit logs on disk. -# -# If space gets above this value, Cassandra will flush every dirty CF -# in the oldest segment and remove it. So a small total commitlog space -# will tend to cause more flush activity on less-active columnfamilies. -# -# The default value is the smaller of 8192, and 1/4 of the total space -# of the commitlog volume. -# -# commitlog_total_space_in_mb: 8192 - -# This sets the number of memtable flush writer threads. These will -# be blocked by disk io, and each one will hold a memtable in memory -# while blocked. -# -# memtable_flush_writers defaults to one per data_file_directory. -# -# If your data directories are backed by SSD, you can increase this, but -# avoid having memtable_flush_writers * data_file_directories > number of cores -#memtable_flush_writers: 1 - -# Total space to use for change-data-capture logs on disk. -# -# If space gets above this value, Cassandra will throw WriteTimeoutException -# on Mutations including tables with CDC enabled. A CDCCompactor is responsible -# for parsing the raw CDC logs and deleting them when parsing is completed. -# -# The default value is the min of 4096 mb and 1/8th of the total space -# of the drive where cdc_raw_directory resides. -# cdc_total_space_in_mb: 4096 - -# When we hit our cdc_raw limit and the CDCCompactor is either running behind -# or experiencing backpressure, we check at the following interval to see if any -# new space for cdc-tracked tables has been made available. Defaults to 250ms -# cdc_free_space_check_interval_ms: 250 - -# A fixed memory pool size in MB for SSTable index summaries. If left -# empty, this will default to 5% of the heap size. If the memory usage of -# all index summaries exceeds this limit, SSTables with low read rates will -# shrink their index summaries in order to meet this limit. However, this -# is a best-effort process. In extreme conditions Cassandra may need to use -# more than this amount of memory. -index_summary_capacity_in_mb: - -# How frequently index summaries should be resampled. This is done -# periodically to redistribute memory from the fixed-size pool to sstables -# proportional to their recent read rates. Setting to -1 will disable this -# process, leaving existing index summaries at their current sampling level. -index_summary_resize_interval_in_minutes: 60 - -# Whether to, when doing sequential writing, fsync() at intervals in -# order to force the operating system to flush the dirty -# buffers. Enable this to avoid sudden dirty buffer flushing from -# impacting read latencies. Almost always a good idea on SSDs; not -# necessarily on platters. -trickle_fsync: false -trickle_fsync_interval_in_kb: 10240 - -# TCP port, for commands and data -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -storage_port: 7000 - -# SSL port, for encrypted communication. Unused unless enabled in -# encryption_options -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -ssl_storage_port: 7001 - -# Address or interface to bind to and tell other Cassandra nodes to connect to. -# You _must_ change this if you want multiple nodes to be able to communicate! -# -# Set listen_address OR listen_interface, not both. -# -# Leaving it blank leaves it up to InetAddress.getLocalHost().
This -# will always do the Right Thing _if_ the node is properly configured -# (hostname, name resolution, etc), and the Right Thing is to use the -# address associated with the hostname (it might not be). -# -# Setting listen_address to 0.0.0.0 is always wrong. -# -listen_address: 172.17.0.2 - -# Set listen_address OR listen_interface, not both. Interfaces must correspond -# to a single address, IP aliasing is not supported. -# listen_interface: eth0 - -# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 -# address will be used. If true the first ipv6 address will be used. Defaults to false preferring -# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. -# listen_interface_prefer_ipv6: false - -# Address to broadcast to other Cassandra nodes -# Leaving this blank will set it to the same value as listen_address -broadcast_address: 127.0.0.1 - -# When using multiple physical network interfaces, set this -# to true to listen on broadcast_address in addition to -# the listen_address, allowing nodes to communicate in both -# interfaces. -# Ignore this property if the network configuration automatically -# routes between the public and private networks such as EC2. -# listen_on_broadcast_address: false - -# Internode authentication backend, implementing IInternodeAuthenticator; -# used to allow/disallow connections from peer nodes. -# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator - -# Whether to start the native transport server. -# Please note that the address on which the native transport is bound is the -# same as the rpc_address. The port however is different and specified below. -start_native_transport: true -# port for the CQL native transport to listen for clients on -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -native_transport_port: 9042 -# Enabling native transport encryption in client_encryption_options allows you to either use -# encryption for the standard port or to use a dedicated, additional port along with the unencrypted -# standard native_transport_port. -# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -# for native_transport_port. Setting native_transport_port_ssl to a different value -# from native_transport_port will use encryption for native_transport_port_ssl while -# keeping native_transport_port unencrypted. -# native_transport_port_ssl: 9142 -# The maximum threads for handling requests when the native transport is used. -# This is similar to rpc_max_threads though the default differs slightly (and -# there is no native_transport_min_threads, idle threads will always be stopped -# after 30 seconds). -# native_transport_max_threads: 128 -# -# The maximum size of allowed frame. Frame (requests) larger than this will -# be rejected as invalid. The default is 256MB. If you're changing this parameter, -# you may want to adjust max_value_size_in_mb accordingly. -# native_transport_max_frame_size_in_mb: 256 - -# The maximum number of concurrent client connections. -# The default is -1, which means unlimited. -# native_transport_max_concurrent_connections: -1 - -# The maximum number of concurrent client connections per source ip. -# The default is -1, which means unlimited. 
-# native_transport_max_concurrent_connections_per_ip: -1 - -# Whether to start the thrift rpc server. -start_rpc: false - -# The address or interface to bind the Thrift RPC service and native transport -# server to. -# -# Set rpc_address OR rpc_interface, not both. -# -# Leaving rpc_address blank has the same effect as on listen_address -# (i.e. it will be based on the configured hostname of the node). -# -# Note that unlike listen_address, you can specify 0.0.0.0, but you must also -# set broadcast_rpc_address to a value other than 0.0.0.0. -# -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -rpc_address: 0.0.0.0 - -# Set rpc_address OR rpc_interface, not both. Interfaces must correspond -# to a single address, IP aliasing is not supported. -# rpc_interface: eth1 - -# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -# address will be used. If true the first ipv6 address will be used. Defaults to false preferring -# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. -# rpc_interface_prefer_ipv6: false - -# port for Thrift to listen for clients on -rpc_port: 9160 - -# RPC address to broadcast to drivers and other Cassandra nodes. This cannot -# be set to 0.0.0.0. If left blank, this will be set to the value of -# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -# be set. -broadcast_rpc_address: 127.0.0.1 - -# enable or disable keepalive on rpc/native connections -rpc_keepalive: true - -# Cassandra provides two out-of-the-box options for the RPC Server: -# -# sync -# One thread per thrift connection. For a very large number of clients, memory -# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size -# per thread, and that will correspond to your use of virtual memory (but physical memory -# may be limited depending on use of stack space). -# -# hsha -# Stands for "half synchronous, half asynchronous." All thrift clients are handled -# asynchronously using a small number of threads that do not vary with the number -# of thrift clients (and thus scales well to many clients). The rpc requests are still -# synchronous (one thread per active request). If hsha is selected then it is essential -# that rpc_max_threads is changed from the default value of unlimited. -# -# The default is sync because on Windows hsha is about 30% slower. On Linux, -# sync/hsha performance is about the same, with hsha of course using less memory. -# -# Alternatively, you can provide your own RPC server by providing the fully-qualified class name -# of an o.a.c.t.TServerFactory that can create an instance of it. -rpc_server_type: sync - -# Uncomment rpc_min|max_thread to set request pool size limits. -# -# Regardless of your choice of RPC server (see above), the number of maximum requests in the -# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync -# RPC server, it also dictates the number of clients that can be connected at all). -# -# The default is unlimited and thus provides no protection against clients overwhelming the server. You are -# encouraged to set a maximum that makes sense for you in production, but do keep in mind that -# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-# -# rpc_min_threads: 16 -# rpc_max_threads: 2048 - -# uncomment to set socket buffer sizes on rpc connections -# rpc_send_buff_size_in_bytes: -# rpc_recv_buff_size_in_bytes: - -# Uncomment to set socket buffer size for internode communication -# Note that when setting this, the buffer size is limited by net.core.wmem_max -# and when not setting it it is defined by net.ipv4.tcp_wmem -# See also: -# /proc/sys/net/core/wmem_max -# /proc/sys/net/core/rmem_max -# /proc/sys/net/ipv4/tcp_wmem -# /proc/sys/net/ipv4/tcp_rmem -# and 'man tcp' -# internode_send_buff_size_in_bytes: - -# Uncomment to set socket buffer size for internode communication -# Note that when setting this, the buffer size is limited by net.core.wmem_max -# and when not setting it it is defined by net.ipv4.tcp_wmem -# internode_recv_buff_size_in_bytes: - -# Frame size for thrift (maximum message length). -thrift_framed_transport_size_in_mb: 15 - -# Set to true to have Cassandra create a hard link to each sstable -# flushed or streamed locally in a backups/ subdirectory of the -# keyspace data. Removing these links is the operator's -# responsibility. -incremental_backups: false - -# Whether or not to take a snapshot before each compaction. Be -# careful using this option, since Cassandra won't clean up the -# snapshots for you. Mostly useful if you're paranoid when there -# is a data format change. -snapshot_before_compaction: false - -# Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true -# should be used to provide data safety. If you set this flag to false, you will -# lose data on truncation or drop. -auto_snapshot: true - -# Granularity of the collation index of rows within a partition. -# Increase if your rows are large, or if you have a very large -# number of rows per partition. The competing goals are these: -# -# - a smaller granularity means more index entries are generated -# and looking up rows within the partition by collation column -# is faster -# - but, Cassandra will keep the collation index in memory for hot -# rows (as part of the key cache), so a larger granularity means -# you can cache more hot rows -column_index_size_in_kb: 64 - -# Per sstable indexed key cache entries (the collation index in memory -# mentioned above) exceeding this size will not be held on heap. -# This means that only partition information is held on heap and the -# index entries are read from disk. -# -# Note that this size refers to the size of the -# serialized index information and not the size of the partition. -column_index_cache_size_in_kb: 2 - -# Number of simultaneous compactions to allow, NOT including -# validation "compactions" for anti-entropy repair. Simultaneous -# compactions can help preserve read performance in a mixed read/write -# workload, by mitigating the tendency of small sstables to accumulate -# during a single long-running compaction. The default is usually -# fine and if you experience problems with compaction running too -# slowly or too fast, you should look at -# compaction_throughput_mb_per_sec first. -# -# concurrent_compactors defaults to the smaller of (number of disks, -# number of cores), with a minimum of 2 and a maximum of 8. -# -# If your data directories are backed by SSD, you should increase this -# to the number of cores. -#concurrent_compactors: 1 - -# Throttles compaction to the given total throughput across the entire -# system.
The faster you insert data, the faster you need to compact in -# order to keep the sstable count down, but in general, setting this to -# 16 to 32 times the rate you are inserting data is more than sufficient. -# Setting this to 0 disables throttling. Note that this accounts for all types -# of compaction, including validation compaction. -compaction_throughput_mb_per_sec: 16 - -# When compacting, the replacement sstable(s) can be opened before they -# are completely written, and used in place of the prior sstables for -# any range that has been written. This helps to smoothly transfer reads -# between the sstables, reducing page cache churn and keeping hot rows hot -sstable_preemptive_open_interval_in_mb: 50 - -# Throttles all outbound streaming file transfers on this node to the -# given total throughput in Mbps. This is necessary because Cassandra does -# mostly sequential IO when streaming data during bootstrap or repair, which -# can lead to saturating the network connection and degrading rpc performance. -# When unset, the default is 200 Mbps or 25 MB/s. -# stream_throughput_outbound_megabits_per_sec: 200 - -# Throttles all streaming file transfer between the datacenters, -# this setting allows users to throttle inter dc stream throughput in addition -# to throttling all network stream traffic as configured with -# stream_throughput_outbound_megabits_per_sec -# When unset, the default is 200 Mbps or 25 MB/s -# inter_dc_stream_throughput_outbound_megabits_per_sec: 200 - -# How long the coordinator should wait for read operations to complete -read_request_timeout_in_ms: 5000 -# How long the coordinator should wait for seq or index scans to complete -range_request_timeout_in_ms: 10000 -# How long the coordinator should wait for writes to complete -write_request_timeout_in_ms: 2000 -# How long the coordinator should wait for counter writes to complete -counter_write_request_timeout_in_ms: 5000 -# How long a coordinator should continue to retry a CAS operation -# that contends with other proposals for the same row -cas_contention_timeout_in_ms: 1000 -# How long the coordinator should wait for truncates to complete -# (This can be much longer, because unless auto_snapshot is disabled -# we need to flush first so we can snapshot before removing the data.) -truncate_request_timeout_in_ms: 60000 -# The default timeout for other, miscellaneous operations -request_timeout_in_ms: 10000 - -# Enable operation timeout information exchange between nodes to accurately -# measure request timeouts. If disabled, replicas will assume that requests -# were forwarded to them instantly by the coordinator, which means that -# under overload conditions we will waste that much extra time processing -# already-timed-out requests. -# -# Warning: before enabling this property make sure ntp is installed -# and the times are synchronized between the nodes. -cross_node_timeout: false - -# Set socket timeout for streaming operation. -# The stream session is failed if no data/ack is received by any of the participants -# within that period, which means this should also be sufficient to stream a large -# sstable or rebuild table indexes. -# Default value is 86400000ms, which means stale streams timeout after 24 hours. -# A value of zero means stream sockets should never time out. -# streaming_socket_timeout_in_ms: 86400000 - -# phi value that must be reached for a host to be marked down. -# most users should never need to adjust this.
-# phi_convict_threshold: 8 - -# endpoint_snitch -- Set this to a class that implements -# IEndpointSnitch. The snitch has two functions: -# -# - it teaches Cassandra enough about your network topology to route -# requests efficiently -# - it allows Cassandra to spread replicas around your cluster to avoid -# correlated failures. It does this by grouping machines into -# "datacenters" and "racks." Cassandra will do its best not to have -# more than one replica on the same "rack" (which may not actually -# be a physical location) -# -# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH -# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. -# This means that if you start with the default SimpleSnitch, which -# locates every node on "rack1" in "datacenter1", your only options -# if you need to add another datacenter are GossipingPropertyFileSnitch -# (and the older PFS). From there, if you want to migrate to an -# incompatible snitch like Ec2Snitch you can do it by adding new nodes -# under Ec2Snitch (which will locate them in a new "datacenter") and -# decommissioning the old ones. -# -# Out of the box, Cassandra provides: -# -# SimpleSnitch: -# Treats Strategy order as proximity. This can improve cache -# locality when disabling read repair. Only appropriate for -# single-datacenter deployments. -# -# GossipingPropertyFileSnitch -# This should be your go-to snitch for production use. The rack -# and datacenter for the local node are defined in -# cassandra-rackdc.properties and propagated to other nodes via -# gossip. If cassandra-topology.properties exists, it is used as a -# fallback, allowing migration from the PropertyFileSnitch. -# -# PropertyFileSnitch: -# Proximity is determined by rack and data center, which are -# explicitly configured in cassandra-topology.properties. -# -# Ec2Snitch: -# Appropriate for EC2 deployments in a single Region. Loads Region -# and Availability Zone information from the EC2 API. The Region is -# treated as the datacenter, and the Availability Zone as the rack. -# Only private IPs are used, so this will not work across multiple -# Regions. -# -# Ec2MultiRegionSnitch: -# Uses public IPs as broadcast_address to allow cross-region -# connectivity. (Thus, you should set seed addresses to the public -# IP as well.) You will need to open the storage_port or -# ssl_storage_port on the public IP firewall. (For intra-Region -# traffic, Cassandra will switch to the private IP after -# establishing a connection.) -# -# RackInferringSnitch: -# Proximity is determined by rack and data center, which are -# assumed to correspond to the 3rd and 2nd octet of each node's IP -# address, respectively. Unless this happens to match your -# deployment conventions, this is best used as an example of -# writing a custom Snitch class and is provided in that spirit. -# -# You can use a custom Snitch by setting this to the full class name -# of the snitch, which will be assumed to be on your classpath. -endpoint_snitch: SimpleSnitch - -# controls how often to perform the more expensive part of host score -# calculation -dynamic_snitch_update_interval_in_ms: 100 -# controls how often to reset all host scores, allowing a bad host to -# possibly recover -dynamic_snitch_reset_interval_in_ms: 600000 -# if set greater than zero and read_repair_chance is < 1.0, this will allow -# 'pinning' of replicas to hosts in order to increase cache capacity. 
The badness threshold will control how much worse the pinned host has to be -# before the dynamic snitch will prefer other replicas over it. This is -# expressed as a double which represents a percentage. Thus, a value of -# 0.2 means Cassandra would continue to prefer the static snitch values -# until the pinned host was 20% worse than the fastest. -dynamic_snitch_badness_threshold: 0.1 - -# request_scheduler -- Set this to a class that implements -# RequestScheduler, which will schedule incoming client requests -# according to the specific policy. This is useful for multi-tenancy -# with a single Cassandra cluster. -# NOTE: This is specifically for requests from the client and does -# not affect inter-node communication. -# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place -# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of -# client requests to a node with a separate queue for each -# request_scheduler_id. The scheduler is further customized by -# request_scheduler_options as described below. -request_scheduler: org.apache.cassandra.scheduler.NoScheduler - -# Scheduler Options vary based on the type of scheduler -# -# NoScheduler -# Has no options -# -# RoundRobin -# throttle_limit -# The throttle_limit is the number of in-flight -# requests per client. Requests beyond -# that limit are queued up until -# running requests can complete. -# The value of 80 here is twice the number of -# concurrent_reads + concurrent_writes. -# default_weight -# default_weight is optional and allows for -# overriding the default which is 1. -# weights -# Weights are optional and will default to 1 or the -# overridden default_weight. The weight translates into how -# many requests are handled during each turn of the -# RoundRobin, based on the scheduler id. -# -# request_scheduler_options: -# throttle_limit: 80 -# default_weight: 5 -# weights: -# Keyspace1: 1 -# Keyspace2: 5 - -# request_scheduler_id -- An identifier on which to perform -# the request scheduling. Currently the only valid option is keyspace. -# request_scheduler_id: keyspace - -# Enable or disable inter-node encryption -# JVM defaults for supported SSL socket protocols and cipher suites can -# be replaced using custom encryption options. This is not recommended -# unless you have policies in place that dictate certain settings, or -# need to disable vulnerable ciphers or protocols in case the JVM cannot -# be updated. -# FIPS-compliant settings can be configured at the JVM level and should not -# involve changing encryption settings here: -# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html -# *NOTE* No custom encryption options are enabled at the moment -# The available internode options are: all, none, dc, rack -# -# If set to dc, Cassandra will encrypt the traffic between the DCs -# If set to rack, Cassandra will encrypt the traffic between the racks -# -# The passwords used in these options must match the passwords used when generating -# the keystore and truststore.
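To make the dynamic_snitch_badness_threshold semantics above concrete before continuing with the encryption options: with the default of 0.1, reads stay pinned until the pinned replica scores more than 10% worse than the fastest one. A hedged sketch of that comparison (simplified; the real snitch scoring incorporates latency samples and severity, and these names are illustrative):

```go
// preferPinned reports whether the dynamic snitch keeps routing reads to the
// pinned replica: it does so until the pinned host's latency score exceeds
// the fastest replica's score by more than the badness threshold.
func preferPinned(pinnedScore, fastestScore, badnessThreshold float64) bool {
	return pinnedScore <= fastestScore*(1+badnessThreshold)
}

// Example: with the default threshold of 0.1, a pinned host scoring 1.08
// against a fastest host scoring 1.0 is still preferred (8% worse < 10%).
```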
For instructions on generating these files, see: -# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore -# -server_encryption_options: - internode_encryption: none - keystore: conf/.keystore - keystore_password: cassandra - truststore: conf/.truststore - truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - # require_client_auth: false - # require_endpoint_verification: false - -# Enable or disable client/server encryption. -client_encryption_options: - enabled: false - # If enabled and optional is set to true, both encrypted and unencrypted connections are handled. - optional: false - keystore: conf/.keystore - keystore_password: cassandra - # require_client_auth: false - # Set truststore and truststore_password if require_client_auth is true - # truststore: conf/.truststore - # truststore_password: cassandra - # More advanced defaults below: - # protocol: TLS - # algorithm: SunX509 - # store_type: JKS - # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] - -# internode_compression controls whether traffic between nodes is -# compressed. -# Can be: -# -# all -# all traffic is compressed -# -# dc -# traffic between different datacenters is compressed -# -# none -# nothing is compressed. -internode_compression: dc - -# Enable or disable tcp_nodelay for inter-dc communication. -# Disabling it will result in larger (but fewer) network packets being sent, -# reducing overhead from the TCP protocol itself, at the cost of increasing -# latency if you block for cross-datacenter responses. -inter_dc_tcp_nodelay: false - -# TTL for different trace types used during logging of the repair process. -tracetype_query_ttl: 86400 -tracetype_repair_ttl: 604800 - -# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level. -# This threshold can be adjusted to minimize logging if necessary -# gc_log_threshold_in_ms: 200 - -# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at -# INFO level. -# UDFs (user defined functions) are disabled by default. -# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. -enable_user_defined_functions: false - -# Enables scripted UDFs (JavaScript UDFs). -# Java UDFs are always enabled if enable_user_defined_functions is true. -# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. -# This option has no effect if enable_user_defined_functions is false. -enable_scripted_user_defined_functions: false - -# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. -# Lowering this value on Windows can provide much tighter latency and better throughput; however, -# some virtualized environments may see a negative performance impact from changing this setting -# below their system default. The sysinternals 'clockres' tool can confirm your system's default -# setting. -windows_timer_interval: 1 - - -# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from -# a JCE-style keystore.
A single keystore can hold multiple keys, but the one referenced by -# the "key_alias" is the only key that will be used for encrypt operations; previously used keys -# can still (and should!) be in the keystore and will be used on decrypt operations -# (to handle the case of key rotation). -# -# It is strongly recommended to download and install Java Cryptography Extension (JCE) -# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. -# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) -# -# Currently, only the following file types are supported for transparent data encryption, although -# more are coming in future Cassandra releases: commitlog, hints -transparent_data_encryption_options: - enabled: false - chunk_length_kb: 64 - cipher: AES/CBC/PKCS5Padding - key_alias: testing:1 - # CBC IV length for AES needs to be 16 bytes (which is also the default size) - # iv_length: 16 - key_provider: - - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: - - keystore: conf/.keystore - keystore_password: cassandra - store_type: JCEKS - key_password: cassandra - - -##################### -# SAFETY THRESHOLDS # -##################### - -# When executing a scan, within or across a partition, we need to keep the -# tombstones seen in memory so we can return them to the coordinator, which -# will use them to make sure other replicas also know about the deleted rows. -# With workloads that generate a lot of tombstones, this can cause performance -# problems and even exhaust the server heap. -# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -# Adjust the thresholds here if you understand the dangers and want to -# scan more tombstones anyway. These thresholds may also be adjusted at runtime -# using the StorageService mbean. -tombstone_warn_threshold: 1000 -tombstone_failure_threshold: 100000 - -# Log WARN on any batch size exceeding this value. 5kb per batch by default. -# Caution should be taken when increasing the size of this threshold as it can lead to node instability. -batch_size_warn_threshold_in_kb: 5 - -# Fail any batch exceeding this value. 50kb (10x warn threshold) by default. -batch_size_fail_threshold_in_kb: 50 - -# Log WARN on any batches not of type LOGGED that span across more partitions than this limit -unlogged_batch_across_partitions_warn_threshold: 10 - -# Log a warning when compacting partitions larger than this value -compaction_large_partition_warning_threshold_mb: 100 - -# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level -# Adjust the threshold based on your application throughput requirements -# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level -gc_warn_threshold_in_ms: 1000 - -# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption -# early. Any value size larger than this threshold will result in marking an SSTable -# as corrupted. -# max_value_size_in_mb: 256 diff --git a/builtin/logical/cassandra/util.go b/builtin/logical/cassandra/util.go deleted file mode 100644 index 8257aafd787f..000000000000 --- a/builtin/logical/cassandra/util.go +++ /dev/null @@ -1,95 +0,0 @@ -package cassandra - -import ( - "crypto/tls" - "fmt" - "strings" - "time" - - "github.com/gocql/gocql" - "github.com/hashicorp/go-secure-stdlib/tlsutil" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -// substQuery templates a query for us.
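The one-line doc comment above introduces the first helper of the deleted cassandra plugin: substQuery performs plain {{key}} substitution over a query template, as its definition below shows. A hedged usage sketch (the statement and both map values are hypothetical examples, not taken from this diff):

```go
// Within the cassandra package, a creation-statement template could be
// rendered like this; username and password are illustrative placeholders.
tpl := `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER`
query := substQuery(tpl, map[string]string{
	"username": "vault-user-1234",
	"password": "generated-secret",
})
// query == `CREATE USER 'vault-user-1234' WITH PASSWORD 'generated-secret' NOSUPERUSER`
_ = query
```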
-func substQuery(tpl string, data map[string]string) string { - for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) - } - - return tpl -} - -func createSession(cfg *sessionConfig, s logical.Storage) (*gocql.Session, error) { - clusterConfig := gocql.NewCluster(strings.Split(cfg.Hosts, ",")...) - clusterConfig.Authenticator = gocql.PasswordAuthenticator{ - Username: cfg.Username, - Password: cfg.Password, - } - - clusterConfig.ProtoVersion = cfg.ProtocolVersion - if clusterConfig.ProtoVersion == 0 { - clusterConfig.ProtoVersion = 2 - } - - clusterConfig.Timeout = time.Duration(cfg.ConnectTimeout) * time.Second - - if cfg.TLS { - var tlsConfig *tls.Config - if len(cfg.Certificate) > 0 || len(cfg.IssuingCA) > 0 { - if len(cfg.Certificate) > 0 && len(cfg.PrivateKey) == 0 { - return nil, fmt.Errorf("found certificate for TLS authentication but no private key") - } - - certBundle := &certutil.CertBundle{} - if len(cfg.Certificate) > 0 { - certBundle.Certificate = cfg.Certificate - certBundle.PrivateKey = cfg.PrivateKey - } - if len(cfg.IssuingCA) > 0 { - certBundle.IssuingCA = cfg.IssuingCA - } - - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return nil, fmt.Errorf("failed to parse certificate bundle: %w", err) - } - - tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient) - if err != nil || tlsConfig == nil { - return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig: %#v; %w", tlsConfig, err) - } - tlsConfig.InsecureSkipVerify = cfg.InsecureTLS - - if cfg.TLSMinVersion != "" { - var ok bool - tlsConfig.MinVersion, ok = tlsutil.TLSLookup[cfg.TLSMinVersion] - if !ok { - return nil, fmt.Errorf("invalid 'tls_min_version' in config") - } - } else { - // MinVersion was not being set earlier. Reset it to - // zero to gracefully handle upgrades. - tlsConfig.MinVersion = 0 - } - } - - clusterConfig.SslOpts = &gocql.SslOptions{ - Config: tlsConfig, - } - } - - session, err := clusterConfig.CreateSession() - if err != nil { - return nil, fmt.Errorf("error creating session: %w", err) - } - - // Verify the info - err = session.Query(`LIST USERS`).Exec() - if err != nil { - return nil, fmt.Errorf("error validating connection info: %w", err) - } - - return session, nil -} diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go index 7fce10e26294..52aeb3cbceec 100644 --- a/builtin/logical/consul/backend.go +++ b/builtin/logical/consul/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -7,6 +10,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixConsul = "consul" + // ReportedVersion is used to report a specific version to Vault. var ReportedVersion = "" diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go index fa7cf647135a..aa377f26e084 100644 --- a/builtin/logical/consul/backend_test.go +++ b/builtin/logical/consul/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -846,6 +849,22 @@ func TestBackend_Roles(t *testing.T) { } } +func TestBackend_Enterprise_Diff_Namespace_Revocation(t *testing.T) { + if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { + t.Skip("Skipping: No enterprise license found") + } + + testBackendEntDiffNamespaceRevocation(t) +} + +func TestBackend_Enterprise_Diff_Partition_Revocation(t *testing.T) { + if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { + t.Skip("Skipping: No enterprise license found") + } + + testBackendEntDiffPartitionRevocation(t) +} + func TestBackend_Enterprise_Namespace(t *testing.T) { if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense { t.Skip("Skipping: No enterprise license found") @@ -862,6 +881,268 @@ func TestBackend_Enterprise_Partition(t *testing.T) { testBackendEntPartition(t) } +func testBackendEntDiffNamespaceRevocation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + // Perform additional Consul configuration + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = consulConfig.Address() + consulapiConfig.Token = consulConfig.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + // Create Policy in default namespace to manage ACLs in a different + // namespace + nsPol := &consulapi.ACLPolicy{ + Name: "diff-ns-test", + Description: "policy to test management of ACLs in one ns from another", + Rules: `namespace "ns1" { + acl="write" + } + `, + } + pol, _, err := client.ACL().PolicyCreate(nsPol, nil) + if err != nil { + t.Fatal(err) + } + + // Create new Token in default namespace with new ACL + cToken, _, err := client.ACL().TokenCreate( + &consulapi.ACLToken{ + Policies: []*consulapi.ACLLink{{ID: pol.ID}}, + }, nil) + if err != nil { + t.Fatal(err) + } + + // Write backend config + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": cToken.SecretID, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in namespace "ns1" + req.Path = "roles/test-ns" + req.Data = map[string]interface{}{ + "consul_policies": []string{"ns-test"}, + "lease": "6h", + "consul_namespace": "ns1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-ns" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + ConsulNamespace string `mapstructure:"consul_namespace"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.ConsulNamespace != "ns1" { + t.Fatalf("Failed to access namespace") + } + + // Revoke the credential + req.Operation = logical.RevokeOperation + req.Secret 
= generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Namespace: "ns1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func testBackendEntDiffPartitionRevocation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + // Perform additional Consul configuration + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = consulConfig.Address() + consulapiConfig.Token = consulConfig.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + // Create Policy in default partition to manage ACLs in a different + // partition + partPol := &consulapi.ACLPolicy{ + Name: "diff-part-test", + Description: "policy to test management of ACLs in one part from another", + Rules: `partition "part1" { + acl="write" + } + `, + } + pol, _, err := client.ACL().PolicyCreate(partPol, nil) + if err != nil { + t.Fatal(err) + } + + // Create new Token in default partition with new ACL + cToken, _, err := client.ACL().TokenCreate( + &consulapi.ACLToken{ + Policies: []*consulapi.ACLLink{{ID: pol.ID}}, + }, nil) + if err != nil { + t.Fatal(err) + } + + // Write backend config + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": cToken.SecretID, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in partition "part1" + req.Path = "roles/test-part" + req.Data = map[string]interface{}{ + "consul_policies": []string{"part-test"}, + "lease": "6h", + "partition": "part1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-part" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + Partition string `mapstructure:"partition"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.Partition != "part1" { + t.Fatalf("Failed to access partition") + } + + // Revoke the credential + req.Operation = logical.RevokeOperation + req.Secret = generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and verify that the token does not exist 
anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Partition: "part1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + func testBackendEntNamespace(t *testing.T) { config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} diff --git a/builtin/logical/consul/client.go b/builtin/logical/consul/client.go index fd54830a4b77..8a98200af17c 100644 --- a/builtin/logical/consul/client.go +++ b/builtin/logical/consul/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( diff --git a/builtin/logical/consul/cmd/consul/main.go b/builtin/logical/consul/cmd/consul/main.go index 3b884ddf85ef..6f0dfe45c3a5 100644 --- a/builtin/logical/consul/cmd/consul/main.go +++ b/builtin/logical/consul/cmd/consul/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: consul.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/consul/path_config.go b/builtin/logical/consul/path_config.go index 1fd60e30ec57..11da1f222c66 100644 --- a/builtin/logical/consul/path_config.go +++ b/builtin/logical/consul/path_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -12,6 +15,11 @@ import ( func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + }, + Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -52,9 +60,20 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigAccessRead, - logical.UpdateOperation: b.pathConfigAccessWrite, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "access-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, }, } } @@ -116,7 +135,7 @@ func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Reques } token, _, err := client.ACL().Bootstrap() if err != nil { - return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs"), err + return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs: %s", err), nil } config.Token = token.SecretID } diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go index fa513b5017d2..1341544ea34f 100644 --- a/builtin/logical/consul/path_roles.go +++ b/builtin/logical/consul/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -14,6 +17,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -23,6 +31,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token.go b/builtin/logical/consul/path_token.go index 7568774f39c6..6cddd1fddc7c 100644 --- a/builtin/logical/consul/path_token.go +++ b/builtin/logical/consul/path_token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -18,6 +21,13 @@ const ( func pathToken(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, diff --git a/builtin/logical/consul/path_token_test.go b/builtin/logical/consul/path_token_test.go index 98e2b826fbca..7f5ac3d2b687 100644 --- a/builtin/logical/consul/path_token_test.go +++ b/builtin/logical/consul/path_token_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package consul import ( diff --git a/builtin/logical/consul/secret_token.go b/builtin/logical/consul/secret_token.go index 6dbccca014ce..f2219f0790b6 100644 --- a/builtin/logical/consul/secret_token.go +++ b/builtin/logical/consul/secret_token.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( "context" "fmt" + "github.com/hashicorp/consul/api" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -81,6 +85,24 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d version = versionRaw.(string) } + // Extract Consul Namespace and Partition info from secret + var revokeWriteOptions *api.WriteOptions + var namespace, partition string + + namespaceRaw, ok := req.Data["consul_namespace"] + if ok { + namespace = namespaceRaw.(string) + } + partitionRaw, ok := req.Data["partition"] + if ok { + partition = partitionRaw.(string) + } + + revokeWriteOptions = &api.WriteOptions{ + Namespace: namespace, + Partition: partition, + } + switch version { case "": // Pre 1.4 tokens @@ -89,7 +111,7 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d return nil, err } case tokenPolicyType: - _, err := c.ACL().TokenDelete(tokenRaw.(string), nil) + _, err := c.ACL().TokenDelete(tokenRaw.(string), revokeWriteOptions) if err != nil { return nil, err } diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go index e2e362fd5fa3..970583ef850a 100644 --- a/builtin/logical/database/backend.go +++ b/builtin/logical/database/backend.go @@ -1,9 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( "context" + "errors" "fmt" "net/rpc" + "strconv" "strings" "sync" "time" @@ -12,7 +17,9 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/database/schedule" "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/syncmap" "github.com/hashicorp/vault/internalshared/configutil" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -24,19 +31,25 @@ import ( ) const ( - databaseConfigPath = "config/" - databaseRolePath = "role/" - databaseStaticRolePath = "static-role/" - minRootCredRollbackAge = 1 * time.Minute + operationPrefixDatabase = "database" + databaseConfigPath = "config/" + databaseRolePath = "role/" + databaseStaticRolePath = "static-role/" + minRootCredRollbackAge = 1 * time.Minute ) type dbPluginInstance struct { sync.RWMutex database databaseVersionWrapper - id string - name string - closed bool + id string + name string + runningPluginVersion string + closed bool +} + +func (dbi *dbPluginInstance) ID() string { + return dbi.id } func (dbi *dbPluginInstance) Close() error { @@ -59,7 +72,7 @@ func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, b.credRotationQueue = queue.New() // Load queue and kickoff new periodic ticker - go b.initQueue(b.queueCtx, conf, conf.System.ReplicationState()) + go b.initQueue(b.queueCtx, conf) // collect metrics on number of plugin instances var err error @@ -97,6 +110,7 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { pathListPluginConnection(&b), pathConfigurePluginConnection(&b), pathResetConnection(&b), + pathReloadPlugin(&b), }, pathListRoles(&b), 
pathRoles(&b), @@ -115,25 +129,19 @@ func Backend(conf *logical.BackendConfig) *databaseBackend { } b.logger = conf.Logger - b.connections = make(map[string]*dbPluginInstance) + b.connections = syncmap.NewSyncMap[string, *dbPluginInstance]() b.queueCtx, b.cancelQueueCtx = context.WithCancel(context.Background()) b.roleLocks = locksutil.CreateLocks() + b.schedule = &schedule.DefaultSchedule{} + return &b } func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]metricsutil.GaugeLabelValues, error) { // copy the map so we can release the lock - connMapCopy := func() map[string]*dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - mapCopy := map[string]*dbPluginInstance{} - for k, v := range b.connections { - mapCopy[k] = v - } - return mapCopy - }() + connectionsCopy := b.connections.Values() counts := map[string]int{} - for _, v := range connMapCopy { + for _, v := range connectionsCopy { dbType, err := v.database.Type() if err != nil { // there's a chance this will already be closed since we don't hold the lock @@ -152,11 +160,10 @@ func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]m } type databaseBackend struct { - // connLock is used to synchronize access to the connections map - connLock sync.RWMutex // connections holds configured database connections by config name - connections map[string]*dbPluginInstance - logger log.Logger + createConnectionLock sync.Mutex + connections *syncmap.SyncMap[string, *dbPluginInstance] + logger log.Logger *framework.Backend // credRotationQueue is an in-memory priority queue used to track Static Roles @@ -177,49 +184,8 @@ type databaseBackend struct { // the running gauge collection process gaugeCollectionProcess *metricsutil.GaugeCollectionProcess gaugeCollectionProcessStop sync.Once -} - -func (b *databaseBackend) connGet(name string) *dbPluginInstance { - b.connLock.RLock() - defer b.connLock.RUnlock() - return b.connections[name] -} - -func (b *databaseBackend) connPop(name string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok { - delete(b.connections, name) - } - return dbi -} - -func (b *databaseBackend) connPopIfEqual(name, id string) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi, ok := b.connections[name] - if ok && dbi.id == id { - delete(b.connections, name) - return dbi - } - return nil -} - -func (b *databaseBackend) connPut(name string, newDbi *dbPluginInstance) *dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - dbi := b.connections[name] - b.connections[name] = newDbi - return dbi -} -func (b *databaseBackend) connClear() map[string]*dbPluginInstance { - b.connLock.Lock() - defer b.connLock.Unlock() - old := b.connections - b.connections = make(map[string]*dbPluginInstance) - return old + schedule schedule.Scheduler } func (b *databaseBackend) DatabaseConfig(ctx context.Context, s logical.Storage, name string) (*DatabaseConfig, error) { @@ -326,7 +292,19 @@ func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, } func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name string, config *DatabaseConfig) (*dbPluginInstance, error) { - dbi := b.connGet(name) + // fast path, reuse the existing connection + dbi := b.connections.Get(name) + if dbi != nil { + return dbi, nil + } + + // slow path, create a new connection + // if we don't lock the rest of the operation, there is a race condition for multiple callers of this 
function + b.createConnectionLock.Lock() + defer b.createConnectionLock.Unlock() + + // check again in case we lost the race + dbi = b.connections.Get(name) if dbi != nil { return dbi, nil } @@ -336,7 +314,17 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri return nil, err } - dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + // Override the configured version if there is a pinned version. + pinnedVersion, err := b.getPinnedVersion(ctx, config.PluginName) + if err != nil { + return nil, err + } + pluginVersion := config.PluginVersion + if pinnedVersion != "" { + pluginVersion = pinnedVersion + } + + dbw, err := newDatabaseWrapper(ctx, config.PluginName, pluginVersion, b.System(), b.logger) if err != nil { return nil, fmt.Errorf("unable to create database instance: %w", err) } @@ -352,24 +340,28 @@ func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name stri } dbi = &dbPluginInstance{ - database: dbw, - id: id, - name: name, + database: dbw, + id: id, + name: name, + runningPluginVersion: pluginVersion, } - oldConn := b.connPut(name, dbi) - if oldConn != nil { - err := oldConn.Close() + conn, ok := b.connections.PutIfEmpty(name, dbi) + if !ok { + // this is a bug + b.Logger().Warn("BUG: there was a race condition adding to the database connection map") + // There was already an existing connection, so we will use that and close our new one to avoid a race condition. + err := dbi.Close() if err != nil { - b.Logger().Warn("Error closing database connection", "error", err) + b.Logger().Warn("Error closing new database connection", "error", err) } } - return dbi, nil + return conn, nil } // ClearConnection closes the database connection and // removes it from the b.connections map. func (b *databaseBackend) ClearConnection(name string) error { - db := b.connPop(name) + db := b.connections.Pop(name) if db != nil { // Ignore error here since the database client is always killed db.Close() @@ -380,7 +372,7 @@ func (b *databaseBackend) ClearConnection(name string) error { // ClearConnectionId closes the database connection with a specific id and // removes it from the b.connections map. func (b *databaseBackend) ClearConnectionId(name, id string) error { - db := b.connPopIfEqual(name, id) + db := b.connections.PopIfEqual(name, id) if db != nil { // Ignore error here since the database client is always killed db.Close() @@ -399,7 +391,7 @@ func (b *databaseBackend) CloseIfShutdown(db *dbPluginInstance, err error) { db.Close() // Delete the connection if it is still active. - b.connPopIfEqual(db.name, db.id) + b.connections.PopIfEqual(db.name, db.id) }() } } @@ -412,7 +404,7 @@ func (b *databaseBackend) clean(_ context.Context) { b.cancelQueueCtx() } - connections := b.connClear() + connections := b.connections.Clear() for _, db := range connections { go db.Close() } @@ -424,6 +416,28 @@ func (b *databaseBackend) clean(_ context.Context) { }) } +func (b *databaseBackend) dbEvent(ctx context.Context, + operation string, + path string, + name string, + modified bool, + additionalMetadataPairs ...string, +) { + metadata := []string{ + logical.EventMetadataModified, strconv.FormatBool(modified), + logical.EventMetadataOperation, operation, + "path", path, + } + if name != "" { + metadata = append(metadata, "name", name) + } + metadata = append(metadata, additionalMetadataPairs...) + err := logical.SendEvent(ctx, b, fmt.Sprintf("database/%s", operation), metadata...) 
+ if err != nil && !errors.Is(err, framework.ErrNoEvents) { + b.Logger().Error("Error sending event", "error", err) + } +} + const backendHelp = ` The database backend supports using many different databases as secret backends, including but not limited to: diff --git a/builtin/logical/database/backend_get_test.go b/builtin/logical/database/backend_get_test.go new file mode 100644 index 000000000000..b7799725eba8 --- /dev/null +++ b/builtin/logical/database/backend_get_test.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package database + +import ( + "context" + "sync" + "testing" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +func newSystemViewWrapper(view logical.SystemView) logical.SystemView { + return &systemViewWrapper{ + view, + } +} + +type systemViewWrapper struct { + logical.SystemView +} + +var _ logical.ExtendedSystemView = (*systemViewWrapper)(nil) + +func (s *systemViewWrapper) RequestWellKnownRedirect(ctx context.Context, src, dest string) error { + panic("nope") +} + +func (s *systemViewWrapper) DeregisterWellKnownRedirect(ctx context.Context, src string) bool { + panic("nope") +} + +func (s *systemViewWrapper) Auditor() logical.Auditor { + panic("nope") +} + +func (s *systemViewWrapper) ForwardGenericRequest(ctx context.Context, request *logical.Request) (*logical.Response, error) { + panic("nope") +} + +func (s *systemViewWrapper) APILockShouldBlockRequest() (bool, error) { + panic("nope") +} + +func (s *systemViewWrapper) GetPinnedPluginVersion(ctx context.Context, pluginType consts.PluginType, pluginName string) (*pluginutil.PinnedVersion, error) { + return nil, pluginutil.ErrPinnedVersionNotFound +} + +func (s *systemViewWrapper) LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error) { + return &pluginutil.PluginRunner{ + Name: mockv5, + Type: consts.PluginTypeDatabase, + Builtin: true, + BuiltinFactory: New, + }, nil +} + +func getDbBackend(t *testing.T) (*databaseBackend, logical.Storage) { + t.Helper() + config := logical.TestBackendConfig() + config.System = newSystemViewWrapper(config.System) + config.StorageView = &logical.InmemStorage{} + // Create and init the backend ourselves instead of using a Factory because + // the factory function kicks off threads that cause racy tests. + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + b.schedule = &TestSchedule{} + b.credRotationQueue = queue.New() + b.populateQueue(context.Background(), config.StorageView) + + return b, config.StorageView +} + +// TestGetConnectionRaceCondition checks that GetConnection always returns the same instance, even when asked +// by multiple goroutines in parallel. 
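Before the race-condition test body below, here is the double-checked pattern that GetConnectionWithConfig adopts in the hunk above, condensed into a standalone sketch. newConnection is a hypothetical stand-in for the wrapper/ID/initialize sequence; this is not the verbatim Vault code:

```go
func (b *databaseBackend) getOrCreate(ctx context.Context, name string) (*dbPluginInstance, error) {
	// Fast path: reuse an existing connection without taking the creation lock.
	if dbi := b.connections.Get(name); dbi != nil {
		return dbi, nil
	}

	// Slow path: serialize creation so concurrent callers don't race.
	b.createConnectionLock.Lock()
	defer b.createConnectionLock.Unlock()

	// Re-check: another goroutine may have created the connection while we
	// were waiting for the lock.
	if dbi := b.connections.Get(name); dbi != nil {
		return dbi, nil
	}

	dbi, err := b.newConnection(ctx, name) // hypothetical constructor
	if err != nil {
		return nil, err
	}
	if existing, ok := b.connections.PutIfEmpty(name, dbi); !ok {
		// Should be unreachable while holding the lock; prefer the existing
		// instance and discard ours, as the real code does.
		_ = dbi.Close()
		return existing, nil
	}
	return dbi, nil
}
```

The second Get under the lock is what the test that follows exercises: all of its parallel callers must receive the same *dbPluginInstance.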
+func TestGetConnectionRaceCondition(t *testing.T) { + ctx := context.Background() + b, s := getDbBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, s) + + goroutines := 16 + + wg := sync.WaitGroup{} + wg.Add(goroutines) + dbis := make([]*dbPluginInstance, goroutines) + errs := make([]error, goroutines) + for i := 0; i < goroutines; i++ { + go func(i int) { + defer wg.Done() + dbis[i], errs[i] = b.GetConnection(ctx, s, mockv5) + }(i) + } + wg.Wait() + for i := 0; i < goroutines; i++ { + if errs[i] != nil { + t.Fatal(errs[i]) + } + if dbis[0] != dbis[i] { + t.Fatal("Error: database instances did not match") + } + } +} diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index 27ce027c959e..a1b96ad392f1 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -1,25 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( "context" "database/sql" + "encoding/json" + "errors" "fmt" "log" "net/url" "os" "reflect" + "slices" "strings" + "sync" "testing" "time" "github.com/go-test/deep" "github.com/hashicorp/go-hclog" - mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/plugins/database/mongodb" "github.com/hashicorp/vault/plugins/database/postgresql" v4 "github.com/hashicorp/vault/sdk/database/dbplugin" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" @@ -31,14 +37,32 @@ import ( "github.com/hashicorp/vault/vault" _ "github.com/jackc/pgx/v4" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" ) -func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { +func getClusterPostgresDBWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterWithFactory(t, factory) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) + return cluster, sys +} + +func getClusterPostgresDB(t *testing.T) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterPostgresDBWithFactory(t, Factory) + return cluster, sys +} + +func getClusterWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) { + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) coreConfig := &vault.CoreConfig{ LogicalBackends: map[string]logical.Factory{ - "database": Factory, + "database": factory, }, BuiltinRegistry: builtinplugins.Registry, + PluginDirectory: pluginDir, } cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ @@ -48,30 +72,15 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { cores := cluster.Cores vault.TestWaitActive(t, cores[0].Core) - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Postgres", []string{}, "") - vault.TestAddTestPlugin(t, 
cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Mongo", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoMultiplexed", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlas", []string{}, "") - vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "") return cluster, sys } -func TestBackend_PluginMain_Postgres(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - dbType, err := postgresql.New() - if err != nil { - t.Fatalf("Failed to initialize postgres: %s", err) - } - - v5.Serve(dbType.(v5.Database)) +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + t.Helper() + cluster, sys := getClusterWithFactory(t, Factory) + return cluster, sys } func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) { @@ -82,48 +91,6 @@ func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) { v5.ServeMultiplex(postgresql.New) } -func TestBackend_PluginMain_Mongo(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - dbType, err := mongodb.New() - if err != nil { - t.Fatalf("Failed to initialize mongodb: %s", err) - } - - v5.Serve(dbType.(v5.Database)) -} - -func TestBackend_PluginMain_MongoMultiplexed(t *testing.T) { - if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { - return - } - - v5.ServeMultiplex(mongodb.New) -} - -func TestBackend_PluginMain_MongoAtlas(t *testing.T) { - if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { - return - } - - dbType, err := mongodbatlas.New() - if err != nil { - t.Fatalf("Failed to initialize mongodbatlas: %s", err) - } - - v5.Serve(dbType.(v5.Database)) -} - -func TestBackend_PluginMain_MongoAtlasMultiplexed(t *testing.T) { - if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { - return - } - - v5.ServeMultiplex(mongodbatlas.New) -} - func TestBackend_RoleUpgrade(t *testing.T) { storage := &logical.InmemStorage{} backend := &databaseBackend{} @@ -180,12 +147,14 @@ func TestBackend_config_connection(t *testing.T) { var resp *logical.Response var err error - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender lb, err := Factory(context.Background(), config) if err != nil { t.Fatal(err) @@ -364,10 +333,20 @@ func TestBackend_config_connection(t *testing.T) { if key != "plugin-test" { t.Fatalf("bad key: %q", key) } + assert.Equal(t, 3, len(eventSender.Events)) + assert.Equal(t, "database/config-write", string(eventSender.Events[0].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[0].Event.Metadata.AsMap()["path"]) + assert.Equal(t, "plugin-test", eventSender.Events[0].Event.Metadata.AsMap()["name"]) + assert.Equal(t, "database/config-write", string(eventSender.Events[1].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[1].Event.Metadata.AsMap()["path"]) + assert.Equal(t, 
"plugin-test", eventSender.Events[1].Event.Metadata.AsMap()["name"]) + assert.Equal(t, "database/config-write", string(eventSender.Events[2].Type)) + assert.Equal(t, "config/plugin-test", eventSender.Events[2].Event.Metadata.AsMap()["path"]) + assert.Equal(t, "plugin-test", eventSender.Events[2].Event.Metadata.AsMap()["name"]) } func TestBackend_BadConnectionString(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -416,12 +395,14 @@ func TestBackend_BadConnectionString(t *testing.T) { } func TestBackend_basic(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender b, err := Factory(context.Background(), config) if err != nil { @@ -545,7 +526,7 @@ func TestBackend_basic(t *testing.T) { if credsResp.Secret.TTL != 5*time.Minute { t.Fatalf("unexpected TTL of %d", credsResp.Secret.TTL) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -565,7 +546,7 @@ func TestBackend_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if testCredsExist(t, credsResp, connURL) { + if testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should not exist") } } @@ -583,7 +564,7 @@ func TestBackend_basic(t *testing.T) { if err != nil || (credsResp != nil && credsResp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -616,91 +597,135 @@ func TestBackend_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if testCredsExist(t, credsResp, connURL) { + if testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should not exist") } } + assert.Equal(t, 9, len(eventSender.Events)) + + assertEvent := func(t *testing.T, typ, name, path string) { + t.Helper() + assert.Equal(t, typ, string(eventSender.Events[0].Type)) + assert.Equal(t, name, eventSender.Events[0].Event.Metadata.AsMap()["name"]) + assert.Equal(t, path, eventSender.Events[0].Event.Metadata.AsMap()["path"]) + eventSender.Events = slices.Delete(eventSender.Events, 0, 1) + } + + assertEvent(t, "database/config-write", "plugin-test", "config/plugin-test") + for i := 0; i < 3; i++ { + assertEvent(t, "database/role-update", "plugin-role-test", "roles/plugin-role-test") + assertEvent(t, "database/creds-create", "plugin-role-test", "creds/plugin-role-test") + } + assertEvent(t, "database/creds-create", "plugin-role-test", "creds/plugin-role-test") + assertEvent(t, "database/role-delete", "plugin-role-test", "roles/plugin-role-test") } -func TestBackend_connectionCrud(t *testing.T) { - cluster, sys := getCluster(t) - defer cluster.Cleanup() +// singletonDBFactory allows us to reach into the internals of a databaseBackend +// even when it's been created by a call to the sys mount. The factory method +// satisfies the logical.Factory type, and lazily creates the databaseBackend +// once the SystemView has been provided because the factory method itself is an +// input for creating the test cluster and its system view. 
+type singletonDBFactory struct { + once sync.Once + db *databaseBackend + + sys logical.SystemView +} + +// factory satisfies the logical.Factory type. +func (s *singletonDBFactory) factory(context.Context, *logical.BackendConfig) (logical.Backend, error) { + if s.sys == nil { + return nil, errors.New("sys is nil") + } config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} - config.System = sys + config.System = s.sys - b, err := Factory(context.Background(), config) + var err error + s.once.Do(func() { + var b logical.Backend + b, err = Factory(context.Background(), config) + s.db = b.(*databaseBackend) + }) if err != nil { - t.Fatal(err) + return nil, err } - defer b.Cleanup(context.Background()) + if s.db == nil { + return nil, errors.New("db is nil") + } + return s.db, nil +} + +func TestBackend_connectionCrud(t *testing.T) { + dbFactory := &singletonDBFactory{} + cluster, sys := getClusterPostgresDBWithFactory(t, dbFactory.factory) + defer cluster.Cleanup() + + dbFactory.sys = sys + client := cluster.Cores[0].Client.Logical() cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") defer cleanup() + // Mount the database plugin. + resp, err := client.Write("sys/mounts/database", map[string]interface{}{ + "type": "database", + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + // Configure a connection - data := map[string]interface{}{ + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ "connection_url": "test", "plugin_name": "postgresql-database-plugin", "verify_connection": false, + }) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) } - req := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err := b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + + // Configure a second connection to confirm below it doesn't get restarted. 
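An aside before the test configures its second connection: the singletonDBFactory above boils down to a sync.Once-guarded lazy constructor. The same shape in miniature (a generic hedged sketch assuming the surrounding database package, with the build error captured inside the Once so every caller observes it):

```go
type lazyBackend struct {
	once sync.Once
	b    logical.Backend
	err  error
}

// get builds the backend exactly once, no matter how many mounts ask for it,
// and hands every caller the same instance (or the original build error).
func (l *lazyBackend) get(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
	l.once.Do(func() {
		l.b, l.err = Factory(ctx, conf)
	})
	return l.b, l.err
}
```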
+ resp, err = client.Write("database/config/plugin-test-hana", map[string]interface{}{ + "connection_url": "test", + "plugin_name": "hana-database-plugin", + "verify_connection": false, + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Create a role - data = map[string]interface{}{ + resp, err = client.Write("database/roles/plugin-role-test", map[string]interface{}{ "db_name": "plugin-test", "creation_statements": testRole, "revocation_statements": defaultRevocationSQL, "default_ttl": "5m", "max_ttl": "10m", - } - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/plugin-role-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Update the connection - data = map[string]interface{}{ + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ "connection_url": connURL, "plugin_name": "postgresql-database-plugin", "allowed_roles": []string{"plugin-role-test"}, "username": "postgres", "password": "secret", "private_key": "PRIVATE_KEY", - } - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } if len(resp.Warnings) == 0 { t.Fatalf("expected warning about password in url %s, resp:%#v\n", connURL, resp) } - req.Operation = logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } returnedConnectionDetails := resp.Data["connection_details"].(map[string]interface{}) @@ -716,11 +741,16 @@ func TestBackend_connectionCrud(t *testing.T) { } // Replace connection url with templated version - req.Operation = logical.UpdateOperation - connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") - data["connection_url"] = connURL - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + templatedConnURL := strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + resp, err = client.Write("database/config/plugin-test", map[string]interface{}{ + "connection_url": templatedConnURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": "postgres", + "password": "secret", + "private_key": "PRIVATE_KEY", + }) + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ -729,75 +759,92 @@ func TestBackend_connectionCrud(t *testing.T) { "plugin_name": "postgresql-database-plugin", "connection_details": map[string]interface{}{ "username": "postgres", - "connection_url": connURL, + "connection_url": templatedConnURL, }, - "allowed_roles": []string{"plugin-role-test"}, - "root_credentials_rotate_statements": []string(nil), + "allowed_roles": []any{"plugin-role-test"}, + "root_credentials_rotate_statements": []any{}, "password_policy": "", "plugin_version": "", } - req.Operation = logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = 
client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } delete(resp.Data["connection_details"].(map[string]interface{}), "name") if diff := deep.Equal(resp.Data, expected); diff != nil { - t.Fatal(diff) + t.Fatal(strings.Join(diff, "\n")) } - // Reset Connection - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "reset/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) + // Test endpoints for reloading plugins. + for _, reload := range []struct { + path string + data map[string]any + checkCount bool + }{ + {"database/reset/plugin-test", nil, false}, + {"database/reload/postgresql-database-plugin", nil, true}, + {"sys/plugins/reload/backend", map[string]any{ + "plugin": "postgresql-database-plugin", + }, false}, + } { + getConnectionID := func(name string) string { + t.Helper() + dbi := dbFactory.db.connections.Get(name) + if dbi == nil { + t.Fatal("no plugin-test dbi") + } + return dbi.ID() + } + initialID := getConnectionID("plugin-test") + hanaID := getConnectionID("plugin-test-hana") + resp, err = client.Write(reload.path, reload.data) + if err != nil { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + if initialID == getConnectionID("plugin-test") { + t.Fatal("ID unchanged after connection reset") + } + if hanaID != getConnectionID("plugin-test-hana") { + t.Fatal("hana plugin got restarted but shouldn't have been") + } + if reload.checkCount { + actual, err := resp.Data["count"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if expected := 1; expected != int(actual) { + t.Fatalf("expected %d but got %d", expected, resp.Data["count"].(int)) + } + if expected := []any{"plugin-test"}; !reflect.DeepEqual(expected, resp.Data["connections"]) { + t.Fatalf("expected %v but got %v", expected, resp.Data["connections"]) + } + } } // Get creds - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.ReadOperation, - Path: "creds/plugin-role-test", - Storage: config.StorageView, - Data: data, - } - credsResp, err := b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (credsResp != nil && credsResp.IsError()) { + credsResp, err := client.Read("database/creds/plugin-role-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - credCheckURL := dbutil.QueryHelper(connURL, map[string]string{ + credCheckURL := dbutil.QueryHelper(templatedConnURL, map[string]string{ "username": "postgres", "password": "secret", }) - if !testCredsExist(t, credsResp, credCheckURL) { + if !testCredsExist(t, credsResp.Data, credCheckURL) { t.Fatalf("Creds should exist") } // Delete Connection - data = map[string]interface{}{} - req = &logical.Request{ - Operation: logical.DeleteOperation, - Path: "config/plugin-test", - Storage: config.StorageView, - Data: data, - } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Delete("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Read connection - req.Operation = logical.ReadOperation - resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + resp, err = client.Read("database/config/plugin-test") + if err != nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ 
-808,7 +855,7 @@ func TestBackend_connectionCrud(t *testing.T) { } func TestBackend_roleCrud(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1061,7 +1108,7 @@ func TestBackend_roleCrud(t *testing.T) { } func TestBackend_allowedRoles(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1171,7 +1218,7 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -1205,7 +1252,7 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } @@ -1252,13 +1299,13 @@ func TestBackend_allowedRoles(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, credsResp) } - if !testCredsExist(t, credsResp, connURL) { + if !testCredsExist(t, credsResp.Data, connURL) { t.Fatalf("Creds should exist") } } func TestBackend_RotateRootCredentials(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1359,7 +1406,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) { } func TestBackend_ConnectionURL_redacted(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() @@ -1510,7 +1557,7 @@ func TestBackend_AsyncClose(t *testing.T) { // Test that having a plugin that takes a LONG time to close will not cause the cleanup function to take // longer than 750ms. cluster, sys := getCluster(t) - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() @@ -1562,13 +1609,13 @@ func TestNewDatabaseWrapper_IgnoresBuiltinVersion(t *testing.T) { } } -func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { +func testCredsExist(t *testing.T, data map[string]any, connURL string) bool { t.Helper() var d struct { Username string `mapstructure:"username"` Password string `mapstructure:"password"` } - if err := mapstructure.Decode(resp.Data, &d); err != nil { + if err := mapstructure.Decode(data, &d); err != nil { t.Fatal(err) } log.Printf("[TRACE] Generated credentials: %v", d) diff --git a/builtin/logical/database/credentials.go b/builtin/logical/database/credentials.go index a6e54678d058..790dde05a35b 100644 --- a/builtin/logical/database/credentials.go +++ b/builtin/logical/database/credentials.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
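The `testCredsExist` helper now takes the raw `resp.Data` map rather than the full `*logical.Response`, so callers can pass data from either the backend or the HTTP client. A standalone sketch of the decode step it performs; the sample map values are made up:

```
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// Projects a generic response-data map onto a typed struct, as the
// refactored testCredsExist helper does. The credential values are fake.
func main() {
	data := map[string]any{
		"username": "v-root-plugin-role-test-abcdefgh",
		"password": "not-a-real-password",
	}

	var d struct {
		Username string `mapstructure:"username"`
		Password string `mapstructure:"password"`
	}
	if err := mapstructure.Decode(data, &d); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("username=%s password set=%t\n", d.Username, d.Password != "")
}
```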
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -5,12 +8,17 @@ import ( "crypto/rand" "crypto/rsa" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "fmt" "io" "strings" + "time" "github.com/hashicorp/vault/helper/random" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/template" "github.com/mitchellh/mapstructure" ) @@ -167,3 +175,217 @@ func (kg rsaKeyGenerator) configMap() (map[string]interface{}, error) { } return config, nil } + +type ClientCertificateGenerator struct { + // CommonNameTemplate is the username template to be used for the client certificate common name. + CommonNameTemplate string `mapstructure:"common_name_template,omitempty"` + + // CAPrivateKey is the PEM-encoded private key for the given ca_cert. + CAPrivateKey string `mapstructure:"ca_private_key,omitempty"` + + // CACert is the PEM-encoded CA certificate. + CACert string `mapstructure:"ca_cert,omitempty"` + + // KeyType specifies the desired key type. + // Options include: 'rsa', 'ed25519', 'ec'. + KeyType string `mapstructure:"key_type,omitempty"` + + // KeyBits is the number of bits to use for the generated keys. + // Options include: with key_type=rsa, 2048 (default), 3072, 4096; + // with key_type=ec, allowed values are: 224, 256 (default), 384, 521; + // ignored with key_type=ed25519. + KeyBits int `mapstructure:"key_bits,omitempty"` + + // SignatureBits is the number of bits to use in the signature algorithm. + // Options include: 256 (default), 384, 512. + SignatureBits int `mapstructure:"signature_bits,omitempty"` + + parsedCABundle *certutil.ParsedCertBundle + cnProducer template.StringTemplate +} + +// newClientCertificateGenerator returns a new ClientCertificateGenerator +// using the given config. Default values will be set on the returned +// ClientCertificateGenerator if not provided in the config.
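To make the field comments above concrete, here is a hypothetical helper (not part of the change) showing the shape of the config map the constructor accepts; the map keys follow the mapstructure tags, and the template fields come from `dbplugin.UsernameMetadata`.

```
package database

// buildClientCertConfig is illustrative only: it assembles the keys that
// newClientCertificateGenerator decodes via mapstructure. The PEM inputs
// are supplied by the caller; defaults apply when optional keys are omitted.
func buildClientCertConfig(caCertPEM, caKeyPEM string) map[string]interface{} {
	return map[string]interface{}{
		"common_name_template": "{{.DisplayName}}_{{.RoleName}}",
		"ca_cert":              caCertPEM, // PEM-encoded CA certificate
		"ca_private_key":       caKeyPEM,  // PEM-encoded private key for ca_cert
		"key_type":             "ec",      // "rsa", "ec", or "ed25519"
		"key_bits":             256,       // optional; defaults depend on key_type
		"signature_bits":       256,       // optional; defaults to 256
	}
}
```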
+func newClientCertificateGenerator(config map[string]interface{}) (ClientCertificateGenerator, error) { + var cg ClientCertificateGenerator + if err := mapstructure.WeakDecode(config, &cg); err != nil { + return cg, err + } + + switch cg.KeyType { + case "rsa": + switch cg.KeyBits { + case 0: + cg.KeyBits = 2048 + case 2048, 3072, 4096: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ec": + switch cg.KeyBits { + case 0: + cg.KeyBits = 256 + case 224, 256, 384, 521: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ed25519": + // key_bits ignored + default: + return cg, fmt.Errorf("invalid key_type") + } + + switch cg.SignatureBits { + case 0: + cg.SignatureBits = 256 + case 256, 384, 512: + default: + return cg, fmt.Errorf("invalid signature_bits") + } + + if cg.CommonNameTemplate == "" { + return cg, fmt.Errorf("missing required common_name_template") + } + + // Validate the common name template + t, err := template.NewTemplate(template.Template(cg.CommonNameTemplate)) + if err != nil { + return cg, fmt.Errorf("failed to create template: %w", err) + } + + _, err = t.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return cg, fmt.Errorf("invalid common_name_template: %w", err) + } + cg.cnProducer = t + + if cg.CACert == "" { + return cg, fmt.Errorf("missing required ca_cert") + } + if cg.CAPrivateKey == "" { + return cg, fmt.Errorf("missing required ca_private_key") + } + parsedBundle, err := certutil.ParsePEMBundle(strings.Join([]string{cg.CACert, cg.CAPrivateKey}, "\n")) + if err != nil { + return cg, err + } + if parsedBundle.PrivateKey == nil { + return cg, fmt.Errorf("private key not found in the PEM bundle") + } + if parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey { + return cg, fmt.Errorf("unknown private key found in the PEM bundle") + } + if parsedBundle.Certificate == nil { + return cg, fmt.Errorf("certificate not found in the PEM bundle") + } + if !parsedBundle.Certificate.IsCA { + return cg, fmt.Errorf("the given certificate is not marked for CA use") + } + if !parsedBundle.Certificate.BasicConstraintsValid { + return cg, fmt.Errorf("the given certificate does not meet basic constraints for CA use") + } + + certBundle, err := parsedBundle.ToCertBundle() + if err != nil { + return cg, fmt.Errorf("error converting raw values into cert bundle: %w", err) + } + + parsedCABundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return cg, fmt.Errorf("failed to parse cert bundle: %w", err) + } + cg.parsedCABundle = parsedCABundle + + return cg, nil +} + +func (cg *ClientCertificateGenerator) generate(r io.Reader, expiration time.Time, userMeta dbplugin.UsernameMetadata) (*certutil.CertBundle, string, error) { + commonName, err := cg.cnProducer.Generate(userMeta) + if err != nil { + return nil, "", err + } + + // Set defaults + keyBits := cg.KeyBits + signatureBits := cg.SignatureBits + switch cg.KeyType { + case "rsa": + if keyBits == 0 { + keyBits = 2048 + } + if signatureBits == 0 { + signatureBits = 256 + } + case "ec": + if keyBits == 0 { + keyBits = 256 + } + if signatureBits == 0 { + if keyBits == 224 { + signatureBits = 256 + } else { + signatureBits = keyBits + } + } + case "ed25519": + // key_bits ignored + if signatureBits == 0 { + signatureBits = 256 + } + } + + subject := pkix.Name{ + CommonName: commonName, + // Additional subject DN options intentionally omitted for now + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + KeyType: cg.KeyType, + 
KeyBits: cg.KeyBits, + SignatureBits: cg.SignatureBits, + NotAfter: expiration, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: certutil.ClientAuthExtKeyUsage, + BasicConstraintsValidForNonCA: false, + NotBeforeDuration: 30 * time.Second, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + SigningBundle: &certutil.CAInfoBundle{ + ParsedCertBundle: *cg.parsedCABundle, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + } + + parsedClientBundle, err := certutil.CreateCertificateWithRandomSource(creation, r) + if err != nil { + return nil, "", fmt.Errorf("unable to generate client certificate: %w", err) + } + + cb, err := parsedClientBundle.ToCertBundle() + if err != nil { + return nil, "", fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + return cb, subject.String(), nil +} + +// configMap returns the configuration of the ClientCertificateGenerator +// as a map from string to string. +func (cg ClientCertificateGenerator) configMap() (map[string]interface{}, error) { + config := make(map[string]interface{}) + if err := mapstructure.WeakDecode(cg, &config); err != nil { + return nil, err + } + return config, nil +} diff --git a/builtin/logical/database/credentials_test.go b/builtin/logical/database/credentials_test.go index 32ddc26856fe..7f2c4eb3dbb0 100644 --- a/builtin/logical/database/credentials_test.go +++ b/builtin/logical/database/credentials_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -14,6 +17,252 @@ import ( "github.com/stretchr/testify/mock" ) +// Test_newClientCertificateGenerator tests the ClientCertificateGenerator struct based on the config +func Test_newClientCertificateGenerator(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want ClientCertificateGenerator + wantErr bool + }{ + { + name: "newClientCertificateGenerator with nil config", + args: args{ + config: nil, + }, + want: ClientCertificateGenerator{ + CommonNameTemplate: "", + CAPrivateKey: "", + CACert: "", + KeyType: "", + KeyBits: 0, + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with zero value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "", + }, + }, + { + name: "newClientCertificateGenerator with rsa value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "rsa", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "rsa", + KeyBits: 2048, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ec value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ec", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ec", + KeyBits: 256, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ed25519 value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ed25519", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ed25519", + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ece", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value key_bits", + args: args{ + 
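Since `generate` returns the client certificate as a PEM-encoded `CertBundle`, a caller can sanity-check the result with the standard library. A sketch; the helper name and the particular checks are illustrative, not part of the change:

```
package database

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// verifyClientCert is a hypothetical check of what generate() returns: the
// CertBundle's Certificate field is PEM, so it can be decoded to confirm the
// common name and the client-auth extended key usage before handing it out.
func verifyClientCert(certPEM, wantCN string) error {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return err
	}
	if cert.Subject.CommonName != wantCN {
		return fmt.Errorf("unexpected CN %q", cert.Subject.CommonName)
	}
	for _, eku := range cert.ExtKeyUsage {
		if eku == x509.ExtKeyUsageClientAuth {
			return nil
		}
	}
	return fmt.Errorf("certificate lacks client-auth extended key usage")
}
```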
config: map[string]interface{}{ + "key_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 2048 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "2048", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 2048, + }, + }, + { + name: "newClientCertificateGenerator with 3072 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "3072", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 3072, + }, + }, + { + name: "newClientCertificateGenerator with 4096 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4096", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 4096, + }, + }, + { + name: "newClientCertificateGenerator with 224 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "224", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 224, + }, + }, + { + name: "newClientCertificateGenerator with 256 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 521 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "521", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 521, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4097", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 256 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 512 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "512", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 512, + }, + }, + { + name: "newClientCertificateGenerator with invalid signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "612", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newClientCertificateGenerator(tt.args.config) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + func Test_newPasswordGenerator(t *testing.T) { type args struct { config map[string]interface{} diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go index bea9e30ec7a5..8c9737cdbff5 100644 --- a/builtin/logical/database/dbplugin/plugin_test.go +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
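Note that the table above passes `key_bits` and `signature_bits` as strings; `mapstructure.WeakDecode` in the constructor is what coerces them into the int fields. A standalone demonstration of that coercion:

```
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// WeakDecode performs weakly-typed conversion, so the string "2048" from the
// test config lands in an int-typed field without an explicit strconv call.
func main() {
	var out struct {
		KeyBits int `mapstructure:"key_bits,omitempty"`
	}
	if err := mapstructure.WeakDecode(map[string]interface{}{"key_bits": "2048"}, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.KeyBits) // 2048
}
```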
+// SPDX-License-Identifier: BUSL-1.1 + package dbplugin_test import ( @@ -10,6 +13,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/helper/consts" @@ -103,14 +107,18 @@ func (m *mockPlugin) SetCredentials(ctx context.Context, statements dbplugin.Sta } func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + PluginDirectory: pluginDir, + }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) cluster.Start() cores := cluster.Cores sys := vault.TestDynamicSystemView(cores[0].Core, nil) - vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}) return cluster, sys } diff --git a/builtin/logical/database/mocks_test.go b/builtin/logical/database/mocks_test.go index 13eb53006142..4182affd2016 100644 --- a/builtin/logical/database/mocks_test.go +++ b/builtin/logical/database/mocks_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/mockv4.go b/builtin/logical/database/mockv4.go index 4f0b181683a8..f3a753511967 100644 --- a/builtin/logical/database/mockv4.go +++ b/builtin/logical/database/mockv4.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/mockv5.go b/builtin/logical/database/mockv5.go index 632cfb38e037..5f09c37101c3 100644 --- a/builtin/logical/database/mockv5.go +++ b/builtin/logical/database/mockv5.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -48,6 +51,9 @@ func (m MockDatabaseV5) Initialize(ctx context.Context, req v5.InitializeRequest "req", req) config := req.Config + if config == nil { + config = map[string]interface{}{} + } config["from-plugin"] = "this value is from the plugin itself" resp := v5.InitializeResponse{ diff --git a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go index 9f1ad4cf5744..6e08cf4bc528 100644 --- a/builtin/logical/database/path_config_connection.go +++ b/builtin/logical/database/path_config_connection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -6,6 +9,7 @@ import ( "fmt" "net/url" "sort" + "strings" "github.com/fatih/structs" "github.com/hashicorp/go-uuid" @@ -27,8 +31,9 @@ var ( // DatabaseConfig is used by the Factory function to configure a Database // object. 
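The nil-config guard added to `MockDatabaseV5.Initialize` above matters because writing to a nil map panics in Go; a minimal standalone illustration of the failure mode the guard prevents:

```
package main

import "fmt"

// Mirrors the guard added to the mock's Initialize: a nil request config is
// replaced with an empty map before the plugin writes its own key into it.
func main() {
	var config map[string]interface{} // nil until initialized
	if config == nil {
		config = map[string]interface{}{}
	}
	config["from-plugin"] = "this value is from the plugin itself"
	fmt.Println(config)
}
```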
type DatabaseConfig struct { - PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` - PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` + PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + RunningPluginVersion string `json:"running_plugin_version,omitempty" structs:"running_plugin_version,omitempty" mapstructure:"running_plugin_version,omitempty"` // ConnectionDetails stores the database specific connection settings needed // by each database type. ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` @@ -59,6 +64,13 @@ func (c *DatabaseConfig) SupportsCredentialType(credentialType v5.CredentialType func pathResetConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "reset", + OperationSuffix: "connection", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -84,17 +96,109 @@ func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { return logical.ErrorResponse(respErrEmptyName), nil } - // Close plugin and delete the entry in the connections cache. - if err := b.ClearConnection(name); err != nil { + if err := b.reloadConnection(ctx, req.Storage, name); err != nil { return nil, err } - // Execute plugin again, we don't need the object so throw away. - if _, err := b.GetConnection(ctx, req.Storage, name); err != nil { + b.dbEvent(ctx, "reset", req.Path, name, false) + return nil, nil + } +} + +func (b *databaseBackend) reloadConnection(ctx context.Context, storage logical.Storage, name string) error { + // Close plugin and delete the entry in the connections cache. + if err := b.ClearConnection(name); err != nil { + return err + } + + // Execute plugin again, we don't need the object so throw away. + if _, err := b.GetConnection(ctx, storage, name); err != nil { + return err + } + + return nil +} + +// pathReloadPlugin reloads all connections using a named plugin. +func pathReloadPlugin(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("reload/%s", framework.GenericNameRegex("plugin_name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "reload", + OperationSuffix: "plugin", + }, + + Fields: map[string]*framework.FieldSchema{ + "plugin_name": { + Type: framework.TypeString, + Description: "Name of the database plugin", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.reloadPlugin(), + }, + + HelpSynopsis: pathReloadPluginHelpSyn, + HelpDescription: pathReloadPluginHelpDesc, + } +} + +// reloadPlugin reloads all instances of a named plugin by closing the existing +// instances and creating new ones. 
+func (b *databaseBackend) reloadPlugin() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + pluginName := data.Get("plugin_name").(string) + if pluginName == "" { + return logical.ErrorResponse(respErrEmptyPluginName), nil + } + + connNames, err := req.Storage.List(ctx, "config/") + if err != nil { return nil, err } + reloaded := []string{} + for _, connName := range connNames { + entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", connName)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %w", err) + } + if entry == nil { + continue + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + if config.PluginName == pluginName { + if err := b.reloadConnection(ctx, req.Storage, connName); err != nil { + var successfullyReloaded string + if len(reloaded) > 0 { + successfullyReloaded = fmt.Sprintf("successfully reloaded %d connection(s): %s; ", + len(reloaded), + strings.Join(reloaded, ", ")) + } + return nil, fmt.Errorf("%sfailed to reload connection %q: %w", successfullyReloaded, connName, err) + } + reloaded = append(reloaded, connName) + } + } - return nil, nil + resp := &logical.Response{ + Data: map[string]interface{}{ + "connections": reloaded, + "count": len(reloaded), + }, + } + + if len(reloaded) == 0 { + resp.AddWarning(fmt.Sprintf("no connections were found with plugin_name %q", pluginName)) + } + b.dbEvent(ctx, "reload", req.Path, "", true, "plugin_name", pluginName) + return resp, nil } } @@ -103,6 +207,11 @@ func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { return &framework.Path{ Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -149,11 +258,36 @@ func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { }, ExistenceCheck: b.connectionExistenceCheck(), - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.CreateOperation: b.connectionWriteHandler(), - logical.UpdateOperation: b.connectionWriteHandler(), - logical.ReadOperation: b.connectionReadHandler(), - logical.DeleteOperation: b.connectionDeleteHandler(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.connectionReadHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "connection-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.connectionDeleteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "connection-configuration", + }, + }, }, HelpSynopsis: pathConfigConnectionHelpSyn, @@ -181,6 +315,11 @@ func pathListPluginConnection(b *databaseBackend) *framework.Path { return 
&framework.Path{ Pattern: fmt.Sprintf("config/?$"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "connections", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.connectionListHandler(), }, @@ -237,10 +376,24 @@ func (b *databaseBackend) connectionReadHandler() framework.OperationFunc { delete(config.ConnectionDetails, "password") delete(config.ConnectionDetails, "private_key") + delete(config.ConnectionDetails, "service_account_json") - return &logical.Response{ - Data: structs.New(config).Map(), - }, nil + resp := &logical.Response{} + if dbi, err := b.GetConnection(ctx, req.Storage, name); err == nil { + config.RunningPluginVersion = dbi.runningPluginVersion + if config.PluginVersion != "" && config.PluginVersion != config.RunningPluginVersion { + warning := fmt.Sprintf("Plugin version is configured as %q, but running %q", config.PluginVersion, config.RunningPluginVersion) + if pinnedVersion, _ := b.getPinnedVersion(ctx, config.PluginName); pinnedVersion == config.RunningPluginVersion { + warning += " because that version is pinned" + } else { + warning += " either due to a pinned version or because the plugin was upgraded and not yet reloaded" + } + resp.AddWarning(warning) + } + } + + resp.Data = structs.New(config).Map() + return resp, nil } } @@ -261,6 +414,7 @@ func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { return nil, err } + b.dbEvent(ctx, "config-delete", req.Path, name, true) return nil, nil } } @@ -298,58 +452,9 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { return logical.ErrorResponse(respErrEmptyPluginName), nil } - if pluginVersionRaw, ok := data.GetOk("plugin_version"); ok { - config.PluginVersion = pluginVersionRaw.(string) - } - - var builtinShadowed bool - if unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase); err == nil && !unversionedPlugin.Builtin { - builtinShadowed = true - } - switch { - case config.PluginVersion != "": - semanticVersion, err := version.NewVersion(config.PluginVersion) - if err != nil { - return logical.ErrorResponse("version %q is not a valid semantic version: %s", config.PluginVersion, err), nil - } - - // Canonicalize the version. - config.PluginVersion = "v" + semanticVersion.String() - - if config.PluginVersion == versions.GetBuiltinVersion(consts.PluginTypeDatabase, config.PluginName) { - if builtinShadowed { - return logical.ErrorResponse("database plugin %q, version %s not found, as it is"+ - " overridden by an unversioned plugin of the same name. Omit `plugin_version` to use the unversioned plugin", config.PluginName, config.PluginVersion), nil - } - - config.PluginVersion = "" - } - case builtinShadowed: - // We'll select the unversioned plugin that's been registered. - case req.Operation == logical.CreateOperation: - // No version provided and no unversioned plugin of that name available. - // Pin to the current latest version if any versioned plugins are registered. - plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) - if err != nil { - return nil, err - } - - var versionedCandidates []pluginutil.VersionedPlugin - for _, plugin := range plugins { - if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { - versionedCandidates = append(versionedCandidates, plugin) - } - } - - if len(versionedCandidates) != 0 { - // Sort in reverse order. 
- sort.SliceStable(versionedCandidates, func(i, j int) bool { - return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) - }) - - config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() - b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) - } + pluginVersion, respErr, err := b.selectPluginVersion(ctx, config, data, req.Operation) + if respErr != nil || err != nil { + return respErr, err } if allowedRolesRaw, ok := data.GetOk("allowed_roles"); ok { @@ -398,7 +503,7 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { } // Create a database plugin and initialize it. - dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + dbw, err := newDatabaseWrapper(ctx, config.PluginName, pluginVersion, b.System(), b.logger) if err != nil { return logical.ErrorResponse("error creating database object: %s", err), nil } @@ -417,10 +522,11 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { b.Logger().Debug("created database object", "name", name, "plugin_name", config.PluginName) // Close and remove the old connection - oldConn := b.connPut(name, &dbPluginInstance{ - database: dbw, - name: name, - id: id, + oldConn := b.connections.Put(name, &dbPluginInstance{ + database: dbw, + name: name, + id: id, + runningPluginVersion: pluginVersion, }) if oldConn != nil { oldConn.Close() @@ -455,6 +561,7 @@ func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { "Vault (or the sdk if using a custom plugin) to gain password policy support", config.PluginName)) } + b.dbEvent(ctx, "config-write", req.Path, name, true) if len(resp.Warnings) == 0 { return nil, nil } @@ -475,6 +582,92 @@ func storeConfig(ctx context.Context, storage logical.Storage, name string, conf return nil } +func (b *databaseBackend) getPinnedVersion(ctx context.Context, pluginName string) (string, error) { + extendedSys, ok := b.System().(logical.ExtendedSystemView) + if !ok { + return "", fmt.Errorf("database backend does not support running as an external plugin") + } + + pin, err := extendedSys.GetPinnedPluginVersion(ctx, consts.PluginTypeDatabase, pluginName) + if errors.Is(err, pluginutil.ErrPinnedVersionNotFound) { + return "", nil + } + if err != nil { + return "", err + } + + return pin.Version, nil +} + +func (b *databaseBackend) selectPluginVersion(ctx context.Context, config *DatabaseConfig, data *framework.FieldData, op logical.Operation) (string, *logical.Response, error) { + pinnedVersion, err := b.getPinnedVersion(ctx, config.PluginName) + if err != nil { + return "", nil, err + } + pluginVersionRaw, ok := data.GetOk("plugin_version") + + switch { + case ok && pinnedVersion != "": + return "", logical.ErrorResponse("cannot specify plugin_version for plugin %q as it is pinned (v%s)", config.PluginName, pinnedVersion), nil + case pinnedVersion != "": + return pinnedVersion, nil, nil + case ok: + config.PluginVersion = pluginVersionRaw.(string) + } + + var builtinShadowed bool + if unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase); err == nil && !unversionedPlugin.Builtin { + builtinShadowed = true + } + switch { + case config.PluginVersion != "": + semanticVersion, err := version.NewVersion(config.PluginVersion) + if err != nil { + return "", logical.ErrorResponse("version %q is not a valid semantic version: 
%s", config.PluginVersion, err), nil + } + + // Canonicalize the version. + config.PluginVersion = "v" + semanticVersion.String() + + if config.PluginVersion == versions.GetBuiltinVersion(consts.PluginTypeDatabase, config.PluginName) { + if builtinShadowed { + return "", logical.ErrorResponse("database plugin %q, version %s not found, as it is"+ + " overridden by an unversioned plugin of the same name. Omit `plugin_version` to use the unversioned plugin", config.PluginName, config.PluginVersion), nil + } + + config.PluginVersion = "" + } + case builtinShadowed: + // We'll select the unversioned plugin that's been registered. + case op == logical.CreateOperation: + // No version provided and no unversioned plugin of that name available. + // Pin to the current latest version if any versioned plugins are registered. + plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) + if err != nil { + return "", nil, err + } + + var versionedCandidates []pluginutil.VersionedPlugin + for _, plugin := range plugins { + if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { + versionedCandidates = append(versionedCandidates, plugin) + } + } + + if len(versionedCandidates) != 0 { + // Sort in reverse order. + sort.SliceStable(versionedCandidates, func(i, j int) bool { + return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) + }) + + config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() + b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) + } + } + + return config.PluginVersion, nil, nil +} + const pathConfigConnectionHelpSyn = ` Configure connection details to a database plugin. ` @@ -505,3 +698,12 @@ const pathResetConnectionHelpDesc = ` This path resets the database connection by closing the existing database plugin instance and running a new one. ` + +const pathReloadPluginHelpSyn = ` +Reloads all connections using a named database plugin. +` + +const pathReloadPluginHelpDesc = ` +This path resets each database connection using a named plugin by closing each +existing database plugin instance and running a new one. +` diff --git a/builtin/logical/database/path_config_connection_test.go b/builtin/logical/database/path_config_connection_test.go index 18f850dbce92..3741d82dc431 100644 --- a/builtin/logical/database/path_config_connection_test.go +++ b/builtin/logical/database/path_config_connection_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -124,7 +127,7 @@ func TestWriteConfig_PluginVersionInStorage(t *testing.T) { } func TestWriteConfig_HelpfulErrorMessageWhenBuiltinOverridden(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) t.Cleanup(cluster.Cleanup) config := logical.TestBackendConfig() diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go index e57516259fef..d8696fc0a12a 100644 --- a/builtin/logical/database/path_creds_create.go +++ b/builtin/logical/database/path_creds_create.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -6,6 +9,7 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" @@ -15,6 +19,13 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -31,6 +42,13 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { }, { Pattern: "static-creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "read", + OperationSuffix: "static-role-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -49,8 +67,16 @@ func pathCredsCreate(b *databaseBackend) []*framework.Path { } func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (resp *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil && (resp == nil || !resp.IsError()) { + b.dbEvent(ctx, "creds-create", req.Path, name, modified) + } else { + b.dbEvent(ctx, "creds-create-fail", req.Path, name, modified) + } + }() // Get the role role, err := b.Role(ctx, req.Storage, name) @@ -154,6 +180,27 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { // Set output credential respData["rsa_private_key"] = string(private) + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(role.CredentialConfig) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Generate the client certificate + cb, subject, err := generator.generate(b.GetRandomReader(), expiration, + newUserReq.UsernameConfig) + if err != nil { + return nil, fmt.Errorf("failed to generate client certificate: %w", err) + } + + // Set input credential + newUserReq.CredentialType = dbplugin.CredentialTypeClientCertificate + newUserReq.Subject = subject + + // Set output credential + respData["client_certificate"] = cb.Certificate + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType } // Overwriting the password in the event this is a legacy database @@ -163,6 +210,7 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { b.CloseIfShutdown(dbi, err) return nil, err } + modified = true respData["username"] = newUserResp.Username // Database plugins using the v4 interface generate and return the password. 
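The `creds-create`/`creds-create-fail` events above hinge on Go's named return values: a single deferred closure inspects the final `resp` and `err` after the handler body has run. A simplified standalone illustration of the pattern, with `emitEvent` standing in for `dbEvent` and the response-error check omitted:

```
package main

import (
	"errors"
	"fmt"
)

// emitEvent is a stand-in for the backend's dbEvent helper.
func emitEvent(kind string) { fmt.Println("event:", kind) }

// handle uses named returns so the deferred closure can branch on the final
// error value, mirroring the success/failure events in pathCredsCreateRead.
func handle(fail bool) (resp string, err error) {
	defer func() {
		if err == nil {
			emitEvent("creds-create")
		} else {
			emitEvent("creds-create-fail")
		}
	}()
	if fail {
		return "", errors.New("boom")
	}
	return "ok", nil
}

func main() {
	handle(false) // event: creds-create
	handle(true)  // event: creds-create-fail
}
```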
@@ -177,7 +225,7 @@ func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { "db_name": role.DBName, "revocation_statements": role.Statements.Revocation, } - resp := b.Secret(SecretCredsType).Response(respData, internal) + resp = b.Secret(SecretCredsType).Response(respData, internal) resp.Secret.TTL = role.DefaultTTL resp.Secret.MaxTTL = role.MaxTTL return resp, nil @@ -210,10 +258,18 @@ func (b *databaseBackend) pathStaticCredsRead() framework.OperationFunc { respData := map[string]interface{}{ "username": role.StaticAccount.Username, "ttl": role.StaticAccount.CredentialTTL().Seconds(), - "rotation_period": role.StaticAccount.RotationPeriod.Seconds(), "last_vault_rotation": role.StaticAccount.LastVaultRotation, } + if role.StaticAccount.UsesRotationPeriod() { + respData["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + } else if role.StaticAccount.UsesRotationSchedule() { + respData["rotation_schedule"] = role.StaticAccount.RotationSchedule + if role.StaticAccount.RotationWindow.Seconds() != 0 { + respData["rotation_window"] = role.StaticAccount.RotationWindow.Seconds() + } + } + switch role.CredentialType { case v5.CredentialTypePassword: respData["password"] = role.StaticAccount.Password diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go index 02a199c5326d..e223018962fd 100644 --- a/builtin/logical/database/path_roles.go +++ b/builtin/logical/database/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -14,6 +17,7 @@ import ( "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" + "github.com/robfig/cron/v3" ) func pathListRoles(b *databaseBackend) []*framework.Path { @@ -21,6 +25,12 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "list", + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -31,6 +41,12 @@ func pathListRoles(b *databaseBackend) []*framework.Path { { Pattern: "static-roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "list", + OperationSuffix: "static-roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -44,7 +60,11 @@ func pathListRoles(b *databaseBackend) []*framework.Path { func pathRoles(b *databaseBackend) []*framework.Path { return []*framework.Path{ { - Pattern: "roles/" + framework.GenericNameRegex("name"), + Pattern: "roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "role", + }, Fields: fieldsForType(databaseRolePath), ExistenceCheck: b.pathRoleExistenceCheck, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -59,7 +79,11 @@ func pathRoles(b *databaseBackend) []*framework.Path { }, { - Pattern: "static-roles/" + framework.GenericNameRegex("name"), + Pattern: "static-roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "static-role", + }, Fields: fieldsForType(databaseStaticRolePath), ExistenceCheck: b.pathStaticRoleExistenceCheck, Callbacks: 
map[logical.Operation]framework.OperationFunc{ @@ -173,7 +197,18 @@ func staticFields() map[string]*framework.FieldSchema { Type: framework.TypeDurationSecond, Description: `Period for automatic credential rotation of the given username. Not valid unless used with - "username".`, + "username". Mutually exclusive with "rotation_schedule."`, + }, + "rotation_schedule": { + Type: framework.TypeString, + Description: `Schedule for automatic credential rotation of the + given username. Mutually exclusive with "rotation_period."`, + }, + "rotation_window": { + Type: framework.TypeDurationSecond, + Description: `The window of time in which rotations are allowed to + occur starting from a given "rotation_schedule". Requires "rotation_schedule" + to be specified`, }, "rotation_statements": { Type: framework.TypeStringSlice, @@ -203,11 +238,12 @@ func (b *databaseBackend) pathStaticRoleExistenceCheck(ctx context.Context, req } func (b *databaseBackend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, databaseRolePath+data.Get("name").(string)) + name := data.Get("name").(string) + err := req.Storage.Delete(ctx, databaseRolePath+name) if err != nil { return nil, err } - + b.dbEvent(ctx, "role-delete", req.Path, name, true) return nil, nil } @@ -248,6 +284,7 @@ func (b *databaseBackend) pathStaticRoleDelete(ctx context.Context, req *logical } } + b.dbEvent(ctx, "static-role-delete", req.Path, name, true) return nil, merr.ErrorOrNil() } @@ -270,10 +307,20 @@ func (b *databaseBackend) pathStaticRoleRead(ctx context.Context, req *logical.R if role.StaticAccount != nil { data["username"] = role.StaticAccount.Username data["rotation_statements"] = role.Statements.Rotation - data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() if !role.StaticAccount.LastVaultRotation.IsZero() { data["last_vault_rotation"] = role.StaticAccount.LastVaultRotation } + + // only return one of the mutually exclusive fields in the response + if role.StaticAccount.UsesRotationPeriod() { + data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + } else if role.StaticAccount.UsesRotationSchedule() { + data["rotation_schedule"] = role.StaticAccount.RotationSchedule + // rotation_window is only valid with rotation_schedule + if role.StaticAccount.RotationWindow != 0 { + data["rotation_window"] = role.StaticAccount.RotationWindow.Seconds() + } + } } if len(role.CredentialConfig) > 0 { @@ -453,10 +500,12 @@ func (b *databaseBackend) pathRoleCreateUpdate(ctx context.Context, req *logical return nil, err } + b.dbEvent(ctx, fmt.Sprintf("role-%s", req.Operation), req.Path, name, true) return nil, nil } func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + response := &logical.Response{} name := data.Get("name").(string) if name == "" { return logical.ErrorResponse("empty role name attribute given"), nil @@ -513,12 +562,17 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l } role.StaticAccount.Username = username - // If it's a Create operation, both username and rotation_period must be included - rotationPeriodSecondsRaw, ok := data.GetOk("rotation_period") - if !ok && createRole { - return logical.ErrorResponse("rotation_period is required to create static accounts"), nil + rotationPeriodSecondsRaw, rotationPeriodOk := data.GetOk("rotation_period") + rotationScheduleRaw, 
rotationScheduleOk := data.GetOk("rotation_schedule") + rotationWindowSecondsRaw, rotationWindowOk := data.GetOk("rotation_window") + + if rotationScheduleOk && rotationPeriodOk { + return logical.ErrorResponse("mutually exclusive fields rotation_period and rotation_schedule were both specified; only one of them can be provided"), nil + } else if createRole && (!rotationScheduleOk && !rotationPeriodOk) { + return logical.ErrorResponse("one of rotation_schedule or rotation_period must be provided to create a static account"), nil } - if ok { + + if rotationPeriodOk { rotationPeriodSeconds := rotationPeriodSecondsRaw.(int) if rotationPeriodSeconds < defaultQueueTickSeconds { // If rotation frequency is specified, and this is an update, the value @@ -527,6 +581,38 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l return logical.ErrorResponse(fmt.Sprintf("rotation_period must be %d seconds or more", defaultQueueTickSeconds)), nil } role.StaticAccount.RotationPeriod = time.Duration(rotationPeriodSeconds) * time.Second + + if rotationWindowOk { + return logical.ErrorResponse("rotation_window is invalid with use of rotation_period"), nil + } + + // Unset rotation schedule and window if rotation period is set since + // these are mutually exclusive + role.StaticAccount.RotationSchedule = "" + role.StaticAccount.RotationWindow = 0 + } + + if rotationScheduleOk { + rotationSchedule := rotationScheduleRaw.(string) + parsedSchedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + return logical.ErrorResponse("could not parse rotation_schedule", "error", err), nil + } + role.StaticAccount.RotationSchedule = rotationSchedule + role.StaticAccount.Schedule = *parsedSchedule + + if rotationWindowOk { + rotationWindowSeconds := rotationWindowSecondsRaw.(int) + err := b.schedule.ValidateRotationWindow(rotationWindowSeconds) + if err != nil { + return logical.ErrorResponse("rotation_window is invalid", "error", err), nil + } + role.StaticAccount.RotationWindow = time.Duration(rotationWindowSeconds) * time.Second + } + + // Unset rotation period if rotation schedule is set since these are + // mutually exclusive + role.StaticAccount.RotationPeriod = 0 } if rotationStmtsRaw, ok := data.GetOk("rotation_statements"); ok { @@ -600,14 +686,21 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l } } - item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix() + if rotationPeriodOk { + b.logger.Debug("init priority for RotationPeriod", "lvr", lvr, "next", lvr.Add(role.StaticAccount.RotationPeriod)) + item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix() + } else if rotationScheduleOk { + next := role.StaticAccount.Schedule.Next(lvr) + b.logger.Debug("init priority for Schedule", "lvr", lvr, "next", next) + item.Priority = next.Unix() + } // Add their rotation to the queue if err := b.pushItem(item); err != nil { return nil, err } - - return nil, nil + b.dbEvent(ctx, fmt.Sprintf("static-role-%s", req.Operation), req.Path, name, true) + return response, nil } type roleEntry struct { @@ -628,6 +721,8 @@ func (r *roleEntry) setCredentialType(credentialType string) error { r.CredentialType = v5.CredentialTypePassword case v5.CredentialTypeRSAPrivateKey.String(): r.CredentialType = v5.CredentialTypeRSAPrivateKey + case v5.CredentialTypeClientCertificate.String(): + r.CredentialType = v5.CredentialTypeClientCertificate default: return fmt.Errorf("invalid credential_type %q", credentialType) } @@ -669,6 +764,18 @@ func (r 
*roleEntry) setCredentialConfig(config map[string]string) error { if len(cm) > 0 { r.CredentialConfig = cm } + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(c) + if err != nil { + return err + } + cm, err := generator.configMap() + if err != nil { + return err + } + if len(cm) > 0 { + r.CredentialConfig = cm + } } return nil @@ -693,24 +800,102 @@ type staticAccount struct { // LastVaultRotation represents the last time Vault rotated the password LastVaultRotation time.Time `json:"last_vault_rotation"` + // NextVaultRotation represents the next time Vault is expected to rotate + // the password + NextVaultRotation time.Time `json:"next_vault_rotation"` + // RotationPeriod is the number of seconds between each rotation, effectively a // "time to live". This value is compared to the LastVaultRotation to // determine if a password needs to be rotated RotationPeriod time.Duration `json:"rotation_period"` + // RotationSchedule is a cron-style string representing the allowed + // schedule for each rotation. + // e.g. "1 0 * * *" would rotate at one minute past midnight (00:01) every + // day. + RotationSchedule string `json:"rotation_schedule"` + + // RotationWindow is the number of seconds in which rotations are allowed to + // occur starting from a given rotation_schedule. + RotationWindow time.Duration `json:"rotation_window"` + + // Schedule holds the parsed cron-style string representing the allowed + // schedule for each rotation. + Schedule cron.SpecSchedule `json:"schedule"` + // RevokeUser is a boolean flag to indicate if Vault should revoke the // database user when the role is deleted RevokeUserOnDelete bool `json:"revoke_user_on_delete"` } -// NextRotationTime calculates the next rotation by adding the Rotation Period -// to the last known vault rotation +// NextRotationTime calculates the next rotation for period and schedule-based +// rotations. +// +// Period-based expiries are calculated by adding the Rotation Period to the +// last known vault rotation. Schedule-based expiries are calculated by +// querying the schedule for the next rotation after the current time. func (s *staticAccount) NextRotationTime() time.Time { - return s.LastVaultRotation.Add(s.RotationPeriod) + if s.UsesRotationPeriod() { + return s.LastVaultRotation.Add(s.RotationPeriod) + } + return s.Schedule.Next(time.Now()) +} + +// NextRotationTimeFromInput calculates the next rotation time for period and +// schedule-based roles based on the input. +func (s *staticAccount) NextRotationTimeFromInput(input time.Time) time.Time { + if s.UsesRotationPeriod() { + return input.Add(s.RotationPeriod) + } + return s.Schedule.Next(input) +} + +// UsesRotationSchedule returns true if the given static account has been +// configured to rotate credentials on a schedule (i.e. NOT on a rotation period). +func (s *staticAccount) UsesRotationSchedule() bool { + return s.RotationSchedule != "" && s.RotationPeriod == 0 +} + +// UsesRotationPeriod returns true if the given static account has been +// configured to rotate credentials on a period (i.e. NOT on a rotation schedule). +func (s *staticAccount) UsesRotationPeriod() bool { + return s.RotationPeriod != 0 && s.RotationSchedule == "" +} + +// IsInsideRotationWindow returns true if the current time t is within a given +// static account's rotation window. +// +// Returns true if the rotation window is not set.
In this case, the rotation + window is effectively the span of time between two consecutive rotation + schedules and we should not prevent rotation. +func (s *staticAccount) IsInsideRotationWindow(t time.Time) bool { + if s.UsesRotationSchedule() && s.RotationWindow != 0 { + return t.Before(s.NextVaultRotation.Add(s.RotationWindow)) + } + return true +} + +// ShouldRotate returns true if a given static account should have its +// credentials rotated. +// +// This will return true when the priority <= the current Unix time. If this +// static account is schedule-based with a rotation window, this method will +// return false if t is outside the rotation window. +func (s *staticAccount) ShouldRotate(priority int64, t time.Time) bool { + return priority <= t.Unix() && s.IsInsideRotationWindow(t) +} + +// SetNextVaultRotation sets NextVaultRotation relative to the given time, +// using the rotation period for period-based accounts and the next scheduled +// rotation for schedule-based accounts. +func (s *staticAccount) SetNextVaultRotation(t time.Time) { + if s.UsesRotationPeriod() { + s.NextVaultRotation = t.Add(s.RotationPeriod) + } else { + s.NextVaultRotation = s.Schedule.Next(t) + } } // CredentialTTL calculates the approximate time remaining until the credential is -// no longer valid. This is approximate because the periodic rotation is only +// no longer valid. This is approximate because the rotation expiry is only // checked approximately every 5 seconds, and each rotation can take a small // amount of time to process. This can result in a negative TTL time while the // rotation function processes the Static Role and performs the rotation. If the diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go index bfb20633896e..91737da2cf8b 100644 --- a/builtin/logical/database/path_roles_test.go +++ b/builtin/logical/database/path_roles_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
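The period-versus-schedule branching in `NextRotationTime` and `SetNextVaultRotation` can be reproduced in isolation. A sketch using the `robfig/cron/v3` package the backend imports; the backend parses through its own `schedule` wrapper, so using `cron.ParseStandard` directly here is an assumption about the underlying parser:

```
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// Computes the next rotation two ways: period-based (last rotation plus a
// fixed duration) and schedule-based (the next cron tick after a reference
// time). "1 0 * * *" is one minute past midnight, as in the field comment.
func main() {
	last := time.Now()

	// Period-based: next rotation is simply last + period.
	period := 5400 * time.Second
	fmt.Println("period-based next:", last.Add(period))

	// Schedule-based: next rotation is the next cron tick after `last`.
	sched, err := cron.ParseStandard("1 0 * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println("schedule-based next:", sched.Next(last))
}
```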
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -202,7 +205,7 @@ func TestBackend_Roles_CredentialTypes(t *testing.T) { } func TestBackend_StaticRole_Config(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -251,6 +254,8 @@ func TestBackend_StaticRole_Config(t *testing.T) { path string expected map[string]interface{} err error + // use this field to check partial error strings, otherwise use err + errContains string }{ "basic": { account: map[string]interface{}{ @@ -263,12 +268,71 @@ func TestBackend_StaticRole_Config(t *testing.T) { "rotation_period": float64(5400), }, }, - "missing rotation period": { + "missing required fields": { account: map[string]interface{}{ "username": dbUser, }, path: "plugin-role-test", - err: errors.New("rotation_period is required to create static accounts"), + err: errors.New("one of rotation_schedule or rotation_period must be provided to create a static account"), + }, + "rotation_period with rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + err: errors.New("mutually exclusive fields rotation_period and rotation_schedule were both specified; only one of them can be provided"), + }, + "rotation window invalid with rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + "rotation_window": "3600s", + }, + path: "disallowed-role", + err: errors.New("rotation_window is invalid with use of rotation_period"), + }, + "happy path for rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + }, + "happy path for rotation_schedule and rotation_window": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "3600s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": float64(3600), + }, + }, + "error parsing rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "foo", + }, + path: "plugin-role-test", + errContains: "could not parse rotation_schedule", + }, + "rotation_window invalid": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "59s", + }, + path: "plugin-role-test", + errContains: "rotation_window is invalid", }, "disallowed role config": { account: map[string]interface{}{ @@ -278,6 +342,14 @@ func TestBackend_StaticRole_Config(t *testing.T) { path: "disallowed-role", err: errors.New("\"disallowed-role\" is not an allowed role"), }, + "fails to parse cronSpec with seconds": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + path: "plugin-role-test-1", + errContains: "could not parse rotation_schedule", + }, } for name, tc := range testCases { @@ -302,7 +374,12 @@ func TestBackend_StaticRole_Config(t *testing.T) { } resp, err = b.HandleRequest(namespace.RootContext(nil), req) - if err != nil || (resp != nil && resp.IsError()) { + if tc.errContains != "" { + if !strings.Contains(resp.Error().Error(), tc.errContains) { + t.Fatalf("expected err message: 
(%s), got (%s), response error: (%s)", tc.errContains, err, resp.Error()) + } + return + } else if err != nil || (resp != nil && resp.IsError()) { if tc.err == nil { t.Fatalf("err:%s resp:%#v\n", err, resp) } @@ -338,7 +415,14 @@ func TestBackend_StaticRole_Config(t *testing.T) { expected := tc.expected actual := make(map[string]interface{}) - dataKeys := []string{"username", "password", "last_vault_rotation", "rotation_period"} + dataKeys := []string{ + "username", + "password", + "last_vault_rotation", + "rotation_period", + "rotation_schedule", + "rotation_window", + } for _, key := range dataKeys { if v, ok := resp.Data[key]; ok { actual[key] = v } } @@ -385,8 +469,188 @@ } } +func TestBackend_StaticRole_ReadCreds(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + + verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + testCases := map[string]struct { + account map[string]interface{} + path string + expected map[string]interface{} + }{ + "happy path for rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_period": float64(5400), + }, + }, + "happy path for rotation_schedule": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + }, + }, + "happy path for rotation_schedule and rotation_window": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": "3600s", + }, + path: "plugin-role-test", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "* * * * *", + "rotation_window": float64(3600), + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + } + + for k, v := range tc.account { + data[k] = v + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && 
resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := tc.expected + actual := make(map[string]interface{}) + dataKeys := []string{ + "username", + "password", + "last_vault_rotation", + "rotation_period", + "rotation_schedule", + "rotation_window", + "ttl", + } + for _, key := range dataKeys { + if v, ok := resp.Data[key]; ok { + actual[key] = v + } + } + + if len(tc.expected) > 0 { + // verify a password is returned, but we don't care what its value is + if actual["password"] == "" { + t.Fatalf("expected result to contain password, but none found") + } + if actual["ttl"] == "" { + t.Fatalf("expected result to contain ttl, but none found") + } + if v, ok := actual["last_vault_rotation"].(time.Time); !ok { + t.Fatalf("expected last_vault_rotation to be set to time.Time type, got: %#v", v) + } + + // delete these values before the comparison, since we can't know them in + // advance + delete(actual, "password") + delete(actual, "ttl") + delete(actual, "last_vault_rotation") + if diff := deep.Equal(expected, actual); diff != nil { + t.Fatal(diff) + } + } + + // Delete role for next run + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + }) + } +} + func TestBackend_StaticRole_Updates(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -578,8 +842,114 @@ } } +func TestBackend_StaticRole_Updates_RotationSchedule(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + + data := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "1h", + } + + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). 
+ Once() + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + rotation := resp.Data["rotation_schedule"].(string) + window := resp.Data["rotation_window"].(float64) + + // update rotation_schedule and window + updateData := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "username": dbUser, + "rotation_schedule": "0 0 */1 * * *", + "rotation_window": "2h", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: updateData, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // re-read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newRotation := resp.Data["rotation_schedule"].(string) + if newRotation == rotation { + t.Fatalf("expected change in rotation, but got old value: %#v", newRotation) + } + newWindow := resp.Data["rotation_window"].(float64) + if newWindow == window { + t.Fatalf("expected change in rotation_window, but got old value: %#v", newWindow) + } + + // verify that rotation_schedule is only required when creating + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "mockv5", + "username": dbUser, + "rotation_statements": testRoleStaticUpdateRotation, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: storage, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } +} + func TestBackend_StaticRole_Role_name_check(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -825,6 +1195,93 @@ func TestWALsDeletedOnRoleDeletion(t *testing.T) { requireWALs(t, storage, 1) } +func TestIsInsideRotationWindow(t *testing.T) { + for _, tc := range []struct { + name string + expected bool + data map[string]interface{} + now time.Time + timeModifier func(t time.Time) time.Time + }{ + { + "always returns true for rotation_period type", + true, + map[string]interface{}{ + "rotation_period": "86400s", + }, + time.Now(), + nil, + }, + { + "always returns true for rotation_schedule when no rotation_window set", + true, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + }, + time.Now(), + nil, + }, + { + "returns true for rotation_schedule when inside rotation_window", + 
true, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "3600s", + }, + time.Now(), + func(t time.Time) time.Time { + // set current time just inside window + return t.Add(-3640 * time.Second) + }, + }, + { + "returns false for rotation_schedule when outside rotation_window", + false, + map[string]interface{}{ + "rotation_schedule": "0 0 */2 * * *", + "rotation_window": "3600s", + }, + time.Now(), + func(t time.Time) time.Time { + // set current time just outside window + return t.Add(-3560 * time.Second) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + b, s, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, s) + + testTime := tc.now + if tc.data["rotation_schedule"] != nil && tc.timeModifier != nil { + rotationSchedule := tc.data["rotation_schedule"].(string) + schedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + t.Fatalf("could not parse rotation_schedule: %s", err) + } + next1 := schedule.Next(tc.now) // the next rotation time we expect + next2 := schedule.Next(next1) // the next rotation time after that + testTime = tc.timeModifier(next2) + } + + tc.data["username"] = "hashicorp" + tc.data["db_name"] = "mockv5" + createRoleWithData(t, b, s, mockDB, "test-role", tc.data) + role, err := b.StaticRole(ctx, s, "test-role") + if err != nil { + t.Fatal(err) + } + + isInsideWindow := role.StaticAccount.IsInsideRotationWindow(testTime) + if tc.expected != isInsideWindow { + t.Fatalf("expected %t, got %t", tc.expected, isInsideWindow) + } + }) + } +} + func createRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockDB *mockNewDatabase, roleName string) { t.Helper() mockDB.On("UpdateUser", mock.Anything, mock.Anything). @@ -845,6 +1302,22 @@ func createRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockD } } +func createRoleWithData(t *testing.T, b *databaseBackend, s logical.Storage, mockDB *mockNewDatabase, roleName string, data map[string]interface{}) { + t.Helper() + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). + Once() + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: s, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } +} + const testRoleStaticCreate = ` CREATE ROLE "{{name}}" WITH LOGIN diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go index 03a6845e1c57..f2f7fa321ea9 100644 --- a/builtin/logical/database/path_rotate_credentials.go +++ b/builtin/logical/database/path_rotate_credentials.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -16,6 +19,13 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { return []*framework.Path{ { Pattern: "rotate-root/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -36,6 +46,13 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { }, { Pattern: "rotate-role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "static-role-credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -58,8 +75,17 @@ func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { } func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (resp *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "rotate-root", req.Path, name, modified) + } else { + b.dbEvent(ctx, "rotate-root-fail", req.Path, name, modified) + } + }() + if name == "" { return logical.ErrorResponse(respErrEmptyName), nil } @@ -74,6 +100,11 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF return nil, fmt.Errorf("unable to rotate root credentials: no username in configuration") } + rootPassword, ok := config.ConnectionDetails["password"].(string) + if !ok || rootPassword == "" { + return nil, fmt.Errorf("unable to rotate root credentials: no password in configuration") + } + dbi, err := b.GetConnection(ctx, req.Storage, name) if err != nil { return nil, err @@ -137,6 +168,7 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF if newConfigDetails != nil { config.ConnectionDetails = newConfigDetails } + modified = true // 1.12.0 and 1.12.1 stored builtin plugins in storage, but 1.12.2 reverted // that, so clean up any pre-existing stored builtin versions on write. 
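The rotate-root and rotate-role handlers above share one pattern: a named error return plus a deferred closure that inspects err and a modified flag to emit either a success or a failure event. A minimal, self-contained sketch of that pattern, with a hypothetical emitEvent helper standing in for the backend's dbEvent method:

```
package main

import "fmt"

// emitEvent is a hypothetical stand-in for the backend's dbEvent helper.
func emitEvent(eventType, roleName string, modified bool) {
	fmt.Printf("event=%s role=%s modified=%t\n", eventType, roleName, modified)
}

// rotate uses a named error return so the deferred closure can observe the
// outcome after the function body runs and pick the matching event type.
func rotate(name string, fail bool) (err error) {
	modified := false
	defer func() {
		if err == nil {
			emitEvent("rotate", name, modified)
		} else {
			emitEvent("rotate-fail", name, modified)
		}
	}()
	if fail {
		return fmt.Errorf("rotation failed for %s", name)
	}
	modified = true // set only once the credential has actually been changed
	return nil
}

func main() {
	_ = rotate("my-role", false) // emits "rotate" with modified=true
	_ = rotate("my-role", true)  // emits "rotate-fail" with modified=false
}
```

The named return is what makes this work: a deferred closure cannot see an unnamed return value, which is why the hunks above change the handler signatures to `(resp *logical.Response, err error)` and `(_ *logical.Response, err error)`.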
@@ -157,8 +189,16 @@ func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationF } func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc { - return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (_ *logical.Response, err error) { name := data.Get("name").(string) + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "rotate", req.Path, name, modified) + } else { + b.dbEvent(ctx, "rotate-fail", req.Path, name, modified) + } + }() if name == "" { return logical.ErrorResponse("empty role name attribute given"), nil } @@ -202,9 +242,10 @@ func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationF item.Value = resp.WALID } } else { - item.Priority = resp.RotationTime.Add(role.StaticAccount.RotationPeriod).Unix() + item.Priority = role.StaticAccount.NextRotationTimeFromInput(resp.RotationTime).Unix() // Clear any stored WAL ID as we must have successfully deleted our WAL to get here. item.Value = "" + modified = true } // Add their rotation to the queue diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go index a9810e816643..6e1b1dc48400 100644 --- a/builtin/logical/database/rollback.go +++ b/builtin/logical/database/rollback.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go index dc061ae99a3f..f60491a6662c 100644 --- a/builtin/logical/database/rollback_test.go +++ b/builtin/logical/database/rollback_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -24,7 +27,7 @@ const ( // - Password has been altered on the database // - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -167,7 +170,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { // - Password has not been altered on the database // - Password has not been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -271,7 +274,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { // - Password has been altered on the database // - Password has been updated in storage func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go index 5ae2756f279c..0e5840bd30c2 100644 --- a/builtin/logical/database/rotation.go +++ b/builtin/logical/database/rotation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -10,7 +13,6 @@ import ( "github.com/hashicorp/go-secure-stdlib/strutil" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" @@ -89,9 +91,9 @@ func (b *databaseBackend) populateQueue(ctx context.Context, s logical.Storage) log.Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID) } } else { - log.Info("found WAL for role", - "role", item.Key, - "WAL ID", walEntry.walID) + // previous rotation attempt was interrupted, so we set the + // Priority to the current time so it is processed immediately + log.Info("found WAL for role", "role", item.Key, "WAL ID", walEntry.walID) item.Value = walEntry.walID item.Priority = time.Now().Unix() } @@ -188,41 +190,77 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag return false } + roleName := item.Key + logger := b.Logger().With("role", roleName) + // Grab the exclusive lock for this Role, to make sure we don't incur any // writes during the rotation process - lock := locksutil.LockForKey(b.roleLocks, item.Key) + lock := locksutil.LockForKey(b.roleLocks, roleName) lock.Lock() defer lock.Unlock() // Validate the role still exists - role, err := b.StaticRole(ctx, s, item.Key) + role, err := b.StaticRole(ctx, s, roleName) if err != nil { - b.logger.Error("unable to load role", "role", item.Key, "error", err) + logger.Error("unable to load role", "error", err) + item.Priority = time.Now().Add(10 * time.Second).Unix() if err := b.pushItem(item); err != nil { - b.logger.Error("unable to push item on to queue", "error", err) + logger.Error("unable to push item on to queue", "error", err) } return true } if role == nil { - b.logger.Warn("role not found", "role", item.Key, "error", err) + logger.Warn("role not found", "error", err) return true } - // If "now" is less than the Item priority, then this item does not need to - // be rotated - if time.Now().Unix() < item.Priority { + logger = logger.With("database", role.DBName) + + input := &setStaticAccountInput{ + RoleName: roleName, + Role: role, + } + + now := time.Now() + if !role.StaticAccount.ShouldRotate(item.Priority, now) { + if !role.StaticAccount.IsInsideRotationWindow(now) { + // We are a schedule-based rotation and we are outside a rotation + // window so we update priority and NextVaultRotation + item.Priority = role.StaticAccount.NextRotationTimeFromInput(now).Unix() + role.StaticAccount.SetNextVaultRotation(now) + b.logger.Trace("outside schedule-based rotation window, update priority", "next", role.StaticAccount.NextRotationTime()) + + // write to storage after updating NextVaultRotation so the next + // time this item is checked for rotation our role that we retrieve + // from storage reflects that change + entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role) + if err != nil { + logger.Error("unable to encode entry for storage", "error", err) + return false + } + if err := s.Put(ctx, entry); err != nil { + logger.Error("unable to write to storage", "error", err) + return false + } + } + // do not rotate now, push item back onto queue to be rotated later if err := b.pushItem(item); err != nil { - b.logger.Error("unable to push item on to queue", "error", err) + logger.Error("unable to push item on to queue", "error", err) } // Break 
out of the for loop return false } - input := &setStaticAccountInput{ - RoleName: item.Key, - Role: role, - } + // send an event indicating if the rotation was a success or failure + rotated := false + defer func() { + if rotated { + b.dbEvent(ctx, "rotate", "", roleName, true) + } else { + b.dbEvent(ctx, "rotate-fail", "", roleName, false) + } + }() // If there is a WAL entry related to this Role, the corresponding WAL ID // should be stored in the Item's Value field. @@ -232,7 +270,8 @@ func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storag resp, err := b.setStaticAccount(ctx, s, input) if err != nil { - b.logger.Error("unable to rotate credentials in periodic function", "error", err) + logger.Error("unable to rotate credentials in periodic function", "error", err) + // Increment the priority enough so that the next call to this method // likely will not attempt to rotate it, as a back-off of sorts item.Priority = time.Now().Add(10 * time.Second).Unix() @@ -243,7 +282,7 @@ } if err := b.pushItem(item); err != nil { - b.logger.Error("unable to push item on to queue", "error", err) + logger.Error("unable to push item on to queue", "error", err) } // Go to next item return true @@ -257,11 +296,12 @@ } // Update priority and push updated Item to the queue - nextRotation := lvr.Add(role.StaticAccount.RotationPeriod) - item.Priority = nextRotation.Unix() + item.Priority = role.StaticAccount.NextRotationTimeFromInput(lvr).Unix() + if err := b.pushItem(item); err != nil { - b.logger.Warn("unable to push item on to queue", "error", err) + logger.Warn("unable to push item on to queue", "error", err) } + rotated = true return true } @@ -321,10 +361,19 @@ type setStaticAccountOutput struct { // // This method does not perform any operations on the priority queue. Those // tasks must be handled outside of this method. -func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (*setStaticAccountOutput, error) { +func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (_ *setStaticAccountOutput, err error) { if input == nil || input.Role == nil || input.RoleName == "" { return nil, errors.New("input was empty when attempting to set credentials for static account") } + modified := false + defer func() { + if err == nil { + b.dbEvent(ctx, "static-creds-create", "", input.RoleName, modified) + } else { + b.dbEvent(ctx, "static-creds-create-fail", "", input.RoleName, modified) + } + }() + // Re-use WAL ID if present, otherwise PUT a new WAL output := &setStaticAccountOutput{WALID: input.WALID} @@ -478,11 +527,13 @@ b.CloseIfShutdown(dbi, err) return output, fmt.Errorf("error setting credentials: %w", err) } + modified = true // Store updated role information // lvr is the known LastVaultRotation lvr := time.Now() input.Role.StaticAccount.LastVaultRotation = lvr + input.Role.StaticAccount.SetNextVaultRotation(lvr) output.RotationTime = lvr entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role) @@ -514,14 +565,12 @@ // not wait for success or failure of its tasks before continuing. 
This is to // avoid blocking the mount process while loading and evaluating existing roles, // etc. -func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig, replicationState consts.ReplicationState) { +func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig) { // Verify this mount is on the primary server, or is a local mount. If not, do // not create a queue or launch a ticker. Both processing the WAL list and // populating the queue are done sequentially and before launching a // go-routine to run the periodic ticker. - if (conf.System.LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) && - !replicationState.HasState(consts.ReplicationDRSecondary) && - !replicationState.HasState(consts.ReplicationPerformanceStandby) { + if b.WriteSafeReplicationState() { b.Logger().Info("initializing database rotation queue") // Poll for a PutWAL call that does not return a "read-only storage" error. @@ -558,10 +607,10 @@ func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendCo queueTickerInterval := defaultQueueTickSeconds * time.Second if strVal, ok := conf.Config[queueTickIntervalKey]; ok { newVal, err := strconv.Atoi(strVal) - if err == nil { + if err == nil && newVal > 0 { queueTickerInterval = time.Duration(newVal) * time.Second } else { - b.Logger().Error("bad value for %q option: %q", queueTickIntervalKey, strVal) + b.Logger().Error("bad value for %q option: %q, default value of %d being used instead", queueTickIntervalKey, strVal, defaultQueueTickSeconds) } } go b.runTicker(ctx, queueTickerInterval, conf.StorageView) diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go index ccbd64588592..c9917cb37458 100644 --- a/builtin/logical/database/rotation_test.go +++ b/builtin/logical/database/rotation_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( @@ -12,30 +15,36 @@ import ( "time" "github.com/Sectorbob/mlab-ns2/gae/ns/digest" + "github.com/hashicorp/vault/builtin/logical/database/schedule" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/testhelpers/mongodb" postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/dbtxn" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/queue" _ "github.com/jackc/pgx/v4/stdlib" + "github.com/robfig/cron/v3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" mongodbatlasapi "go.mongodb.org/atlas/mongodbatlas" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" ) const ( - dbUser = "vaultstatictest" - dbUserDefaultPassword = "password" + mockv5 = "mockv5" + dbUser = "vaultstatictest" + dbUserDefaultPassword = "password" + testMinRotationWindowSeconds = 5 + testScheduleParseOptions = cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow ) -func TestBackend_StaticRole_Rotate_basic(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_basic(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -52,6 +61,8 @@ func TestBackend_StaticRole_Rotate_basic(t *testing.T) { } defer b.Cleanup(context.Background()) + b.schedule = &TestSchedule{} + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") defer cleanup() @@ -80,110 +91,357 @@ func TestBackend_StaticRole_Rotate_basic(t *testing.T) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - data = map[string]interface{}{ + testCases := map[string]struct { + account map[string]interface{} + path string + expected map[string]interface{} + waitTime time.Duration + }{ + "basic with rotation_period": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_period": "5400s", + }, + path: "plugin-role-test-1", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_period": float64(5400), + }, + }, + "rotation_schedule is set and expires": { + account: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + path: "plugin-role-test-2", + expected: map[string]interface{}{ + "username": dbUser, + "rotation_schedule": "*/10 * * * * *", + }, + waitTime: 20 * time.Second, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + } + + for k, v := range tc.account { + data[k] = v + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + tc.path, + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + + resp, err = 
b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, dbUser, password, connURL) + + // Re-read the creds, verifying they aren't changing on read + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { + t.Fatal("expected re-read username/password to match, but didn't") + } + + // Trigger rotation + data = map[string]interface{}{"name": "plugin-role-test"} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if resp != nil { + t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + } + + // Re-Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newPassword := resp.Data["password"].(string) + if password == newPassword { + t.Fatalf("expected passwords to differ, got (%s)", newPassword) + } + + // Verify new username/password + verifyPgConn(t, username, newPassword, connURL) + + if tc.waitTime > 0 { + time.Sleep(tc.waitTime) + // Re-Read the creds after schedule expiration + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + tc.path, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + checkPassword := resp.Data["password"].(string) + if newPassword == checkPassword { + t.Fatalf("expected passwords to differ, got (%s)", checkPassword) + } + } + }) + } +} + +// TestBackend_StaticRole_Rotation_Schedule_ErrorRecover tests that failed +// rotations can successfully recover and that they do not occur outside of a +// rotation window. 
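+// The test creates a role on a "*/10 * * * * *" schedule with a 5s +// rotation_window, breaks the connection URL so rotations fail, verifies the +// password stays unchanged, then restores the URL and verifies that rotation +// succeeds again on a later scheduled window.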
+func TestBackend_StaticRole_Rotation_Schedule_ErrorRecover(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) + t.Cleanup(cluster.Cleanup) + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + eventSender := logical.NewMockEventSender() + config.EventsSender = eventSender + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + b.schedule = &TestSchedule{} + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + t.Cleanup(cleanup) + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL) + + // Configure a connection + connectionData := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + configureConnection(t, b, config.StorageView, connectionData) + + // create the role that will rotate every 10 seconds; + // rotations will not be allowed more than 5s after the scheduled time + data := map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "rotation_schedule": "*/10 * * * * *", + "rotation_window": "5s", + "username": dbUser, - "rotation_period": "5400s", } - - req = &logical.Request{ + req := &logical.Request{ Operation: logical.CreateOperation, Path: "static-roles/plugin-role-test", Storage: config.StorageView, Data: data, } - - resp, err = b.HandleRequest(namespace.RootContext(nil), req) + resp, err := b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } // Read the creds - data = map[string]interface{}{} req = &logical.Request{ Operation: logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } - resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } username := resp.Data["username"].(string) - password := resp.Data["password"].(string) - if username == "" || password == "" { - t.Fatalf("empty username (%s) or password (%s)", username, password) + originalPassword := resp.Data["password"].(string) + if username == "" || originalPassword == "" { + t.Fatalf("empty username (%s) or password (%s)", username, originalPassword) } // Verify username/password - verifyPgConn(t, dbUser, password, connURL) + verifyPgConn(t, dbUser, originalPassword, connURL) - // Re-read the creds, verifying they aren't changing on read - data = map[string]interface{}{} + // Set invalid connection URL so we fail to rotate + connectionData["connection_url"] = strings.Replace(connURL, "postgres:secret", "postgres:foo", 1) + configureConnection(t, b, config.StorageView, connectionData) + + // determine the next rotation time based on the current test time + rotationSchedule := data["rotation_schedule"].(string) + schedule, err := b.schedule.Parse(rotationSchedule) + if err != nil { + t.Fatalf("could not parse rotation_schedule: %s", err) + } + next := schedule.Next(time.Now()) // the next rotation time we expect + time.Sleep(next.Sub(time.Now())) + + // Re-Read the creds after schedule expiration req = &logical.Request{ + Operation: 
logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { - t.Fatal("expected re-read username/password to match, but didn't") + checkPassword := resp.Data["password"].(string) + if originalPassword != checkPassword { + // should match because rotations should be failing + t.Fatalf("expected passwords to match, got (%s)", checkPassword) } - // Trigger rotation - data = map[string]interface{}{"name": "plugin-role-test"} + // wait until we are outside the rotation window so that rotations will not occur + next = schedule.Next(time.Now()) // the next rotation time after now + time.Sleep(next.Add(time.Second * 6).Sub(time.Now())) + + // reset to valid connection URL so we do not fail to rotate anymore + connectionData["connection_url"] = connURL + configureConnection(t, b, config.StorageView, connectionData) + + // we are outside a rotation window, Re-Read the creds req = &logical.Request{ - Operation: logical.UpdateOperation, - Path: "rotate-role/plugin-role-test", + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - if resp != nil { - t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + checkPassword = resp.Data["password"].(string) + if originalPassword != checkPassword { + // should match because rotations should not occur outside the rotation window + t.Fatalf("expected passwords to match, got (%s)", checkPassword) } + // Verify new username/password + verifyPgConn(t, username, checkPassword, connURL) + + // sleep until the next rotation time with a buffer to ensure we had time to rotate + next = schedule.Next(time.Now()) // the next rotation time we expect + time.Sleep(next.Add(time.Second * 5).Sub(time.Now())) // Re-Read the creds - data = map[string]interface{}{} req = &logical.Request{ Operation: logical.ReadOperation, Path: "static-creds/plugin-role-test", Storage: config.StorageView, - Data: data, } resp, err = b.HandleRequest(namespace.RootContext(nil), req) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("err:%s resp:%#v\n", err, resp) } - newPassword := resp.Data["password"].(string) - if password == newPassword { - t.Fatalf("expected passwords to differ, got (%s)", newPassword) + checkPassword = resp.Data["password"].(string) + if originalPassword == checkPassword { + // should differ because we slept until the next rotation time + t.Fatalf("expected passwords to differ, got (%s)", checkPassword) } // Verify new username/password - verifyPgConn(t, username, newPassword, connURL) + verifyPgConn(t, username, checkPassword, connURL) + + eventSender.Stop() // avoid race detector + // check that we got some events + if len(eventSender.Events) == 0 { + t.Fatal("Expected to have some events but got none") + } + // check that we got a rotate-fail event + found := false + for _, event := range eventSender.Events { + if string(event.Type) == "database/rotate-fail" { + found = true + break + } + } + assert.True(t, found) + found = false + // check that we got a successful rotate event + for _, event := range eventSender.Events { + if string(event.Type) == "database/rotate" { + 
found = true + break + } + } + assert.True(t, found) } // Sanity check to make sure we don't allow an attempt to rotate credentials // for non-static accounts, which doesn't make sense anyway, but doesn't hurt to // verify we return an error -func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -286,8 +544,8 @@ } } -func TestBackend_StaticRole_Revoke_user(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_Revoke_user(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -464,8 +722,8 @@ func verifyPgConn(t *testing.T, username, password, connURL string) { // WAL testing // // First scenario, WAL contains a role name that does not exist. -func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_not_found(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() ctx := context.Background() @@ -505,8 +763,8 @@ // Second scenario, WAL contains a role name that does exist, but the role's // LastVaultRotation is greater than the WAL has -func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() ctx := context.Background() @@ -693,7 +951,7 @@ func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) { type userCreator func(t *testing.T, username, password string) -func TestBackend_StaticRole_Rotations_PostgreSQL(t *testing.T) { +func TestBackend_StaticRole_Rotation_PostgreSQL(t *testing.T) { cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") defer cleanup() uc := userCreator(func(t *testing.T, username, password string) { @@ -705,7 +963,7 @@ }) } -func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) { +func TestBackend_StaticRole_Rotation_MongoDB(t *testing.T) { cleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, "5.0.10", "vaulttestdb") defer cleanup() @@ -718,7 +976,7 @@ }) } -func TestBackend_StaticRole_Rotations_MongoDBAtlas(t *testing.T) { +func TestBackend_StaticRole_Rotation_MongoDBAtlas(t *testing.T) { // To get the project ID, connect to cloud.mongodb.com, go to the vault-test project and // look at Project Settings. projID := os.Getenv("VAULT_MONGODBATLAS_PROJECT_ID") @@ -767,6 +1025,34 @@ }) } +// TestQueueTickIntervalKeyConfig tests that configuring queueTickIntervalKey +// with invalid values does not break the backend. 
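+// A tick interval of "1" is accepted; "0" and "-1" should be rejected by +// initQueue, which falls back to the default tick interval instead of +// failing backend setup.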
+func TestQueueTickIntervalKeyConfig(t *testing.T) { + t.Parallel() + cluster, sys := getClusterPostgresDB(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + config.Config[queueTickIntervalKey] = "1" + + // Rotation ticker starts running in Factory call + b, err := Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) + + config.Config[queueTickIntervalKey] = "0" + b, err = Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) + + config.Config[queueTickIntervalKey] = "-1" + b, err = Factory(context.Background(), config) + require.Nil(t, err) + b.Cleanup(context.Background()) +} + func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts map[string]interface{}) { // We need to set this value for the plugin to run, but it doesn't matter what we set it to. oldToken := os.Getenv(pluginutil.PluginUnwrapTokenEnv) @@ -779,7 +1065,7 @@ func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts } }() - cluster, sys := getCluster(t) + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -942,8 +1228,8 @@ type createUserCommand struct { } // Demonstrates a bug fix for the credential rotation not releasing locks -func TestBackend_StaticRole_LockRegression(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1021,8 +1307,8 @@ func TestBackend_StaticRole_LockRegression(t *testing.T) { } } -func TestBackend_StaticRole_Rotate_Invalid_Role(t *testing.T) { - cluster, sys := getCluster(t) +func TestBackend_StaticRole_Rotation_Invalid_Role(t *testing.T) { + cluster, sys := getClusterPostgresDB(t) defer cluster.Cleanup() config := logical.TestBackendConfig() @@ -1158,10 +1444,18 @@ func TestRollsPasswordForwardsUsingWAL(t *testing.T) { func TestStoredWALsCorrectlyProcessed(t *testing.T) { const walNewPassword = "new-password-from-wal" + + rotationPeriodData := map[string]interface{}{ + "username": "hashicorp", + "db_name": mockv5, + "rotation_period": "86400s", + } + for _, tc := range []struct { name string shouldRotate bool wal *setCredentialsWAL + data map[string]interface{} }{ { "WAL is kept and used for roll forward", @@ -1172,6 +1466,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: walNewPassword, LastVaultRotation: time.Now().Add(time.Hour), }, + rotationPeriodData, }, { "zero-time WAL is discarded on load", @@ -1182,9 +1477,10 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: walNewPassword, LastVaultRotation: time.Time{}, }, + rotationPeriodData, }, { - "empty-password WAL is kept but a new password is generated", + "rotation_period empty-password WAL is kept but a new password is generated", true, &setCredentialsWAL{ RoleName: "hashicorp", @@ -1192,6 +1488,22 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { NewPassword: "", LastVaultRotation: time.Now().Add(time.Hour), }, + rotationPeriodData, + }, + { + "rotation_schedule empty-password WAL is kept but a new password is generated", + true, + &setCredentialsWAL{ + RoleName: "hashicorp", + Username: "hashicorp", + NewPassword: "", + LastVaultRotation: time.Now().Add(time.Hour), + }, + map[string]interface{}{ + "username": "hashicorp", + "db_name": mockv5, + 
"rotation_schedule": "*/10 * * * * *", + }, }, } { t.Run(tc.name, func(t *testing.T) { @@ -1206,8 +1518,9 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal(err) } b.credRotationQueue = queue.New() + b.schedule = &TestSchedule{} configureDBMount(t, config.StorageView) - createRole(t, b, config.StorageView, mockDB, "hashicorp") + createRoleWithData(t, b, config.StorageView, mockDB, tc.wal.RoleName, tc.data) role, err := b.StaticRole(ctx, config.StorageView, "hashicorp") if err != nil { t.Fatal(err) @@ -1221,7 +1534,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { b.credRotationQueue = queue.New() // Now finish the startup process by populating the queue, which should discard the WAL - b.initQueue(ctx, config, consts.ReplicationUnknown) + b.initQueue(ctx, config) if tc.shouldRotate { requireWALs(t, storage, 1) @@ -1245,6 +1558,7 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal(err) } + nextRotationTime := role.StaticAccount.NextRotationTime() if tc.shouldRotate { if tc.wal.NewPassword != "" { // Should use WAL's new_password field @@ -1260,11 +1574,11 @@ func TestStoredWALsCorrectlyProcessed(t *testing.T) { t.Fatal() } } + // Ensure the role was not promoted for early rotation + assertPriorityUnchanged(t, item.Priority, nextRotationTime) } else { // Ensure the role was not promoted for early rotation - if item.Priority < time.Now().Add(time.Hour).Unix() { - t.Fatal("priority should be for about a week away, but was", item.Priority) - } + assertPriorityUnchanged(t, item.Priority, nextRotationTime) if role.StaticAccount.Password != initialPassword { t.Fatal("password should not have been rotated yet") } @@ -1365,6 +1679,7 @@ func getBackend(t *testing.T) (*databaseBackend, logical.Storage, *mockNewDataba if err := b.Setup(context.Background(), config); err != nil { t.Fatal(err) } + b.schedule = &TestSchedule{} b.credRotationQueue = queue.New() b.populateQueue(context.Background(), config.StorageView) @@ -1385,9 +1700,9 @@ func setupMockDB(b *databaseBackend) *mockNewDatabase { dbi := &dbPluginInstance{ database: dbw, id: "foo-id", - name: "mockV5", + name: mockv5, } - b.connections["mockv5"] = dbi + b.connections.Put(mockv5, dbi) return mockDB } @@ -1396,7 +1711,7 @@ func setupMockDB(b *databaseBackend) *mockNewDatabase { // plugin init code paths, allowing us to use a manually populated mock DB object. 
func configureDBMount(t *testing.T, storage logical.Storage) { t.Helper() - entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/mockv5"), &DatabaseConfig{ + entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/"+mockv5), &DatabaseConfig{ AllowedRoles: []string{"*"}, }) if err != nil { @@ -1441,7 +1756,53 @@ func capturePasswords(t *testing.T, b logical.Backend, config *logical.BackendCo return pws } +func configureConnection(t *testing.T, b *databaseBackend, s logical.Storage, data map[string]interface{}) { + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/" + data["name"].(string), + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } +} + func newBoolPtr(b bool) *bool { v := b return &v } + +// assertPriorityUnchanged is a helper to verify that the priority is the +// expected value for a given rotation time +func assertPriorityUnchanged(t *testing.T, priority int64, nextRotationTime time.Time) { + t.Helper() + if priority != nextRotationTime.Unix() { + t.Fatalf("expected next rotation at %s, but got %s", nextRotationTime, time.Unix(priority, 0).String()) + } +} + +var _ schedule.Scheduler = &TestSchedule{} + +type TestSchedule struct{} + +func (d *TestSchedule) Parse(rotationSchedule string) (*cron.SpecSchedule, error) { + parser := cron.NewParser(testScheduleParseOptions) + schedule, err := parser.Parse(rotationSchedule) + if err != nil { + return nil, err + } + sched, ok := schedule.(*cron.SpecSchedule) + if !ok { + return nil, fmt.Errorf("invalid rotation schedule") + } + return sched, nil +} + +func (d *TestSchedule) ValidateRotationWindow(s int) error { + if s < testMinRotationWindowSeconds { + return fmt.Errorf("rotation_window must be %d seconds or more", testMinRotationWindowSeconds) + } + return nil +} diff --git a/builtin/logical/database/schedule/schedule.go b/builtin/logical/database/schedule/schedule.go new file mode 100644 index 000000000000..8f30717ec131 --- /dev/null +++ b/builtin/logical/database/schedule/schedule.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package schedule + +import ( + "fmt" + + "github.com/robfig/cron/v3" +) + +const ( + // Minimum allowed value for rotation_window + minRotationWindowSeconds = 3600 + parseOptions = cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow +) + +type Scheduler interface { + Parse(string) (*cron.SpecSchedule, error) + ValidateRotationWindow(int) error +} + +var _ Scheduler = &DefaultSchedule{} + +type DefaultSchedule struct{} + +func (d *DefaultSchedule) Parse(rotationSchedule string) (*cron.SpecSchedule, error) { + parser := cron.NewParser(parseOptions) + schedule, err := parser.Parse(rotationSchedule) + if err != nil { + return nil, err + } + sched, ok := schedule.(*cron.SpecSchedule) + if !ok { + return nil, fmt.Errorf("invalid rotation schedule") + } + return sched, nil +} + +func (d *DefaultSchedule) ValidateRotationWindow(s int) error { + if s < minRotationWindowSeconds { + return fmt.Errorf("rotation_window must be %d seconds or more", minRotationWindowSeconds) + } + return nil +} diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go index 9c9b348e2437..e2130c1cf7da 100644 --- a/builtin/logical/database/secret_creds.go +++ b/builtin/logical/database/secret_creds.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go index 8c4db1388861..f5280307c953 100644 --- a/builtin/logical/database/version_wrapper.go +++ b/builtin/logical/database/version_wrapper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go index 054241f978a3..47840385966e 100644 --- a/builtin/logical/database/version_wrapper_test.go +++ b/builtin/logical/database/version_wrapper_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database import ( diff --git a/builtin/logical/database/versioning_large_test.go b/builtin/logical/database/versioning_large_test.go index a9f7efde62a6..be936c760336 100644 --- a/builtin/logical/database/versioning_large_test.go +++ b/builtin/logical/database/versioning_large_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package database // This file contains all "large"/expensive tests. These are running requests against a running backend @@ -22,9 +25,10 @@ func TestPlugin_lifecycle(t *testing.T) { cluster, sys := getCluster(t) defer cluster.Cleanup() - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", []string{}, "") - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", []string{}, "") + env := []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)} + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", env) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", env) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", env) config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -223,7 +227,7 @@ func TestPlugin_VersionSelection(t *testing.T) { defer cluster.Cleanup() for _, version := range []string{"v11.0.0", "v11.0.1-rc1", "v2.0.0"} { - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}) } config := logical.TestBackendConfig() @@ -309,11 +313,11 @@ func TestPlugin_VersionSelection(t *testing.T) { } // Register a newer version of the plugin, and ensure that's the new default version selected. 
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}) t.Run("no version specified, new latest version selected", test(t, "", "v11.0.1")) // Register an unversioned plugin and ensure that is now selected when no version is specified. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}) for name, tc := range map[string]struct { selectVersion string expectedVersion string @@ -394,7 +398,7 @@ func TestPlugin_VersionMustBeExplicitlyUpgraded(t *testing.T) { } // Register versioned plugin, and check that a new write to existing config doesn't upgrade the plugin implicitly. - vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}, "") + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, Path: "config/db", diff --git a/builtin/logical/mongodb/backend.go b/builtin/logical/mongodb/backend.go deleted file mode 100644 index a9374243b86c..000000000000 --- a/builtin/logical/mongodb/backend.go +++ /dev/null @@ -1,144 +0,0 @@ -package mongodb - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - mgo "gopkg.in/mgo.v2" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend() *framework.Backend { - var b backend - b.Backend = &framework.Backend{ - Help: strings.TrimSpace(backendHelp), - - PathsSpecial: &logical.Paths{ - SealWrapStorage: []string{ - "config/connection", - }, - }, - - Paths: []*framework.Path{ - pathConfigConnection(&b), - pathConfigLease(&b), - pathListRoles(&b), - pathRoles(&b), - pathCredsCreate(&b), - }, - - Secrets: []*framework.Secret{ - secretCreds(&b), - }, - - Clean: b.ResetSession, - - Invalidate: b.invalidate, - BackendType: logical.TypeLogical, - } - - return b.Backend -} - -type backend struct { - *framework.Backend - - session *mgo.Session - lock sync.Mutex -} - -// Session returns the database connection. 
-func (b *backend) Session(ctx context.Context, s logical.Storage) (*mgo.Session, error) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.session != nil { - if err := b.session.Ping(); err == nil { - return b.session, nil - } - b.session.Close() - } - - connConfigJSON, err := s.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if connConfigJSON == nil { - return nil, fmt.Errorf("configure the MongoDB connection with config/connection first") - } - - var connConfig connectionConfig - if err := connConfigJSON.DecodeJSON(&connConfig); err != nil { - return nil, err - } - - dialInfo, err := parseMongoURI(connConfig.URI) - if err != nil { - return nil, err - } - - b.session, err = mgo.DialWithInfo(dialInfo) - if err != nil { - return nil, err - } - b.session.SetSyncTimeout(1 * time.Minute) - b.session.SetSocketTimeout(1 * time.Minute) - - return b.session, nil -} - -// ResetSession forces creation of a new connection next time Session() is called. -func (b *backend) ResetSession(_ context.Context) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.session != nil { - b.session.Close() - } - - b.session = nil -} - -func (b *backend) invalidate(ctx context.Context, key string) { - switch key { - case "config/connection": - b.ResetSession(ctx) - } -} - -// LeaseConfig returns the lease configuration -func (b *backend) LeaseConfig(ctx context.Context, s logical.Storage) (*configLease, error) { - entry, err := s.Get(ctx, "config/lease") - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result configLease - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -const backendHelp = ` -The mongodb backend dynamically generates MongoDB credentials. - -After mounting this backend, configure it using the endpoints within -the "config/" path. 
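The `Session` helper deleted above is a textbook lazy-connection cache: reuse the session while it still answers a ping, otherwise close it and re-dial. A condensed sketch of just that pattern, under the same mgo API (the method name here is illustrative, not the original's):

```
// cachedSession returns a healthy cached session or dials a fresh one.
// b.lock and b.session are the same fields as in the deleted struct.
func (b *backend) cachedSession(info *mgo.DialInfo) (*mgo.Session, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.session != nil {
		if err := b.session.Ping(); err == nil {
			return b.session, nil // still healthy: reuse it
		}
		b.session.Close() // stale: drop it and re-dial below
	}

	session, err := mgo.DialWithInfo(info)
	if err != nil {
		return nil, err
	}
	session.SetSyncTimeout(1 * time.Minute)
	session.SetSocketTimeout(1 * time.Minute)
	b.session = session
	return session, nil
}
```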
-` diff --git a/builtin/logical/mongodb/backend_test.go b/builtin/logical/mongodb/backend_test.go deleted file mode 100644 index 43cee7de981c..000000000000 --- a/builtin/logical/mongodb/backend_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package mongodb - -import ( - "context" - "fmt" - "log" - "strings" - "sync" - "testing" - - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - "github.com/hashicorp/vault/helper/testhelpers/mongodb" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -var testImagePull sync.Once - -func TestBackend_config_connection(t *testing.T) { - var resp *logical.Response - var err error - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - configData := map[string]interface{}{ - "uri": "sample_connection_uri", - "verify_connection": false, - } - - configReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Storage: config.StorageView, - Data: configData, - } - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - configReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } -} - -func TestBackend_basic(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") - defer cleanup() - connData := map[string]interface{}{ - "uri": connURI, - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(connData, false), - testAccStepRole(), - testAccStepReadCreds("web"), - }, - }) -} - -func TestBackend_roleCrud(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") - defer cleanup() - connData := map[string]interface{}{ - "uri": connURI, - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(connData, false), - testAccStepRole(), - testAccStepReadRole("web", testDb, testMongoDBRoles), - testAccStepDeleteRole("web"), - testAccStepReadRole("web", "", ""), - }, - }) -} - -func TestBackend_leaseWriteRead(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURI := mongodb.PrepareTestContainer(t, "5.0.10") - defer cleanup() - connData := map[string]interface{}{ - "uri": connURI, - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(connData, false), - testAccStepWriteLease(), - testAccStepReadLease(), - }, - }) -} - -func testAccStepConfig(d map[string]interface{}, expectError bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Data: d, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if 
expectError { - if resp.Data == nil { - return fmt.Errorf("data is nil") - } - var e struct { - Error string `mapstructure:"error"` - } - if err := mapstructure.Decode(resp.Data, &e); err != nil { - return err - } - if len(e.Error) == 0 { - return fmt.Errorf("expected error, but write succeeded") - } - return nil - } else if resp != nil && resp.IsError() { - return fmt.Errorf("got an error response: %v", resp.Error()) - } - return nil - }, - } -} - -func testAccStepRole() logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/web", - Data: map[string]interface{}{ - "db": testDb, - "roles": testMongoDBRoles, - }, - } -} - -func testAccStepDeleteRole(n string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: "roles/" + n, - } -} - -func testAccStepReadCreds(name string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "creds/" + name, - Check: func(resp *logical.Response) error { - var d struct { - DB string `mapstructure:"db"` - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.DB == "" { - return fmt.Errorf("bad: %#v", resp) - } - if d.Username == "" { - return fmt.Errorf("bad: %#v", resp) - } - if !strings.HasPrefix(d.Username, "vault-root-") { - return fmt.Errorf("bad: %#v", resp) - } - if d.Password == "" { - return fmt.Errorf("bad: %#v", resp) - } - - log.Printf("[WARN] Generated credentials: %v", d) - - return nil - }, - } -} - -func testAccStepReadRole(name, db, mongoDBRoles string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "roles/" + name, - Check: func(resp *logical.Response) error { - if resp == nil { - if db == "" && mongoDBRoles == "" { - return nil - } - - return fmt.Errorf("bad: %#v", resp) - } - - var d struct { - DB string `mapstructure:"db"` - MongoDBRoles string `mapstructure:"roles"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.DB != db { - return fmt.Errorf("bad: %#v", resp) - } - if d.MongoDBRoles != mongoDBRoles { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -func testAccStepWriteLease() logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/lease", - Data: map[string]interface{}{ - "ttl": "1h5m", - "max_ttl": "24h", - }, - } -} - -func testAccStepReadLease() logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "config/lease", - Check: func(resp *logical.Response) error { - if resp.Data["ttl"].(float64) != 3900 || resp.Data["max_ttl"].(float64) != 86400 { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -const ( - testDb = "foo" - testMongoDBRoles = `["readWrite",{"role":"read","db":"bar"}]` -) diff --git a/builtin/logical/mongodb/cmd/mongodb/main.go b/builtin/logical/mongodb/cmd/mongodb/main.go deleted file mode 100644 index 619f8e3a5a40..000000000000 --- a/builtin/logical/mongodb/cmd/mongodb/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/mongodb" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - 
flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: mongodb.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/logical/mongodb/path_config_connection.go b/builtin/logical/mongodb/path_config_connection.go deleted file mode 100644 index 7f7360729a1d..000000000000 --- a/builtin/logical/mongodb/path_config_connection.go +++ /dev/null @@ -1,112 +0,0 @@ -package mongodb - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - mgo "gopkg.in/mgo.v2" -) - -func pathConfigConnection(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/connection", - Fields: map[string]*framework.FieldSchema{ - "uri": { - Type: framework.TypeString, - Description: "MongoDB standard connection string (URI)", - }, - "verify_connection": { - Type: framework.TypeBool, - Default: true, - Description: `If set, uri is verified by actually connecting to the database`, - }, - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConnectionRead, - logical.UpdateOperation: b.pathConnectionWrite, - }, - HelpSynopsis: pathConfigConnectionHelpSyn, - HelpDescription: pathConfigConnectionHelpDesc, - } -} - -// pathConnectionRead reads out the connection configuration -func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entry, err := req.Storage.Get(ctx, "config/connection") - if err != nil { - return nil, fmt.Errorf("failed to read connection configuration") - } - if entry == nil { - return nil, nil - } - - return nil, nil -} - -func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - uri := data.Get("uri").(string) - if uri == "" { - return logical.ErrorResponse("uri parameter is required"), nil - } - - dialInfo, err := parseMongoURI(uri) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf("invalid uri: %s", err)), nil - } - - // Don't check the config if verification is disabled - verifyConnection := data.Get("verify_connection").(bool) - if verifyConnection { - // Verify the config - session, err := mgo.DialWithInfo(dialInfo) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - defer session.Close() - if err := session.Ping(); err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - } - - // Store it - entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ - URI: uri, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - // Reset the Session - b.ResetSession(ctx) - - resp := &logical.Response{} - resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URI as it is, including passwords, if any.") - - return resp, nil -} - -type connectionConfig struct { - URI string `json:"uri" structs:"uri" mapstructure:"uri"` -} - -const pathConfigConnectionHelpSyn = ` -Configure the connection string to talk to MongoDB. 
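The write handler above only dials MongoDB when `verify_connection` is true (its default), which is what lets the earlier backend_test.go store a placeholder URI with no live server. A trimmed sketch of that test-style request (the URI is a placeholder, never dialed):

```
configReq := &logical.Request{
	Operation: logical.UpdateOperation,
	Path:      "config/connection",
	Storage:   config.StorageView,
	Data: map[string]interface{}{
		"uri":               "sample_connection_uri", // placeholder, never dialed
		"verify_connection": false,                   // skips the DialWithInfo/Ping check
	},
}
resp, err := b.HandleRequest(context.Background(), configReq)
// check err and resp.IsError() here, as the test does
```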
-` - -const pathConfigConnectionHelpDesc = ` -This path configures the standard connection string (URI) used to connect to MongoDB. - -A MongoDB URI looks like: -"mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]" - -See https://docs.mongodb.org/manual/reference/connection-string/ for detailed documentation of the URI format. - -When configuring the connection string, the backend will verify its validity. -` diff --git a/builtin/logical/mongodb/path_config_lease.go b/builtin/logical/mongodb/path_config_lease.go deleted file mode 100644 index c64a4d1d89a7..000000000000 --- a/builtin/logical/mongodb/path_config_lease.go +++ /dev/null @@ -1,89 +0,0 @@ -package mongodb - -import ( - "context" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigLease(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/lease", - Fields: map[string]*framework.FieldSchema{ - "ttl": { - Type: framework.TypeDurationSecond, - Description: "Default ttl for credentials.", - }, - - "max_ttl": { - Type: framework.TypeDurationSecond, - Description: "Maximum time a set of credentials can be valid for.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigLeaseRead, - logical.UpdateOperation: b.pathConfigLeaseWrite, - }, - - HelpSynopsis: pathConfigLeaseHelpSyn, - HelpDescription: pathConfigLeaseHelpDesc, - } -} - -func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entry, err := logical.StorageEntryJSON("config/lease", &configLease{ - TTL: time.Second * time.Duration(d.Get("ttl").(int)), - MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "ttl": leaseConfig.TTL.Seconds(), - "max_ttl": leaseConfig.MaxTTL.Seconds(), - }, - }, nil -} - -type configLease struct { - TTL time.Duration - MaxTTL time.Duration -} - -const pathConfigLeaseHelpSyn = ` -Configure the default lease TTL settings for credentials -generated by the mongodb backend. -` - -const pathConfigLeaseHelpDesc = ` -This configures the default lease TTL settings used for -credentials generated by this backend. The ttl specifies the -duration that a set of credentials will be valid for before -the lease must be renewed (if it is renewable), while the -max_ttl specifies the overall maximum duration that the -credentials will be valid regardless of lease renewals. - -The format for the TTL values is an integer and then unit. For -example, the value "1h" specifies a 1-hour TTL. The longest -supported unit is hours. 
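Because `ttl` and `max_ttl` are `TypeDurationSecond` fields stored as `time.Duration` and read back via `.Seconds()`, the earlier backend_test.go assertion of `3900` and `86400` for inputs of `"1h5m"` and `"24h"` follows directly. A self-contained check:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	ttl, _ := time.ParseDuration("1h5m")
	maxTTL, _ := time.ParseDuration("24h")
	fmt.Println(ttl.Seconds(), maxTTL.Seconds()) // 3900 86400
}
```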
-` diff --git a/builtin/logical/mongodb/path_creds_create.go b/builtin/logical/mongodb/path_creds_create.go deleted file mode 100644 index ca6533408669..000000000000 --- a/builtin/logical/mongodb/path_creds_create.go +++ /dev/null @@ -1,119 +0,0 @@ -package mongodb - -import ( - "context" - "fmt" - - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathCredsCreate(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role to generate credentials for.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsCreateRead, - }, - - HelpSynopsis: pathCredsCreateReadHelpSyn, - HelpDescription: pathCredsCreateReadHelpDesc, - } -} - -func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get the role - role, err := b.Role(ctx, req.Storage, name) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil - } - - // Determine if we have a lease configuration - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - leaseConfig = &configLease{} - } - - // Generate the username and password - displayName := req.DisplayName - if displayName != "" { - displayName += "-" - } - - userUUID, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - username := fmt.Sprintf("vault-%s%s", displayName, userUUID) - - password, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - // Build the user creation command - createUserCmd := createUserCommand{ - Username: username, - Password: password, - Roles: role.MongoDBRoles.toStandardRolesArray(), - } - - // Get our connection - session, err := b.Session(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Create the user - err = session.DB(role.DB).Run(createUserCmd, nil) - if err != nil { - return nil, err - } - - // Return the secret - resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ - "db": role.DB, - "username": username, - "password": password, - }, map[string]interface{}{ - "username": username, - "db": role.DB, - }) - resp.Secret.TTL = leaseConfig.TTL - resp.Secret.MaxTTL = leaseConfig.MaxTTL - - return resp, nil -} - -type createUserCommand struct { - Username string `bson:"createUser"` - Password string `bson:"pwd"` - Roles []interface{} `bson:"roles"` -} - -const pathCredsCreateReadHelpSyn = ` -Request MongoDB database credentials for a particular role. -` - -const pathCredsCreateReadHelpDesc = ` -This path generates MongoDB database credentials for -a particular role. The database credentials will be -generated on demand and will be automatically revoked when -the lease is up.
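Distilling the credential generation above: both halves of the credential are UUIDs, and the username is prefixed with `vault-` plus the request's display name when one is present, which is exactly why the earlier test asserts a `vault-root-` prefix on generated usernames. A self-contained sketch:

```
package main

import (
	"fmt"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	displayName := "root" // req.DisplayName in the real handler
	if displayName != "" {
		displayName += "-"
	}
	userUUID, _ := uuid.GenerateUUID() // error handling elided for brevity
	password, _ := uuid.GenerateUUID()
	username := fmt.Sprintf("vault-%s%s", displayName, userUUID)
	fmt.Println(username, password) // vault-root-<uuid> <uuid>
}
```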
-` diff --git a/builtin/logical/mongodb/path_roles.go b/builtin/logical/mongodb/path_roles.go deleted file mode 100644 index 9bea9e4d4aaa..000000000000 --- a/builtin/logical/mongodb/path_roles.go +++ /dev/null @@ -1,224 +0,0 @@ -package mongodb - -import ( - "context" - "encoding/json" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathListRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/?$", - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func pathRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - "db": { - Type: framework.TypeString, - Description: "Name of the authentication database for users generated for this role.", - }, - "roles": { - Type: framework.TypeString, - Description: "MongoDB roles to assign to the users generated for this role.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead, - logical.UpdateOperation: b.pathRoleCreate, - logical.DeleteOperation: b.pathRoleDelete, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleStorageEntry, error) { - entry, err := s.Get(ctx, "role/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleStorageEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - return nil, nil - } - - rolesJsonBytes, err := json.Marshal(role.MongoDBRoles.toStandardRolesArray()) - if err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "db": role.DB, - "roles": string(rolesJsonBytes), - }, - }, nil -} - -func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "role/") - if err != nil { - return nil, err - } - - return logical.ListResponse(entries), nil -} - -func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - if name == "" { - return logical.ErrorResponse("Missing name"), nil - } - - roleDB := data.Get("db").(string) - if roleDB == "" { - return logical.ErrorResponse("db parameter is required"), nil - } - - // Example roles JSON: - // - // [ "readWrite", { "role": "readWrite", "db": "test" } ] - // - // For storage, we convert such an array into a homogeneous array of role documents like: - // - // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ] - // - var roles 
[]mongodbRole - rolesJson := []byte(data.Get("roles").(string)) - if len(rolesJson) > 0 { - var rolesArray []interface{} - err := json.Unmarshal(rolesJson, &rolesArray) - if err != nil { - return nil, err - } - for _, rawRole := range rolesArray { - switch role := rawRole.(type) { - case string: - roles = append(roles, mongodbRole{Role: role}) - case map[string]interface{}: - if db, ok := role["db"].(string); ok { - if roleName, ok := role["role"].(string); ok { - roles = append(roles, mongodbRole{Role: roleName, DB: db}) - } - } - } - } - } - - // Store it - entry, err := logical.StorageEntryJSON("role/"+name, &roleStorageEntry{ - DB: roleDB, - MongoDBRoles: roles, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -func (roles mongodbRoles) toStandardRolesArray() []interface{} { - // Convert array of role documents like: - // - // [ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ] - // - // into a "standard" MongoDB roles array containing both strings and role documents: - // - // [ "readWrite", { "role": "readWrite", "db": "test" } ] - // - // MongoDB's createUser command accepts the latter. - // - var standardRolesArray []interface{} - for _, role := range roles { - if role.DB == "" { - standardRolesArray = append(standardRolesArray, role.Role) - } else { - standardRolesArray = append(standardRolesArray, role) - } - } - return standardRolesArray -} - -type roleStorageEntry struct { - DB string `json:"db"` - MongoDBRoles mongodbRoles `json:"roles"` -} - -type mongodbRole struct { - Role string `json:"role" bson:"role"` - DB string `json:"db" bson:"db"` -} - -type mongodbRoles []mongodbRole - -const pathRoleHelpSyn = ` -Manage the roles used to generate MongoDB credentials. -` - -const pathRoleHelpDesc = ` -This path lets you manage the roles used to generate MongoDB credentials. - -The "db" parameter specifies the authentication database for users -generated for a given role. - -The "roles" parameter specifies the MongoDB roles that should be assigned -to users created for a given role. Just like when creating a user directly -using db.createUser, the roles JSON array can specify both built-in roles -and user-defined roles for both the database the user is created in and -for other databases. - -For example, the following roles JSON array grants the "readWrite" -permission on both the user's authentication database and the "test" -database: - -[ "readWrite", { "role": "readWrite", "db": "test" } ] - -Please consult the MongoDB documentation for more -details on Role-Based Access Control in MongoDB. 
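The normalization described in the comments above is easiest to see end to end: a mixed input array is stored as homogeneous role documents, then re-expanded into MongoDB's mixed form for `createUser`. A self-contained sketch with local copies of the deleted types (the `omitempty` tag here is illustrative):

```
package main

import (
	"encoding/json"
	"fmt"
)

type mongodbRole struct {
	Role string `json:"role"`
	DB   string `json:"db,omitempty"`
}

// toStandard mirrors toStandardRolesArray: a bare string for roles in the
// user's own authentication database, a full role document otherwise.
func toStandard(roles []mongodbRole) []interface{} {
	out := make([]interface{}, 0, len(roles))
	for _, r := range roles {
		if r.DB == "" {
			out = append(out, r.Role)
		} else {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	roles := []mongodbRole{{Role: "readWrite"}, {Role: "read", DB: "bar"}}
	b, _ := json.Marshal(toStandard(roles))
	fmt.Println(string(b)) // ["readWrite",{"role":"read","db":"bar"}]
}
```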
-` diff --git a/builtin/logical/mongodb/secret_creds.go b/builtin/logical/mongodb/secret_creds.go deleted file mode 100644 index 4e5e09fc2427..000000000000 --- a/builtin/logical/mongodb/secret_creds.go +++ /dev/null @@ -1,84 +0,0 @@ -package mongodb - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - mgo "gopkg.in/mgo.v2" -) - -const SecretCredsType = "creds" - -func secretCreds(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretCredsType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username", - }, - - "password": { - Type: framework.TypeString, - Description: "Password", - }, - }, - - Renew: b.secretCredsRenew, - Revoke: b.secretCredsRevoke, - } -} - -func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the lease information - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - leaseConfig = &configLease{} - } - - resp := &logical.Response{Secret: req.Secret} - resp.Secret.TTL = leaseConfig.TTL - resp.Secret.MaxTTL = leaseConfig.MaxTTL - return resp, nil -} - -func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - if !ok { - return nil, fmt.Errorf("username internal data is not a string") - } - - // Get the db from the internal data - dbRaw, ok := req.Secret.InternalData["db"] - if !ok { - return nil, fmt.Errorf("secret is missing db internal data") - } - db, ok := dbRaw.(string) - if !ok { - return nil, fmt.Errorf("db internal data is not a string") - } - - // Get our connection - session, err := b.Session(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Drop the user - err = session.DB(db).RemoveUser(username) - if err != nil && err != mgo.ErrNotFound { - return nil, err - } - - return nil, nil -} diff --git a/builtin/logical/mongodb/util.go b/builtin/logical/mongodb/util.go deleted file mode 100644 index 1880bbd40b41..000000000000 --- a/builtin/logical/mongodb/util.go +++ /dev/null @@ -1,81 +0,0 @@ -package mongodb - -import ( - "crypto/tls" - "errors" - "net" - "net/url" - "strconv" - "strings" - "time" - - mgo "gopkg.in/mgo.v2" -) - -// Unfortunately, mgo doesn't support the ssl parameter in its MongoDB URI parsing logic, so we have to handle that -// ourselves. 
See https://github.com/go-mgo/mgo/issues/84 -func parseMongoURI(rawUri string) (*mgo.DialInfo, error) { - uri, err := url.Parse(rawUri) - if err != nil { - return nil, err - } - - info := mgo.DialInfo{ - Addrs: strings.Split(uri.Host, ","), - Database: strings.TrimPrefix(uri.Path, "/"), - Timeout: 10 * time.Second, - } - - if uri.User != nil { - info.Username = uri.User.Username() - info.Password, _ = uri.User.Password() - } - - query := uri.Query() - for key, values := range query { - var value string - if len(values) > 0 { - value = values[0] - } - - switch key { - case "authSource": - info.Source = value - case "authMechanism": - info.Mechanism = value - case "gssapiServiceName": - info.Service = value - case "replicaSet": - info.ReplicaSetName = value - case "maxPoolSize": - poolLimit, err := strconv.Atoi(value) - if err != nil { - return nil, errors.New("bad value for maxPoolSize: " + value) - } - info.PoolLimit = poolLimit - case "ssl": - ssl, err := strconv.ParseBool(value) - if err != nil { - return nil, errors.New("bad value for ssl: " + value) - } - if ssl { - info.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { - return tls.Dial("tcp", addr.String(), &tls.Config{}) - } - } - case "connect": - if value == "direct" { - info.Direct = true - break - } - if value == "replicaSet" { - break - } - fallthrough - default: - return nil, errors.New("unsupported connection URL option: " + key + "=" + value) - } - } - - return &info, nil -} diff --git a/builtin/logical/mssql/backend.go b/builtin/logical/mssql/backend.go deleted file mode 100644 index b06a7ff96c7f..000000000000 --- a/builtin/logical/mssql/backend.go +++ /dev/null @@ -1,160 +0,0 @@ -package mssql - -import ( - "context" - "database/sql" - "fmt" - "strings" - "sync" - - _ "github.com/denisenkom/go-mssqldb" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend() *backend { - var b backend - b.Backend = &framework.Backend{ - Help: strings.TrimSpace(backendHelp), - - PathsSpecial: &logical.Paths{ - SealWrapStorage: []string{ - "config/connection", - }, - }, - - Paths: []*framework.Path{ - pathConfigConnection(&b), - pathConfigLease(&b), - pathListRoles(&b), - pathRoles(&b), - pathCredsCreate(&b), - }, - - Secrets: []*framework.Secret{ - secretCreds(&b), - }, - - Invalidate: b.invalidate, - Clean: b.ResetDB, - BackendType: logical.TypeLogical, - } - - return &b -} - -type backend struct { - *framework.Backend - - db *sql.DB - defaultDb string - lock sync.Mutex -} - -// DB returns the default database connection. -func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { - b.lock.Lock() - defer b.lock.Unlock() - - // If we already have a DB, we got it! 
- if b.db != nil { - if err := b.db.Ping(); err == nil { - return b.db, nil - } - // If the ping was unsuccessful, close it and ignore errors as we'll be - // reestablishing anyways - b.db.Close() - } - - // Otherwise, attempt to make connection - entry, err := s.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if entry == nil { - return nil, fmt.Errorf("configure the DB connection with config/connection first") - } - - var connConfig connectionConfig - if err := entry.DecodeJSON(&connConfig); err != nil { - return nil, err - } - connString := connConfig.ConnectionString - - db, err := sql.Open("sqlserver", connString) - if err != nil { - return nil, err - } - - // Set some connection pool settings. We don't need much of this, - // since the request rate shouldn't be high. - db.SetMaxOpenConns(connConfig.MaxOpenConnections) - - stmt, err := db.Prepare("SELECT db_name();") - if err != nil { - return nil, err - } - defer stmt.Close() - - err = stmt.QueryRow().Scan(&b.defaultDb) - if err != nil { - return nil, err - } - - b.db = db - return b.db, nil -} - -// ResetDB forces a connection next time DB() is called. -func (b *backend) ResetDB(_ context.Context) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.db != nil { - b.db.Close() - } - - b.db = nil -} - -func (b *backend) invalidate(ctx context.Context, key string) { - switch key { - case "config/connection": - b.ResetDB(ctx) - } -} - -// LeaseConfig returns the lease configuration -func (b *backend) LeaseConfig(ctx context.Context, s logical.Storage) (*configLease, error) { - entry, err := s.Get(ctx, "config/lease") - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result configLease - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -const backendHelp = ` -The MSSQL backend dynamically generates database users. - -After mounting this backend, configure it using the endpoints within -the "config/" path. - -This backend does not support Azure SQL Databases. 
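One detail worth pulling out of the deleted `DB()` helper above: on every (re)connect it records the connection's default database with `SELECT db_name()`, which the creds path later uses to force `USE [defaultDb]` ahead of role SQL. A hedged sketch of that probe (the DSN is a placeholder; the real one comes from config/connection):

```
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/denisenkom/go-mssqldb"
)

func main() {
	db, err := sql.Open("sqlserver", "server=127.0.0.1;user id=sa;") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Record the connection's default database, as the deleted DB() did.
	var defaultDb string
	if err := db.QueryRow("SELECT db_name();").Scan(&defaultDb); err != nil {
		log.Fatal(err)
	}
	fmt.Println("default database:", defaultDb)
}
```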
-` diff --git a/builtin/logical/mssql/backend_test.go b/builtin/logical/mssql/backend_test.go deleted file mode 100644 index afb239c2d3fb..000000000000 --- a/builtin/logical/mssql/backend_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package mssql - -import ( - "context" - "fmt" - "log" - "reflect" - "testing" - - _ "github.com/denisenkom/go-mssqldb" - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - mssqlhelper "github.com/hashicorp/vault/helper/testhelpers/mssql" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -func Backend_config_connection(t *testing.T) { - var resp *logical.Response - var err error - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - configData := map[string]interface{}{ - "connection_string": "sample_connection_string", - "max_open_connections": 7, - "verify_connection": false, - } - - configReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Storage: config.StorageView, - Data: configData, - } - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - configReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - delete(configData, "verify_connection") - delete(configData, "connection_string") - if !reflect.DeepEqual(configData, resp.Data) { - t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) - } -} - -func TestBackend_basic(t *testing.T) { - b, _ := Factory(context.Background(), logical.TestBackendConfig()) - - cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) - defer cleanup() - - logicaltest.Test(t, logicaltest.TestCase{ - PreCheck: testAccPreCheckFunc(t, connURL), - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connURL), - testAccStepRole(t), - testAccStepReadCreds(t, "web"), - }, - }) -} - -func TestBackend_roleCrud(t *testing.T) { - b := Backend() - - cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) - defer cleanup() - - logicaltest.Test(t, logicaltest.TestCase{ - PreCheck: testAccPreCheckFunc(t, connURL), - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connURL), - testAccStepRole(t), - testAccStepReadRole(t, "web", testRoleSQL), - testAccStepDeleteRole(t, "web"), - testAccStepReadRole(t, "web", ""), - }, - }) -} - -func TestBackend_leaseWriteRead(t *testing.T) { - b := Backend() - - cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) - defer cleanup() - - logicaltest.Test(t, logicaltest.TestCase{ - PreCheck: testAccPreCheckFunc(t, connURL), - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connURL), - testAccStepWriteLease(t), - testAccStepReadLease(t), - }, - }) -} - -func testAccPreCheckFunc(t *testing.T, connectionURL string) func() { - return func() { - if connectionURL == "" { - t.Fatal("connection URL must be set for acceptance tests") - } - } -} - -func testAccStepConfig(t *testing.T, connURL string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Data: map[string]interface{}{ - "connection_string": connURL, - }, - } -} - -func testAccStepRole(t *testing.T) logicaltest.TestStep { 
- return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/web", - Data: map[string]interface{}{ - "sql": testRoleSQL, - }, - } -} - -func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: "roles/" + n, - } -} - -func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "creds/" + name, - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[WARN] Generated credentials: %v", d) - - return nil - }, - } -} - -func testAccStepReadRole(t *testing.T, name, sql string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "roles/" + name, - Check: func(resp *logical.Response) error { - if resp == nil { - if sql == "" { - return nil - } - - return fmt.Errorf("bad: %#v", resp) - } - - var d struct { - SQL string `mapstructure:"sql"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.SQL != sql { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -func testAccStepWriteLease(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/lease", - Data: map[string]interface{}{ - "ttl": "1h5m", - "max_ttl": "24h", - }, - } -} - -func testAccStepReadLease(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "config/lease", - Check: func(resp *logical.Response) error { - if resp.Data["ttl"] != "1h5m0s" || resp.Data["max_ttl"] != "24h0m0s" { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -const testRoleSQL = ` -CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; -CREATE USER [{{name}}] FOR LOGIN [{{name}}]; -GRANT SELECT ON SCHEMA::dbo TO [{{name}}] -` diff --git a/builtin/logical/mssql/cmd/mssql/main.go b/builtin/logical/mssql/cmd/mssql/main.go deleted file mode 100644 index 0db9c1c98203..000000000000 --- a/builtin/logical/mssql/cmd/mssql/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/mssql" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: mssql.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/logical/mssql/path_config_connection.go b/builtin/logical/mssql/path_config_connection.go deleted file mode 100644 index f0ad63108e81..000000000000 --- a/builtin/logical/mssql/path_config_connection.go +++ /dev/null @@ -1,126 +0,0 @@ -package mssql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigConnection(b *backend) *framework.Path { - return &framework.Path{ - Pattern: 
"config/connection", - Fields: map[string]*framework.FieldSchema{ - "connection_string": { - Type: framework.TypeString, - Description: "DB connection parameters", - }, - "max_open_connections": { - Type: framework.TypeInt, - Description: "Maximum number of open connections to database", - }, - "verify_connection": { - Type: framework.TypeBool, - Default: true, - Description: "If set, connection_string is verified by actually connecting to the database", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConnectionWrite, - logical.ReadOperation: b.pathConnectionRead, - }, - - HelpSynopsis: pathConfigConnectionHelpSyn, - HelpDescription: pathConfigConnectionHelpDesc, - } -} - -// pathConnectionRead reads out the connection configuration -func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entry, err := req.Storage.Get(ctx, "config/connection") - if err != nil { - return nil, fmt.Errorf("failed to read connection configuration") - } - if entry == nil { - return nil, nil - } - - var config connectionConfig - if err := entry.DecodeJSON(&config); err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "max_open_connections": config.MaxOpenConnections, - }, - }, nil -} - -// pathConnectionWrite stores the connection configuration -func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - connString := data.Get("connection_string").(string) - - maxOpenConns := data.Get("max_open_connections").(int) - if maxOpenConns == 0 { - maxOpenConns = 2 - } - - // Don't check the connection_string if verification is disabled - verifyConnection := data.Get("verify_connection").(bool) - if verifyConnection { - // Verify the string - db, err := sql.Open("mssql", connString) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - defer db.Close() - if err := db.Ping(); err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - } - - // Store it - entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ - ConnectionString: connString, - MaxOpenConnections: maxOpenConns, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - // Reset the DB connection - b.ResetDB(ctx) - - resp := &logical.Response{} - resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string as it is, including passwords, if any.") - - return resp, nil -} - -type connectionConfig struct { - ConnectionString string `json:"connection_string" structs:"connection_string" mapstructure:"connection_string"` - MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` -} - -const pathConfigConnectionHelpSyn = ` -Configure the connection string to talk to Microsoft Sql Server. -` - -const pathConfigConnectionHelpDesc = ` -This path configures the connection string used to connect to Sql Server. -The value of the string is a Data Source Name (DSN). An example is -using "server=;port=;user id=;password=;database=;app name=vault;" - -When configuring the connection string, the backend will verify its validity. 
-If the database is not available when setting the connection string, set the -"verify_connection" option to false. -` diff --git a/builtin/logical/mssql/path_config_lease.go b/builtin/logical/mssql/path_config_lease.go deleted file mode 100644 index d0fe86dfbd40..000000000000 --- a/builtin/logical/mssql/path_config_lease.go +++ /dev/null @@ -1,114 +0,0 @@ -package mssql - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigLease(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/lease", - Fields: map[string]*framework.FieldSchema{ - "ttl": { - Type: framework.TypeString, - Description: "Default ttl for roles.", - }, - - "ttl_max": { - Type: framework.TypeString, - Description: `Deprecated: use "max_ttl" instead. Maximum -time a credential is valid for.`, - }, - - "max_ttl": { - Type: framework.TypeString, - Description: "Maximum time a credential is valid for.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigLeaseRead, - logical.UpdateOperation: b.pathConfigLeaseWrite, - }, - - HelpSynopsis: pathConfigLeaseHelpSyn, - HelpDescription: pathConfigLeaseHelpDesc, - } -} - -func (b *backend) pathConfigLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - ttlRaw := d.Get("ttl").(string) - ttlMaxRaw := d.Get("max_ttl").(string) - if len(ttlMaxRaw) == 0 { - ttlMaxRaw = d.Get("ttl_max").(string) - } - - ttl, err := time.ParseDuration(ttlRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid ttl: %s", err)), nil - } - ttlMax, err := time.ParseDuration(ttlMaxRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid max_ttl: %s", err)), nil - } - - // Store it - entry, err := logical.StorageEntryJSON("config/lease", &configLease{ - TTL: ttl, - TTLMax: ttlMax, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathConfigLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - return nil, nil - } - - resp := &logical.Response{ - Data: map[string]interface{}{ - "ttl": leaseConfig.TTL.String(), - "ttl_max": leaseConfig.TTLMax.String(), - "max_ttl": leaseConfig.TTLMax.String(), - }, - } - resp.AddWarning("The field ttl_max is deprecated and will be removed in a future release. Use max_ttl instead.") - - return resp, nil -} - -type configLease struct { - TTL time.Duration - TTLMax time.Duration -} - -const pathConfigLeaseHelpSyn = ` -Configure the default lease ttl for generated credentials. -` - -const pathConfigLeaseHelpDesc = ` -This configures the default lease ttl used for credentials -generated by this backend. The ttl specifies the duration that a -credential will be valid for, as well as the maximum session for -a set of credentials. - -The format for the ttl is "1h" or integer and then unit. The longest -unit is hour. 
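The deprecation handling in the lease config above is a small pattern worth naming: the new `max_ttl` field wins, the legacy `ttl_max` is consulted only when `max_ttl` is empty, and reads return both spellings plus a warning. A self-contained sketch of the fallback:

```
package main

import (
	"fmt"
	"time"
)

// preferredDuration mirrors the write handler above: use maxTTL when set,
// otherwise fall back to the deprecated ttlMax value.
func preferredDuration(maxTTL, ttlMax string) (time.Duration, error) {
	raw := maxTTL
	if len(raw) == 0 {
		raw = ttlMax
	}
	return time.ParseDuration(raw)
}

func main() {
	d, _ := preferredDuration("", "24h") // legacy field still honored
	fmt.Println(d)                       // 24h0m0s
}
```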
-` diff --git a/builtin/logical/mssql/path_creds_create.go b/builtin/logical/mssql/path_creds_create.go deleted file mode 100644 index 42e8642c031a..000000000000 --- a/builtin/logical/mssql/path_creds_create.go +++ /dev/null @@ -1,129 +0,0 @@ -package mssql - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathCredsCreate(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathCredsCreateRead, - }, - - HelpSynopsis: pathCredsCreateHelpSyn, - HelpDescription: pathCredsCreateHelpDesc, - } -} - -func (b *backend) pathCredsCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get the role - role, err := b.Role(ctx, req.Storage, name) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil - } - - // Determine if we have a lease configuration - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - leaseConfig = &configLease{} - } - - // Generate our username and password - displayName := req.DisplayName - if len(displayName) > 10 { - displayName = displayName[:10] - } - userUUID, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - username := fmt.Sprintf("%s-%s", displayName, userUUID) - password, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - // Get our handle - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Start a transaction - tx, err := db.Begin() - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Always reset database to default db of connection. Since it is in a - // transaction, all statements will be on the same connection in the pool. - roleSQL := fmt.Sprintf("USE [%s]; %s", b.defaultDb, role.SQL) - - // Execute each query - for _, query := range strutil.ParseArbitraryStringSlice(roleSQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - m := map[string]string{ - "name": username, - "password": password, - } - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { - return nil, err - } - } - - // Commit the transaction - if err := tx.Commit(); err != nil { - return nil, err - } - - // Return the secret - resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ - "username": username, - "password": password, - }, map[string]interface{}{ - "username": username, - }) - resp.Secret.TTL = leaseConfig.TTL - resp.Secret.MaxTTL = leaseConfig.TTLMax - - return resp, nil -} - -const pathCredsCreateHelpSyn = ` -Request database credentials for a certain role. -` - -const pathCredsCreateHelpDesc = ` -This path reads database credentials for a certain role. The -database credentials will be generated on demand and will be automatically -revoked when the lease is up. 
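The heart of the handler above is the statement loop: prefix the role SQL with `USE [defaultDb]` so every statement runs against the same database, split on semicolons, template `{{name}}`/`{{password}}`, and execute everything inside one transaction (and therefore on one pooled connection). A condensed sketch, assuming the same sdk helpers imported by the deleted file:

```
// runRoleSQL condenses the loop above; strutil and dbtxn are the
// hashicorp/vault sdk packages used in the original handler.
func runRoleSQL(ctx context.Context, db *sql.DB, defaultDb, roleSQL, username, password string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // harmless after a successful Commit

	full := fmt.Sprintf("USE [%s]; %s", defaultDb, roleSQL)
	for _, query := range strutil.ParseArbitraryStringSlice(full, ";") {
		query = strings.TrimSpace(query)
		if len(query) == 0 {
			continue
		}
		m := map[string]string{"name": username, "password": password}
		if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil {
			return err
		}
	}
	return tx.Commit()
}
```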
-` diff --git a/builtin/logical/mssql/path_roles.go b/builtin/logical/mssql/path_roles.go deleted file mode 100644 index e378422d3cf7..000000000000 --- a/builtin/logical/mssql/path_roles.go +++ /dev/null @@ -1,172 +0,0 @@ -package mssql - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathListRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/?$", - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func pathRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - - "sql": { - Type: framework.TypeString, - Description: "SQL string to create a role. See help for more info.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead, - logical.UpdateOperation: b.pathRoleCreate, - logical.DeleteOperation: b.pathRoleDelete, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "sql": role.SQL, - }, - }, nil -} - -func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "role/") - if err != nil { - return nil, err - } - - return logical.ListResponse(entries), nil -} - -func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - sql := data.Get("sql").(string) - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Test the query by trying to prepare it - for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - stmt, err := db.Prepare(Query(query, map[string]string{ - "name": "foo", - "password": "bar", - })) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error testing query: %s", err)), nil - } - stmt.Close() - } - - // Store it - entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ - SQL: sql, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - 
return nil, err - } - return nil, nil -} - -type roleEntry struct { - SQL string `json:"sql"` -} - -const pathRoleHelpSyn = ` -Manage the roles that can be created with this backend. -` - -const pathRoleHelpDesc = ` -This path lets you manage the roles that can be created with this backend. - -The "sql" parameter customizes the SQL string used to create the login to -the server. The parameter can be a sequence of SQL queries, each semi-colon -separated. Some substitution will be done to the SQL string for certain keys. -The names of the variables must be surrounded by "{{" and "}}" to be replaced. - - * "name" - The random username generated for the DB user. - - * "password" - The random password generated for the DB user. - -Example SQL query to use: - - CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; - CREATE USER [{{name}}] FROM LOGIN [{{name}}]; - GRANT SELECT, UPDATE, DELETE, INSERT on SCHEMA::dbo TO [{{name}}]; - -Please see the Microsoft SQL Server manual on the GRANT command to learn how to -do more fine grained access. -` diff --git a/builtin/logical/mssql/secret_creds.go b/builtin/logical/mssql/secret_creds.go deleted file mode 100644 index 9fc52f9a777a..000000000000 --- a/builtin/logical/mssql/secret_creds.go +++ /dev/null @@ -1,180 +0,0 @@ -package mssql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/logical" -) - -const SecretCredsType = "creds" - -func secretCreds(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretCredsType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username", - }, - - "password": { - Type: framework.TypeString, - Description: "Password", - }, - }, - - Renew: b.secretCredsRenew, - Revoke: b.secretCredsRevoke, - } -} - -func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the lease information - leaseConfig, err := b.LeaseConfig(ctx, req.Storage) - if err != nil { - return nil, err - } - if leaseConfig == nil { - leaseConfig = &configLease{} - } - - resp := &logical.Response{Secret: req.Secret} - resp.Secret.TTL = leaseConfig.TTL - resp.Secret.MaxTTL = leaseConfig.TTLMax - return resp, nil -} - -func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // First disable server login - disableStmt, err := db.Prepare(fmt.Sprintf("ALTER LOGIN [%s] DISABLE;", username)) - if err != nil { - return nil, err - } - defer disableStmt.Close() - if _, err := disableStmt.Exec(); err != nil { - return nil, err - } - - // Query for sessions for the login so that we can kill any outstanding - // sessions. 
There cannot be any active sessions before we drop the logins - // This isn't done in a transaction because even if we fail along the way, - // we want to remove as much access as possible - sessionStmt, err := db.Prepare("SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = @p1;") - if err != nil { - return nil, err - } - defer sessionStmt.Close() - - sessionRows, err := sessionStmt.Query(username) - if err != nil { - return nil, err - } - defer sessionRows.Close() - - var revokeStmts []string - for sessionRows.Next() { - var sessionID int - err = sessionRows.Scan(&sessionID) - if err != nil { - return nil, err - } - revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID)) - } - - // Query for database users using undocumented stored procedure for now since - // it is the easiest way to get this information; - // we need to drop the database users before we can drop the login and the role - // This isn't done in a transaction because even if we fail along the way, - // we want to remove as much access as possible - stmt, err := db.Prepare("EXEC master.dbo.sp_msloginmappings @p1;") - if err != nil { - return nil, err - } - defer stmt.Close() - - rows, err := stmt.Query(username) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var loginName, dbName, qUsername, aliasName sql.NullString - err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName) - if err != nil { - return nil, err - } - if !dbName.Valid { - continue - } - revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName.String, username, username)) - } - - // we do not stop on error, as we want to remove as - // many permissions as possible right now - var lastStmtError error - for _, query := range revokeStmts { - if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { - lastStmtError = err - continue - } - } - - // can't drop if not all database users are dropped - if rows.Err() != nil { - return nil, fmt.Errorf("could not generate sql statements for all rows: %w", rows.Err()) - } - if lastStmtError != nil { - return nil, fmt.Errorf("could not perform all sql statements: %w", lastStmtError) - } - - // Drop this login - stmt, err = db.Prepare(fmt.Sprintf(dropLoginSQL, username, username)) - if err != nil { - return nil, err - } - defer stmt.Close() - if _, err := stmt.Exec(); err != nil { - return nil, err - } - - return nil, nil -} - -const dropUserSQL = ` -USE [%s] -IF EXISTS - (SELECT name - FROM sys.database_principals - WHERE name = N'%s') -BEGIN - DROP USER [%s] -END -` - -const dropLoginSQL = ` -IF EXISTS - (SELECT name - FROM master.sys.server_principals - WHERE name = N'%s') -BEGIN - DROP LOGIN [%s] -END -` diff --git a/builtin/logical/mssql/util.go b/builtin/logical/mssql/util.go deleted file mode 100644 index 17c46c6813bb..000000000000 --- a/builtin/logical/mssql/util.go +++ /dev/null @@ -1,28 +0,0 @@ -package mssql - -import ( - "fmt" - "strings" -) - -// SplitSQL is used to split a series of SQL statements -func SplitSQL(sql string) []string { - parts := strings.Split(sql, ";") - out := make([]string, 0, len(parts)) - for _, p := range parts { - clean := strings.TrimSpace(p) - if len(clean) > 0 { - out = append(out, clean) - } - } - return out -} - -// Query templates a query for us. 
-func Query(tpl string, data map[string]string) string { - for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) - } - - return tpl -} diff --git a/builtin/logical/mysql/backend.go b/builtin/logical/mysql/backend.go deleted file mode 100644 index b9840dad3000..000000000000 --- a/builtin/logical/mysql/backend.go +++ /dev/null @@ -1,151 +0,0 @@ -package mysql - -import ( - "context" - "database/sql" - "fmt" - "strings" - "sync" - - _ "github.com/go-sql-driver/mysql" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend() *backend { - var b backend - b.Backend = &framework.Backend{ - Help: strings.TrimSpace(backendHelp), - - PathsSpecial: &logical.Paths{ - SealWrapStorage: []string{ - "config/connection", - }, - }, - - Paths: []*framework.Path{ - pathConfigConnection(&b), - pathConfigLease(&b), - pathListRoles(&b), - pathRoles(&b), - pathRoleCreate(&b), - }, - - Secrets: []*framework.Secret{ - secretCreds(&b), - }, - - Invalidate: b.invalidate, - Clean: b.ResetDB, - BackendType: logical.TypeLogical, - } - - return &b -} - -type backend struct { - *framework.Backend - - db *sql.DB - lock sync.Mutex -} - -// DB returns the database connection. -func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { - b.lock.Lock() - defer b.lock.Unlock() - - // If we already have a DB, we got it! - if b.db != nil { - if err := b.db.Ping(); err == nil { - return b.db, nil - } - // If the ping was unsuccessful, close it and ignore errors as we'll be - // reestablishing anyways - b.db.Close() - } - - // Otherwise, attempt to make connection - entry, err := s.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if entry == nil { - return nil, - fmt.Errorf("configure the DB connection with config/connection first") - } - - var connConfig connectionConfig - if err := entry.DecodeJSON(&connConfig); err != nil { - return nil, err - } - - conn := connConfig.ConnectionURL - if len(conn) == 0 { - conn = connConfig.ConnectionString - } - - b.db, err = sql.Open("mysql", conn) - if err != nil { - return nil, err - } - - // Set some connection pool settings. We don't need much of this, - // since the request rate shouldn't be high. - b.db.SetMaxOpenConns(connConfig.MaxOpenConnections) - b.db.SetMaxIdleConns(connConfig.MaxIdleConnections) - - return b.db, nil -} - -// ResetDB forces a connection next time DB() is called. -func (b *backend) ResetDB(_ context.Context) { - b.lock.Lock() - defer b.lock.Unlock() - - if b.db != nil { - b.db.Close() - } - - b.db = nil -} - -func (b *backend) invalidate(ctx context.Context, key string) { - switch key { - case "config/connection": - b.ResetDB(ctx) - } -} - -// Lease returns the lease information -func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { - entry, err := s.Get(ctx, "config/lease") - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result configLease - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -const backendHelp = ` -The MySQL backend dynamically generates database users. - -After mounting this backend, configure it using the endpoints within -the "config/" path. 
-` diff --git a/builtin/logical/mysql/backend_test.go b/builtin/logical/mysql/backend_test.go deleted file mode 100644 index 62074d8ed4e5..000000000000 --- a/builtin/logical/mysql/backend_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package mysql - -import ( - "context" - "fmt" - "log" - "reflect" - "testing" - - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -func TestBackend_config_connection(t *testing.T) { - var resp *logical.Response - var err error - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - configData := map[string]interface{}{ - "connection_url": "sample_connection_url", - "max_open_connections": 9, - "max_idle_connections": 7, - "verify_connection": false, - } - - configReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Storage: config.StorageView, - Data: configData, - } - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - configReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - delete(configData, "verify_connection") - delete(configData, "connection_url") - if !reflect.DeepEqual(configData, resp.Data) { - t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) - } -} - -func TestBackend_basic(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - - // for wildcard based mysql user - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepRole(t, true), - testAccStepReadCreds(t, "web"), - }, - }) -} - -func TestBackend_basicHostRevoke(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - - // for host based mysql user - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepRole(t, false), - testAccStepReadCreds(t, "web"), - }, - }) -} - -func TestBackend_roleCrud(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - // test SQL with wildcard 
based user - testAccStepRole(t, true), - testAccStepReadRole(t, "web", testRoleWildCard), - testAccStepDeleteRole(t, "web"), - // test SQL with host based user - testAccStepRole(t, false), - testAccStepReadRole(t, "web", testRoleHost), - testAccStepDeleteRole(t, "web"), - }, - }) -} - -func TestBackend_leaseWriteRead(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepWriteLease(t), - testAccStepReadLease(t), - }, - }) -} - -func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Data: d, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if expectError { - if resp.Data == nil { - return fmt.Errorf("data is nil") - } - var e struct { - Error string `mapstructure:"error"` - } - if err := mapstructure.Decode(resp.Data, &e); err != nil { - return err - } - if len(e.Error) == 0 { - return fmt.Errorf("expected error, but write succeeded") - } - return nil - } else if resp != nil && resp.IsError() { - return fmt.Errorf("got an error response: %v", resp.Error()) - } - return nil - }, - } -} - -func testAccStepRole(t *testing.T, wildCard bool) logicaltest.TestStep { - pathData := make(map[string]interface{}) - if wildCard { - pathData = map[string]interface{}{ - "sql": testRoleWildCard, - } - } else { - pathData = map[string]interface{}{ - "sql": testRoleHost, - "revocation_sql": testRevocationSQL, - } - } - - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/web", - Data: pathData, - } -} - -func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: "roles/" + n, - } -} - -func testAccStepReadCreds(t *testing.T, name string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "creds/" + name, - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[WARN] Generated credentials: %v", d) - - return nil - }, - } -} - -func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "roles/" + name, - Check: func(resp *logical.Response) error { - if resp == nil { - if sql == "" { - return nil - } - - return fmt.Errorf("bad: %#v", resp) - } - - var d struct { - SQL string `mapstructure:"sql"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.SQL != sql { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -func testAccStepWriteLease(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/lease", - Data: map[string]interface{}{ - "lease": "1h5m", - "lease_max": "24h", - }, - } -} - -func testAccStepReadLease(t 
*testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "config/lease", - Check: func(resp *logical.Response) error { - if resp.Data["lease"] != "1h5m0s" || resp.Data["lease_max"] != "24h0m0s" { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -const testRoleWildCard = ` -CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; -GRANT SELECT ON *.* TO '{{name}}'@'%'; -` - -const testRoleHost = ` -CREATE USER '{{name}}'@'10.1.1.2' IDENTIFIED BY '{{password}}'; -GRANT SELECT ON *.* TO '{{name}}'@'10.1.1.2'; -` - -const testRevocationSQL = ` -REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'10.1.1.2'; -DROP USER '{{name}}'@'10.1.1.2'; -` diff --git a/builtin/logical/mysql/cmd/mysql/main.go b/builtin/logical/mysql/cmd/mysql/main.go deleted file mode 100644 index e1fbe4a01e49..000000000000 --- a/builtin/logical/mysql/cmd/mysql/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/mysql" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: mysql.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/logical/mysql/path_config_connection.go b/builtin/logical/mysql/path_config_connection.go deleted file mode 100644 index 151fd7c8787c..000000000000 --- a/builtin/logical/mysql/path_config_connection.go +++ /dev/null @@ -1,159 +0,0 @@ -package mysql - -import ( - "context" - "database/sql" - "fmt" - - _ "github.com/go-sql-driver/mysql" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigConnection(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/connection", - Fields: map[string]*framework.FieldSchema{ - "connection_url": { - Type: framework.TypeString, - Description: "DB connection string", - }, - "value": { - Type: framework.TypeString, - Description: `DB connection string. Use 'connection_url' instead. -This name is deprecated.`, - }, - "max_open_connections": { - Type: framework.TypeInt, - Description: "Maximum number of open connections to database", - }, - "max_idle_connections": { - Type: framework.TypeInt, - Description: "Maximum number of idle connections to the database; a zero uses the value of max_open_connections and a negative value disables idle connections. 
If larger than max_open_connections it will be reduced to the same size.", - }, - "verify_connection": { - Type: framework.TypeBool, - Default: true, - Description: "If set, connection_url is verified by actually connecting to the database", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConnectionWrite, - logical.ReadOperation: b.pathConnectionRead, - }, - - HelpSynopsis: pathConfigConnectionHelpSyn, - HelpDescription: pathConfigConnectionHelpDesc, - } -} - -// pathConnectionRead reads out the connection configuration -func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entry, err := req.Storage.Get(ctx, "config/connection") - if err != nil { - return nil, fmt.Errorf("failed to read connection configuration") - } - if entry == nil { - return nil, nil - } - - var config connectionConfig - if err := entry.DecodeJSON(&config); err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "max_open_connections": config.MaxOpenConnections, - "max_idle_connections": config.MaxIdleConnections, - }, - }, nil -} - -func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - connValue := data.Get("value").(string) - connURL := data.Get("connection_url").(string) - if connURL == "" { - if connValue == "" { - return logical.ErrorResponse("the connection_url parameter must be supplied"), nil - } else { - connURL = connValue - } - } - - maxOpenConns := data.Get("max_open_connections").(int) - if maxOpenConns == 0 { - maxOpenConns = 2 - } - - maxIdleConns := data.Get("max_idle_connections").(int) - if maxIdleConns == 0 { - maxIdleConns = maxOpenConns - } - if maxIdleConns > maxOpenConns { - maxIdleConns = maxOpenConns - } - - // Don't check the connection_url if verification is disabled - verifyConnection := data.Get("verify_connection").(bool) - if verifyConnection { - // Verify the string - db, err := sql.Open("mysql", connURL) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "error validating connection info: %s", err)), nil - } - defer db.Close() - if err := db.Ping(); err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "error validating connection info: %s", err)), nil - } - } - - // Store it - entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ - ConnectionURL: connURL, - MaxOpenConnections: maxOpenConns, - MaxIdleConnections: maxIdleConns, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - // Reset the DB connection - b.ResetDB(ctx) - - resp := &logical.Response{} - resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection URL as it is, including passwords, if any.") - - return resp, nil -} - -type connectionConfig struct { - ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` - // Deprecate "value" in coming releases - ConnectionString string `json:"value" structs:"value" mapstructure:"value"` - MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` - MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"` -} - -const pathConfigConnectionHelpSyn = ` -Configure the connection string to talk to 
MySQL. -` - -const pathConfigConnectionHelpDesc = ` -This path configures the connection string used to connect to MySQL. The value -of the string is a Data Source Name (DSN), such as: -"username:password@protocol(address)/dbname?param=value" - -For example, an RDS connection may look like: -"id:password@tcp(your-amazonaws-uri.com:3306)/dbname" - -When configuring the connection string, the backend will verify its validity. -If the database is not available when setting the connection URL, set the -"verify_connection" option to false. -` diff --git a/builtin/logical/mysql/path_config_lease.go b/builtin/logical/mysql/path_config_lease.go deleted file mode 100644 index e8b0543e0101..000000000000 --- a/builtin/logical/mysql/path_config_lease.go +++ /dev/null @@ -1,101 +0,0 @@ -package mysql - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigLease(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/lease", - Fields: map[string]*framework.FieldSchema{ - "lease": { - Type: framework.TypeString, - Description: "Default lease for roles.", - }, - - "lease_max": { - Type: framework.TypeString, - Description: "Maximum time a credential is valid for.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseWrite, - }, - - HelpSynopsis: pathConfigLeaseHelpSyn, - HelpDescription: pathConfigLeaseHelpDesc, - } -} - -func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - leaseRaw := d.Get("lease").(string) - leaseMaxRaw := d.Get("lease_max").(string) - - lease, err := time.ParseDuration(leaseRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid lease: %s", err)), nil - } - leaseMax, err := time.ParseDuration(leaseMaxRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid lease_max: %s", err)), nil - } - - // Store it - entry, err := logical.StorageEntryJSON("config/lease", &configLease{ - Lease: lease, - LeaseMax: leaseMax, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - if lease == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "lease": lease.Lease.String(), - "lease_max": lease.LeaseMax.String(), - }, - }, nil -} - -type configLease struct { - Lease time.Duration - LeaseMax time.Duration -} - -const pathConfigLeaseHelpSyn = ` -Configure the default lease information for generated credentials. -` - -const pathConfigLeaseHelpDesc = ` -This configures the default lease information used for credentials -generated by this backend. The lease specifies the duration that a -credential will be valid for, as well as the maximum lifetime for -a set of credentials. - -The lease is specified as an integer followed by a unit suffix, such as -"1h". The largest supported unit is hours.
-` diff --git a/builtin/logical/mysql/path_role_create.go b/builtin/logical/mysql/path_role_create.go deleted file mode 100644 index 6bc0c4b3b321..000000000000 --- a/builtin/logical/mysql/path_role_create.go +++ /dev/null @@ -1,143 +0,0 @@ -package mysql - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathRoleCreate(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleCreateRead, - }, - - HelpSynopsis: pathRoleCreateReadHelpSyn, - HelpDescription: pathRoleCreateReadHelpDesc, - } -} - -func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get the role - role, err := b.Role(ctx, req.Storage, name) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil - } - - // Determine if we have a lease - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - if lease == nil { - lease = &configLease{} - } - - // Generate our username and password. The username will be a - // concatenation of: - // - // - the role name, truncated to role.rolenameLength (default 4) - // - the token display name, truncated to role.displaynameLength (default 4) - // - a UUID - // - // the entire concatenated string is then truncated to role.usernameLength, - // which by default is 16 due to limitations in older but still-prevalent - // versions of MySQL. 
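[Reviewer note] To make the truncation scheme described above concrete, a standalone sketch; the role name, display name, and lengths here are illustrative (in the actual implementation, which continues below, they come from the role entry and the request):

```go
package main

import (
	"fmt"

	uuid "github.com/hashicorp/go-uuid"
)

// truncate caps s at n characters, as the scheme above does for each part.
func truncate(s string, n int) string {
	if len(s) > n {
		return s[:n]
	}
	return s
}

func main() {
	userUUID, err := uuid.GenerateUUID()
	if err != nil {
		panic(err)
	}
	// role name "webapp" -> "weba", display name "token" -> "toke" (both
	// truncated to the default 4), then the UUID, with the whole string
	// capped at the default username_length of 16: e.g. "weba-toke-3f94e1".
	username := fmt.Sprintf("%s-%s-%s", truncate("webapp", 4), truncate("token", 4), userUUID)
	fmt.Println(truncate(username, 16))
}
```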
- roleName := name - if len(roleName) > role.RolenameLength { - roleName = roleName[:role.RolenameLength] - } - displayName := req.DisplayName - if len(displayName) > role.DisplaynameLength { - displayName = displayName[:role.DisplaynameLength] - } - userUUID, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - username := fmt.Sprintf("%s-%s-%s", roleName, displayName, userUUID) - if len(username) > role.UsernameLength { - username = username[:role.UsernameLength] - } - password, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - // Get our handle - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Start a transaction - tx, err := db.Begin() - if err != nil { - return nil, err - } - defer tx.Rollback() - - // Execute each query - for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - m := map[string]string{ - "name": username, - "password": password, - } - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { - return nil, err - } - } - - // Commit the transaction - if err := tx.Commit(); err != nil { - return nil, err - } - - // Return the secret - resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ - "username": username, - "password": password, - }, map[string]interface{}{ - "username": username, - "role": name, - }) - - resp.Secret.TTL = lease.Lease - resp.Secret.MaxTTL = lease.LeaseMax - - return resp, nil -} - -const pathRoleCreateReadHelpSyn = ` -Request database credentials for a certain role. -` - -const pathRoleCreateReadHelpDesc = ` -This path reads database credentials for a certain role. The -database credentials will be generated on demand and will be automatically -revoked when the lease is up. -` diff --git a/builtin/logical/mysql/path_roles.go b/builtin/logical/mysql/path_roles.go deleted file mode 100644 index eecf48732fe2..000000000000 --- a/builtin/logical/mysql/path_roles.go +++ /dev/null @@ -1,230 +0,0 @@ -package mysql - -import ( - "context" - "fmt" - "strings" - - _ "github.com/go-sql-driver/mysql" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathListRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/?$", - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func pathRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - - "sql": { - Type: framework.TypeString, - Description: "SQL string to create a user. See help for more info.", - }, - - "revocation_sql": { - Type: framework.TypeString, - Description: "SQL string to revoke a user. 
See help for more info.", - }, - - "username_length": { - Type: framework.TypeInt, - Description: "number of characters to truncate generated mysql usernames to (default 16)", - Default: 16, - }, - - "rolename_length": { - Type: framework.TypeInt, - Description: "number of characters to truncate the rolename portion of generated mysql usernames to (default 4)", - Default: 4, - }, - - "displayname_length": { - Type: framework.TypeInt, - Description: "number of characters to truncate the displayname portion of generated mysql usernames to (default 4)", - Default: 4, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead, - logical.UpdateOperation: b.pathRoleCreate, - logical.DeleteOperation: b.pathRoleDelete, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - // Set defaults to handle upgrade cases - result := roleEntry{ - UsernameLength: 16, - RolenameLength: 4, - DisplaynameLength: 4, - } - - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "sql": role.SQL, - "revocation_sql": role.RevocationSQL, - }, - }, nil -} - -func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "role/") - if err != nil { - return nil, err - } - - return logical.ListResponse(entries), nil -} - -func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Test the query by trying to prepare it - sql := data.Get("sql").(string) - for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - stmt, err := db.Prepare(Query(query, map[string]string{ - "name": "foo", - "password": "bar", - })) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error testing query: %s", err)), nil - } - stmt.Close() - } - - // Store it - entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ - SQL: sql, - RevocationSQL: data.Get("revocation_sql").(string), - UsernameLength: data.Get("username_length").(int), - DisplaynameLength: data.Get("displayname_length").(int), - RolenameLength: data.Get("rolename_length").(int), - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - return nil, nil -} - -type roleEntry struct { - SQL string `json:"sql" mapstructure:"sql" structs:"sql"` - RevocationSQL 
string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"` - UsernameLength int `json:"username_length" mapstructure:"username_length" structs:"username_length"` - DisplaynameLength int `json:"displayname_length" mapstructure:"displayname_length" structs:"displayname_length"` - RolenameLength int `json:"rolename_length" mapstructure:"rolename_length" structs:"rolename_length"` -} - -const pathRoleHelpSyn = ` -Manage the roles that can be created with this backend. -` - -const pathRoleHelpDesc = ` -This path lets you manage the roles that can be created with this backend. - -The "sql" parameter customizes the SQL string used to create the role. -This can be a sequence of SQL queries, each semi-colon separated. Some -substitution will be done to the SQL string for certain keys. -The names of the variables must be surrounded by "{{" and "}}" to be replaced. - - * "name" - The random username generated for the DB user. - - * "password" - The random password generated for the DB user. - -Example of a decent SQL query to use: - - CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; - GRANT ALL ON db1.* TO '{{name}}'@'%'; - -Note the above user would be able to access anything in db1. Please see the MySQL -manual on the GRANT command to learn how to do more fine-grained access. - -The "rolename_length" parameter determines how many characters of the role name -will be used in creating the generated mysql username; the default is 4. - -The "displayname_length" parameter determines how many characters of the token -display name will be used in creating the generated mysql username; the default -is 4. - -The "username_length" parameter determines how many total characters the -generated username (including the role name, token display name and the uuid -portion) will be truncated to. Versions of MySQL prior to 5.7.8 are limited to -16 characters total (see -http://dev.mysql.com/doc/refman/5.7/en/user-names.html) so that is the default; -for versions >=5.7.8 it is safe to increase this to 32. - -For best readability in MySQL process lists, we recommend using MySQL 5.7.8 or -later, setting "username_length" to 32 and setting both "rolename_length" and -"displayname_length" to 8. However, due to the prevalence of older versions of -MySQL in general deployment, the defaults are currently tuned for a -username_length of 16. -` diff --git a/builtin/logical/mysql/secret_creds.go b/builtin/logical/mysql/secret_creds.go deleted file mode 100644 index 454edbaa927e..000000000000 --- a/builtin/logical/mysql/secret_creds.go +++ /dev/null @@ -1,136 +0,0 @@ -package mysql - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -const SecretCredsType = "creds" - -// defaultRevocationSQL is a default SQL statement for revoking a user. Revoking -// permissions for the user is done before the drop, because MySQL explicitly -// documents that open user connections will not be closed. By revoking all -// grants, at least we ensure that the open connection is useless. Dropping the -// user will only affect the next connection.
-const defaultRevocationSQL = ` -REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%'; -DROP USER '{{name}}'@'%' -` - -func secretCreds(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretCredsType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username", - }, - - "password": { - Type: framework.TypeString, - Description: "Password", - }, - }, - - Renew: b.secretCredsRenew, - Revoke: b.secretCredsRevoke, - } -} - -func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the lease information - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - if lease == nil { - lease = &configLease{} - } - - resp := &logical.Response{Secret: req.Secret} - resp.Secret.TTL = lease.Lease - resp.Secret.MaxTTL = lease.LeaseMax - return resp, nil -} - -func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - var resp *logical.Response - - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - if !ok { - return nil, fmt.Errorf("usernameRaw is not a string") - } - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - roleName := "" - roleNameRaw, ok := req.Secret.InternalData["role"] - if ok { - roleName = roleNameRaw.(string) - } - - var role *roleEntry - if roleName != "" { - role, err = b.Role(ctx, req.Storage, roleName) - if err != nil { - return nil, err - } - } - - // Use a default SQL statement for revocation if one cannot be fetched from the role - revocationSQL := defaultRevocationSQL - - if role != nil && role.RevocationSQL != "" { - revocationSQL = role.RevocationSQL - } else { - if resp == nil { - resp = &logical.Response{} - } - resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default SQL for revoking user.", roleName)) - } - - // Start a transaction - tx, err := db.Begin() - if err != nil { - return nil, err - } - defer tx.Rollback() - - for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - // This is not a prepared statement because not all commands are supported - // 1295: This command is not supported in the prepared statement protocol yet - // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ - query = strings.ReplaceAll(query, "{{name}}", username) - _, err = tx.Exec(query) - if err != nil { - return nil, err - } - - } - - // Commit the transaction - if err := tx.Commit(); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/builtin/logical/mysql/util.go b/builtin/logical/mysql/util.go deleted file mode 100644 index 4ba7c650c208..000000000000 --- a/builtin/logical/mysql/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package mysql - -import ( - "fmt" - "strings" -) - -// Query templates a query for us. 
-func Query(tpl string, data map[string]string) string { - for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) - } - - return tpl -} diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go index e1df32e87a83..4c7e149185e8 100644 --- a/builtin/logical/nomad/backend.go +++ b/builtin/logical/nomad/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -8,6 +11,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixNomad = "nomad" + // Factory returns a Nomad backend that satisfies the logical.Backend interface func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go index 8452c2b019e4..e4e3fcded07a 100644 --- a/builtin/logical/nomad/backend_test.go +++ b/builtin/logical/nomad/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -5,13 +8,14 @@ import ( "fmt" "os" "reflect" + "runtime" "strings" "testing" "time" nomadapi "github.com/hashicorp/nomad/api" "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -35,6 +39,11 @@ func (c *Config) Client() (*nomadapi.Client, error) { } func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if retAddress := os.Getenv("NOMAD_ADDR"); retAddress != "" { s, err := docker.NewServiceURLParse(retAddress) if err != nil { @@ -44,7 +53,7 @@ func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "multani/nomad", + ImageRepo: "docker.mirror.hashicorp.services/multani/nomad", ImageTag: "1.1.6", ContainerName: "nomad", Ports: []string{"4646/tcp"}, diff --git a/builtin/logical/nomad/cmd/nomad/main.go b/builtin/logical/nomad/cmd/nomad/main.go index 31b1c93500e7..493e1be2d5da 100644 --- a/builtin/logical/nomad/cmd/nomad/main.go +++ b/builtin/logical/nomad/cmd/nomad/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: nomad.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go index b482a9c1aca8..cde6f97edb8a 100644 --- a/builtin/logical/nomad/path_config_access.go +++ b/builtin/logical/nomad/path_config_access.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -13,6 +16,11 @@ const configAccessKey = "config/access" func pathConfigAccess(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/access", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + Fields: map[string]*framework.FieldSchema{ "address": { Type: framework.TypeString, @@ -45,11 +53,35 @@ must be x509 PEM encoded and if this is set you need to also set client_cert.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathConfigAccessRead, - logical.CreateOperation: b.pathConfigAccessWrite, - logical.UpdateOperation: b.pathConfigAccessWrite, - logical.DeleteOperation: b.pathConfigAccessDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "access-configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "access-configuration", + }, + }, }, ExistenceCheck: b.configExistenceCheck, diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go index 676e515cb84b..2569a07ade01 100644 --- a/builtin/logical/nomad/path_config_lease.go +++ b/builtin/logical/nomad/path_config_lease.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -13,6 +16,11 @@ const leaseConfigKey = "config/lease" func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -24,10 +32,28 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseUpdate, - logical.DeleteOperation: b.pathLeaseDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathLeaseDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "lease-configuration", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go index 14df1ff939db..9c25bed07dd7 100644 --- a/builtin/logical/nomad/path_creds_create.go +++ b/builtin/logical/nomad/path_creds_create.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -17,6 +20,13 @@ const maxTokenNameLength = 256 func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go index 92109ba74123..4732dec5b8a4 100644 --- a/builtin/logical/nomad/path_roles.go +++ b/builtin/logical/nomad/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( @@ -13,6 +16,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -22,6 +30,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go index fd446f7a6436..2eaf19be2795 100644 --- a/builtin/logical/nomad/secret_token.go +++ b/builtin/logical/nomad/secret_token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package nomad import ( diff --git a/builtin/logical/pki/acme_authorizations.go b/builtin/logical/pki/acme_authorizations.go new file mode 100644 index 000000000000..64548ffed99e --- /dev/null +++ b/builtin/logical/pki/acme_authorizations.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "time" +) + +type ACMEIdentifierType string + +const ( + ACMEDNSIdentifier ACMEIdentifierType = "dns" + ACMEIPIdentifier ACMEIdentifierType = "ip" +) + +type ACMEIdentifier struct { + Type ACMEIdentifierType `json:"type"` + Value string `json:"value"` + OriginalValue string `json:"original_value"` + IsWildcard bool `json:"is_wildcard"` +} + +func (ai *ACMEIdentifier) MaybeParseWildcard() (bool, string, error) { + if ai.Type != ACMEDNSIdentifier || !isWildcardDomain(ai.Value) { + return false, ai.Value, nil + } + + // Here on out, technically it is a wildcard. + ai.IsWildcard = true + + wildcardLabel, reducedName, err := validateWildcardDomain(ai.Value) + if err != nil { + return true, "", err + } + + if wildcardLabel != "*" { + // Per RFC 8555 Section. 7.1.3. Order Objects: + // + // > Any identifier of type "dns" in a newOrder request MAY have a + // > wildcard domain name as its value. A wildcard domain name consists + // > of a single asterisk character followed by a single full stop + // > character ("*.") followed by a domain name as defined for use in the + // > Subject Alternate Name Extension by [RFC5280]. + return true, "", fmt.Errorf("wildcard must be entire left-most label") + } + + if reducedName == "" { + return true, "", fmt.Errorf("wildcard must not be entire domain name; need at least two domain labels") + } + + // Parsing was indeed successful, so update our reduced name. 
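+	// (Illustrative example, not in the original source: "*.example.com"
+	// reaches this point with IsWildcard set and reducedName "example.com",
+	// while "f*.example.com" fails the entire-left-most-label check above
+	// and a bare "*." fails because no domain labels remain.)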
+ ai.Value = reducedName + + return true, reducedName, nil +} + +func (ai *ACMEIdentifier) NetworkMarshal(useOriginalValue bool) map[string]interface{} { + value := ai.OriginalValue + if !useOriginalValue { + value = ai.Value + } + return map[string]interface{}{ + "type": ai.Type, + "value": value, + } +} + +type ACMEAuthorizationStatusType string + +const ( + ACMEAuthorizationPending ACMEAuthorizationStatusType = "pending" + ACMEAuthorizationValid ACMEAuthorizationStatusType = "valid" + ACMEAuthorizationInvalid ACMEAuthorizationStatusType = "invalid" + ACMEAuthorizationDeactivated ACMEAuthorizationStatusType = "deactivated" + ACMEAuthorizationExpired ACMEAuthorizationStatusType = "expired" + ACMEAuthorizationRevoked ACMEAuthorizationStatusType = "revoked" +) + +type ACMEOrderStatusType string + +const ( + ACMEOrderPending ACMEOrderStatusType = "pending" + ACMEOrderProcessing ACMEOrderStatusType = "processing" + ACMEOrderValid ACMEOrderStatusType = "valid" + ACMEOrderInvalid ACMEOrderStatusType = "invalid" + ACMEOrderReady ACMEOrderStatusType = "ready" +) + +type ACMEChallengeType string + +const ( + ACMEHTTPChallenge ACMEChallengeType = "http-01" + ACMEDNSChallenge ACMEChallengeType = "dns-01" + ACMEALPNChallenge ACMEChallengeType = "tls-alpn-01" +) + +type ACMEChallengeStatusType string + +const ( + ACMEChallengePending ACMEChallengeStatusType = "pending" + ACMEChallengeProcessing ACMEChallengeStatusType = "processing" + ACMEChallengeValid ACMEChallengeStatusType = "valid" + ACMEChallengeInvalid ACMEChallengeStatusType = "invalid" +) + +type ACMEChallenge struct { + Type ACMEChallengeType `json:"type"` + Status ACMEChallengeStatusType `json:"status"` + Validated string `json:"validated,optional"` + Error map[string]interface{} `json:"error,optional"` + ChallengeFields map[string]interface{} `json:"challenge_fields"` +} + +func (ac *ACMEChallenge) NetworkMarshal(acmeCtx *acmeContext, authId string) map[string]interface{} { + resp := map[string]interface{}{ + "type": ac.Type, + "url": buildChallengeUrl(acmeCtx, authId, string(ac.Type)), + "status": ac.Status, + } + + if ac.Validated != "" { + resp["validated"] = ac.Validated + } + + if len(ac.Error) > 0 { + resp["error"] = ac.Error + } + + for field, value := range ac.ChallengeFields { + resp[field] = value + } + + return resp +} + +func buildChallengeUrl(acmeCtx *acmeContext, authId, challengeType string) string { + return acmeCtx.baseUrl.JoinPath("/challenge/", authId, challengeType).String() +} + +type ACMEAuthorization struct { + Id string `json:"id"` + AccountId string `json:"account_id"` + + Identifier *ACMEIdentifier `json:"identifier"` + Status ACMEAuthorizationStatusType `json:"status"` + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > This field is REQUIRED for objects with "valid" in the "status" + // > field. 
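+	// Stored as an RFC 3339 timestamp; GetExpires below parses it with
+	// time.RFC3339.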
+ Expires string `json:"expires,optional"` + + Challenges []*ACMEChallenge `json:"challenges"` + Wildcard bool `json:"wildcard"` +} + +func (aa *ACMEAuthorization) GetExpires() (time.Time, error) { + if aa.Expires == "" { + return time.Time{}, nil + } + + return time.Parse(time.RFC3339, aa.Expires) +} + +func (aa *ACMEAuthorization) NetworkMarshal(acmeCtx *acmeContext) map[string]interface{} { + resp := map[string]interface{}{ + "identifier": aa.Identifier.NetworkMarshal( /* use value, not original value */ false), + "status": aa.Status, + "wildcard": aa.Wildcard, + } + + if aa.Expires != "" { + resp["expires"] = aa.Expires + } + + if len(aa.Challenges) > 0 { + challenges := []map[string]interface{}{} + for _, challenge := range aa.Challenges { + challenges = append(challenges, challenge.NetworkMarshal(acmeCtx, aa.Id)) + } + resp["challenges"] = challenges + } + + return resp +} diff --git a/builtin/logical/pki/acme_billing.go b/builtin/logical/pki/acme_billing.go new file mode 100644 index 000000000000..6c66ad447cf0 --- /dev/null +++ b/builtin/logical/pki/acme_billing.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) doTrackBilling(ctx context.Context, identifiers []*ACMEIdentifier) error { + billingView, ok := b.System().(logical.ACMEBillingSystemView) + if !ok { + return fmt.Errorf("failed to perform cast to ACME billing system view interface") + } + + var realized []string + for _, identifier := range identifiers { + realized = append(realized, fmt.Sprintf("%s/%s", identifier.Type, identifier.OriginalValue)) + } + + return billingView.CreateActivityCountEventForIdentifiers(ctx, realized) +} diff --git a/builtin/logical/pki/acme_billing_test.go b/builtin/logical/pki/acme_billing_test.go new file mode 100644 index 000000000000..b17a3492ed13 --- /dev/null +++ b/builtin/logical/pki/acme_billing_test.go @@ -0,0 +1,322 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "strings" + "testing" + "time" + + "golang.org/x/crypto/acme" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/timeutil" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/activity" + + "github.com/stretchr/testify/require" +) + +// TestACMEBilling is a basic test that will validate client counts created via ACME workflows. +func TestACMEBilling(t *testing.T) { + t.Parallel() + timeutil.SkipAtEndOfMonth(t) + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + + // Enable additional mounts. + setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki2") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns1/pki") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns2/pki") + + // Enable custom DNS resolver for testing. + for _, mount := range []string{"pki", "pki2", "ns1/pki", "ns2/pki"} { + _, err := client.Logical().Write(mount+"/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to set local dns resolver address for testing on mount: "+mount) + } + + // Enable client counting. 
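+	// (Counting must be enabled for the /sys/internal/counters/activity
+	// queries made later in this test to return data.)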
+ _, err := client.Logical().Write("/sys/internal/counters/config", map[string]interface{}{ + "enabled": "enable", + }) + require.NoError(t, err, "failed to enable client counting") + + // Setup ACME clients. We refresh account keys each time for consistency. + acmeClientPKI := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", nil) + acmeClientPKI2 := getAcmeClientForCluster(t, cluster, "/v1/pki2/acme/", nil) + acmeClientPKINS1 := getAcmeClientForCluster(t, cluster, "/v1/ns1/pki/acme/", nil) + acmeClientPKINS2 := getAcmeClientForCluster(t, cluster, "/v1/ns2/pki/acme/", nil) + + // Get our initial count. + expectedCount := validateClientCount(t, client, "", -1, "initial fetch") + + // Unique identifier: should increase by one. + doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") + + // Different identifier; should increase by one. + doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") + + // While same identifiers, used together and so thus are unique; increase by one. + doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com", "dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate") + + // Same identifiers in different order are not unique; keep the same. + doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com", "example.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "pki", expectedCount, "different order; same identifiers") + + // Using a different mount shouldn't affect counts. + doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "", expectedCount, "different mount; same identifiers") + + // But using a different identifier should. + doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"pki2.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "pki2", expectedCount+1, "different mount with different identifiers") + + // A new identifier in a unique namespace will affect results. + doACMEForDomainWithDNS(t, dns, acmeClientPKINS1, []string{"unique.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "ns1/pki", expectedCount+1, "unique identifier in a namespace") + + // But in a different namespace with the existing identifier will not. + doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"unique.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier in a namespace") + doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"unique.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier outside of a namespace") + + // Creating a unique identifier in a namespace with a mount with the + // same name as another namespace should increase counts as well. 
+ doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"very-unique.dadgarcorp.com"}) + expectedCount = validateClientCount(t, client, "ns2/pki", expectedCount+1, "unique identifier in a different namespace") + + // Check the current fragment + fragment := cluster.Cores[0].Core.ResetActivityLog()[0] + if fragment == nil { + t.Fatal("no fragment created") + } + validateAcmeClientTypes(t, fragment, expectedCount) +} + +func validateAcmeClientTypes(t *testing.T, fragment *activity.LogFragment, expectedCount int64) { + t.Helper() + if int64(len(fragment.Clients)) != expectedCount { + t.Fatalf("bad number of entities, expected %v: got %v, entities are: %v", expectedCount, len(fragment.Clients), fragment.Clients) + } + + for _, ac := range fragment.Clients { + if ac.ClientType != vault.ACMEActivityType { + t.Fatalf("Couldn't find expected '%v' client_type in %v", vault.ACMEActivityType, fragment.Clients) + } + } +} + +func validateClientCount(t *testing.T, client *api.Client, mount string, expected int64, message string) int64 { + resp, err := client.Logical().Read("/sys/internal/counters/activity/monthly") + require.NoError(t, err, "failed to fetch client count values") + t.Logf("got client count numbers: %v", resp) + + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Contains(t, resp.Data, "non_entity_clients") + require.Contains(t, resp.Data, "months") + + rawCount := resp.Data["non_entity_clients"].(json.Number) + count, err := rawCount.Int64() + require.NoError(t, err, "failed to parse number as int64: "+rawCount.String()) + + if expected != -1 { + require.Equal(t, expected, count, "value of client counts did not match expectations: "+message) + } + + if mount == "" { + return count + } + + months := resp.Data["months"].([]interface{}) + if len(months) > 1 { + t.Fatalf("running across a month boundary despite using SkipAtEndOfMonth(...); rerun test from start fully in the next month instead") + } + + require.Equal(t, 1, len(months), "expected only a single month when running this test") + + monthlyInfo := months[0].(map[string]interface{}) + + // Validate this month's aggregate counts match the overall value. + require.Contains(t, monthlyInfo, "counts", "expected monthly info to contain a count key") + monthlyCounts := monthlyInfo["counts"].(map[string]interface{}) + require.Contains(t, monthlyCounts, "non_entity_clients", "expected month[0].counts to contain a non_entity_clients key") + monthlyCountNonEntityRaw := monthlyCounts["non_entity_clients"].(json.Number) + monthlyCountNonEntity, err := monthlyCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+monthlyCountNonEntityRaw.String()) + require.Equal(t, count, monthlyCountNonEntity, "expected equal values for non entity client counts") + + // Validate this mount's namespace is included in the namespaces list, + // if this is enterprise. Otherwise, if its OSS or we don't have a + // namespace, we default to the value root. 
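+	// Illustrative: on enterprise, mount "ns1/pki" yields mountNamespace
+	// "ns1/" and mountPath "pki/", while a root-level mount "pki" keeps
+	// mountNamespace "" and mountPath "pki/".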
+ mountNamespace := "" + mountPath := mount + "/" + if constants.IsEnterprise && strings.Contains(mount, "/") { + pieces := strings.Split(mount, "/") + require.Equal(t, 2, len(pieces), "we do not support nested namespaces in this test") + mountNamespace = pieces[0] + "/" + mountPath = pieces[1] + "/" + } + + require.Contains(t, monthlyInfo, "namespaces", "expected monthly info to contain a namespaces key") + monthlyNamespaces := monthlyInfo["namespaces"].([]interface{}) + foundNamespace := false + for index, namespaceRaw := range monthlyNamespaces { + namespace := namespaceRaw.(map[string]interface{}) + require.Contains(t, namespace, "namespace_path", "expected monthly.namespaces[%v] to contain a namespace_path key", index) + namespacePath := namespace["namespace_path"].(string) + + if namespacePath != mountNamespace { + t.Logf("skipping non-matching namespace %v: %v != %v / %v", index, namespacePath, mountNamespace, namespace) + continue + } + + foundNamespace = true + + // This namespace must have a non-empty aggregate non-entity count. + require.Contains(t, namespace, "counts", "expected monthly.namespaces[%v] to contain a counts key", index) + namespaceCounts := namespace["counts"].(map[string]interface{}) + require.Contains(t, namespaceCounts, "non_entity_clients", "expected namespace counts to contain a non_entity_clients key") + namespaceCountNonEntityRaw := namespaceCounts["non_entity_clients"].(json.Number) + namespaceCountNonEntity, err := namespaceCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+namespaceCountNonEntityRaw.String()) + require.Greater(t, namespaceCountNonEntity, int64(0), "expected at least one non-entity client count value in the namespace") + + require.Contains(t, namespace, "mounts", "expected monthly.namespaces[%v] to contain a mounts key", index) + namespaceMounts := namespace["mounts"].([]interface{}) + foundMount := false + for mountIndex, mountRaw := range namespaceMounts { + mountInfo := mountRaw.(map[string]interface{}) + require.Contains(t, mountInfo, "mount_path", "expected monthly.namespaces[%v].mounts[%v] to contain a mount_path key", index, mountIndex) + mountInfoPath := mountInfo["mount_path"].(string) + if mountPath != mountInfoPath { + t.Logf("skipping non-matching mount path %v in namespace %v: %v != %v / %v of %v", mountIndex, index, mountPath, mountInfoPath, mountInfo, namespace) + continue + } + + foundMount = true + + // This mount must also have a non-empty non-entity client count. 
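+            // (Concretely, the assertions below walk the JSON path
+            //    months[0].namespaces[i].mounts[j].counts.non_entity_clients
+            // for the namespace/mount pair under test.)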
+            require.Contains(t, mountInfo, "counts", "expected monthly.namespaces[%v].mounts[%v] to contain a counts key", index, mountIndex)
+            mountCounts := mountInfo["counts"].(map[string]interface{})
+            require.Contains(t, mountCounts, "non_entity_clients", "expected mount counts to contain a non_entity_clients key")
+            mountCountNonEntityRaw := mountCounts["non_entity_clients"].(json.Number)
+            mountCountNonEntity, err := mountCountNonEntityRaw.Int64()
+            require.NoError(t, err, "failed to parse number as int64: "+mountCountNonEntityRaw.String())
+            require.Greater(t, mountCountNonEntity, int64(0), "expected at least one non-entity client count value in the mount")
+        }
+
+        require.True(t, foundMount, "expected to find the mount "+mountPath+" in the list of mounts for namespace, but did not")
+    }
+
+    require.True(t, foundNamespace, "expected to find the namespace "+mountNamespace+" in the list of namespaces, but did not")
+
+    return count
+}
+
+func doACMEForDomainWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string) *x509.Certificate {
+    cr := &x509.CertificateRequest{
+        Subject:  pkix.Name{CommonName: domains[0]},
+        DNSNames: domains,
+    }
+
+    return doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr)
+}
+
+func doACMEForCSRWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string, cr *x509.CertificateRequest) *x509.Certificate {
+    accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+    require.NoError(t, err, "failed to generate account key")
+    acmeClient.Key = accountKey
+
+    testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
+    defer cancelFunc()
+
+    // Register the client.
+    _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, func(tosURL string) bool { return true })
+    require.NoError(t, err, "failed registering account")
+
+    // Create the order.
+    var orderIdentifiers []acme.AuthzID
+    for _, domain := range domains {
+        orderIdentifiers = append(orderIdentifiers, acme.AuthzID{Type: "dns", Value: domain})
+    }
+    order, err := acmeClient.AuthorizeOrder(testCtx, orderIdentifiers)
+    require.NoError(t, err, "failed creating ACME order")
+
+    // Fetch its authorizations.
+    var auths []*acme.Authorization
+    for _, authUrl := range order.AuthzURLs {
+        authorization, err := acmeClient.GetAuthorization(testCtx, authUrl)
+        require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl)
+        auths = append(auths, authorization)
+    }
+
+    // For each dns-01 challenge, place the record in the associated DNS resolver.
+    var challengesToAccept []*acme.Challenge
+    for _, auth := range auths {
+        for _, challenge := range auth.Challenges {
+            if challenge.Status != acme.StatusPending {
+                t.Logf("ignoring challenge not in status pending: %v", challenge)
+                continue
+            }
+
+            if challenge.Type == "dns-01" {
+                challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token)
+                require.NoError(t, err, "failed generating challenge response")
+
+                dns.AddRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody)
+                defer dns.RemoveRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody)
+
+                challengesToAccept = append(challengesToAccept, challenge)
+            }
+        }
+    }
+
+    dns.PushConfig()
+    require.GreaterOrEqual(t, len(challengesToAccept), 1, "need at least one challenge, got none")
+
+    // Tell the ACME server that it can now validate those challenges.
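+    // (The TXT values published above are golang.org/x/crypto/acme's
+    // DNS01ChallengeRecord output: the base64url-encoded SHA-256 of
+    // "<token>.<thumbprint>" — the same value that
+    // ValidateSHA256KeyAuthorization recomputes server-side later in this
+    // diff:
+    //
+    //    sum := sha256.Sum256([]byte(token + "." + thumbprint))
+    //    txt := base64.RawURLEncoding.EncodeToString(sum[:])
+    // )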
+    for _, challenge := range challengesToAccept {
+        _, err = acmeClient.Accept(testCtx, challenge)
+        require.NoError(t, err, "failed to accept challenge: %v", challenge)
+    }
+
+    // Wait for the order/challenges to be validated.
+    _, err = acmeClient.WaitOrder(testCtx, order.URI)
+    require.NoError(t, err, "failed waiting for order to be ready")
+
+    // Create the CSR and ask the ACME server to sign it, returning the final certificate.
+    csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+    require.NoError(t, err, "failed generating csr key")
+    csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+    require.NoError(t, err, "failed generating csr")
+
+    certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+    require.NoError(t, err, "failed to get a certificate back from ACME")
+
+    acmeCert, err := x509.ParseCertificate(certs[0])
+    require.NoError(t, err, "failed parsing acme cert bytes")
+
+    return acmeCert
+}
diff --git a/builtin/logical/pki/acme_challenge_engine.go b/builtin/logical/pki/acme_challenge_engine.go
new file mode 100644
index 000000000000..330a55b1ccd7
--- /dev/null
+++ b/builtin/logical/pki/acme_challenge_engine.go
@@ -0,0 +1,563 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+    "container/list"
+    "context"
+    "fmt"
+    "sync"
+    "time"
+
+    "github.com/hashicorp/vault/sdk/helper/consts"
+    "github.com/hashicorp/vault/sdk/logical"
+)
+
+var MaxChallengeTimeout = 1 * time.Minute
+
+const MaxRetryAttempts = 5
+
+const ChallengeAttemptFailedMsg = "this may occur if the validation target was misconfigured: check that challenge responses are available at the required locations and retry."
+
+type ChallengeValidation struct {
+    // Account KID that this validation attempt is recorded under.
+    Account string `json:"account"`
+
+    // The authorization ID that this validation attempt is for.
+    Authorization string            `json:"authorization"`
+    ChallengeType ACMEChallengeType `json:"challenge_type"`
+
+    // The token of this challenge and the JWS thumbprint of the account
+    // we're validating against.
+    Token      string `json:"token"`
+    Thumbprint string `json:"thumbprint"`
+
+    Initiated       time.Time `json:"initiated"`
+    FirstValidation time.Time `json:"first_validation,omitempty"`
+    RetryCount      int       `json:"retry_count,omitempty"`
+    LastRetry       time.Time `json:"last_retry,omitempty"`
+    RetryAfter      time.Time `json:"retry_after,omitempty"`
+}
+
+type ChallengeQueueEntry struct {
+    Identifier string
+    RetryAfter time.Time
+    NumRetries int // Track if we are spinning on a corrupted challenge
+}
+
+type ACMEChallengeEngine struct {
+    NumWorkers int
+
+    ValidationLock sync.Mutex
+    NewValidation  chan string
+    Closing        chan struct{}
+    Validations    *list.List
+}
+
+func NewACMEChallengeEngine() *ACMEChallengeEngine {
+    ace := &ACMEChallengeEngine{}
+    ace.NewValidation = make(chan string, 1)
+    ace.Closing = make(chan struct{}, 1)
+    ace.Validations = list.New()
+    ace.NumWorkers = 5
+
+    return ace
+}
+
+func (ace *ACMEChallengeEngine) LoadFromStorage(b *backend, sc *storageContext) error {
+    items, err := sc.Storage.List(sc.Context, acmeValidationPrefix)
+    if err != nil {
+        return fmt.Errorf("failed loading list of validations from disk: %w", err)
+    }
+
+    ace.ValidationLock.Lock()
+    defer ace.ValidationLock.Unlock()
+
+    // Add them to our queue of validations to work through later.
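+    // (Each item here is the storage suffix created by AcceptChallenge below,
+    // of the form "<authorization-id>-<challenge-type>", e.g.
+    //
+    //    acmeValidationPrefix + "<authorization-id>-dns-01"
+    //
+    // so a restart re-enqueues every validation that was still in flight.)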
+ foundExistingValidations := false + for _, item := range items { + ace.Validations.PushBack(&ChallengeQueueEntry{ + Identifier: item, + }) + foundExistingValidations = true + } + + if foundExistingValidations { + ace.NewValidation <- "existing" + } + + return nil +} + +func (ace *ACMEChallengeEngine) Run(b *backend, state *acmeState, sc *storageContext) { + // We load the existing ACME challenges within the Run thread to avoid + // delaying the PKI mount initialization + b.Logger().Debug("Loading existing challenge validations on disk") + err := ace.LoadFromStorage(b, sc) + if err != nil { + b.Logger().Error("failed loading existing ACME challenge validations:", "err", err) + } + + for { + // err == nil on shutdown. + b.Logger().Debug("Starting ACME challenge validation engine") + err := ace._run(b, state) + if err != nil { + b.Logger().Error("Got unexpected error from ACME challenge validation engine", "err", err) + time.Sleep(1 * time.Second) + continue + } + break + } +} + +func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error { + // This runner uses a background context for storage operations: we don't + // want to tie it to a inbound request and we don't want to set a time + // limit, so create a fresh background context. + runnerSC := b.makeStorageContext(context.Background(), b.storage) + + // We want at most a certain number of workers operating to verify + // challenges. + var finishedWorkersChannels []chan bool + for { + // Wait until we've got more work to do. + select { + case <-ace.Closing: + b.Logger().Debug("shutting down ACME challenge validation engine") + return nil + case <-ace.NewValidation: + } + + // First try to reap any finished workers. Read from their channels + // and if not finished yet, add to a fresh slice. + var newFinishedWorkersChannels []chan bool + for _, channel := range finishedWorkersChannels { + select { + case <-channel: + default: + // This channel had not been written to, indicating that the + // worker had not yet finished. + newFinishedWorkersChannels = append(newFinishedWorkersChannels, channel) + } + } + finishedWorkersChannels = newFinishedWorkersChannels + + // If we have space to take on another work item, do so. + firstIdentifier := "" + startedWork := false + now := time.Now() + for len(finishedWorkersChannels) < ace.NumWorkers { + var task *ChallengeQueueEntry + + // Find our next work item. We do all of these operations + // while holding the queue lock, hence some repeated checks + // afterwards. Out of this, we get a candidate task, using + // element == nil as a sentinel for breaking our parent + // loop. + ace.ValidationLock.Lock() + element := ace.Validations.Front() + if element != nil { + ace.Validations.Remove(element) + task = element.Value.(*ChallengeQueueEntry) + if !task.RetryAfter.IsZero() && now.Before(task.RetryAfter) { + // We cannot work on this element yet; remove it to + // the back of the queue. This allows us to potentially + // select the next item in the next iteration. + ace.Validations.PushBack(task) + } + + if firstIdentifier != "" && task.Identifier == firstIdentifier { + // We found and rejected this element before; exit the + // loop by "claiming" we didn't find any work. + element = nil + } else if firstIdentifier == "" { + firstIdentifier = task.Identifier + } + } + ace.ValidationLock.Unlock() + if element == nil { + // There was no more work to do to fill up the queue; exit + // this loop. 
+ break + } + if now.Before(task.RetryAfter) { + // Here, while we found an element, we didn't want to + // completely exit the loop (perhaps it was our first time + // finding a work order), so retry without modifying + // firstIdentifier. + continue + } + + config, err := state.getConfigWithUpdate(runnerSC) + if err != nil { + return fmt.Errorf("failed fetching ACME configuration: %w", err) + } + + // Since this work item was valid, we won't expect to see it in + // the validation queue again until it is executed. Here, we + // want to avoid infinite looping above (if we removed the one + // valid item and the remainder are all not immediately + // actionable). At the worst, we'll spend a little more time + // looping through the queue until we hit a repeat. + firstIdentifier = "" + + // If we are no longer the active node, break out + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + break + } + + // Here, we got a piece of work that is ready to check; create a + // channel and a new go routine and run it. Note that this still + // could have a RetryAfter date we're not aware of (e.g., if the + // cluster restarted as we do not read the entries there). + channel := make(chan bool, 1) + go ace.VerifyChallenge(runnerSC, task.Identifier, task.NumRetries, channel, config) + finishedWorkersChannels = append(finishedWorkersChannels, channel) + startedWork = true + } + + // If we have no more capacity for work, we should pause a little to + // let the system catch up. Additionally, if we only had + // non-actionable work items, we should pause until some time has + // elapsed: not too much that we potentially starve any new incoming + // items from validation, but not too short that we cause a busy loop. + if len(finishedWorkersChannels) == ace.NumWorkers || !startedWork { + time.Sleep(100 * time.Millisecond) + } + + // Lastly, if we have more work to do, re-trigger ourselves. + ace.ValidationLock.Lock() + if ace.Validations.Front() != nil { + select { + case ace.NewValidation <- "retry": + default: + } + } + ace.ValidationLock.Unlock() + } + + return fmt.Errorf("unexpectedly exited from ACMEChallengeEngine._run()") +} + +func (ace *ACMEChallengeEngine) AcceptChallenge(sc *storageContext, account string, authz *ACMEAuthorization, challenge *ACMEChallenge, thumbprint string) error { + name := authz.Id + "-" + string(challenge.Type) + path := acmeValidationPrefix + name + + entry, err := sc.Storage.Get(sc.Context, path) + if err == nil && entry != nil { + // Challenge already in the queue; exit without re-adding it. + return nil + } + + if authz.Status != ACMEAuthorizationPending { + return fmt.Errorf("%w: cannot accept already validated authorization %v (%v)", ErrMalformed, authz.Id, authz.Status) + } + + for _, otherChallenge := range authz.Challenges { + // We assume within an authorization we won't have multiple challenges of the same challenge type + // and we want to limit a single challenge being in a processing state to avoid race conditions + // failing one challenge and passing another. 
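+        //
+        // (Challenge lifecycle, as implemented across this file:
+        //
+        //    pending --AcceptChallenge--> processing --verification ok--> valid
+        //                                            \-- retries exhausted --> invalid
+        //
+        // Only one challenge per authorization may leave "pending".)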
+        if otherChallenge.Type != challenge.Type && otherChallenge.Status != ACMEChallengePending {
+            return fmt.Errorf("%w: only a single challenge within an authorization can be accepted (%v) in status %v", ErrMalformed, otherChallenge.Type, otherChallenge.Status)
+        }
+
+        // The requested challenge can ping us to wake us up, so allow pending and currently processing statuses.
+        if otherChallenge.Status != ACMEChallengePending && otherChallenge.Status != ACMEChallengeProcessing {
+            return fmt.Errorf("%w: challenge is in invalid state (%v) in authorization %v", ErrMalformed, otherChallenge.Status, authz.Id)
+        }
+    }
+
+    token := challenge.ChallengeFields["token"].(string)
+
+    cv := &ChallengeValidation{
+        Account:       account,
+        Authorization: authz.Id,
+        ChallengeType: challenge.Type,
+        Token:         token,
+        Thumbprint:    thumbprint,
+        Initiated:     time.Now(),
+    }
+
+    json, err := logical.StorageEntryJSON(path, &cv)
+    if err != nil {
+        return fmt.Errorf("error creating challenge validation queue entry: %w", err)
+    }
+
+    if err := sc.Storage.Put(sc.Context, json); err != nil {
+        return fmt.Errorf("error writing challenge validation entry: %w", err)
+    }
+
+    if challenge.Status == ACMEChallengePending {
+        challenge.Status = ACMEChallengeProcessing
+
+        authzPath := getAuthorizationPath(account, authz.Id)
+        if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil {
+            return fmt.Errorf("error saving updated authorization %v: %w", authz.Id, err)
+        }
+    }
+
+    ace.ValidationLock.Lock()
+    defer ace.ValidationLock.Unlock()
+    ace.Validations.PushBack(&ChallengeQueueEntry{
+        Identifier: name,
+    })
+
+    select {
+    case ace.NewValidation <- name:
+    default:
+    }
+
+    return nil
+}
+
+func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, validationQueueRetries int, finished chan bool, config *acmeConfigEntry) {
+    sc, cancel := runnerSc.WithFreshTimeout(MaxChallengeTimeout)
+    defer cancel()
+    runnerSc.Backend.Logger().Debug("Starting verification of challenge", "id", id)
+
+    if retry, retryAfter, err := ace._verifyChallenge(sc, id, config); err != nil {
+        // Because verification of this challenge failed, we need to retry
+        // it in the future. Log the error and re-add the item to the queue
+        // to try again later.
+        sc.Backend.Logger().Error(fmt.Sprintf("ACME validation failed for %v: %v", id, err))
+
+        if retry {
+            validationQueueRetries++
+
+            // The retry logic within _verifyChallenge is dependent on being able to read and decode
+            // the ACME challenge entries. If we encounter such failures we would retry forever, so
+            // we have a secondary check here to see if we have looped through the validation queue
+            // more times than the normal retry attempts would allow.
+            if validationQueueRetries > MaxRetryAttempts*2 {
+                sc.Backend.Logger().Warn(fmt.Sprintf("reached max error attempts within challenge queue: %v, giving up", id))
+                _, _, err = ace._verifyChallengeCleanup(sc, nil, id)
+                if err != nil {
+                    sc.Backend.Logger().Warn(fmt.Sprintf("failed cleaning up challenge entry: %v", err))
+                }
+                finished <- true
+                return
+            }
+
+            ace.ValidationLock.Lock()
+            defer ace.ValidationLock.Unlock()
+            ace.Validations.PushBack(&ChallengeQueueEntry{
+                Identifier: id,
+                RetryAfter: retryAfter,
+                NumRetries: validationQueueRetries,
+            })
+
+            // Let the validator know there's a pending challenge.
+            select {
+            case ace.NewValidation <- id:
+            default:
+            }
+        }
+
+        // We're the only producer on this channel and it has a buffer size
+        // of one element, so it is safe to directly write here.
+ finished <- true + return + } + + // We're the only producer on this channel and it has a buffer size of one + // element, so it is safe to directly write here. + finished <- false +} + +func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string, config *acmeConfigEntry) (bool, time.Time, error) { + now := time.Now() + backoffTime := now.Add(1 * time.Second) + path := acmeValidationPrefix + id + challengeEntry, err := sc.Storage.Get(sc.Context, path) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading challenge %v: %w", id, err) + } + + if challengeEntry == nil { + // Something must've successfully cleaned up our storage entry from + // under us. Assume we don't need to rerun, else the client will + // trigger us to re-run. + return ace._verifyChallengeCleanup(sc, nil, id) + } + + var cv *ChallengeValidation + if err := challengeEntry.DecodeJSON(&cv); err != nil { + return true, backoffTime, fmt.Errorf("error decoding challenge %v: %w", id, err) + } + + if now.Before(cv.RetryAfter) { + return true, cv.RetryAfter, fmt.Errorf("retrying challenge %v too soon", id) + } + + authzPath := getAuthorizationPath(cv.Account, cv.Authorization) + authz, err := loadAuthorizationAtPath(sc, authzPath) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + } + + if authz.Status != ACMEAuthorizationPending { + // Something must've finished up this challenge for us. Assume we + // don't need to rerun and exit instead. + err = nil + return ace._verifyChallengeCleanup(sc, err, id) + } + + var challenge *ACMEChallenge + for _, authzChallenge := range authz.Challenges { + if authzChallenge.Type == cv.ChallengeType { + challenge = authzChallenge + break + } + } + + if challenge == nil { + err = fmt.Errorf("no challenge of type %v in authorization %v/%v for challenge %v", cv.ChallengeType, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if challenge.Status != ACMEChallengePending && challenge.Status != ACMEChallengeProcessing { + err = fmt.Errorf("challenge is in invalid state %v in authorization %v/%v for challenge %v", challenge.Status, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + var valid bool + switch challenge.Type { + case ACMEHTTPChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier && authz.Identifier.Type != ACMEIPIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via http-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateHTTP01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating http-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEDNSChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = 
ValidateDNS01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating dns-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEALPNChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via tls-alpn-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateTLSALPN01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating tls-alpn-01 challenge %v: %s", ErrIncorrectResponse, id, err.Error()) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + default: + err = fmt.Errorf("unsupported ACME challenge type %v for challenge %v", cv.ChallengeType, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if !valid { + err = fmt.Errorf("%w: challenge failed with no additional information", ErrIncorrectResponse) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + // If we got here, the challenge verification was successful. Update + // the authorization appropriately. + expires := now.Add(15 * 24 * time.Hour) + challenge.Status = ACMEChallengeValid + challenge.Validated = now.Format(time.RFC3339) + challenge.Error = nil + authz.Status = ACMEAuthorizationValid + authz.Expires = expires.Format(time.RFC3339) + + if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil { + err = fmt.Errorf("error saving updated (validated) authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + return ace._verifyChallengeCleanup(sc, nil, id) +} + +func (ace *ACMEChallengeEngine) _verifyChallengeRetry(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error, id string) (bool, time.Time, error) { + now := time.Now() + path := acmeValidationPrefix + id + + if err := updateChallengeStatus(sc, cv, authzPath, auth, challenge, verificationErr); err != nil { + return true, now, err + } + + if cv.RetryCount > MaxRetryAttempts { + err := fmt.Errorf("reached max error attempts for challenge %v: %w", id, verificationErr) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if cv.FirstValidation.IsZero() { + cv.FirstValidation = now + } + cv.RetryCount += 1 + cv.LastRetry = now + cv.RetryAfter = now.Add(time.Duration(cv.RetryCount*5) * time.Second) + + json, jsonErr := logical.StorageEntryJSON(path, cv) + if jsonErr != nil { + return true, now, fmt.Errorf("error persisting updated challenge validation queue entry (error prior to retry, if any: %v): %w", verificationErr, jsonErr) + } + + if putErr := sc.Storage.Put(sc.Context, json); putErr != nil { + return true, now, fmt.Errorf("error writing updated challenge validation entry (error prior to retry, if any: %v): %w", verificationErr, putErr) + } + + if verificationErr != nil { + verificationErr = fmt.Errorf("retrying validation: %w", verificationErr) + } + + 
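+    // (Backoff arithmetic: with the linear RetryCount*5s schedule set above
+    // and MaxRetryAttempts = 5, a consistently failing challenge is retried
+    // after 5s, 10s, 15s, 20s, 25s and 30s — roughly 105 seconds of
+    // cumulative wait, ignoring queue latency — before updateChallengeStatus
+    // marks it invalid and the entry is cleaned up.)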
return true, cv.RetryAfter, verificationErr +} + +func updateChallengeStatus(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error) error { + if verificationErr != nil { + challengeError := TranslateErrorToErrorResponse(verificationErr) + challenge.Error = challengeError.MarshalForStorage() + } + + if cv.RetryCount > MaxRetryAttempts { + challenge.Status = ACMEChallengeInvalid + auth.Status = ACMEAuthorizationInvalid + } + + if err := saveAuthorizationAtPath(sc, authzPath, auth); err != nil { + return fmt.Errorf("error persisting authorization/challenge update: %w", err) + } + return nil +} + +func (ace *ACMEChallengeEngine) _verifyChallengeCleanup(sc *storageContext, err error, id string) (bool, time.Time, error) { + now := time.Now() + + // Remove our ChallengeValidation entry only. + if deleteErr := sc.Storage.Delete(sc.Context, acmeValidationPrefix+id); deleteErr != nil { + return true, now.Add(1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr) + } + + if err != nil { + err = fmt.Errorf("removing challenge validation attempt and not retrying %v; previous error: %w", id, err) + } + + return false, now, err +} diff --git a/builtin/logical/pki/acme_challenges.go b/builtin/logical/pki/acme_challenges.go new file mode 100644 index 000000000000..85c051c86e0e --- /dev/null +++ b/builtin/logical/pki/acme_challenges.go @@ -0,0 +1,502 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "strings" + "time" +) + +const ( + DNSChallengePrefix = "_acme-challenge." + ALPNProtocol = "acme-tls/1" +) + +// While this should be a constant, there's no way to do a low-level test of +// ValidateTLSALPN01Challenge without spinning up a complicated Docker +// instance to build a custom responder. Because we already have a local +// toolchain, it is far easier to drive this through Go tests with a custom +// (high) port, rather than requiring permission to bind to port 443 (root-run +// tests are even worse). +var ALPNPort = "443" + +// OID of the acmeIdentifier X.509 Certificate Extension. +var OIDACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +// ValidateKeyAuthorization validates that the given keyAuthz from a challenge +// matches our expectation, returning (true, nil) if so, or (false, err) if +// not. +func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + parts := strings.Split(keyAuthz, ".") + if len(parts) != 2 { + return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", len(parts)) + } + + tokenPart := parts[0] + thumbprintPart := parts[1] + + if token != tokenPart || thumbprint != thumbprintPart { + return false, fmt.Errorf("key authorization was invalid") + } + + return true, nil +} + +// ValidateSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with DNS challenges, which require base64 encoding. +func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + authzContents := token + "." 
+ thumbprint + checksum := sha256.Sum256([]byte(authzContents)) + expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) + + if keyAuthz != expectedAuthz { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +// ValidateRawSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with TLS challenges, which require the raw hash output. +func ValidateRawSHA256KeyAuthorization(keyAuthz []byte, token string, thumbprint string) (bool, error) { + authzContents := token + "." + thumbprint + expectedAuthz := sha256.Sum256([]byte(authzContents)) + + if len(keyAuthz) != len(expectedAuthz) || subtle.ConstantTimeCompare(expectedAuthz[:], keyAuthz) != 1 { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { + if len(config.DNSResolver) == 0 { + return net.DefaultResolver, nil + } + + return &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, config.DNSResolver) + }, + }, nil +} + +func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { + resolver, err := buildResolver(config) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %w", err) + } + + return &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: -1 * time.Second, + Resolver: resolver, + }, nil +} + +// Validates a given ACME http-01 challenge against the specified domain, +// per RFC 8555. +// +// We attempt to be defensive here against timeouts, extra redirects, &c. +func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + path := "http://" + domain + "/.well-known/acme-challenge/" + token + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + transport := &http.Transport{ + // Only a single request is sent to this server as we do not do any + // batching of validation attempts. There is no need to do an HTTP + // KeepAlive as a result. + DisableKeepAlives: true, + MaxIdleConns: 1, + MaxIdleConnsPerHost: 1, + MaxConnsPerHost: 1, + IdleConnTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + + // We'd rather timeout and re-attempt validation later than hang + // too many validators waiting for slow hosts. + DialContext: dialer.DialContext, + ResponseHeaderTimeout: 10 * time.Second, + } + + maxRedirects := 10 + urlLength := 2000 + + client := &http.Client{ + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + if len(via)+1 >= maxRedirects { + return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) + } + + reqUrlLen := len(req.URL.String()) + if reqUrlLen > urlLength { + return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) + } + + return nil + }, + } + + resp, err := client.Get(path) + if err != nil { + return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) + } + + // We provision a buffer which allows for a variable size challenge, some + // whitespace, and a detection gap for too long of a message. 
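+    // (minExpected below is len(token) + 1 + len(thumbprint) — the shortest
+    // possible "<token>.<thumbprint>" body — while maxExpected is a fixed
+    // 512-byte ceiling; reading maxExpected+1 bytes through a LimitReader
+    // lets us distinguish "exactly at the limit" from "over it".)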
+    minExpected := len(token) + 1 + len(thumbprint)
+    maxExpected := 512
+
+    defer resp.Body.Close()
+
+    // Attempt to read the body, but don't do so infinitely.
+    body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1)))
+    if err != nil {
+        return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err)
+    }
+
+    if len(body) > maxExpected {
+        return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected)
+    }
+
+    if len(body) < minExpected {
+        return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected)
+    }
+
+    // Per RFC 8555 Section 8.3. HTTP Challenge:
+    //
+    // > The server SHOULD ignore whitespace characters at the end of the body.
+    keyAuthz := string(body)
+    keyAuthz = strings.TrimSpace(keyAuthz)
+
+    // If we got here, we got no non-EOF error while reading. Try to validate
+    // the token because we're bounded by a reasonable amount of length.
+    return ValidateKeyAuthorization(keyAuthz, token, thumbprint)
+}
+
+func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+    // Here, domain is the value from the post-wildcard-processed identifier.
+    // Per RFC 8555, no difference in validation occurs if a wildcard entry
+    // is requested or if a non-wildcard entry is requested.
+    //
+    // The DNS server here is operator-controlled (or operator-configured)
+    // and assumed to be comparatively trusted. We resolve via buildResolver,
+    // which uses the configured DNSResolver when one is set and falls back
+    // to the default resolver otherwise, and we bound the lookup with a
+    // context timeout below.
+    resolver, err := buildResolver(config)
+    if err != nil {
+        return false, fmt.Errorf("failed to build resolver: %w", err)
+    }
+
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+
+    name := DNSChallengePrefix + domain
+    results, err := resolver.LookupTXT(ctx, name)
+    if err != nil {
+        return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err)
+    }
+
+    for _, keyAuthz := range results {
+        ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint)
+        if ok {
+            return true, nil
+        }
+    }
+
+    return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results))
+}
+
+func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+    // This challenge is defined in RFC 8737, "Automated Certificate
+    // Management Environment (ACME) TLS Application-Layer Protocol
+    // Negotiation (ALPN) Challenge Extension".
+    //
+    // This is conceptually similar to ValidateHTTP01Challenge, but
+    // uses a TLS connection on port 443 with the specified ALPN
+    // protocol.
+
+    cfg := &tls.Config{
+        // Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+        // Negotiation (TLS ALPN) Challenge, the name of the negotiated
+        // protocol is "acme-tls/1".
+        NextProtos: []string{ALPNProtocol},
+
+        // Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+        // Negotiation (TLS ALPN) Challenge:
+        //
+        // > ... and an SNI extension containing only the domain name
+        // > being validated during the TLS handshake.
+        //
+        // According to the Go docs, setting this option (even though
+        // InsecureSkipVerify=true is also specified) allows us to
+        // set the SNI extension to this value.
+ ServerName: domain, + + VerifyConnection: func(connState tls.ConnectionState) error { + // We initiated a fresh connection with no session tickets; + // even if we did have a session ticket, we do not wish to + // use it. Verify that the server has not inadvertently + // reused connections between validation attempts or something. + if connState.DidResume { + return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that during the TLS handshake the + // > application-layer protocol "acme-tls/1" was successfully + // > negotiated (and that the ALPN extension contained only the + // > value "acme-tls/1"). + if connState.NegotiatedProtocol != ALPNProtocol { + return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > and that the certificate returned + // + // Because this certificate MUST be self-signed (per earlier + // statement in RFC 8737 Section 3), there is no point in sending + // more than one certificate, and so we will err early here if + // we got more than one. + if len(connState.PeerCertificates) > 1 { + return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) + } + cert := connState.PeerCertificates[0] + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The client prepares for validation by constructing a + // > self-signed certificate that MUST contain an acmeIdentifier + // > extension and a subjectAlternativeName extension [RFC5280]. + // + // Verify that this is a self-signed certificate that isn't signed + // by another certificate (i.e., with the same key material but + // different issuer). + // NOTE: Do not use cert.CheckSignatureFrom(cert) as we need to bypass the + // checks for the parent certificate having the IsCA basic constraint set. + err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) + if err != nil { + return fmt.Errorf("server under test returned a non-self-signed certificate: %w", err) + } + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The subjectAlternativeName extension MUST contain a single + // > dNSName entry where the value is the domain name being + // > validated. + // + // TODO: this does not validate that there are not other SANs + // with unknown (to Go) OIDs. + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { + return fmt.Errorf("server under test returned a certificate with incorrect SANs") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The comparison of dNSNames MUST be case insensitive + // > [RFC4343]. Note that as ACME doesn't support Unicode + // > identifiers, all dNSNames MUST be encoded using the rules + // > of [RFC3492]. 
+ if !strings.EqualFold(cert.DNSNames[0], domain) { + return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) + } + + // Per above, verify that the acmeIdentifier extension is present + // exactly once and has the correct value. + var foundACMEId bool + for _, ext := range cert.Extensions { + if !ext.Id.Equal(OIDACMEIdentifier) { + continue + } + + // There must be only a single ACME extension. + if foundACMEId { + return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") + } + foundACMEId = true + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > a critical acmeIdentifier extension + if !ext.Critical { + return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") + } + + var keyAuthz []byte + remainder, err := asn1.Unmarshal(ext.Value, &keyAuthz) + if err != nil { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value: %w", err) + } + if len(remainder) > 0 { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value with additional trailing data") + } + + ok, err := ValidateRawSHA256KeyAuthorization(keyAuthz, token, thumbprint) + if !ok || err != nil { + return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) + } + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that ... the certificate returned + // > contains: ... a critical acmeIdentifier extension containing + // > the expected SHA-256 digest computed in step 1. + if !foundACMEId { + return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") + } + + // Remove the handled critical extension and validate that we + // have no additional critical extensions left unhandled. + var index int = -1 + for oidIndex, oid := range cert.UnhandledCriticalExtensions { + if oid.Equal(OIDACMEIdentifier) { + index = oidIndex + break + } + } + if index != -1 { + // Unlike the foundACMEId case, this is not a failure; if Go + // updates to "understand" this critical extension, we do not + // wish to fail. + cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) + } + if len(cert.UnhandledCriticalExtensions) > 0 { + return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) + } + + // All good! + return nil + }, + + // We never want to resume a connection; do not provide session + // cache storage. + ClientSessionCache: nil, + + // Do not trust any system trusted certificates; we're going to be + // manually validating the chain, so specifying a non-empty pool + // here could only cause additional, unnecessary work. + RootCAs: x509.NewCertPool(), + + // Do not bother validating the client's chain; we know it should be + // self-signed. This also disables hostname verification, but we do + // this verification as part of VerifyConnection(...) ourselves. + // + // Per Go docs, this option is only safe in conjunction with + // VerifyConnection which we define above. + InsecureSkipVerify: true, + + // RFC 8737 Section 4. 
acme-tls/1 Protocol Definition: + // + // > ACME servers that implement "acme-tls/1" MUST only negotiate + // > TLS 1.2 [RFC5246] or higher when connecting to clients for + // > validation. + MinVersion: tls.VersionTLS12, + + // While RFC 8737 does not place restrictions around allowed cipher + // suites, we wish to restrict ourselves to secure defaults. Specify + // the Intermediate guideline from Mozilla's TLS config generator to + // disable obviously weak ciphers. + // + // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + }, + } + + // Build a dialer using our custom DNS resolver, to ensure domains get + // resolved according to configuration. + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > 2. The ACME server resolves the domain name being validated and + // > chooses one of the IP addresses returned for validation (the + // > server MAY validate against multiple addresses if more than + // > one is returned). + // > 3. The ACME server initiates a TLS connection to the chosen IP + // > address. This connection MUST use TCP port 443. + address := fmt.Sprintf("%v:"+ALPNPort, domain) + conn, err := dialer.Dial("tcp", address) + if err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) + } + + // Initiate the connection to the remote peer. + client := tls.Client(conn, cfg) + + // We intentionally swallow this error as it isn't useful to the + // underlying protocol we perform here. Notably, per RFC 8737 + // Section 4. acme-tls/1 Protocol Definition: + // + // > Once the handshake is completed, the client MUST NOT exchange + // > any further data with the server and MUST immediately close the + // > connection. ... Because of this, an ACME server MAY choose to + // > withhold authorization if either the certificate signature is + // > invalid or the handshake doesn't fully complete. + defer client.Close() + + // We wish to put time bounds on the total time the handshake can + // stall for, so build a connection context here. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // See note above about why we can allow Handshake to complete + // successfully. + if err := client.HandshakeContext(ctx); err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) + } + return true, nil +} diff --git a/builtin/logical/pki/acme_challenges_test.go b/builtin/logical/pki/acme_challenges_test.go new file mode 100644 index 000000000000..591486d69677 --- /dev/null +++ b/builtin/logical/pki/acme_challenges_test.go @@ -0,0 +1,759 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + + "github.com/stretchr/testify/require" +) + +type keyAuthorizationTestCase struct { + keyAuthz string + token string + thumbprint string + shouldFail bool +} + +var keyAuthorizationTestCases = []keyAuthorizationTestCase{ + { + // Entirely empty + "", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Both empty + ".", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Not equal + "non-.non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty thumbprint + "non-.", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty token + ".non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Wrong order + "non-empty-thumbprint.non-empty-token", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Too many pieces + "one.two.three", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Valid + "non-empty-token.non-empty-thumbprint", + "non-empty-token", + "non-empty-thumbprint", + false, + }, +} + +func TestAcmeValidateKeyAuthorization(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) { + isValid, err := ValidateKeyAuthorization(tc.keyAuthz, tc.token, tc.thumbprint) + if !isValid && err == nil { + st.Fatalf("[%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + st.Fatalf("[%d] got ret=%v, expected ret=%v (shouldFail=%v)", index, isValid, expectedValid, tc.shouldFail) + } + }) + } +} + +func TestAcmeValidateHTTP01Challenge(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + validFunc := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(tc.keyAuthz)) + } + withPadding := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(" " + tc.keyAuthz + " ")) + } + withRedirect := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/.well-known/") { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + return + } + + w.Write([]byte(tc.keyAuthz)) + } + withSleep := func(w http.ResponseWriter, r *http.Request) { + // Long enough to ensure any excessively short timeouts are hit, + // not long enough to trigger a failure (hopefully). 
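+            // (The 5s sleep below sits comfortably under the 10s
+            // ResponseHeaderTimeout that ValidateHTTP01Challenge configures,
+            // while the 30s simulateHang handler further down intentionally
+            // exceeds it.)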
+ time.Sleep(5 * time.Second) + w.Write([]byte(tc.keyAuthz)) + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(validFunc), http.HandlerFunc(withPadding), + http.HandlerFunc(withRedirect), http.HandlerFunc(withSleep), + } + + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) + if !isValid && err == nil { + t.Fatalf("[tc=%d/handler=%d] expected failure to give reason via err (%v / %v)", index, handlerIndex, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d/handler=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, handlerIndex, isValid, err, expectedValid, tc.shouldFail) + } + }() + } + } + + // Negative test cases for various HTTP-specific scenarios. + redirectLoop := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + } + publicRedirect := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "http://hashicorp.com/", 301) + } + noData := func(w http.ResponseWriter, r *http.Request) {} + noContent := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + } + notFound := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + } + simulateHang := func(w http.ResponseWriter, r *http.Request) { + time.Sleep(30 * time.Second) + w.Write([]byte("my-token.my-thumbprint")) + } + tooLarge := func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < 512; i++ { + w.Write([]byte("my-token.my-thumbprint\n")) + } + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(redirectLoop), http.HandlerFunc(publicRedirect), + http.HandlerFunc(noData), http.HandlerFunc(noContent), + http.HandlerFunc(notFound), http.HandlerFunc(simulateHang), + http.HandlerFunc(tooLarge), + } + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, "my-token", "my-thumbprint", &acmeConfigEntry{}) + if isValid || err == nil { + t.Fatalf("[handler=%d] expected failure validating challenge (%v / %v)", handlerIndex, isValid, err) + } + }() + } +} + +func TestAcmeValidateDNS01Challenge(t *testing.T) { + t.Parallel() + + host := "dadgarcorp.com" + resolver := dnstest.SetupResolver(t, host) + defer resolver.Cleanup() + + t.Logf("DNS Server Address: %v", resolver.GetLocalAddr()) + + config := &acmeConfigEntry{ + DNSResolver: resolver.GetLocalAddr(), + } + + for index, tc := range keyAuthorizationTestCases { + checksum := sha256.Sum256([]byte(tc.keyAuthz)) + authz := base64.RawURLEncoding.EncodeToString(checksum[:]) + resolver.AddRecord(DNSChallengePrefix+host, "TXT", authz) + resolver.PushConfig() + + isValid, err := ValidateDNS01Challenge(host, tc.token, tc.thumbprint, config) + if !isValid && err == nil { + t.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) + } + + resolver.RemoveAllRecords() + } +} + +func TestAcmeValidateTLSALPN01Challenge(t *testing.T) { + // This test is not parallel because we modify ALPNPort to use a custom + // non-standard port _just for 
testing purposes_.
+    host := "localhost"
+    config := &acmeConfigEntry{}
+
+    log := hclog.L()
+
+    returnedProtocols := []string{ALPNProtocol}
+    var certificates []*x509.Certificate
+    var privateKey crypto.PrivateKey
+
+    tlsCfg := &tls.Config{}
+    tlsCfg.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) {
+        var retCfg tls.Config = *tlsCfg
+        retCfg.NextProtos = returnedProtocols
+        log.Info(fmt.Sprintf("[alpn-server] returned protocol: %v", returnedProtocols))
+        return &retCfg, nil
+    }
+    tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+        var ret tls.Certificate
+        for index, cert := range certificates {
+            ret.Certificate = append(ret.Certificate, cert.Raw)
+            if index == 0 {
+                ret.Leaf = cert
+            }
+        }
+        ret.PrivateKey = privateKey
+        log.Info(fmt.Sprintf("[alpn-server] returned certificates: %v", ret))
+        return &ret, nil
+    }
+
+    ln, err := tls.Listen("tcp", host+":0", tlsCfg)
+    require.NoError(t, err, "failed to listen with TLS config")
+
+    doOneAccept := func() {
+        log.Info("[alpn-server] starting accept...")
+        connRaw, err := ln.Accept()
+        require.NoError(t, err, "failed to accept TLS connection")
+
+        log.Info("[alpn-server] got connection...")
+        conn := tls.Server(connRaw.(*tls.Conn), tlsCfg)
+
+        ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+        defer func() {
+            log.Info("[alpn-server] canceling listener connection...")
+            cancel()
+        }()
+
+        log.Info("[alpn-server] starting handshake...")
+        if err := conn.HandshakeContext(ctx); err != nil {
+            log.Info(fmt.Sprintf("[alpn-server] got non-fatal error while handshaking connection: %v", err))
+        }
+
+        log.Info("[alpn-server] closing connection...")
+        if err := conn.Close(); err != nil {
+            log.Info(fmt.Sprintf("[alpn-server] got non-fatal error while closing connection: %v", err))
+        }
+    }
+
+    ALPNPort = strings.Split(ln.Addr().String(), ":")[1]
+
+    type alpnTestCase struct {
+        name         string
+        certificates []*x509.Certificate
+        privateKey   crypto.PrivateKey
+        protocols    []string
+        token        string
+        thumbprint   string
+        shouldFail   bool
+    }
+
+    var alpnTestCases []alpnTestCase
+    // Add all of our keyAuthorizationTestCases into alpnTestCases
+    for index, tc := range keyAuthorizationTestCases {
+        log.Info(fmt.Sprintf("using keyAuthorizationTestCase [tc=%d] as alpnTestCase [tc=%d]...", index, len(alpnTestCases)))
+        // Properly encode the authorization.
+        checksum := sha256.Sum256([]byte(tc.keyAuthz))
+        authz, err := asn1.Marshal(checksum[:])
+        require.NoError(t, err, "failed asn.1 marshalling authz")
+
+        // Build a self-signed certificate.
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated certificate") + + newTc := alpnTestCase{ + name: fmt.Sprintf("keyAuthorizationTestCase[%d]", index), + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: tc.token, + thumbprint: tc.thumbprint, + shouldFail: tc.shouldFail, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: Longer chain + // Build a self-signed certificate. + rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating root private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Root CA", + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: rootKey.Public(), + SerialNumber: big.NewInt(1), + BasicConstraintsValid: true, + IsCA: true, + } + rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) + require.NoError(t, err, "failed to create root certificate") + rootCert, err := x509.ParseCertificate(rootCertBytes) + require.NoError(t, err, "failed to parse newly generated root certificate") + + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which _could_ pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl = &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, rootCert, key.Public(), rootKey) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "longer chain with valid leaf", + certificates: []*x509.Certificate{cert, rootCert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without DNSSan + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate without a DNSSan + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + // NO DNSNames + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without valid dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without matching DNSSan + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which fails validation due to bad DNSName + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host + ".dadgarcorp.com" /* not matching host! */}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without matching dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert with additional SAN + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which has an invalid additional SAN + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + EmailAddresses: []string{"webmaster@" + host}, /* unexpected */ + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz with additional email SANs", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without CN + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which should pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid certificate; no Subject/Issuer (missing CN)", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: false, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without the extension + // Build a leaf certificate which should fail validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + BasicConstraintsValid: true, + IsCA: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "missing required acmeIdentifier extension", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: root without a leaf + // Build a self-signed certificate. 
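+ // The validator should only consider the first (leaf) certificate + // presented, so a lone CA certificate, which also lacks the + // acmeIdentifier extension, must fail the challenge.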
+ rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating root private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Root CA", + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: rootKey.Public(), + SerialNumber: big.NewInt(1), + BasicConstraintsValid: true, + IsCA: true, + } + rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) + require.NoError(t, err, "failed to create root certificate") + rootCert, err := x509.ParseCertificate(rootCertBytes) + require.NoError(t, err, "failed to parse newly generated root certificate") + + newTc := alpnTestCase{ + name: "root without leaf", + certificates: []*x509.Certificate{rootCert}, + privateKey: rootKey, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + for index, tc := range alpnTestCases { + log.Info(fmt.Sprintf("\n\n[tc=%d/name=%s] starting validation", index, tc.name)) + certificates = tc.certificates + privateKey = tc.privateKey + returnedProtocols = tc.protocols + + // Attempt to validate the challenge. + go doOneAccept() + isValid, err := ValidateTLSALPN01Challenge(host, tc.token, tc.thumbprint, config) + if !isValid && err == nil { + t.Fatalf("[tc=%d/name=%s] expected failure to give reason via err (%v / %v)", index, tc.name, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d/name=%s] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, tc.name, isValid, err, expectedValid, tc.shouldFail) + } else if err != nil { + log.Info(fmt.Sprintf("[tc=%d/name=%s] got expected failure: err=%v", index, tc.name, err)) + } + } +} + +// TestAcmeValidateHttp01TLSRedirect verifies that we allow an http-01 challenge to redirect +// to a TLS server without validating that its certificate chain is valid. We don't validate the +// TLS chain, as we would have accepted the auth over a non-secured channel anyway had +// the original request not redirected us.
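+// As an illustrative flow: the challenge GET for +// http://host/.well-known/acme-challenge/<token> is answered here with a 301 +// to an httptest TLS server whose self-signed certificate would never chain +// to a trusted root, yet the challenge is still expected to succeed.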
+func TestAcmeValidateHttp01TLSRedirect(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) { + validFunc := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/.well-known/") { + w.Write([]byte(tc.keyAuthz)) + return + } + http.Error(w, "status not found", http.StatusNotFound) + } + + tlsTs := httptest.NewTLSServer(http.HandlerFunc(validFunc)) + defer tlsTs.Close() + + // Set up an HTTP server that will redirect to our TLS server + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, tlsTs.URL+r.URL.Path, http.StatusMovedPermanently) + })) + defer ts.Close() + + host := ts.URL[len("http://"):] + isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) + if !isValid && err == nil { + st.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + st.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) + } + }) + } +} diff --git a/builtin/logical/pki/acme_eab_policy.go b/builtin/logical/pki/acme_eab_policy.go new file mode 100644 index 000000000000..43af5d330055 --- /dev/null +++ b/builtin/logical/pki/acme_eab_policy.go @@ -0,0 +1,69 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "strings" +) + +type EabPolicyName string + +const ( + eabPolicyNotRequired EabPolicyName = "not-required" + eabPolicyNewAccountRequired EabPolicyName = "new-account-required" + eabPolicyAlwaysRequired EabPolicyName = "always-required" +) + +func getEabPolicyByString(name string) (EabPolicy, error) { + lcName := strings.TrimSpace(strings.ToLower(name)) + switch lcName { + case string(eabPolicyNotRequired): + return getEabPolicyByName(eabPolicyNotRequired), nil + case string(eabPolicyNewAccountRequired): + return getEabPolicyByName(eabPolicyNewAccountRequired), nil + case string(eabPolicyAlwaysRequired): + return getEabPolicyByName(eabPolicyAlwaysRequired), nil + default: + return getEabPolicyByName(eabPolicyAlwaysRequired), fmt.Errorf("unknown eab policy name: %s", name) + } +} + +func getEabPolicyByName(name EabPolicyName) EabPolicy { + return EabPolicy{Name: name} +} + +type EabPolicy struct { + Name EabPolicyName +} + +// EnforceForNewAccount enforces, for new account creations, whether an EAB is required. +func (ep EabPolicy) EnforceForNewAccount(eabData *eabType) error { + if (ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired) && eabData == nil { + return ErrExternalAccountRequired + } + + return nil +} + +// EnforceForExistingAccount enforces, for all operations within ACME, that the account being used has an EAB attached when the policy requires one.
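+// Note that under eabPolicyAlwaysRequired this rejects even accounts that +// were registered before the policy was tightened; the other two policies +// only gate account creation.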
+func (ep EabPolicy) EnforceForExistingAccount(account *acmeAccount) error { + if ep.Name == eabPolicyAlwaysRequired && account.Eab == nil { + return ErrExternalAccountRequired + } + + return nil +} + +// IsExternalAccountRequired reports whether an incoming new account is required to have an EAB. +func (ep EabPolicy) IsExternalAccountRequired() bool { + return ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired +} + +// OverrideEnvDisablingPublicAcme determines whether, when ACME is enabled but the OS environment +// variable says to disable public acme support, we can override that environment variable and +// turn on ACME support anyway. +func (ep EabPolicy) OverrideEnvDisablingPublicAcme() bool { + return ep.Name == eabPolicyAlwaysRequired +} diff --git a/builtin/logical/pki/acme_errors.go b/builtin/logical/pki/acme_errors.go new file mode 100644 index 000000000000..3c9c059f7d22 --- /dev/null +++ b/builtin/logical/pki/acme_errors.go @@ -0,0 +1,212 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/logical" +) + +// Error prefix; see RFC 8555 Section 6.7. Errors. +const ( + ErrorPrefix = "urn:ietf:params:acme:error:" + ErrorContentType = "application/problem+json" +) + +// See RFC 8555 Section 6.7. Errors. +var ErrAccountDoesNotExist = errors.New("The request specified an account that does not exist") + +var ErrAcmeDisabled = errors.New("ACME feature is disabled") + +var ( + ErrAlreadyRevoked = errors.New("The request specified a certificate to be revoked that has already been revoked") + ErrBadCSR = errors.New("The CSR is unacceptable") + ErrBadNonce = errors.New("The client sent an unacceptable anti-replay nonce") + ErrBadPublicKey = errors.New("The JWS was signed by a public key the server does not support") + ErrBadRevocationReason = errors.New("The revocation reason provided is not allowed by the server") + ErrBadSignatureAlgorithm = errors.New("The JWS was signed with an algorithm the server does not support") + ErrCAA = errors.New("Certification Authority Authorization (CAA) records forbid the CA from issuing a certificate") + ErrCompound = errors.New("Specific error conditions are indicated in the 'subproblems' array") + ErrConnection = errors.New("The server could not connect to validation target") + ErrDNS = errors.New("There was a problem with a DNS query during identifier validation") + ErrExternalAccountRequired = errors.New("The request must include a value for the 'externalAccountBinding' field") + ErrIncorrectResponse = errors.New("Response received didn't match the challenge's requirements") + ErrInvalidContact = errors.New("A contact URL for an account was invalid") + ErrMalformed = errors.New("The request message was malformed") + ErrOrderNotReady = errors.New("The request attempted to finalize an order that is not ready to be finalized") + ErrRateLimited = errors.New("The request exceeds a rate limit") + ErrRejectedIdentifier = errors.New("The server will not issue certificates for the identifier") + ErrServerInternal = errors.New("The server experienced an internal error") + ErrTLS = errors.New("The server received a TLS error during validation") + ErrUnauthorized = errors.New("The client lacks sufficient authorization") + ErrUnsupportedContact = errors.New("A contact URL for an account used an unsupported protocol scheme") + ErrUnsupportedIdentifier = errors.New("An identifier is of an unsupported
type") + ErrUserActionRequired = errors.New("Visit the 'instance' URL and take actions specified there") +) + +// Mapping of err->name; see table in RFC 8555 Section 6.7. Errors. +var errIdMappings = map[error]string{ + ErrAccountDoesNotExist: "accountDoesNotExist", + ErrAlreadyRevoked: "alreadyRevoked", + ErrBadCSR: "badCSR", + ErrBadNonce: "badNonce", + ErrBadPublicKey: "badPublicKey", + ErrBadRevocationReason: "badRevocationReason", + ErrBadSignatureAlgorithm: "badSignatureAlgorithm", + ErrCAA: "caa", + ErrCompound: "compound", + ErrConnection: "connection", + ErrDNS: "dns", + ErrExternalAccountRequired: "externalAccountRequired", + ErrIncorrectResponse: "incorrectResponse", + ErrInvalidContact: "invalidContact", + ErrMalformed: "malformed", + ErrOrderNotReady: "orderNotReady", + ErrRateLimited: "rateLimited", + ErrRejectedIdentifier: "rejectedIdentifier", + ErrServerInternal: "serverInternal", + ErrTLS: "tls", + ErrUnauthorized: "unauthorized", + ErrUnsupportedContact: "unsupportedContact", + ErrUnsupportedIdentifier: "unsupportedIdentifier", + ErrUserActionRequired: "userActionRequired", +} + +// Mapping of err->status codes; see table in RFC 8555 Section 6.7. Errors. +var errCodeMappings = map[error]int{ + ErrAccountDoesNotExist: http.StatusBadRequest, // See RFC 8555 Section 7.3.1. Finding an Account URL Given a Key. + ErrAlreadyRevoked: http.StatusBadRequest, + ErrBadCSR: http.StatusBadRequest, + ErrBadNonce: http.StatusBadRequest, + ErrBadPublicKey: http.StatusBadRequest, + ErrBadRevocationReason: http.StatusBadRequest, + ErrBadSignatureAlgorithm: http.StatusBadRequest, + ErrCAA: http.StatusForbidden, + ErrCompound: http.StatusBadRequest, + ErrConnection: http.StatusInternalServerError, + ErrDNS: http.StatusInternalServerError, + ErrExternalAccountRequired: http.StatusUnauthorized, + ErrIncorrectResponse: http.StatusBadRequest, + ErrInvalidContact: http.StatusBadRequest, + ErrMalformed: http.StatusBadRequest, + ErrOrderNotReady: http.StatusForbidden, // See RFC 8555 Section 7.4. Applying for Certificate Issuance. 
+ ErrRateLimited: http.StatusTooManyRequests, + ErrRejectedIdentifier: http.StatusBadRequest, + ErrServerInternal: http.StatusInternalServerError, + ErrTLS: http.StatusInternalServerError, + ErrUnauthorized: http.StatusUnauthorized, + ErrUnsupportedContact: http.StatusBadRequest, + ErrUnsupportedIdentifier: http.StatusBadRequest, + ErrUserActionRequired: http.StatusUnauthorized, +} + +type ErrorResponse struct { + StatusCode int `json:"-"` + Type string `json:"type"` + Detail string `json:"detail"` + Subproblems []*ErrorResponse `json:"subproblems"` +} + +func (e *ErrorResponse) MarshalForStorage() map[string]interface{} { + subProblems := []map[string]interface{}{} + for _, subProblem := range e.Subproblems { + subProblems = append(subProblems, subProblem.MarshalForStorage()) + } + return map[string]interface{}{ + "status": e.StatusCode, + "type": e.Type, + "detail": e.Detail, + "subproblems": subProblems, + } +} + +func (e *ErrorResponse) Marshal() (*logical.Response, error) { + body, err := json.Marshal(e) + if err != nil { + return nil, fmt.Errorf("failed marshalling of error response: %w", err) + } + + var resp logical.Response + resp.Data = map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPRawBody: body, + logical.HTTPStatusCode: e.StatusCode, + } + + return &resp, nil +} + +func FindType(given error) (err error, id string, code int, found bool) { + matchedError := false + for err, id = range errIdMappings { + if errors.Is(given, err) { + matchedError = true + break + } + } + + // If the given error was not matched against one of the standard ACME + // errors, force ErrServerInternal + if !matchedError { + err = ErrServerInternal + id = errIdMappings[err] + } + + code = errCodeMappings[err] + found = matchedError + + return +} + +func TranslateError(given error) (*logical.Response, error) { + if errors.Is(given, logical.ErrReadOnly) { + return nil, given + } + + if errors.Is(given, ErrAcmeDisabled) { + return logical.RespondWithStatusCode(nil, nil, http.StatusNotFound) + } + + body := TranslateErrorToErrorResponse(given) + + return body.Marshal() +} + +func TranslateErrorToErrorResponse(given error) ErrorResponse { + // We're multierror aware here: if we're given a list of errors, assume + // they're structured so the first error is the outer error and the inner + // subproblems are subsequent in the multierror. + var remaining []error + if unwrapped, ok := given.(*multierror.Error); ok { + remaining = unwrapped.Errors[1:] + given = unwrapped.Errors[0] + } + + _, id, code, found := FindType(given) + if !found && len(remaining) > 0 { + // Translate multierrors into a generic error code. + id = errIdMappings[ErrCompound] + code = errCodeMappings[ErrCompound] + } + + var body ErrorResponse + body.Type = ErrorPrefix + id + body.Detail = given.Error() + body.StatusCode = code + + for _, subgiven := range remaining { + _, subid, _, _ := FindType(subgiven) + + var sub ErrorResponse + sub.Type = ErrorPrefix + subid + sub.Detail = subgiven.Error() + + body.Subproblems = append(body.Subproblems, &sub) + } + return body +} diff --git a/builtin/logical/pki/acme_jws.go b/builtin/logical/pki/acme_jws.go new file mode 100644 index 000000000000..cc096c55c250 --- /dev/null +++ b/builtin/logical/pki/acme_jws.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "fmt" + "strings" + + "github.com/go-jose/go-jose/v3" +) + +var AllowedOuterJWSTypes = map[string]interface{}{ + "RS256": true, + "RS384": true, + "RS512": true, + "PS256": true, + "PS384": true, + "PS512": true, + "ES256": true, + "ES384": true, + "ES512": true, + "EdDSA2": true, +} + +var AllowedEabJWSTypes = map[string]interface{}{ + "HS256": true, + "HS384": true, + "HS512": true, +} + +// This wraps a JWS message structure. +type jwsCtx struct { + Algo string `json:"alg"` + Kid string `json:"kid"` + Jwk json.RawMessage `json:"jwk"` + Nonce string `json:"nonce"` + Url string `json:"url"` + Key jose.JSONWebKey `json:"-"` + Existing bool `json:"-"` +} + +func (c *jwsCtx) GetKeyThumbprint() (string, error) { + keyThumbprint, err := c.Key.Thumbprint(crypto.SHA256) + if err != nil { + return "", fmt.Errorf("failed creating thumbprint: %w", err) + } + return base64.RawURLEncoding.EncodeToString(keyThumbprint), nil +} + +func UnmarshalEabJwsJson(eabBytes []byte) (*jwsCtx, error) { + var eabJws jwsCtx + var err error + if err = json.Unmarshal(eabBytes, &eabJws); err != nil { + return nil, err + } + + if eabJws.Kid == "" { + return nil, fmt.Errorf("invalid header: got missing required field 'kid': %w", ErrMalformed) + } + + if _, present := AllowedEabJWSTypes[eabJws.Algo]; !present { + return nil, fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + return &eabJws, nil +} + +func (c *jwsCtx) UnmarshalOuterJwsJson(a *acmeState, ac *acmeContext, jws []byte) error { + var err error + if err = json.Unmarshal(jws, c); err != nil { + return err + } + + if c.Kid != "" && len(c.Jwk) > 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The "jwk" and "kid" fields are mutually exclusive. Servers MUST + // > reject requests that contain both. + return fmt.Errorf("invalid header: got both account 'kid' and 'jwk' in the same message; expected only one: %w", ErrMalformed) + } + + if c.Kid == "" && len(c.Jwk) == 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > Either "jwk" (JSON Web Key) or "kid" (Key ID) as specified + // > below + return fmt.Errorf("invalid header: got neither required fields of 'kid' nor 'jwk': %w", ErrMalformed) + } + + if _, present := AllowedOuterJWSTypes[c.Algo]; !present { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Protected Header MUST include the following fields: + // > + // > - "alg" (Algorithm) + // > + // > * This field MUST NOT contain "none" or a Message + // > Authentication Code (MAC) algorithm (e.g. one in which the + // > algorithm registry description mentions MAC/HMAC). + return fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + if c.Kid != "" { + // Load KID from storage first. + kid := getKeyIdFromAccountUrl(c.Kid) + c.Jwk, err = a.LoadJWK(ac, kid) + if err != nil { + return err + } + c.Kid = kid // Use the uuid itself, not the full account url that was originally provided to us. 
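+ // Illustrative example: a kid of the form <cluster-url>/acme/account/<uuid> + // is reduced by getKeyIdFromAccountUrl below to just the trailing <uuid>.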
+ c.Existing = true + } + + if err = c.Key.UnmarshalJSON(c.Jwk); err != nil { + return err + } + + if !c.Key.Valid() { + return fmt.Errorf("received invalid jwk: %w", ErrMalformed) + } + + if c.Kid == "" { + c.Kid = genUuid() + c.Existing = false + } + + return nil +} + +func getKeyIdFromAccountUrl(accountUrl string) string { + pieces := strings.Split(accountUrl, "/") + return pieces[len(pieces)-1] +} + +func hasValues(h jose.Header) bool { + return h.KeyID != "" || h.JSONWebKey != nil || h.Algorithm != "" || h.Nonce != "" || len(h.ExtraHeaders) > 0 +} + +func (c *jwsCtx) VerifyJWS(signature string) (map[string]interface{}, error) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unencoded Payload Option [RFC7797] MUST NOT be used + // + // This is validated by go-jose. + sig, err := jose.ParseSigned(signature) + if err != nil { + return nil, fmt.Errorf("error parsing signature: %s: %w", err, ErrMalformed) + } + + if len(sig.Signatures) > 1 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS MUST NOT have multiple signatures + return nil, fmt.Errorf("request had multiple signatures: %w", ErrMalformed) + } + + if hasValues(sig.Signatures[0].Unprotected) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unprotected Header [RFC7515] MUST NOT be used + return nil, fmt.Errorf("request had unprotected headers: %w", ErrMalformed) + } + + payload, err := sig.Verify(c.Key) + if err != nil { + return nil, err + } + + if len(payload) == 0 { + // Distinguish POST-AS-GET from POST-with-an-empty-body. + return nil, nil + } + + var m map[string]interface{} + if err := json.Unmarshal(payload, &m); err != nil { + return nil, fmt.Errorf("failed to json unmarshal 'payload': %s: %w", err, ErrMalformed) + } + + return m, nil +} + +func verifyEabPayload(acmeState *acmeState, ac *acmeContext, outer *jwsCtx, expectedPath string, payload map[string]interface{}) (*eabType, error) { + // Parse the key out. 
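+ // The externalAccountBinding value is itself a JWS in flattened JSON form: + // 'protected' (a base64url header carrying the EAB kid and an HS* alg), + // 'payload' (the base64url of the account's public JWK), and 'signature'; + // the three pieces are re-joined into compact form for go-jose below.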
+ rawProtectedBase64, ok := payload["protected"] + if !ok { + return nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) + } + jwkBase64 := rawProtectedBase64.(string) + + jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) + if err != nil { + return nil, fmt.Errorf("failed to base64 parse eab 'protected': %s: %w", err, ErrMalformed) + } + + eabJws, err := UnmarshalEabJwsJson(jwkBytes) + if err != nil { + return nil, fmt.Errorf("failed to json unmarshal eab 'protected': %w", err) + } + + if len(eabJws.Url) == 0 { + return nil, fmt.Errorf("missing required parameter 'url' in eab 'protected': %w", ErrMalformed) + } + expectedUrl := ac.clusterUrl.JoinPath(expectedPath).String() + if expectedUrl != eabJws.Url { + return nil, fmt.Errorf("invalid value for 'url' in eab 'protected': got '%v' expected '%v': %w", eabJws.Url, expectedUrl, ErrUnauthorized) + } + + if len(eabJws.Nonce) != 0 { + return nil, fmt.Errorf("nonce should not be provided in eab 'protected': %w", ErrMalformed) + } + + rawPayloadBase64, ok := payload["payload"] + if !ok { + return nil, fmt.Errorf("missing required field eab 'payload': %w", ErrMalformed) + } + payloadBase64, ok := rawPayloadBase64.(string) + if !ok { + return nil, fmt.Errorf("failed to parse 'payload' field: %w", ErrMalformed) + } + + rawSignatureBase64, ok := payload["signature"] + if !ok { + return nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) + } + signatureBase64, ok := rawSignatureBase64.(string) + if !ok { + return nil, fmt.Errorf("failed to parse 'signature' field: %w", ErrMalformed) + } + + // go-jose only seems to support compact signature encodings. + compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64) + sig, err := jose.ParseSigned(compactSig) + if err != nil { + return nil, fmt.Errorf("error parsing eab signature: %s: %w", err, ErrMalformed) + } + + if len(sig.Signatures) > 1 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS MUST NOT have multiple signatures + return nil, fmt.Errorf("eab had multiple signatures: %w", ErrMalformed) + } + + if hasValues(sig.Signatures[0].Unprotected) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unprotected Header [RFC7515] MUST NOT be used + return nil, fmt.Errorf("eab had unprotected headers: %w", ErrMalformed) + } + + // Load the EAB to validate the signature against + eabEntry, err := acmeState.LoadEab(ac.sc, eabJws.Kid) + if err != nil { + return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized) + } + + verifiedPayload, err := sig.Verify(eabEntry.PrivateBytes) + if err != nil { + return nil, err + } + + // Make sure how eab payload matches the outer JWK key value + if !bytes.Equal(outer.Jwk, verifiedPayload) { + return nil, fmt.Errorf("eab payload does not match outer JWK key: %w", ErrMalformed) + } + + if eabEntry.AcmeDirectory != ac.acmeDirectory { + // This EAB was not created for this specific ACME directory, reject it + return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized) + } + + return eabEntry, nil +} diff --git a/builtin/logical/pki/acme_state.go b/builtin/logical/pki/acme_state.go new file mode 100644 index 000000000000..aa0701058439 --- /dev/null +++ b/builtin/logical/pki/acme_state.go @@ -0,0 +1,700 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "path" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-secure-stdlib/nonceutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // How many bytes are in a token. Per RFC 8555 Section + // 8.3. HTTP Challenge and Section 11.3 Token Entropy: + // + // > token (required, string): A random value that uniquely identifies + // > the challenge. This value MUST have at least 128 bits of entropy. + tokenBytes = 128 / 8 + + // Path Prefixes + acmePathPrefix = "acme/" + acmeAccountPrefix = acmePathPrefix + "accounts/" + acmeThumbprintPrefix = acmePathPrefix + "account-thumbprints/" + acmeValidationPrefix = acmePathPrefix + "validations/" + acmeEabPrefix = acmePathPrefix + "eab/" +) + +type acmeState struct { + nonces nonceutil.NonceService + + validator *ACMEChallengeEngine + + configDirty *atomic.Bool + _config sync.RWMutex + config acmeConfigEntry +} + +type acmeThumbprint struct { + Kid string `json:"kid"` + Thumbprint string `json:"-"` +} + +func NewACMEState() *acmeState { + state := &acmeState{ + nonces: nonceutil.NewNonceService(), + validator: NewACMEChallengeEngine(), + configDirty: new(atomic.Bool), + } + // Config hasn't been loaded yet; mark dirty. + state.configDirty.Store(true) + + return state +} + +func (a *acmeState) Initialize(b *backend, sc *storageContext) error { + // Initialize the nonce service. + if err := a.nonces.Initialize(); err != nil { + return fmt.Errorf("failed to initialize the ACME nonce service: %w", err) + } + + // Load the ACME config. + _, err := a.getConfigWithUpdate(sc) + if err != nil { + return fmt.Errorf("error initializing ACME engine: %w", err) + } + + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + // It is assumed, that if the node does become the active node later + // the plugin is re-initialized, so this is safe. It also spares the node + // from loading the existing queue into memory for no reason. + b.Logger().Debug("Not on an active node, skipping starting ACME challenge validation engine") + return nil + } + // Kick off our ACME challenge validation engine. + go a.validator.Run(b, a, sc) + + // All good. + return nil +} + +func (a *acmeState) Shutdown(b *backend) { + // If we aren't the active node, nothing to shutdown + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + return + } + + a.validator.Closing <- struct{}{} +} + +func (a *acmeState) markConfigDirty() { + a.configDirty.Store(true) +} + +func (a *acmeState) reloadConfigIfRequired(sc *storageContext) error { + if !a.configDirty.Load() { + return nil + } + + a._config.Lock() + defer a._config.Unlock() + + if !a.configDirty.Load() { + // Someone beat us to grabbing the above write lock and already + // updated the config. 
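+ // (Classic double-checked locking: a cheap atomic dirty-flag check on + // the read path, re-checked under the write lock before the more + // expensive storage read below.)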
+ return nil + } + + config, err := sc.getAcmeConfig() + if err != nil { + return fmt.Errorf("failed reading ACME config: %w", err) + } + + a.config = *config + a.configDirty.Store(false) + + return nil +} + +func (a *acmeState) getConfigWithUpdate(sc *storageContext) (*acmeConfigEntry, error) { + if err := a.reloadConfigIfRequired(sc); err != nil { + return nil, err + } + + a._config.RLock() + defer a._config.RUnlock() + + configCopy := a.config + return &configCopy, nil +} + +func (a *acmeState) getConfigWithForcedUpdate(sc *storageContext) (*acmeConfigEntry, error) { + a.markConfigDirty() + return a.getConfigWithUpdate(sc) +} + +func (a *acmeState) writeConfig(sc *storageContext, config *acmeConfigEntry) (*acmeConfigEntry, error) { + a._config.Lock() + defer a._config.Unlock() + + if err := sc.setAcmeConfig(config); err != nil { + a.markConfigDirty() + return nil, fmt.Errorf("failed writing ACME config: %w", err) + } + + if config != nil { + a.config = *config + } else { + a.config = defaultAcmeConfig + } + + return config, nil +} + +func generateRandomBase64(srcBytes int) (string, error) { + data := make([]byte, srcBytes) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + return "", err + } + + return base64.RawURLEncoding.EncodeToString(data), nil +} + +func (a *acmeState) GetNonce() (string, time.Time, error) { + return a.nonces.Get() +} + +func (a *acmeState) RedeemNonce(nonce string) bool { + return a.nonces.Redeem(nonce) +} + +func (a *acmeState) DoTidyNonces() { + a.nonces.Tidy() +} + +type ACMEAccountStatus string + +func (aas ACMEAccountStatus) String() string { + return string(aas) +} + +const ( + AccountStatusValid ACMEAccountStatus = "valid" + AccountStatusDeactivated ACMEAccountStatus = "deactivated" + AccountStatusRevoked ACMEAccountStatus = "revoked" +) + +type acmeAccount struct { + KeyId string `json:"-"` + Status ACMEAccountStatus `json:"status"` + Contact []string `json:"contact"` + TermsOfServiceAgreed bool `json:"terms-of-service-agreed"` + Jwk []byte `json:"jwk"` + AcmeDirectory string `json:"acme-directory"` + AccountCreatedDate time.Time `json:"account-created-date"` + MaxCertExpiry time.Time `json:"account-max-cert-expiry"` + AccountRevokedDate time.Time `json:"account-revoked-date"` + Eab *eabType `json:"eab"` +} + +type acmeOrder struct { + OrderId string `json:"-"` + AccountId string `json:"account-id"` + Status ACMEOrderStatusType `json:"status"` + Expires time.Time `json:"expires"` + Identifiers []*ACMEIdentifier `json:"identifiers"` + AuthorizationIds []string `json:"authorization-ids"` + CertificateSerialNumber string `json:"cert-serial-number"` + CertificateExpiry time.Time `json:"cert-expiry"` + // The actual issuer UUID that issued the certificate, blank if an order exists but no certificate was issued. + IssuerId issuing.IssuerID `json:"issuer-id"` +} + +func (o acmeOrder) getIdentifierDNSValues() []string { + var identifiers []string + for _, value := range o.Identifiers { + if value.Type == ACMEDNSIdentifier { + // Here, because of wildcard processing, we need to use the + // original value provided by the caller rather than the + // post-modification (trimmed '*.' prefix) value.
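+ // For example, an order for *.example.com stores Value "example.com" + // but must report OriginalValue "*.example.com" here.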
+ identifiers = append(identifiers, value.OriginalValue) + } + } + return identifiers +} + +func (o acmeOrder) getIdentifierIPValues() []net.IP { + var identifiers []net.IP + for _, value := range o.Identifiers { + if value.Type == ACMEIPIdentifier { + identifiers = append(identifiers, net.ParseIP(value.Value)) + } + } + return identifiers +} + +func (a *acmeState) CreateAccount(ac *acmeContext, c *jwsCtx, contact []string, termsOfServiceAgreed bool, eab *eabType) (*acmeAccount, error) { + // Write out the thumbprint value/entry first; if we get an error mid-way through, + // this is easier to recover from. The new kid with the same existing public key + // will rewrite the thumbprint entry. This goes hand in hand with LoadAccountByKey, which + // will return a nil, nil value if the referenced kid in a loaded thumbprint does not + // exist. This effectively makes this self-healing IF the end-user re-attempts the + // account creation with the same public key. + thumbprint, err := c.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint: %w", err) + } + + thumbPrint := &acmeThumbprint{ + Kid: c.Kid, + Thumbprint: thumbprint, + } + thumbPrintEntry, err := logical.StorageEntryJSON(acmeThumbprintPrefix+thumbprint, thumbPrint) + if err != nil { + return nil, fmt.Errorf("error generating account thumbprint entry: %w", err) + } + + if err = ac.sc.Storage.Put(ac.sc.Context, thumbPrintEntry); err != nil { + return nil, fmt.Errorf("error writing account thumbprint entry: %w", err) + } + + // Now write out the main value that the thumbprint points to. + acct := &acmeAccount{ + KeyId: c.Kid, + Contact: contact, + TermsOfServiceAgreed: termsOfServiceAgreed, + Jwk: c.Jwk, + Status: AccountStatusValid, + AcmeDirectory: ac.acmeDirectory, + AccountCreatedDate: time.Now(), + Eab: eab, + } + json, err := logical.StorageEntryJSON(acmeAccountPrefix+c.Kid, acct) + if err != nil { + return nil, fmt.Errorf("error creating account entry: %w", err) + } + + if err := ac.sc.Storage.Put(ac.sc.Context, json); err != nil { + return nil, fmt.Errorf("error writing account entry: %w", err) + } + + return acct, nil +} + +func (a *acmeState) UpdateAccount(sc *storageContext, acct *acmeAccount) error { + json, err := logical.StorageEntryJSON(acmeAccountPrefix+acct.KeyId, acct) + if err != nil { + return fmt.Errorf("error creating account entry: %w", err) + } + + if err := sc.Storage.Put(sc.Context, json); err != nil { + return fmt.Errorf("error writing account entry: %w", err) + } + + return nil +} + +// LoadAccount will load the account object based on the passed in keyId field value +// otherwise will return an error if the account does not exist. +func (a *acmeState) LoadAccount(ac *acmeContext, keyId string) (*acmeAccount, error) { + entry, err := ac.sc.Storage.Get(ac.sc.Context, acmeAccountPrefix+keyId) + if err != nil { + return nil, fmt.Errorf("error loading account: %w", err) + } + if entry == nil { + return nil, fmt.Errorf("account not found: %w", ErrAccountDoesNotExist) + } + + var acct acmeAccount + err = entry.DecodeJSON(&acct) + if err != nil { + return nil, fmt.Errorf("error decoding account: %w", err) + } + + if acct.AcmeDirectory != ac.acmeDirectory { + return nil, fmt.Errorf("%w: account part of different ACME directory path", ErrMalformed) + } + + acct.KeyId = keyId + + return &acct, nil +} + +// LoadAccountByKey will attempt to load the account based on a key thumbprint. If the thumbprint +// or kid is unknown a nil, nil will be returned.
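+// Callers should therefore treat a (nil, nil) result as "no such account" +// rather than as an error, e.g. when deciding whether a new-account request +// should create a fresh account or return the existing one.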
+func (a *acmeState) LoadAccountByKey(ac *acmeContext, keyThumbprint string) (*acmeAccount, error) { + thumbprintEntry, err := ac.sc.Storage.Get(ac.sc.Context, acmeThumbprintPrefix+keyThumbprint) + if err != nil { + return nil, fmt.Errorf("failed loading acme thumbprintEntry for key: %w", err) + } + if thumbprintEntry == nil { + return nil, nil + } + + var thumbprint acmeThumbprint + err = thumbprintEntry.DecodeJSON(&thumbprint) + if err != nil { + return nil, fmt.Errorf("failed decoding thumbprint entry: %s: %w", keyThumbprint, err) + } + + if len(thumbprint.Kid) == 0 { + return nil, fmt.Errorf("empty kid within thumbprint entry: %s", keyThumbprint) + } + + acct, err := a.LoadAccount(ac, thumbprint.Kid) + if err != nil { + // If we fail to lookup the account that the thumbprint entry references, assume a bad + // write previously occurred in which we managed to write out the thumbprint but failed + // writing out the main account information. + if errors.Is(err, ErrAccountDoesNotExist) { + return nil, nil + } + return nil, err + } + + return acct, nil +} + +func (a *acmeState) LoadJWK(ac *acmeContext, keyId string) ([]byte, error) { + key, err := a.LoadAccount(ac, keyId) + if err != nil { + return nil, err + } + + if len(key.Jwk) == 0 { + return nil, fmt.Errorf("malformed key entry lacks JWK") + } + + return key.Jwk, nil +} + +func (a *acmeState) LoadAuthorization(ac *acmeContext, userCtx *jwsCtx, authId string) (*ACMEAuthorization, error) { + if authId == "" { + return nil, fmt.Errorf("malformed authorization identifier") + } + + authorizationPath := getAuthorizationPath(userCtx.Kid, authId) + + authz, err := loadAuthorizationAtPath(ac.sc, authorizationPath) + if err != nil { + return nil, err + } + + if userCtx.Kid != authz.AccountId { + return nil, ErrUnauthorized + } + + return authz, nil +} + +func loadAuthorizationAtPath(sc *storageContext, authorizationPath string) (*ACMEAuthorization, error) { + entry, err := sc.Storage.Get(sc.Context, authorizationPath) + if err != nil { + return nil, fmt.Errorf("error loading authorization: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("authorization does not exist: %w", ErrMalformed) + } + + var authz ACMEAuthorization + err = entry.DecodeJSON(&authz) + if err != nil { + return nil, fmt.Errorf("error decoding authorization: %w", err) + } + + return &authz, nil +} + +func (a *acmeState) SaveAuthorization(ac *acmeContext, authz *ACMEAuthorization) error { + path := getAuthorizationPath(authz.AccountId, authz.Id) + return saveAuthorizationAtPath(ac.sc, path, authz) +} + +func saveAuthorizationAtPath(sc *storageContext, path string, authz *ACMEAuthorization) error { + if authz.Id == "" { + return fmt.Errorf("invalid authorization, missing id") + } + + if authz.AccountId == "" { + return fmt.Errorf("invalid authorization, missing account id") + } + + json, err := logical.StorageEntryJSON(path, authz) + if err != nil { + return fmt.Errorf("error creating authorization entry: %w", err) + } + + if err = sc.Storage.Put(sc.Context, json); err != nil { + return fmt.Errorf("error writing authorization entry: %w", err) + } + + return nil +} + +func (a *acmeState) ParseRequestParams(ac *acmeContext, req *logical.Request, data *framework.FieldData) (*jwsCtx, map[string]interface{}, error) { + var c jwsCtx + var m map[string]interface{} + + // Parse the key out. 
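+ // ACME requests arrive as a flattened JWS JSON object, so the fields of + // interest are 'protected' (a base64url JSON header carrying alg, nonce, + // url, and either jwk or kid), plus 'payload' and 'signature'.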
+ rawJWKBase64, ok := data.GetOk("protected") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) + } + jwkBase64 := rawJWKBase64.(string) + + jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) + if err != nil { + return nil, nil, fmt.Errorf("failed to base64 parse 'protected': %s: %w", err, ErrMalformed) + } + if err = c.UnmarshalOuterJwsJson(a, ac, jwkBytes); err != nil { + return nil, nil, fmt.Errorf("failed to json unmarshal 'protected': %w", err) + } + + // Since we already parsed the header to verify the JWS context, we + // should read and redeem the nonce here too, to avoid doing any extra + // work if it is invalid. + if !a.RedeemNonce(c.Nonce) { + return nil, nil, fmt.Errorf("invalid or reused nonce: %w", ErrBadNonce) + } + + // If the path is incorrect, reject the request. + // + // See RFC 8555 Section 6.4. Request URL Integrity: + // + // > As noted in Section 6.2, all ACME request objects carry a "url" + // > header parameter in their protected header. ... On receiving such + // > an object in an HTTP request, the server MUST compare the "url" + // > header parameter to the request URL. If the two do not match, + // > then the server MUST reject the request as unauthorized. + if len(c.Url) == 0 { + return nil, nil, fmt.Errorf("missing required parameter 'url' in 'protected': %w", ErrMalformed) + } + if ac.clusterUrl.JoinPath(req.Path).String() != c.Url { + return nil, nil, fmt.Errorf("invalid value for 'url' in 'protected': got '%v' expected '%v': %w", c.Url, ac.clusterUrl.JoinPath(req.Path).String(), ErrUnauthorized) + } + + rawPayloadBase64, ok := data.GetOk("payload") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'payload': %w", ErrMalformed) + } + payloadBase64 := rawPayloadBase64.(string) + + rawSignatureBase64, ok := data.GetOk("signature") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) + } + signatureBase64 := rawSignatureBase64.(string) + + // go-jose only seems to support compact signature encodings. 
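+ // Compact serialization (RFC 7515) is just + // BASE64URL(protected) '.' BASE64URL(payload) '.' BASE64URL(signature), + // which the Sprintf below reassembles from the request fields.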
+ compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64) + m, err = c.VerifyJWS(compactSig) + if err != nil { + return nil, nil, fmt.Errorf("failed to verify signature: %w", err) + } + + return &c, m, nil +} + +func (a *acmeState) LoadOrder(ac *acmeContext, userCtx *jwsCtx, orderId string) (*acmeOrder, error) { + path := getOrderPath(userCtx.Kid, orderId) + entry, err := ac.sc.Storage.Get(ac.sc.Context, path) + if err != nil { + return nil, fmt.Errorf("error loading order: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("order does not exist: %w", ErrMalformed) + } + + var order acmeOrder + err = entry.DecodeJSON(&order) + if err != nil { + return nil, fmt.Errorf("error decoding order: %w", err) + } + + if userCtx.Kid != order.AccountId { + return nil, ErrUnauthorized + } + + order.OrderId = orderId + + return &order, nil +} + +func (a *acmeState) SaveOrder(ac *acmeContext, order *acmeOrder) error { + if order.OrderId == "" { + return fmt.Errorf("invalid order, missing order id") + } + + if order.AccountId == "" { + return fmt.Errorf("invalid order, missing account id") + } + path := getOrderPath(order.AccountId, order.OrderId) + json, err := logical.StorageEntryJSON(path, order) + if err != nil { + return fmt.Errorf("error serializing order entry: %w", err) + } + + if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { + return fmt.Errorf("error writing order entry: %w", err) + } + + return nil +} + +func (a *acmeState) ListOrderIds(sc *storageContext, accountId string) ([]string, error) { + accountOrderPrefixPath := acmeAccountPrefix + accountId + "/orders/" + + rawOrderIds, err := sc.Storage.List(sc.Context, accountOrderPrefixPath) + if err != nil { + return nil, fmt.Errorf("failed listing order ids for account %s: %w", accountId, err) + } + + orderIds := []string{} + for _, order := range rawOrderIds { + if strings.HasSuffix(order, "/") { + // skip any folders we might have for some reason + continue + } + orderIds = append(orderIds, order) + } + return orderIds, nil +} + +type acmeCertEntry struct { + Serial string `json:"-"` + Account string `json:"-"` + Order string `json:"order"` +} + +func (a *acmeState) TrackIssuedCert(ac *acmeContext, accountId string, serial string, orderId string) error { + path := getAcmeSerialToAccountTrackerPath(accountId, serial) + entry := acmeCertEntry{ + Order: orderId, + } + + json, err := logical.StorageEntryJSON(path, &entry) + if err != nil { + return fmt.Errorf("error serializing acme cert entry: %w", err) + } + + if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil { + return fmt.Errorf("error writing acme cert entry: %w", err) + } + + return nil +} + +func (a *acmeState) GetIssuedCert(ac *acmeContext, accountId string, serial string) (*acmeCertEntry, error) { + path := acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial) + + entry, err := ac.sc.Storage.Get(ac.sc.Context, path) + if err != nil { + return nil, fmt.Errorf("error loading acme cert entry: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("no certificate with this serial was issued for this account") + } + + var cert acmeCertEntry + err = entry.DecodeJSON(&cert) + if err != nil { + return nil, fmt.Errorf("error decoding acme cert entry: %w", err) + } + + cert.Serial = denormalizeSerial(serial) + cert.Account = accountId + + return &cert, nil +} + +func (a *acmeState) SaveEab(sc *storageContext, eab *eabType) error { + json, err := logical.StorageEntryJSON(path.Join(acmeEabPrefix, eab.KeyID), eab) + if err != 
nil { + return err + } + return sc.Storage.Put(sc.Context, json) +} + +func (a *acmeState) LoadEab(sc *storageContext, eabKid string) (*eabType, error) { + rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid)) + if err != nil { + return nil, err + } + if rawEntry == nil { + return nil, fmt.Errorf("%w: no eab found for kid %s", ErrStorageItemNotFound, eabKid) + } + + var eab eabType + err = rawEntry.DecodeJSON(&eab) + if err != nil { + return nil, err + } + + eab.KeyID = eabKid + return &eab, nil +} + +func (a *acmeState) DeleteEab(sc *storageContext, eabKid string) (bool, error) { + rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid)) + if err != nil { + return false, err + } + if rawEntry == nil { + return false, nil + } + + err = sc.Storage.Delete(sc.Context, path.Join(acmeEabPrefix, eabKid)) + if err != nil { + return false, err + } + return true, nil +} + +func (a *acmeState) ListEabIds(sc *storageContext) ([]string, error) { + entries, err := sc.Storage.List(sc.Context, acmeEabPrefix) + if err != nil { + return nil, err + } + var ids []string + for _, entry := range entries { + if strings.HasSuffix(entry, "/") { + continue + } + ids = append(ids, entry) + } + + return ids, nil +} + +func getAcmeSerialToAccountTrackerPath(accountId string, serial string) string { + return acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial) +} + +func getAuthorizationPath(accountId string, authId string) string { + return acmeAccountPrefix + accountId + "/authorizations/" + authId +} + +func getOrderPath(accountId string, orderId string) string { + return acmeAccountPrefix + accountId + "/orders/" + orderId +} + +func getACMEToken() (string, error) { + return generateRandomBase64(tokenBytes) +} diff --git a/builtin/logical/pki/acme_state_test.go b/builtin/logical/pki/acme_state_test.go new file mode 100644 index 000000000000..ed9586e834e2 --- /dev/null +++ b/builtin/logical/pki/acme_state_test.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAcmeNonces(t *testing.T) { + t.Parallel() + + a := NewACMEState() + require.NoError(t, a.nonces.Initialize()) + + // Simple operation should succeed. + nonce, _, err := a.GetNonce() + require.NoError(t, err) + require.NotEmpty(t, nonce) + + require.True(t, a.RedeemNonce(nonce)) + require.False(t, a.RedeemNonce(nonce)) + + // Redeeming in opposite order should work. + var nonces []string + for i := 0; i < len(nonce); i++ { + nonce, _, err = a.GetNonce() + require.NoError(t, err) + require.NotEmpty(t, nonce) + nonces = append(nonces, nonce) + } + + for i := len(nonces) - 1; i >= 0; i-- { + nonce = nonces[i] + require.True(t, a.RedeemNonce(nonce)) + } + + for i := 0; i < len(nonces); i++ { + nonce = nonces[i] + require.False(t, a.RedeemNonce(nonce)) + } +} diff --git a/builtin/logical/pki/acme_wrappers.go b/builtin/logical/pki/acme_wrappers.go new file mode 100644 index 000000000000..ff3e72b9356f --- /dev/null +++ b/builtin/logical/pki/acme_wrappers.go @@ -0,0 +1,512 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" +) + +type acmeContext struct { + // baseUrl is the combination of the configured cluster local URL and the acmePath up to /acme/ + baseUrl *url.URL + clusterUrl *url.URL + sc *storageContext + role *issuing.RoleEntry + issuer *issuing.IssuerEntry + // acmeDirectory is a string that can distinguish the various acme directories we have configured + // if something needs to remain locked into a directory path structure. + acmeDirectory string + eabPolicy EabPolicy + ciepsPolicy string + runtimeOpts acmeWrapperOpts +} + +func (c acmeContext) getAcmeState() *acmeState { + return c.sc.Backend.GetAcmeState() +} + +type ( + acmeOperation func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) + acmeParsedOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) + acmeAccountRequiredOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, acct *acmeAccount) (*logical.Response, error) +) + +// setupAcmeDirectory will populate a prefix'd URL with all the paths required +// for a given ACME directory. +func setupAcmeDirectory(b *backend, acmePrefix string, unauthPrefix string, opts acmeWrapperOpts) { + acmePrefix = strings.TrimRight(acmePrefix, "/") + unauthPrefix = strings.TrimRight(unauthPrefix, "/") + + b.Backend.Paths = append(b.Backend.Paths, pathAcmeDirectory(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNonce(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewAccount(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeUpdateAccount(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeGetOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeListOrders(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeFinalizeOrder(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeFetchOrderCert(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeChallenge(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeAuthorization(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeRevoke(b, acmePrefix, opts)) + b.Backend.Paths = append(b.Backend.Paths, pathAcmeNewEab(b, acmePrefix)) // auth'd API that lives underneath the various /acme paths + + // Add specific un-auth'd paths for ACME APIs + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/directory") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-nonce") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-account") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/new-order") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/revoke-cert") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/key-change") + 
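+ // The '+' segments below are single-element path wildcards, so e.g. + // unauthPrefix+"/order/+/cert" matches .../order/<order-id>/cert for any + // order id.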
b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/account/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/authorization/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/challenge/+/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/orders") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+/finalize") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, unauthPrefix+"/order/+/cert") + // We specifically do NOT add acme/new-eab to this as it should be auth'd +} + +// acmeErrorWrapper the lowest level wrapper that will translate errors into proper ACME error responses +func acmeErrorWrapper(op framework.OperationFunc) framework.OperationFunc { + return func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { + resp, err := op(ctx, r, data) + if err != nil { + return TranslateError(err) + } + + return resp, nil + } +} + +type acmeWrapperOpts struct { + isDefault bool + isCiepsEnabled bool +} + +func (o acmeWrapperOpts) Clone() acmeWrapperOpts { + return acmeWrapperOpts{ + isDefault: o.isDefault, + isCiepsEnabled: o.isCiepsEnabled, + } +} + +// acmeWrapper a basic wrapper that all ACME handlers should leverage as the basis. +// This will create a basic ACME context, validate basic ACME configuration is setup +// for operations. This pulls in acmeErrorWrapper to translate error messages for users, +// but does not enforce any sort of ACME authentication. +func (b *backend) acmeWrapper(opts acmeWrapperOpts, op acmeOperation) framework.OperationFunc { + return acmeErrorWrapper(func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, r.Storage) + + config, err := sc.Backend.GetAcmeState().getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("failed to fetch ACME configuration: %w", err) + } + + // use string form in case someone messes up our config from raw storage. + eabPolicy, err := getEabPolicyByString(string(config.EabPolicyName)) + if err != nil { + return nil, err + } + + if isAcmeDisabled(sc, config, eabPolicy) { + return nil, ErrAcmeDisabled + } + + if b.UseLegacyBundleCaStorage() { + return nil, fmt.Errorf("%w: Can not perform ACME operations until migration has completed", ErrServerInternal) + } + + acmeBaseUrl, clusterBase, err := getAcmeBaseUrl(sc, r) + if err != nil { + return nil, err + } + + role, issuer, err := getAcmeRoleAndIssuer(sc, data, config) + if err != nil { + return nil, err + } + + acmeDirectory, err := getAcmeDirectory(r) + if err != nil { + return nil, err + } + + isCiepsEnabled, ciepsPolicy, err := getCiepsAcmeSettings(sc, opts, config, data) + if err != nil { + return nil, err + } + runtimeOpts := opts.Clone() + + if isCiepsEnabled { + // We need to possibly reset the isCiepsEnabled option to true if we are in + // the default folder with the external-policy set as it would have been + // normally disabled. 
+// acmeWrapper is a basic wrapper that all ACME handlers should leverage as the basis.
+// It creates a basic ACME context and validates that basic ACME configuration is set
+// up for operations. It pulls in acmeErrorWrapper to translate error messages for
+// users, but does not enforce any sort of ACME authentication.
+func (b *backend) acmeWrapper(opts acmeWrapperOpts, op acmeOperation) framework.OperationFunc {
+	return acmeErrorWrapper(func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		sc := b.makeStorageContext(ctx, r.Storage)
+
+		config, err := sc.Backend.GetAcmeState().getConfigWithUpdate(sc)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch ACME configuration: %w", err)
+		}
+
+		// use string form in case someone messes up our config from raw storage.
+		eabPolicy, err := getEabPolicyByString(string(config.EabPolicyName))
+		if err != nil {
+			return nil, err
+		}
+
+		if isAcmeDisabled(sc, config, eabPolicy) {
+			return nil, ErrAcmeDisabled
+		}
+
+		if b.UseLegacyBundleCaStorage() {
+			return nil, fmt.Errorf("%w: cannot perform ACME operations until migration has completed", ErrServerInternal)
+		}
+
+		acmeBaseUrl, clusterBase, err := getAcmeBaseUrl(sc, r)
+		if err != nil {
+			return nil, err
+		}
+
+		role, issuer, err := getAcmeRoleAndIssuer(sc, data, config)
+		if err != nil {
+			return nil, err
+		}
+
+		acmeDirectory, err := getAcmeDirectory(r)
+		if err != nil {
+			return nil, err
+		}
+
+		isCiepsEnabled, ciepsPolicy, err := getCiepsAcmeSettings(sc, opts, config, data)
+		if err != nil {
+			return nil, err
+		}
+		runtimeOpts := opts.Clone()
+
+		if isCiepsEnabled {
+			// We may need to reset the isCiepsEnabled option to true if we are in the
+			// default directory with the external-policy set, as it would normally
+			// have been disabled.
+			if runtimeOpts.isDefault {
+				runtimeOpts.isCiepsEnabled = true
+			}
+		}
+
+		acmeCtx := &acmeContext{
+			baseUrl:       acmeBaseUrl,
+			clusterUrl:    clusterBase,
+			sc:            sc,
+			role:          role,
+			issuer:        issuer,
+			acmeDirectory: acmeDirectory,
+			eabPolicy:     eabPolicy,
+			ciepsPolicy:   ciepsPolicy,
+			runtimeOpts:   runtimeOpts,
+		}
+
+		return op(acmeCtx, r, data)
+	})
+}
+
+// acmeParsedWrapper is an ACME wrapper that will parse out the ACME request parameters,
+// validate that we have a proper signature, and pass to the operation a decoded map of
+// the arguments received. This wrapper builds on top of acmeWrapper. Note that while
+// this does perform signature verification, it does not enforce that the account exists
+// or is in a valid state.
+func (b *backend) acmeParsedWrapper(opt acmeWrapperOpts, op acmeParsedOperation) framework.OperationFunc {
+	return b.acmeWrapper(opt, func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData) (*logical.Response, error) {
+		user, data, err := b.GetAcmeState().ParseRequestParams(acmeCtx, r, fields)
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := op(acmeCtx, r, fields, user, data)
+
+		// Our response handlers might not add the necessary headers.
+		if resp != nil {
+			if resp.Headers == nil {
+				resp.Headers = map[string][]string{}
+			}
+
+			if _, ok := resp.Headers["Replay-Nonce"]; !ok {
+				nonce, _, err := b.GetAcmeState().GetNonce()
+				if err != nil {
+					return nil, err
+				}
+
+				resp.Headers["Replay-Nonce"] = []string{nonce}
+			}
+
+			if _, ok := resp.Headers["Link"]; !ok {
+				resp.Headers["Link"] = genAcmeLinkHeader(acmeCtx)
+			} else {
+				directory := genAcmeLinkHeader(acmeCtx)[0]
+				addDirectory := true
+				for _, item := range resp.Headers["Link"] {
+					if item == directory {
+						addDirectory = false
+						break
+					}
+				}
+				if addDirectory {
+					resp.Headers["Link"] = append(resp.Headers["Link"], directory)
+				}
+			}
+
+			// ACME responses don't understand Vault's default encoding
+			// format. Rather than expecting everything to handle creating
+			// ACME-formatted responses, do the marshaling in one place.
+			if _, ok := resp.Data[logical.HTTPRawBody]; !ok {
+				ignored_values := map[string]bool{logical.HTTPContentType: true, logical.HTTPStatusCode: true}
+				fields := map[string]interface{}{}
+				body := map[string]interface{}{
+					logical.HTTPContentType: "application/json",
+					logical.HTTPStatusCode:  http.StatusOK,
+				}
+
+				for key, value := range resp.Data {
+					if _, present := ignored_values[key]; !present {
+						fields[key] = value
+					} else {
+						body[key] = value
+					}
+				}
+
+				rawBody, err := json.Marshal(fields)
+				if err != nil {
+					return nil, fmt.Errorf("error marshaling JSON body: %w", err)
+				}
+
+				body[logical.HTTPRawBody] = rawBody
+				resp.Data = body
+			}
+		}
+
+		return resp, err
+	})
+}
+
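+// As a rough usage sketch (not taken verbatim from this change; the pattern
+// and handler name are hypothetical), a path wires one of these wrappers in
+// as its operation callback:
+//
+//	&framework.Path{
+//		Pattern: pattern,
+//		Operations: map[logical.Operation]framework.OperationHandler{
+//			logical.UpdateOperation: &framework.PathOperation{
+//				Callback: b.acmeAccountRequiredWrapper(opts, b.acmeExampleHandler),
+//			},
+//		},
+//	}
+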
+// acmeAccountRequiredWrapper builds on top of acmeParsedWrapper, enforcing that the
+// request has a proper signature for an existing account, and that the account is
+// in a valid status. It passes to the operation a decoded form of the request
+// parameters as well as the ACME account the request is for.
+func (b *backend) acmeAccountRequiredWrapper(opt acmeWrapperOpts, op acmeAccountRequiredOperation) framework.OperationFunc {
+	return b.acmeParsedWrapper(opt, func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}) (*logical.Response, error) {
+		if !uc.Existing {
+			return nil, fmt.Errorf("cannot process request without a 'kid': %w", ErrMalformed)
+		}
+
+		account, err := requireValidAcmeAccount(acmeCtx, uc)
+		if err != nil {
+			return nil, err
+		}
+
+		return op(acmeCtx, r, fields, uc, data, account)
+	})
+}
+
+// requireValidAcmeAccount loads the account referenced by the JWS 'kid', applies the
+// configured EAB policy, and ensures the account is still in the valid status.
+func requireValidAcmeAccount(acmeCtx *acmeContext, uc *jwsCtx) (*acmeAccount, error) {
+	account, err := acmeCtx.getAcmeState().LoadAccount(acmeCtx, uc.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+
+	if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil {
+		return nil, err
+	}
+
+	if account.Status != AccountStatusValid {
+		// Treating "revoked" and "deactivated" as the same here.
+		return nil, fmt.Errorf("%w: account in status: %s", ErrUnauthorized, account.Status)
+	}
+	return account, nil
+}
+
+func getAcmeBaseUrl(sc *storageContext, r *logical.Request) (*url.URL, *url.URL, error) {
+	baseUrl, err := getBasePathFromClusterConfig(sc)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	directoryPrefix, err := getAcmeDirectory(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return baseUrl.JoinPath(directoryPrefix), baseUrl, nil
+}
+
+func getBasePathFromClusterConfig(sc *storageContext) (*url.URL, error) {
+	cfg, err := sc.getClusterConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed loading cluster config: %w", err)
+	}
+
+	if cfg.Path == "" {
+		return nil, fmt.Errorf("ACME feature requires the local cluster 'path' field configuration to be set")
+	}
+
+	baseUrl, err := url.Parse(cfg.Path)
+	if err != nil {
+		return nil, fmt.Errorf("failed parsing URL configured in local cluster 'path' configuration: %s: %s",
+			cfg.Path, err.Error())
+	}
+	return baseUrl, nil
+}
+
+func getAcmeIssuer(sc *storageContext, issuerName string) (*issuing.IssuerEntry, error) {
+	if issuerName == "" {
+		issuerName = defaultRef
+	}
+	issuerId, err := sc.resolveIssuerReference(issuerName)
+	if err != nil {
+		return nil, fmt.Errorf("%w: issuer does not exist", ErrMalformed)
+	}
+
+	issuer, err := sc.fetchIssuerById(issuerId)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load issuer: %w", err)
+	}
+
+	if issuer.Usage.HasUsage(issuing.IssuanceUsage) && len(issuer.KeyID) > 0 {
+		return issuer, nil
+	}
+
+	return nil, fmt.Errorf("%w: issuer missing proper issuance usage or key", ErrServerInternal)
+}
+
+// getAcmeDirectory returns the base acme directory path, without a leading '/' and
+// including the trailing /acme/ folder which is the root of all our various directories
+func getAcmeDirectory(r *logical.Request) (string, error) {
+	acmePath := r.Path
+	if !strings.HasPrefix(acmePath, "/") {
+		acmePath = "/" + acmePath
+	}
+
+	lastIndex := strings.LastIndex(acmePath, "/acme/")
+	if lastIndex == -1 {
+		return "", fmt.Errorf("%w: unable to determine acme base folder path: %s", ErrServerInternal, acmePath)
+	}
+
+	// Skip the leading '/' and return our base path with the /acme/
+	return strings.TrimLeft(acmePath[0:lastIndex]+"/acme/", "/"), nil
+}
+
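+// For illustration, getAcmeDirectory maps mount-relative request paths such as
+// the following (the role/issuer names here are hypothetical):
+//
+//	acme/new-order                        -> "acme/"
+//	roles/my-role/acme/new-order          -> "roles/my-role/acme/"
+//	issuer/root/roles/my-role/acme/orders -> "issuer/root/roles/my-role/acme/"
+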
+func getAcmeRoleAndIssuer(sc *storageContext, data *framework.FieldData, config *acmeConfigEntry) (*issuing.RoleEntry, *issuing.IssuerEntry, error) {
+	requestedIssuer := getRequestedAcmeIssuerFromPath(data)
+	requestedRole := getRequestedAcmeRoleFromPath(data)
+	issuerToLoad := requestedIssuer
+
+	var role *issuing.RoleEntry
+	var err error
+
+	if len(requestedRole) == 0 { // Default Directory
+		policyType, extraInfo, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy)
+		if err != nil {
+			return nil, nil, err
+		}
+		switch policyType {
+		case Forbid:
+			return nil, nil, fmt.Errorf("%w: default directory not allowed by ACME policy", ErrServerInternal)
+		case SignVerbatim, ExternalPolicy:
+			role = issuing.SignVerbatimRoleWithOpts(
+				issuing.WithIssuer(requestedIssuer),
+				issuing.WithNoStore(false))
+		case Role:
+			role, err = getAndValidateAcmeRole(sc, extraInfo)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	} else { // Requested Role
+		role, err = getAndValidateAcmeRole(sc, requestedRole)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// Check that the requested role is allowed
+		allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*"
+		if !allowAnyRole {
+			var foundRole bool
+			for _, name := range config.AllowedRoles {
+				if name == role.Name {
+					foundRole = true
+					break
+				}
+			}
+
+			if !foundRole {
+				return nil, nil, fmt.Errorf("%w: specified role not allowed by ACME policy", ErrServerInternal)
+			}
+		}
+	}
+
+	// If we haven't loaded an issuer directly from our path and the specified (or default)
+	// role does specify an issuer, prefer the role's issuer rather than the default issuer.
+	if len(role.Issuer) > 0 && len(requestedIssuer) == 0 {
+		issuerToLoad = role.Issuer
+	}
+
+	issuer, err := getAcmeIssuer(sc, issuerToLoad)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*"
+	if !allowAnyIssuer {
+		var foundIssuer bool
+		for index, name := range config.AllowedIssuers {
+			candidateId, err := sc.resolveIssuerReference(name)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to resolve reference for allowed_issuer entry %d: %w", index, err)
+			}
+
+			if candidateId == issuer.ID {
+				foundIssuer = true
+				break
+			}
+		}
+
+		if !foundIssuer {
+			return nil, nil, fmt.Errorf("%w: specified issuer not allowed by ACME policy", ErrServerInternal)
+		}
+	}
+
+	// If not allowed in configuration, override ExtKeyUsage behavior to force it to only be
+	// ServerAuth within ACME issued certs
+	if !config.AllowRoleExtKeyUsage {
+		role.ExtKeyUsage = []string{"serverauth"}
+		role.ExtKeyUsageOIDs = []string{}
+		role.ServerFlag = true
+		role.ClientFlag = false
+		role.CodeSigningFlag = false
+		role.EmailProtectionFlag = false
+	}
+
+	return role, issuer, nil
+}
+
+func getAndValidateAcmeRole(sc *storageContext, requestedRole string) (*issuing.RoleEntry, error) {
+	var err error
+	role, err := sc.Backend.GetRole(sc.Context, sc.Storage, requestedRole)
+	if err != nil {
+		return nil, fmt.Errorf("%w: error loading role", ErrServerInternal)
+	}
+
+	if role == nil {
+		return nil, fmt.Errorf("%w: role does not exist", ErrMalformed)
+	}
+
+	if role.NoStore {
+		return nil, fmt.Errorf("%w: role cannot be used as NoStore is set to true", ErrServerInternal)
+	}
+
+	return role, nil
+}
+
+func getRequestedAcmeRoleFromPath(data *framework.FieldData) string {
+	requestedRole := ""
+	roleNameRaw, present := data.GetOk("role")
+	if present {
+		requestedRole = roleNameRaw.(string)
+	}
+	return requestedRole
+}
+
+func getRequestedAcmeIssuerFromPath(data *framework.FieldData) string {
+	requestedIssuer := ""
+	requestedIssuerRaw, present := data.GetOk(issuerRefParam)
+	if present {
+		requestedIssuer = requestedIssuerRaw.(string)
+	}
+	return requestedIssuer
+}
+
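+// Precedence for disabling ACME, as implemented in isAcmeDisabled below: an
+// explicitly disabled configuration always wins; otherwise the OS environment
+// variable (referenced via disableAcmeEnvVar) can disable public ACME, unless
+// the configured EAB policy is allowed to override that environment setting.
+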
+func isAcmeDisabled(sc *storageContext, config *acmeConfigEntry, policy EabPolicy) bool {
+	if !config.Enabled {
+		return true
+	}
+
+	disableAcme, nonFatalErr := isPublicACMEDisabledByEnv()
+	if nonFatalErr != nil {
+		sc.Backend.Logger().Warn(fmt.Sprintf("could not parse env var '%s'", disableAcmeEnvVar), "error", nonFatalErr)
+	}
+
+	// If the OS environment variable is set to true, it overrides any configuration option.
+	if disableAcme {
+		if policy.OverrideEnvDisablingPublicAcme() {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
diff --git a/builtin/logical/pki/acme_wrappers_test.go b/builtin/logical/pki/acme_wrappers_test.go
new file mode 100644
index 000000000000..cabca180940f
--- /dev/null
+++ b/builtin/logical/pki/acme_wrappers_test.go
@@ -0,0 +1,124 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/require"
+)
+
+// TestACMEIssuerRoleLoading validates that the role and issuer loading logic within
+// the base ACME wrapper is correct.
+func TestACMEIssuerRoleLoading(t *testing.T) {
+	b, s := CreateBackendWithStorage(t)
+
+	_, err := CBWrite(b, s, "config/cluster", map[string]interface{}{
+		"path":     "http://localhost:8200/v1/pki",
+		"aia_path": "http://localhost:8200/cdn/pki",
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "config/acme", map[string]interface{}{
+		"enabled": true,
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "myvault1.com",
+		"issuer_name": "issuer-1",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err, "failed creating issuer issuer-1")
+
+	_, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "myvault2.com",
+		"issuer_name": "issuer-2",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err, "failed creating issuer issuer-2")
+
+	_, err = CBWrite(b, s, "roles/role-bad-issuer", map[string]interface{}{
+		issuerRefParam: "non-existent",
+		"no_store":     "false",
+	})
+	require.NoError(t, err, "failed creating role role-bad-issuer")
+
+	_, err = CBWrite(b, s, "roles/role-no-store-enabled", map[string]interface{}{
+		issuerRefParam: "issuer-2",
+		"no_store":     "true",
+	})
+	require.NoError(t, err, "failed creating role role-no-store-enabled")
+
+	_, err = CBWrite(b, s, "roles/role-issuer-2", map[string]interface{}{
+		issuerRefParam: "issuer-2",
+		"no_store":     "false",
+	})
+	require.NoError(t, err, "failed creating role role-issuer-2")
+
+	tc := []struct {
+		name               string
+		roleName           string
+		issuerName         string
+		expectedIssuerName string
+		expectErr          bool
+	}{
+		{name: "pass-default-use-default", roleName: "", issuerName: "", expectedIssuerName: "issuer-1", expectErr: false},
+		{name: "pass-role-issuer-2", roleName: "role-issuer-2", issuerName: "", expectedIssuerName: "issuer-2", expectErr: false},
+		{name: "pass-issuer-1-no-role", roleName: "", issuerName: "issuer-1", expectedIssuerName: "issuer-1", expectErr: false},
+		{name: "fail-role-has-bad-issuer", roleName: "role-bad-issuer", issuerName: "", expectedIssuerName: "", expectErr: true},
+		{name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true},
"fail-role-does-not-exist", roleName: "non-existant", issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-issuer-does-not-exist", roleName: "", issuerName: "non-existant", expectedIssuerName: "", expectErr: true}, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + f := b.acmeWrapper(acmeWrapperOpts{}, func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if tt.roleName != acmeCtx.role.Name { + return nil, fmt.Errorf("expected role %s but got %s", tt.roleName, acmeCtx.role.Name) + } + + if tt.expectedIssuerName != acmeCtx.issuer.Name { + return nil, fmt.Errorf("expected issuer %s but got %s", tt.expectedIssuerName, acmeCtx.issuer.Name) + } + + return nil, nil + }) + + var acmePath string + fieldRaw := map[string]interface{}{} + if tt.issuerName != "" { + fieldRaw[issuerRefParam] = tt.issuerName + acmePath = "issuer/" + tt.issuerName + "/" + } + if tt.roleName != "" { + fieldRaw["role"] = tt.roleName + acmePath = acmePath + "roles/" + tt.roleName + "/" + } + + acmePath = strings.TrimLeft(acmePath+"/acme/directory", "/") + + resp, err := f(context.Background(), &logical.Request{Path: acmePath, Storage: s}, &framework.FieldData{ + Raw: fieldRaw, + Schema: getCsrSignVerbatimSchemaFields(), + }) + require.NoError(t, err, "all errors should be re-encoded") + + if tt.expectErr { + require.NotEqual(t, 200, resp.Data[logical.HTTPStatusCode]) + require.Equal(t, ErrorContentType, resp.Data[logical.HTTPContentType]) + } else { + if resp != nil { + t.Fatalf("expected no error got %s", string(resp.Data[logical.HTTPRawBody].([]uint8))) + } + } + }) + } +} diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go index 4eaea7f90182..900bcaa31162 100644 --- a/builtin/logical/pki/backend.go +++ b/builtin/logical/pki/backend.go @@ -1,26 +1,36 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" "fmt" - "sort" "strings" "sync" "sync/atomic" "time" - atomic2 "go.uber.org/atomic" - - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/armon/go-metrics" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" ) const ( + operationPrefixPKI = "pki" + operationPrefixPKIIssuer = "pki-issuer" + operationPrefixPKIIssuers = "pki-issuers" + operationPrefixPKIRoot = "pki-root" + noRole = 0 roleOptional = 1 roleRequired = 2 @@ -88,21 +98,36 @@ func Backend(conf *logical.BackendConfig) *backend { "issuer/+/crl/delta/der", "issuer/+/crl/delta/pem", "issuer/+/crl/delta", + "issuer/+/unified-crl/der", + "issuer/+/unified-crl/pem", + "issuer/+/unified-crl", + "issuer/+/unified-crl/delta/der", + "issuer/+/unified-crl/delta/pem", + "issuer/+/unified-crl/delta", "issuer/+/pem", "issuer/+/der", "issuer/+/json", "issuers/", // LIST operations append a '/' to the requested path "ocsp", // OCSP POST "ocsp/*", // OCSP GET + "unified-crl/delta", + "unified-crl/delta/pem", + "unified-crl/pem", + "unified-crl", + "unified-ocsp", // Unified OCSP POST + "unified-ocsp/*", // Unified OCSP GET + + // ACME paths are added below }, LocalStorage: []string{ revokedPath, - deltaWALPath, + localDeltaWALPath, legacyCRLPath, clusterConfigPath, "crls/", "certs/", + acmePathPrefix, }, Root: []string{ @@ -112,8 +137,27 @@ func Backend(conf *logical.BackendConfig) *backend { SealWrapStorage: []string{ legacyCertBundlePath, + legacyCertBundleBackupPath, keyPrefix, }, + + WriteForwardedStorage: []string{ + crossRevocationPath, + unifiedRevocationWritePathPrefix, + unifiedDeltaWALPath, + }, + + Limited: []string{ + "issue", + "issue/*", + }, + + Binary: []string{ + "ocsp", // OCSP POST + "ocsp/*", // OCSP GET + "unified-ocsp", // Unified OCSP POST + "unified-ocsp/*", // Unified OCSP GET + }, }, Paths: []*framework.Path{ @@ -145,6 +189,7 @@ func Backend(conf *logical.BackendConfig) *backend { // Issuer APIs pathListIssuers(&b), pathGetIssuer(&b), + pathGetUnauthedIssuer(&b), pathGetIssuerCRL(&b), pathImportIssuer(&b), pathIssuerIssue(&b), @@ -183,6 +228,11 @@ func Backend(conf *logical.BackendConfig) *backend { // CRL Signing pathResignCrls(&b), pathSignRevocationList(&b), + + // ACME + pathAcmeConfig(&b), + pathAcmeEabList(&b), + pathAcmeEabDelete(&b), }, Secrets: []*framework.Secret{ @@ -193,6 +243,37 @@ func Backend(conf *logical.BackendConfig) *backend { InitializeFunc: b.initialize, Invalidate: b.invalidate, PeriodicFunc: b.periodicFunc, + Clean: b.cleanup, + } + + // Add ACME paths to backend + for _, prefix := range []struct { + acmePrefix string + unauthPrefix string + opts acmeWrapperOpts + }{ + { + "acme", + "acme", + acmeWrapperOpts{true, false}, + }, + { + "roles/" + framework.GenericNameRegex("role") + "/acme", + "roles/+/acme", + acmeWrapperOpts{}, + }, + { + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/acme", + "issuer/+/acme", + acmeWrapperOpts{}, + }, + { + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/roles/" + framework.GenericNameRegex("role") + "/acme", + 
"issuer/+/roles/+/acme", + acmeWrapperOpts{}, + }, + } { + setupAcmeDirectory(&b, prefix.acmePrefix, prefix.unauthPrefix, prefix.opts) } b.tidyCASGuard = new(uint32) @@ -213,18 +294,20 @@ func Backend(conf *logical.BackendConfig) *backend { // Delay the first tidy until after we've started up. b.lastTidy = time.Now() - // Metrics initialization for count of certificates in storage - b.certsCounted = atomic2.NewBool(false) - b.certCount = new(uint32) - b.revokedCertCount = new(uint32) - b.possibleDoubleCountedSerials = make([]string, 0, 250) - b.possibleDoubleCountedRevokedSerials = make([]string, 0, 250) + b.unifiedTransferStatus = newUnifiedTransferStatus() + b.acmeState = NewACMEState() + b.certificateCounter = NewCertificateCounter(b.backendUUID) + + // It is important that we call SetupEnt at the very end as + // some ENT backends need access to the member vars initialized above. + b.SetupEnt() return &b } type backend struct { *framework.Backend + entBackend backendUUID string storage logical.Storage @@ -236,53 +319,40 @@ type backend struct { tidyStatus *tidyStatus lastTidy time.Time - certCount *uint32 - revokedCertCount *uint32 - certsCounted *atomic2.Bool - possibleDoubleCountedSerials []string - possibleDoubleCountedRevokedSerials []string + unifiedTransferStatus *UnifiedTransferStatus + + certificateCounter *CertificateCounter pkiStorageVersion atomic.Value - crlBuilder *crlBuilder + crlBuilder *CrlBuilder // Write lock around issuers and keys. issuersLock sync.RWMutex + + // Context around ACME operations + acmeState *acmeState + acmeAccountLock sync.RWMutex // (Write) Locked on Tidy, (Read) Locked on Account Creation } -type ( - tidyStatusState int - roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) -) +// BackendOps a bridge/legacy interface until we can further +// separate out backend things into distinct packages. +type BackendOps interface { + managed_key.PkiManagedKeyView + pki_backend.SystemViewGetter + pki_backend.MountInfo + pki_backend.Logger + UseLegacyBundleCaStorage() bool + CrlBuilder() *CrlBuilder + GetRevokeStorageLock() *sync.RWMutex + GetUnifiedTransferStatus() *UnifiedTransferStatus + GetAcmeState() *acmeState + GetRole(ctx context.Context, s logical.Storage, n string) (*issuing.RoleEntry, error) + GetCertificateCounter() *CertificateCounter +} -const ( - tidyStatusInactive tidyStatusState = iota - tidyStatusStarted = iota - tidyStatusFinished = iota - tidyStatusError = iota - tidyStatusCancelling = iota - tidyStatusCancelled = iota -) +var _ BackendOps = &backend{} -type tidyStatus struct { - // Parameters used to initiate the operation - safetyBuffer int - issuerSafetyBuffer int - tidyCertStore bool - tidyRevokedCerts bool - tidyRevokedAssocs bool - tidyExpiredIssuers bool - pauseDuration string - - // Status - state tidyStatusState - err error - timeStarted time.Time - timeFinished time.Time - message string - certStoreDeletedCount uint - revokedCertDeletedCount uint - missingIssuerCertCount uint -} +type roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) const backendHelp = ` The PKI backend dynamically generates X509 server and client certificates. 
@@ -304,7 +374,7 @@ func metricsKey(req *logical.Request, extra ...string) []string { func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation) framework.OperationFunc { return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { key := metricsKey(req, callType) - var role *roleEntry + var role *issuing.RoleEntry var labels []metrics.Label var err error @@ -320,7 +390,7 @@ func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation } if roleMode > noRole { // Get the role - role, err = b.getRole(ctx, req.Storage, roleName) + role, err = b.GetRole(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -349,9 +419,9 @@ func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation } // initialize is used to perform a possible PKI storage migration if needed -func (b *backend) initialize(ctx context.Context, _ *logical.InitializationRequest) error { +func (b *backend) initialize(ctx context.Context, ir *logical.InitializationRequest) error { sc := b.makeStorageContext(ctx, b.storage) - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + if err := b.CrlBuilder().reloadConfigIfRequired(sc); err != nil { return err } @@ -360,13 +430,28 @@ func (b *backend) initialize(ctx context.Context, _ *logical.InitializationReque return err } + err = b.GetAcmeState().Initialize(b, sc) + if err != nil { + return err + } + // Initialize also needs to populate our certificate and revoked certificate count err = b.initializeStoredCertificateCounts(ctx) if err != nil { - return err + // Don't block/err initialize/startup for metrics. Context on this call can time out due to number of certificates. + b.Logger().Error("Could not initialize stored certificate counts", "error", err) + b.GetCertificateCounter().SetError(err) } - return nil + return b.initializeEnt(sc, ir) +} + +func (b *backend) cleanup(ctx context.Context) { + sc := b.makeStorageContext(ctx, b.storage) + + b.GetAcmeState().Shutdown(b) + + b.cleanupEnt(sc) } func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { @@ -395,7 +480,31 @@ func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { return nil } -func (b *backend) useLegacyBundleCaStorage() bool { +func (b *backend) BackendUUID() string { + return b.backendUUID +} + +func (b *backend) CrlBuilder() *CrlBuilder { + return b.crlBuilder +} + +func (b *backend) GetRevokeStorageLock() *sync.RWMutex { + return &b.revokeStorageLock +} + +func (b *backend) GetUnifiedTransferStatus() *UnifiedTransferStatus { + return b.unifiedTransferStatus +} + +func (b *backend) GetAcmeState() *acmeState { + return b.acmeState +} + +func (b *backend) GetCertificateCounter() *CertificateCounter { + return b.certificateCounter +} + +func (b *backend) UseLegacyBundleCaStorage() bool { // This helper function is here to choose whether or not we use the newer // issuer/key storage format or the older legacy ca bundle format. 
// @@ -408,6 +517,18 @@ func (b *backend) useLegacyBundleCaStorage() bool { return version == nil || version == 0 } +func (b *backend) IsSecondaryNode() bool { + return b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) +} + +func (b *backend) GetManagedKeyView() (logical.ManagedKeySystemView, error) { + managedKeyView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported system view")} + } + return managedKeyView, nil +} + func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock bool) { info, err := getMigrationInfo(ctx, b.storage) if err != nil { @@ -415,6 +536,12 @@ func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock b return } + // If this method is called outside the initialize function, like say an + // invalidate func on a performance replica cluster, we should be grabbing + // the issuers lock to offer a consistent view of the storage version while + // other events are processing things. Its unknown what might happen during + // a single event if one part thinks we are in legacy mode, and then later + // on we aren't. if grabIssuersLock { b.issuersLock.Lock() defer b.issuersLock.Unlock() @@ -428,6 +555,9 @@ func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock b } func (b *backend) invalidate(ctx context.Context, key string) { + isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) + switch { case strings.HasPrefix(key, legacyMigrationBundleLogKey): // This is for a secondary cluster to pick up that the migration has completed @@ -437,28 +567,59 @@ func (b *backend) invalidate(ctx context.Context, key string) { go func() { b.Logger().Info("Detected a migration completed, resetting pki storage version") b.updatePkiStorageVersion(ctx, true) - b.crlBuilder.requestRebuildIfActiveNode(b) + b.CrlBuilder().requestRebuildIfActiveNode(b) }() case strings.HasPrefix(key, issuerPrefix): - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { // See note in updateDefaultIssuerId about why this is necessary. // We do this ahead of CRL rebuilding just so we know that things // are stale. - b.crlBuilder.invalidateCRLBuildTime() + b.CrlBuilder().invalidateCRLBuildTime() // If an issuer has changed on the primary, we need to schedule an update of our CRL, // the primary cluster would have done it already, but the CRL is cluster specific so // force a rebuild of ours. 
- b.crlBuilder.requestRebuildIfActiveNode(b) + b.CrlBuilder().requestRebuildIfActiveNode(b) } else { b.Logger().Debug("Ignoring invalidation updates for issuer as the PKI migration has yet to complete.") } case key == "config/crl": // We may need to reload our OCSP status flag - b.crlBuilder.markConfigDirty() + b.CrlBuilder().markConfigDirty() + case key == storageAcmeConfig: + b.GetAcmeState().markConfigDirty() case key == storageIssuerConfig: - b.crlBuilder.invalidateCRLBuildTime() + b.CrlBuilder().invalidateCRLBuildTime() + case strings.HasPrefix(key, crossRevocationPrefix): + split := strings.Split(key, "/") + + if !strings.HasSuffix(key, "/confirmed") { + cluster := split[len(split)-2] + serial := split[len(split)-1] + b.CrlBuilder().addCertForRevocationCheck(cluster, serial) + } else { + if len(split) >= 3 { + cluster := split[len(split)-3] + serial := split[len(split)-2] + // Only process confirmations on the perf primary. The + // performance secondaries cannot remove other clusters' + // entries, and so do not need to track them (only to + // ignore them). On performance primary nodes though, + // we do want to track them to remove them. + if !isNotPerfPrimary { + b.CrlBuilder().addCertForRevocationRemoval(cluster, serial) + } + } + } + case strings.HasPrefix(key, unifiedRevocationReadPathPrefix): + // Three parts to this key: prefix, cluster, and serial. + split := strings.Split(key, "/") + cluster := split[len(split)-2] + serial := split[len(split)-1] + b.CrlBuilder().addCertFromCrossRevocation(cluster, serial) } + + b.invalidateEnt(ctx, key) } func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) error { @@ -466,7 +627,7 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er doCRL := func() error { // First attempt to reload the CRL configuration. - if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + if err := b.CrlBuilder().reloadConfigIfRequired(sc); err != nil { return err } @@ -477,22 +638,48 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er return nil } + // First handle any global revocation queue entries. + if err := b.CrlBuilder().processRevocationQueue(sc); err != nil { + return err + } + + // Then handle any unified cross-cluster revocations. + if err := b.CrlBuilder().processCrossClusterRevocations(sc); err != nil { + return err + } + // Check if we're set to auto rebuild and a CRL is set to expire. - if err := b.crlBuilder.checkForAutoRebuild(sc); err != nil { + if err := b.CrlBuilder().checkForAutoRebuild(sc); err != nil { return err } // Then attempt to rebuild the CRLs if required. - if err := b.crlBuilder.rebuildIfForced(sc); err != nil { + warnings, err := b.CrlBuilder().rebuildIfForced(sc) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of complete CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } // If a delta CRL was rebuilt above as part of the complete CRL rebuild, // this will be a no-op. However, if we do need to rebuild delta CRLs, // this would cause us to do so. - if err := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false); err != nil { + warnings, err = b.CrlBuilder().rebuildDeltaCRLsIfForced(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of delta CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. 
%v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } return nil } @@ -548,188 +735,72 @@ func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) er return nil } + // First tidy any ACME nonces to free memory. + b.GetAcmeState().DoTidyNonces() + + // Then run unified transfer. + backgroundSc := b.makeStorageContext(context.Background(), b.storage) + go runUnifiedTransfer(backgroundSc) + + // Then run the CRL rebuild and tidy operation. crlErr := doCRL() tidyErr := doAutoTidy() - if crlErr != nil && tidyErr != nil { - return fmt.Errorf("Error building CRLs:\n - %v\n\nError running auto-tidy:\n - %w\n", crlErr, tidyErr) - } + // Periodically re-emit gauges so that they don't disappear/go stale + b.GetCertificateCounter().EmitCertStoreMetrics() + var errors error if crlErr != nil { - return fmt.Errorf("Error building CRLs:\n - %w\n", crlErr) + errors = multierror.Append(errors, fmt.Errorf("Error building CRLs:\n - %w\n", crlErr)) } if tidyErr != nil { - return fmt.Errorf("Error running auto-tidy:\n - %w\n", tidyErr) + errors = multierror.Append(errors, fmt.Errorf("Error running auto-tidy:\n - %w\n", tidyErr)) + } + + if errors != nil { + return errors } // Check if the CRL was invalidated due to issuer swap and update // accordingly. - if err := b.crlBuilder.flushCRLBuildTimeInvalidation(sc); err != nil { + if err := b.CrlBuilder().flushCRLBuildTimeInvalidation(sc); err != nil { return err } // All good! - return nil + return b.periodicFuncEnt(backgroundSc, request) } func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error { - b.tidyStatusLock.RLock() - defer b.tidyStatusLock.RUnlock() // For performance reasons, we can't lock on issuance/storage of certs until a list operation completes, // but we want to limit possible miscounts / double-counts to over-counting, so we take the tidy lock which // prevents (most) deletions - in particular we take a read lock (sufficient to block the write lock in // tidyStatusStart while allowing tidy to still acquire a read lock to report via its endpoint) + b.tidyStatusLock.RLock() + defer b.tidyStatusLock.RUnlock() + sc := b.makeStorageContext(ctx, b.storage) + config, err := sc.getAutoTidyConfig() + if err != nil { + return err + } + + certCounter := b.GetCertificateCounter() + isEnabled := certCounter.ReconfigureWithTidyConfig(config) + if !isEnabled { + return nil + } entries, err := b.storage.List(ctx, "certs/") if err != nil { return err } - atomic.AddUint32(b.certCount, uint32(len(entries))) revokedEntries, err := b.storage.List(ctx, "revoked/") if err != nil { return err } - atomic.AddUint32(b.revokedCertCount, uint32(len(revokedEntries))) - - b.certsCounted.Store(true) - // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count - // list, and instead have them update the counter directly. We need to do this so that we are looking at a static - // slice of possibly double counted serials. Note that certsCounted is computed before the storage operation, so - // there may be some delay here. - - // Sort the listed-entries first, to accommodate that delay. - sort.Slice(entries, func(i, j int) bool { - return entries[i] < entries[j] - }) - - sort.Slice(revokedEntries, func(i, j int) bool { - return revokedEntries[i] < revokedEntries[j] - }) - - // We assume here that these lists are now complete. 
- sort.Slice(b.possibleDoubleCountedSerials, func(i, j int) bool { - return b.possibleDoubleCountedSerials[i] < b.possibleDoubleCountedSerials[j] - }) - - listEntriesIndex := 0 - possibleDoubleCountIndex := 0 - for { - if listEntriesIndex >= len(entries) { - break - } - if possibleDoubleCountIndex >= len(b.possibleDoubleCountedSerials) { - break - } - if entries[listEntriesIndex] == b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - // This represents a double-counted entry - b.decrementTotalCertificatesCountNoReport() - listEntriesIndex = listEntriesIndex + 1 - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - if entries[listEntriesIndex] < b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - listEntriesIndex = listEntriesIndex + 1 - continue - } - if entries[listEntriesIndex] > b.possibleDoubleCountedSerials[possibleDoubleCountIndex] { - possibleDoubleCountIndex = possibleDoubleCountIndex + 1 - continue - } - } - - sort.Slice(b.possibleDoubleCountedRevokedSerials, func(i, j int) bool { - return b.possibleDoubleCountedRevokedSerials[i] < b.possibleDoubleCountedRevokedSerials[j] - }) - - listRevokedEntriesIndex := 0 - possibleRevokedDoubleCountIndex := 0 - for { - if listRevokedEntriesIndex >= len(revokedEntries) { - break - } - if possibleRevokedDoubleCountIndex >= len(b.possibleDoubleCountedRevokedSerials) { - break - } - if revokedEntries[listRevokedEntriesIndex] == b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - // This represents a double-counted revoked entry - b.decrementTotalRevokedCertificatesCountNoReport() - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] < b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - listRevokedEntriesIndex = listRevokedEntriesIndex + 1 - continue - } - if revokedEntries[listRevokedEntriesIndex] > b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { - possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 - continue - } - } - - b.possibleDoubleCountedRevokedSerials = nil - b.possibleDoubleCountedSerials = nil - - certCount := atomic.LoadUint32(b.certCount) - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) - revokedCertCount := atomic.LoadUint32(b.revokedCertCount) - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) + certCounter.InitializeCountsFromStorage(entries, revokedEntries) return nil } - -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. 
certsCounted := b.certsCounted.Load() -func (b *backend) incrementTotalCertificatesCount(certsCounted bool, newSerial string) { - certCount := atomic.AddUint32(b.certCount, 1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "certs/") { - newSerial = newSerial[6:] - } - b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) - default: - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) - } -} - -func (b *backend) decrementTotalCertificatesCountReport() { - certCount := b.decrementTotalCertificatesCountNoReport() - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) -} - -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -func (b *backend) decrementTotalCertificatesCountNoReport() uint32 { - newCount := atomic.AddUint32(b.certCount, ^uint32(0)) - return newCount -} - -// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: -// eg. certsCounted := b.certsCounted.Load() -func (b *backend) incrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { - newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, 1) - switch { - case !certsCounted: - // This is unsafe, but a good best-attempt - if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial - newSerial = newSerial[8:] - } - b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) - default: - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(newRevokedCertCount)) - } -} - -func (b *backend) decrementTotalRevokedCertificatesCountReport() { - revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() - metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) -} - -// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet -func (b *backend) decrementTotalRevokedCertificatesCountNoReport() uint32 { - newRevokedCertCount := atomic.AddUint32(b.revokedCertCount, ^uint32(0)) - return newRevokedCertCount -} diff --git a/builtin/logical/pki/backend_oss.go b/builtin/logical/pki/backend_oss.go new file mode 100644 index 000000000000..aa8c413e767f --- /dev/null +++ b/builtin/logical/pki/backend_oss.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" +) + +type entBackend struct{} + +func (b *backend) initializeEnt(_ *storageContext, _ *logical.InitializationRequest) error { + return nil +} + +func (b *backend) invalidateEnt(_ context.Context, _ string) {} + +func (b *backend) periodicFuncEnt(_ *storageContext, _ *logical.Request) error { + return nil +} + +func (b *backend) cleanupEnt(_ *storageContext) {} + +func (b *backend) SetupEnt() {} diff --git a/builtin/logical/pki/backend_oss_test.go b/builtin/logical/pki/backend_oss_test.go new file mode 100644 index 000000000000..fb4648293342 --- /dev/null +++ b/builtin/logical/pki/backend_oss_test.go @@ -0,0 +1,18 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +func getEntProperAuthingPaths(_ string) map[string]pathAuthChecker { + return map[string]pathAuthChecker{} +} + +func getEntAcmePrefixes() []string { + return []string{} +} + +func entProperAuthingPathReplacer(rawPath string) string { + return rawPath +} diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index c67999984e9a..7ea97ece116a 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "bytes" + "cmp" "context" "crypto" "crypto/ecdsa" @@ -23,14 +27,22 @@ import ( "net/url" "os" "reflect" + "slices" "sort" "strconv" "strings" "sync" - "sync/atomic" "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "golang.org/x/exp/maps" + + "github.com/hashicorp/vault/helper/testhelpers" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/stretchr/testify/require" "github.com/armon/go-metrics" @@ -47,6 +59,8 @@ import ( "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" "golang.org/x/net/idna" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) var stepCount = 0 @@ -129,9 +143,10 @@ func TestPKI_RequireCN(t *testing.T) { // Issue a cert with require_cn set to true and with common name supplied. // It should succeed. - _, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ "common_name": "foobar.com", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issue/example"), logical.UpdateOperation), resp, true) if err != nil { t.Fatal(err) } @@ -551,7 +566,6 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s if err != nil { return err } - if !reflect.DeepEqual(entries, expected) { return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries) } @@ -683,6 +697,8 @@ func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s } func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType string, keyBits int) (interface{}, []byte, string) { + t.Helper() + var priv interface{} var err error switch keyType { @@ -709,6 +725,10 @@ func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType str t.Fatalf("Got error generating private key for CSR: %v", err) } + return generateCSRWithKey(t, csrTemplate, priv) +} + +func generateCSRWithKey(t *testing.T, csrTemplate *x509.CertificateRequest, priv interface{}) (interface{}, []byte, string) { csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, priv) if err != nil { t.Fatalf("Got error generating CSR: %v", err) @@ -814,6 +834,8 @@ func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[s } func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) (x509.CertificateRequest, string) { + t.Helper() + csrTemplate := x509.CertificateRequest{ Subject: pkix.Name{ Country: []string{"MyCountry"}, @@ -843,7 +865,7 @@ func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) // Generates steps to test out various role permutations func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { - roleVals := roleEntry{ + roleVals := issuing.RoleEntry{ MaxTTL: 12 * time.Hour, 
KeyType: "rsa", KeyBits: 2048, @@ -925,7 +947,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { ret = append(ret, issueTestStep) } - getCountryCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getCountryCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -946,7 +968,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getOuCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -967,7 +989,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getOrganizationCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -988,7 +1010,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getLocalityCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getLocalityCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1009,7 +1031,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getProvinceCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getProvinceCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1030,7 +1052,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getStreetAddressCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getStreetAddressCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1051,7 +1073,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getPostalCodeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getPostalCodeCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1072,7 +1094,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - getNotBeforeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + getNotBeforeCheck := func(role issuing.RoleEntry) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1097,7 +1119,9 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { // Returns a TestCheckFunc that performs various validity checks on the // returned certificate information, mostly within checkCertsAndPrivateKey - getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc { + getCnCheck := 
func(name string, role issuing.RoleEntry, key crypto.Signer, usage x509.KeyUsage, + extUsage x509.ExtKeyUsage, validity time.Duration, + ) logicaltest.TestCheckFunc { var certBundle certutil.CertBundle return func(resp *logical.Response) error { err := mapstructure.Decode(resp.Data, &certBundle) @@ -1207,7 +1231,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } getRandCsr := func(keyType string, errorOk bool, csrTemplate *x509.CertificateRequest) csrPlan { - rsaKeyBits := []int{2048, 3072, 4096} + rsaKeyBits := []int{2048, 3072, 4096, 8192} ecKeyBits := []int{224, 256, 384, 521} plan := csrPlan{errorOk: errorOk} @@ -1320,7 +1344,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } roleVals.KeyUsage = usage - parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage) + parsedKeyUsage := parsing.ParseKeyUsages(roleVals.KeyUsage) if parsedKeyUsage == 0 && len(usage) != 0 { panic("parsed key usages was zero") } @@ -1579,7 +1603,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } { - getOtherCheck := func(expectedOthers ...otherNameUtf8) logicaltest.TestCheckFunc { + getOtherCheck := func(expectedOthers ...certutil.OtherNameUtf8) logicaltest.TestCheckFunc { return func(resp *logical.Response) error { var certBundle certutil.CertBundle err := mapstructure.Decode(resp.Data, &certBundle) @@ -1595,7 +1619,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { if err != nil { return err } - var expected []otherNameUtf8 + var expected []certutil.OtherNameUtf8 expected = append(expected, expectedOthers...) if diff := deep.Equal(foundOthers, expected); len(diff) > 0 { return fmt.Errorf("wrong SAN IPs, diff: %v", diff) @@ -1604,11 +1628,11 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { } } - addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []otherNameUtf8, check logicaltest.TestCheckFunc) { - otherSansMap := func(os []otherNameUtf8) map[string][]string { + addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []certutil.OtherNameUtf8, check logicaltest.TestCheckFunc) { + otherSansMap := func(os []certutil.OtherNameUtf8) map[string][]string { ret := make(map[string][]string) for _, o := range os { - ret[o.oid] = append(ret[o.oid], o.value) + ret[o.Oid] = append(ret[o.Oid], o.Value) } return ret } @@ -1639,14 +1663,14 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { roleVals.UseCSRCommonName = true commonNames.Localhost = true - newOtherNameUtf8 := func(s string) (ret otherNameUtf8) { + newOtherNameUtf8 := func(s string) (ret certutil.OtherNameUtf8) { pieces := strings.Split(s, ";") if len(pieces) == 2 { piecesRest := strings.Split(pieces[1], ":") if len(piecesRest) == 2 { switch strings.ToUpper(piecesRest[0]) { case "UTF-8", "UTF8": - return otherNameUtf8{oid: pieces[0], value: piecesRest[1]} + return certutil.OtherNameUtf8{Oid: pieces[0], Value: piecesRest[1]} } } } @@ -1656,7 +1680,7 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { oid1 := "1.3.6.1.4.1.311.20.2.3" oth1str := oid1 + ";utf8:devops@nope.com" oth1 := newOtherNameUtf8(oth1str) - oth2 := otherNameUtf8{oid1, "me@example.com"} + oth2 := certutil.OtherNameUtf8{oid1, "me@example.com"} // allowNone, allowAll := []string{}, []string{oid1 + ";UTF-8:*"} allowNone, allowAll := []string{}, []string{"*"} @@ 
-1671,15 +1695,15 @@ func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { // Given OtherSANs as API argument and useCSRSANs false, CSR arg ignored. addOtherSANTests(useCSRs, false, allowAll, false, []string{oth1str}, - []otherNameUtf8{oth2}, getOtherCheck(oth1)) + []certutil.OtherNameUtf8{oth2}, getOtherCheck(oth1)) if useCSRs { // OtherSANs not allowed, valid OtherSANs provided via CSR, should be an error. - addOtherSANTests(useCSRs, true, allowNone, true, nil, []otherNameUtf8{oth1}, nil) + addOtherSANTests(useCSRs, true, allowNone, true, nil, []certutil.OtherNameUtf8{oth1}, nil) // Given OtherSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. addOtherSANTests(useCSRs, false, allowAll, false, []string{oth2.String()}, - []otherNameUtf8{oth1}, getOtherCheck(oth2)) + []certutil.OtherNameUtf8{oth1}, getOtherCheck(oth2)) } } @@ -1875,6 +1899,7 @@ func TestBackend_PathFetchValidRaw(t *testing.T) { Data: map[string]interface{}{}, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("ca/pem"), logical.ReadOperation), resp, true) require.NoError(t, err) if resp != nil && resp.IsError() { t.Fatalf("failed read ca/pem, %#v", resp) @@ -1980,6 +2005,7 @@ func TestBackend_PathFetchCertList(t *testing.T) { Data: rootData, MountPoint: "pki/", }) + if resp != nil && resp.IsError() { t.Fatalf("failed to generate root, %#v", resp) } @@ -2000,6 +2026,16 @@ func TestBackend_PathFetchCertList(t *testing.T) { Data: urlsData, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.UpdateOperation), resp, true) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/urls", + Storage: storage, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.ReadOperation), resp, true) + if resp != nil && resp.IsError() { t.Fatalf("failed to config urls, %#v", resp) } @@ -2145,7 +2181,7 @@ func runTestSignVerbatim(t *testing.T, keyType string) { // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. ExtraExtensions: []pkix.Extension{ { - Id: oidExtensionSubjectAltName, + Id: certutil.OidExtensionSubjectAltName, Critical: false, Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, }, @@ -2179,6 +2215,8 @@ func runTestSignVerbatim(t *testing.T, keyType string) { Data: signVerbatimData, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign-verbatim"), logical.UpdateOperation), resp, true) + if resp != nil && resp.IsError() { t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp) } @@ -2305,7 +2343,7 @@ func runTestSignVerbatim(t *testing.T, keyType string) { // We assume that there is only one SAN in the original CSR and that it is an otherName. 
san_count := 0 for _, ext := range cert.Extensions { - if ext.Id.Equal(oidExtensionSubjectAltName) { + if ext.Id.Equal(certutil.OidExtensionSubjectAltName) { san_count += 1 } } @@ -2374,6 +2412,14 @@ func TestBackend_Root_Idempotency(t *testing.T) { require.NotNil(t, resp, "expected ca info") keyId1 := resp.Data["key_id"] issuerId1 := resp.Data["issuer_id"] + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId1.(issuing.KeyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) resp, err = CBRead(b, s, "cert/ca_chain") require.NoError(t, err, "error reading ca_chain: %v", err) @@ -2388,6 +2434,14 @@ func TestBackend_Root_Idempotency(t *testing.T) { require.NotNil(t, resp, "expected ca info") keyId2 := resp.Data["key_id"] issuerId2 := resp.Data["issuer_id"] + cert = parseCert(t, resp.Data["certificate"].(string)) + certSkid = certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId2.(issuing.KeyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) // Make sure that we actually generated different issuer and key values require.NotEqual(t, keyId1, keyId2) @@ -2396,6 +2450,7 @@ func TestBackend_Root_Idempotency(t *testing.T) { // Now because the issued CA's have no links, the call to ca_chain should return the same data (ca chain from default) resp, err = CBRead(b, s, "cert/ca_chain") require.NoError(t, err, "error reading ca_chain: %v", err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("cert/ca_chain"), logical.ReadOperation), resp, true) r2Data := resp.Data if !reflect.DeepEqual(r1Data, r2Data) { @@ -2407,15 +2462,31 @@ func TestBackend_Root_Idempotency(t *testing.T) { resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ "pem_bundle": pemBundleRootCA, }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/ca"), logical.UpdateOperation), resp, true) + require.NoError(t, err) require.NotNil(t, resp, "expected ca info") + firstMapping := resp.Data["mapping"].(map[string]string) firstImportedKeys := resp.Data["imported_keys"].([]string) firstImportedIssuers := resp.Data["imported_issuers"].([]string) + firstExistingKeys := resp.Data["existing_keys"].([]string) + firstExistingIssuers := resp.Data["existing_issuers"].([]string) require.NotContains(t, firstImportedKeys, keyId1) require.NotContains(t, firstImportedKeys, keyId2) require.NotContains(t, firstImportedIssuers, issuerId1) require.NotContains(t, firstImportedIssuers, issuerId2) + require.Empty(t, firstExistingKeys) + require.Empty(t, firstExistingIssuers) + require.NotEmpty(t, firstMapping) + require.Equal(t, 1, len(firstMapping)) + + var issuerId3 string + var keyId3 string + for i, k := range firstMapping { + issuerId3 = i + keyId3 = k + } // Performing this again should result in no key/issuer ids being imported/generated. 
resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ @@ -2423,11 +2494,17 @@ func TestBackend_Root_Idempotency(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, resp, "expected ca info") + secondMapping := resp.Data["mapping"].(map[string]string) secondImportedKeys := resp.Data["imported_keys"] secondImportedIssuers := resp.Data["imported_issuers"] + secondExistingKeys := resp.Data["existing_keys"] + secondExistingIssuers := resp.Data["existing_issuers"] - require.Nil(t, secondImportedKeys) - require.Nil(t, secondImportedIssuers) + require.Empty(t, secondImportedKeys) + require.Empty(t, secondImportedIssuers) + require.Contains(t, secondExistingKeys, keyId3) + require.Contains(t, secondExistingIssuers, issuerId3) + require.Equal(t, 1, len(secondMapping)) resp, err = CBDelete(b, s, "root") require.NoError(t, err) @@ -2467,7 +2544,7 @@ func TestBackend_Root_Idempotency(t *testing.T) { } } -func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { +func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { t.Parallel() b_root, s_root := CreateBackendWithStorage(t) b_int, s_int := CreateBackendWithStorage(t) @@ -2485,6 +2562,7 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { _, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{ "allow_bare_domains": true, "allow_subdomains": true, + "allow_any_name": true, }) if err != nil { t.Fatal(err) @@ -2493,20 +2571,26 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ "common_name": "myint.com", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) + require.Contains(t, resp.Data, "key_id") + intKeyId := resp.Data["key_id"].(issuing.KeyID) + csr := resp.Data["csr"] + + resp, err = CBRead(b_int, s_int, "key/"+intKeyId.String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + intSkid := resp.Data["subject_key_id"].(string) + if err != nil { t.Fatal(err) } - csr := resp.Data["csr"] - _, err = CBWrite(b_root, s_root, "sign/test", map[string]interface{}{ "common_name": "myint.com", "csr": csr, "ttl": "60h", }) - if err == nil { - t.Fatal("expected error") - } + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") _, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{ "common_name": "myint.com", @@ -2514,9 +2598,7 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { "csr": csr, "ttl": "60h", }) - if err == nil { - t.Fatal("expected error") - } + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ "common_name": "myint.com", @@ -2533,6 +2615,10 @@ func TestBackend_SignIntermediate_AllowedPastCA(t *testing.T) { if len(resp.Warnings) == 0 { t.Fatalf("expected warnings, got %#v", *resp) } + + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + require.Equal(t, intSkid, certSkid) } func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { @@ -2670,6 +2756,7 @@ func TestBackend_SignSelfIssued(t *testing.T) { }, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("root/sign-self-issued"), logical.UpdateOperation), resp, true) if err != nil { t.Fatal(err) } @@ -2688,7 +2775,7 
@@ func TestBackend_SignSelfIssued(t *testing.T) { } sc := b.makeStorageContext(context.Background(), storage) - signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + signingBundle, err := sc.fetchCAInfo(defaultRef, issuing.ReadOnlyUsage) if err != nil { t.Fatal(err) } @@ -3047,11 +3134,14 @@ func TestBackend_OID_SANs(t *testing.T) { cert.DNSNames[2] != "foobar.com" { t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) } - expectedOtherNames := []otherNameUtf8{{oid1, val1}, {oid2, val2}} + expectedOtherNames := []certutil.OtherNameUtf8{{oid1, val1}, {oid2, val2}} foundOtherNames, err := getOtherSANsFromX509Extensions(cert.Extensions) if err != nil { t.Fatal(err) } + // Sort our returned list as SANS are built internally with a map so ordering can be inconsistent + slices.SortFunc(foundOtherNames, func(a, b certutil.OtherNameUtf8) int { return cmp.Compare(a.Oid, b.Oid) }) + if diff := deep.Equal(expectedOtherNames, foundOtherNames); len(diff) != 0 { t.Errorf("unexpected otherNames: %v", diff) } @@ -3621,6 +3711,7 @@ func TestReadWriteDeleteRoles(t *testing.T) { "code_signing_flag": false, "issuer_ref": "default", "cn_validations": []interface{}{"email", "hostname"}, + "allowed_user_ids": []interface{}{}, } if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { @@ -3792,6 +3883,17 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal(err) } + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + require.NoError(t, err, "failed calling auto-tidy") + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki/", + }) + require.NoError(t, err, "failed calling backend reload") + // Check the metrics initialized in order to calculate backendUUID for /pki // BackendUUID not consistent during tests with UUID from /sys/mounts/pki metricsSuffix := "total_certificates_stored" @@ -3828,6 +3930,14 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { if err != nil { t.Fatal(err) } + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki2/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki2/", + }) // Create a CSR for the intermediate CA secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) @@ -3853,6 +3963,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { if err != nil { t.Fatal(err) } + if secret == nil || len(secret.Data) == 0 || len(secret.Data["certificate"].(string)) == 0 { t.Fatal("expected certificate information from read operation") } @@ -3936,21 +4047,35 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { expectedData := map[string]interface{}{ "safety_buffer": json.Number("1"), "issuer_safety_buffer": json.Number("31536000"), + "revocation_queue_safety_buffer": json.Number("172800"), "tidy_cert_store": true, "tidy_revoked_certs": true, "tidy_revoked_cert_issuer_associations": false, "tidy_expired_issuers": false, + "tidy_move_legacy_ca_bundle": false, + "tidy_revocation_queue": false, + "tidy_cross_cluster_revoked_certs": false, "pause_duration": "0s", "state": "Finished", "error": nil, "time_started": nil, "time_finished": 
nil, + "last_auto_tidy_finished": nil, "message": nil, "cert_store_deleted_count": json.Number("1"), "revoked_cert_deleted_count": json.Number("1"), "missing_issuer_cert_count": json.Number("0"), "current_cert_store_count": json.Number("0"), "current_revoked_cert_count": json.Number("0"), + "revocation_queue_deleted_count": json.Number("0"), + "cross_revoked_cert_deleted_count": json.Number("0"), + "internal_backend_uuid": backendUUID, + "tidy_acme": false, + "acme_account_safety_buffer": json.Number("2592000"), + "acme_orders_deleted_count": json.Number("0"), + "acme_account_revoked_count": json.Number("0"), + "acme_account_deleted_count": json.Number("0"), + "total_acme_account_count": json.Number("0"), } // Let's copy the times from the response so that we can use deep.Equal() timeStarted, ok := tidyStatus.Data["time_started"] @@ -3963,6 +4088,7 @@ func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { t.Fatal("Expected tidy status response to include a value for time_finished") } expectedData["time_finished"] = timeFinished + expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { t.Fatal(diff) @@ -4748,6 +4874,7 @@ func TestRootWithExistingKey(t *testing.T) { "key_type": "rsa", "issuer_name": "my-issuer1", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) require.NoError(t, err) require.NotNil(t, resp.Data["certificate"]) myIssuerId1 := resp.Data["issuer_id"] @@ -4823,9 +4950,9 @@ func TestRootWithExistingKey(t *testing.T) { resp, err = CBList(b, s, "issuers") require.NoError(t, err) require.Equal(t, 3, len(resp.Data["keys"].([]string))) - require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuing.IssuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuing.IssuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuing.IssuerID))) } func TestIntermediateWithExistingKey(t *testing.T) { @@ -4863,6 +4990,7 @@ func TestIntermediateWithExistingKey(t *testing.T) { "common_name": "root myvault.com", "key_type": "rsa", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/intermediate/internal"), logical.UpdateOperation), resp, true) require.NoError(t, err) // csr1 := resp.Data["csr"] myKeyId1 := resp.Data["key_id"] @@ -4925,12 +5053,13 @@ func TestIssuanceTTLs(t *testing.T) { }) require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") - resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ - "issuer_name": "root", + resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ "leaf_not_after_behavior": "permit", }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ -4943,6 +5072,8 @@ func TestIssuanceTTLs(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ 
-5027,9 +5158,11 @@ func TestPerIssuerAIA(t *testing.T) { require.Empty(t, rootCert.CRLDistributionPoints) // Set some local URLs on the issuer. - _, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ + resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ "issuing_certificates": []string{"https://google.com"}, }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default"), logical.UpdateOperation), resp, true) + require.NoError(t, err) _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ @@ -5082,6 +5215,16 @@ func TestPerIssuerAIA(t *testing.T) { require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://example.com/ca", "https://backup.example.com/ca"}) require.Equal(t, leafCert.OCSPServer, []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}) require.Equal(t, leafCert.CRLDistributionPoints, []string{"https://example.com/crl", "https://backup.example.com/crl"}) + + // Validate that we can set an issuer name and remove it. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "my-issuer", + }) + require.NoError(t, err) + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "", + }) + require.NoError(t, err) } func TestIssuersWithoutCRLBits(t *testing.T) { @@ -5141,6 +5284,7 @@ TgM7RZnmEjNdeaa4M52o7VY= resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ "pem_bundle": customBundleWithoutCRLBits, }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/import/bundle"), logical.UpdateOperation), resp, true) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -5182,7 +5326,8 @@ func TestBackend_IfModifiedSinceHeaders(t *testing.T) { }, } cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, + HandlerFunc: vaulthttp.Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), }) cluster.Start() defer cluster.Cleanup() @@ -5471,17 +5616,17 @@ func TestBackend_IfModifiedSinceHeaders(t *testing.T) { lastHeaders = client.Headers() } - time.Sleep(4 * time.Second) - - beforeDeltaRotation := time.Now().Add(-2 * time.Second) - // Finally, rebuild the delta CRL and ensure that only that is - // invalidated. We first need to enable it though. + // invalidated. We first need to enable it though, and wait for + // all CRLs to rebuild. 
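// Ordering matters here: enable auto_rebuild/enable_delta first, and only
// then sleep and capture beforeDeltaRotation, so the timestamp is taken
// after the full CRLs have had a chance to rebuild rather than racing them.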
_, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ "auto_rebuild": true, "enable_delta": true, }) require.NoError(t, err) + time.Sleep(4 * time.Second) + beforeDeltaRotation := time.Now().Add(-2 * time.Second) + resp, err = client.Logical().Read("pki/crl/rotate-delta") require.NoError(t, err) require.NotNil(t, resp) @@ -5570,6 +5715,14 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { serials[i] = resp.Data["serial_number"].(string) } + // Turn on certificate counting: + CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": false, + }) + // Assert initialize from clean is correct: + b.initializeStoredCertificateCounts(ctx) + // Revoke certificates A + B revocations := serials[0:2] for _, key := range revocations { @@ -5581,19 +5734,18 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { } } - // Assert initialize from clean is correct: - b.initializeStoredCertificateCounts(ctx) - if atomic.LoadUint32(b.certCount) != 6 { - t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", atomic.LoadUint32(b.certCount)) + certCounter := b.GetCertificateCounter() + if certCounter.CertificateCount() != 6 { + t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", certCounter.CertificateCount()) } - if atomic.LoadUint32(b.revokedCertCount) != 2 { - t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", atomic.LoadUint32(b.revokedCertCount)) + if certCounter.RevokedCount() != 2 { + t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", certCounter.RevokedCount()) } // Simulates listing while initialize in progress, by "restarting it" - atomic.StoreUint32(b.certCount, 0) - atomic.StoreUint32(b.revokedCertCount, 0) - b.certsCounted.Store(false) + certCounter.certCount.Store(0) + certCounter.revokedCertCount.Store(0) + certCounter.certsCounted.Store(false) // Revoke certificates C, D dirtyRevocations := serials[2:4] @@ -5618,15 +5770,16 @@ func TestBackend_InitializeCertificateCounts(t *testing.T) { } // Run initialize - b.initializeStoredCertificateCounts(ctx) + err = b.initializeStoredCertificateCounts(ctx) + require.NoError(t, err, "failed initializing certificate counts") // Test certificate count - if *(b.certCount) != 8 { - t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", *(b.certCount)) + if certCounter.CertificateCount() != 8 { + t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", certCounter.CertificateCount()) } - if *(b.revokedCertCount) != 4 { - t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", *(b.revokedCertCount)) + if certCounter.RevokedCount() != 4 { + t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", certCounter.RevokedCount()) } return @@ -5896,6 +6049,7 @@ func TestPKI_ListRevokedCerts(t *testing.T) { // Test empty cluster resp, err := CBList(b, s, "certs/revoked") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("certs/revoked"), logical.ListOperation), resp, true) requireSuccessNonNilResponse(t, resp, err, "failed listing empty cluster") require.Empty(t, resp.Data, "response map contained data that we did not expect") @@ -5970,41 +6124,48 @@ func TestPKI_TemplatedAIAs(t *testing.T) { b, s := CreateBackendWithStorage(t) // Setting templated AIAs should succeed. 
- _, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ - "path": "http://localhost:8200/v1/pki", + resp, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ + "path": "http://localhost:8200/v1/pki", + "aia_path": "http://localhost:8200/cdn/pki", }) require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.UpdateOperation), resp, true) + + resp, err = CBRead(b, s, "config/cluster") + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.ReadOperation), resp, true) aiaData := map[string]interface{}{ "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", - "issuing_certificates": "{{cluster_path}}/issuer/{{issuer_id}}/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", "ocsp_servers": "{{cluster_path}}/ocsp", "enable_templating": true, } _, err = CBWrite(b, s, "config/urls", aiaData) require.NoError(t, err) - // But root generation will fail. + // Root generation should succeed, but without AIA info. rootData := map[string]interface{}{ "common_name": "Long-Lived Root X1", "issuer_name": "long-root-x1", "key_type": "ec", } - _, err = CBWrite(b, s, "root/generate/internal", rootData) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to parse AIA URL") + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + require.NoError(t, err) + _, err = CBDelete(b, s, "root") + require.NoError(t, err) - // Clearing the config and regenerating the root should succeed. + // Clearing the config and regenerating the root should still succeed. _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "crl_distribution_points": "", - "issuing_certificates": "", - "ocsp_servers": "", - "enable_templating": false, + "crl_distribution_points": "{{cluster_path}}/issuer/my-root-id/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/my-root-id/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, }) require.NoError(t, err) - resp, err := CBWrite(b, s, "root/generate/internal", rootData) + resp, err = CBWrite(b, s, "root/generate/internal", rootData) requireSuccessNonNilResponse(t, resp, err) - issuerId := string(resp.Data["issuer_id"].(issuerID)) + issuerId := string(resp.Data["issuer_id"].(issuing.IssuerID)) // Now write the original AIA config and sign a leaf. _, err = CBWrite(b, s, "config/urls", aiaData) @@ -6023,7 +6184,7 @@ func TestPKI_TemplatedAIAs(t *testing.T) { // Validate the AIA info is correctly templated. cert := parseCert(t, resp.Data["certificate"].(string)) require.Equal(t, cert.OCSPServer, []string{"http://localhost:8200/v1/pki/ocsp"}) - require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/der"}) + require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/cdn/pki/issuer/" + issuerId + "/der"}) require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/crl/der"}) // Modify our issuer to set custom AIAs: these URLs are bad. 
@@ -6069,6 +6230,932 @@ func TestPKI_TemplatedAIAs(t *testing.T) { require.Error(t, err) } +func requireSubjectUserIDAttr(t *testing.T, cert string, target string) { + xCert := parseCert(t, cert) + + for _, attr := range xCert.Subject.Names { + var userID string + if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { + if target == "" { + t.Fatalf("expected no UserID (OID: %v) subject attributes in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, cert) + } + + switch aValue := attr.Value.(type) { + case string: + userID = aValue + case []byte: + userID = string(aValue) + default: + t.Fatalf("unknown type for UserID attribute: %v\nCert: %v", attr, cert) + } + + if userID == target { + return + } + } + } + + if target != "" { + t.Fatalf("failed to find UserID (OID: %v) matching %v in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, target, cert) + } +} + +func TestUserIDsInLeafCerts(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + + // 2. Allow no user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs should work. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 3. Allow any user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "*", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one user ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with two user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 4. Allow one specific user ID. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. 
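+ // (A non-empty allow-list does not make user_ids mandatory; only values
+ // outside the list are rejected, as the following requests demonstrate.)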
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with non-approved user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with one approved and one non-approved should also fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 5. Allow two specific user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid,robot", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with other user ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // - Issue cert with unknown user ID will fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot2", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with both should succeed. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 6. Use a glob. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "human*", + "key_type": "ec", + "use_csr_sans": true, // setup for further testing. + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. 
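+ // The glob is matched per requested user ID: "human*" accepts the prefix
+ // match "humanoid", the bare "human", and even the literal string "human*",
+ // while anything else (e.g. "robot") is still rejected.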
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+     "common_name": "localhost",
+     "user_ids": "humanoid",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+ requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid")
+
+ // - Issue cert with another approved ID.
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+     "common_name": "localhost",
+     "user_ids": "human",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+ requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human")
+
+ // - Issue cert with literal glob.
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+     "common_name": "localhost",
+     "user_ids": "human*",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+ requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human*")
+
+ // - Still no robotic certs are allowed; will fail.
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+     "common_name": "localhost",
+     "user_ids": "robot",
+ })
+ require.Error(t, err)
+ require.True(t, resp.IsError())
+
+ // Create a CSR and validate it works with both sign/ and sign-verbatim.
+ csrTemplate := x509.CertificateRequest{
+     Subject: pkix.Name{
+         CommonName: "localhost",
+         ExtraNames: []pkix.AttributeTypeAndValue{
+             {
+                 Type: certutil.SubjectPilotUserIDAttributeOID,
+                 Value: "humanoid",
+             },
+         },
+     },
+ }
+ _, _, csrPem := generateCSR(t, &csrTemplate, "ec", 256)
+
+ // Should work with role-based signing.
+ resp, err = CBWrite(b, s, "sign/testing", map[string]interface{}{
+     "csr": csrPem,
+ })
+ schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign/testing"), logical.UpdateOperation), resp, true)
+ requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+ requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid")
+
+ // - Definitely will work with sign-verbatim.
+ resp, err = CBWrite(b, s, "sign-verbatim", map[string]interface{}{
+     "csr": csrPem,
+ })
+ requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+ requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid")
+}
+
+// TestStandby_Operations tests proper forwarding for PKI requests from a standby node to the
+// active node within a cluster.
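+// The requests below are issued through a standby core's API client, so a
+// passing run demonstrates that standbys transparently forward PKI writes
+// (mount setup, issuance, revocation) to the active node.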
+func TestStandby_Operations(t *testing.T) { + conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + }, nil, teststorage.InmemBackendSetup) + cluster := vault.NewTestCluster(t, conf, opts) + cluster.Start() + defer cluster.Cleanup() + + testhelpers.WaitForActiveNodeAndStandbys(t, cluster) + standbyCores := testhelpers.DeriveStandbyCores(t, cluster) + require.Greater(t, len(standbyCores), 0, "Need at least one standby core.") + client := standbyCores[0].Client + + mountPKIEndpoint(t, client, "pki") + + _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "root-ca.com", + "ttl": "600h", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + _, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "no_store": "false", // make sure we store this cert + "ttl": "5h", + "key_type": "ec", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{ + "common_name": "test.example.com", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + serialOfCert := resp.Data["serial_number"].(string) + + resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialOfCert, + }) + require.NoError(t, err, "error revoking certificate: %v", err) + require.NotNil(t, resp, "got nil response from revoke request") +} + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return err != nil && strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation")) +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. 
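+ // Strategy: attempt every operation without a token first; where a handler
+ // rejects the operation itself (which can surface as permission denied),
+ // retry with the token to distinguish an unsupported operation from a real
+ // authentication failure.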
+ client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. 
However, on OSS, we might end up with + // a regular 404, which looks like err == resp == nil; hence we only + // fail when there's a non-nil response and/or a non-nil err. + resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. + client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. + resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount PKI. 
+ err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. + _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ + "allow_localhost": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ + "common_name": "localhost", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + serial := resp.Data["serial_number"].(string) + eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" + paths := map[string]pathAuthChecker{ + "ca_chain": shouldBeUnauthedReadList, + "cert/ca_chain": shouldBeUnauthedReadList, + "ca": shouldBeUnauthedReadList, + "ca/pem": shouldBeUnauthedReadList, + "cert/" + serial: shouldBeUnauthedReadList, + "cert/" + serial + "/raw": shouldBeUnauthedReadList, + "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, + "cert/crl": shouldBeUnauthedReadList, + "cert/crl/raw": shouldBeUnauthedReadList, + "cert/crl/raw/pem": shouldBeUnauthedReadList, + "cert/delta-crl": shouldBeUnauthedReadList, + "cert/delta-crl/raw": shouldBeUnauthedReadList, + "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-crl": shouldBeUnauthedReadList, + "cert/unified-crl/raw": shouldBeUnauthedReadList, + "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-delta-crl": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, + "certs/": shouldBeAuthed, + "certs/revoked/": shouldBeAuthed, + "certs/revocation-queue/": shouldBeAuthed, + "certs/unified-revoked/": shouldBeAuthed, + "config/acme": shouldBeAuthed, + "config/auto-tidy": shouldBeAuthed, + "config/ca": shouldBeAuthed, + "config/cluster": shouldBeAuthed, + "config/crl": shouldBeAuthed, + "config/issuers": shouldBeAuthed, + "config/keys": shouldBeAuthed, + "config/urls": shouldBeAuthed, + "crl": shouldBeUnauthedReadList, + "crl/pem": shouldBeUnauthedReadList, + "crl/delta": shouldBeUnauthedReadList, + "crl/delta/pem": shouldBeUnauthedReadList, + "crl/rotate": shouldBeAuthed, + "crl/rotate-delta": shouldBeAuthed, + "intermediate/cross-sign": shouldBeAuthed, + "intermediate/generate/exported": shouldBeAuthed, + "intermediate/generate/internal": shouldBeAuthed, + "intermediate/generate/existing": shouldBeAuthed, + "intermediate/generate/kms": shouldBeAuthed, + "intermediate/set-signed": shouldBeAuthed, + "issue/test": shouldBeAuthed, + "issuer/default": shouldBeAuthed, + "issuer/default/der": shouldBeUnauthedReadList, + "issuer/default/json": shouldBeUnauthedReadList, + "issuer/default/pem": shouldBeUnauthedReadList, + "issuer/default/crl": shouldBeUnauthedReadList, + "issuer/default/crl/pem": shouldBeUnauthedReadList, + "issuer/default/crl/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta": shouldBeUnauthedReadList, + "issuer/default/crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl": shouldBeUnauthedReadList, + "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl/der": shouldBeUnauthedReadList, + 
"issuer/default/unified-crl/delta": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/issue/test": shouldBeAuthed, + "issuer/default/resign-crls": shouldBeAuthed, + "issuer/default/revoke": shouldBeAuthed, + "issuer/default/sign-intermediate": shouldBeAuthed, + "issuer/default/sign-revocation-list": shouldBeAuthed, + "issuer/default/sign-self-issued": shouldBeAuthed, + "issuer/default/sign-verbatim": shouldBeAuthed, + "issuer/default/sign-verbatim/test": shouldBeAuthed, + "issuer/default/sign/test": shouldBeAuthed, + "issuers/": shouldBeUnauthedReadList, + "issuers/generate/intermediate/exported": shouldBeAuthed, + "issuers/generate/intermediate/internal": shouldBeAuthed, + "issuers/generate/intermediate/existing": shouldBeAuthed, + "issuers/generate/intermediate/kms": shouldBeAuthed, + "issuers/generate/root/exported": shouldBeAuthed, + "issuers/generate/root/internal": shouldBeAuthed, + "issuers/generate/root/existing": shouldBeAuthed, + "issuers/generate/root/kms": shouldBeAuthed, + "issuers/import/cert": shouldBeAuthed, + "issuers/import/bundle": shouldBeAuthed, + "key/default": shouldBeAuthed, + "keys/": shouldBeAuthed, + "keys/generate/internal": shouldBeAuthed, + "keys/generate/exported": shouldBeAuthed, + "keys/generate/kms": shouldBeAuthed, + "keys/import": shouldBeAuthed, + "ocsp": shouldBeUnauthedWriteOnly, + "ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "revoke": shouldBeAuthed, + "revoke-with-key": shouldBeAuthed, + "roles/test": shouldBeAuthed, + "roles/": shouldBeAuthed, + "root": shouldBeAuthed, + "root/generate/exported": shouldBeAuthed, + "root/generate/internal": shouldBeAuthed, + "root/generate/existing": shouldBeAuthed, + "root/generate/kms": shouldBeAuthed, + "root/replace": shouldBeAuthed, + "root/rotate/internal": shouldBeAuthed, + "root/rotate/exported": shouldBeAuthed, + "root/rotate/existing": shouldBeAuthed, + "root/rotate/kms": shouldBeAuthed, + "root/sign-intermediate": shouldBeAuthed, + "root/sign-self-issued": shouldBeAuthed, + "sign-verbatim": shouldBeAuthed, + "sign-verbatim/test": shouldBeAuthed, + "sign/test": shouldBeAuthed, + "tidy": shouldBeAuthed, + "tidy-cancel": shouldBeAuthed, + "tidy-status": shouldBeAuthed, + "unified-crl": shouldBeUnauthedReadList, + "unified-crl/pem": shouldBeUnauthedReadList, + "unified-crl/delta": shouldBeUnauthedReadList, + "unified-crl/delta/pem": shouldBeUnauthedReadList, + "unified-ocsp": shouldBeUnauthedWriteOnly, + "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "eab/": shouldBeAuthed, + "eab/" + eabKid: shouldBeAuthed, + } + + entPaths := getEntProperAuthingPaths(serial) + maps.Copy(paths, entPaths) + + // Add ACME based paths to the test suite + ossAcmePrefixes := []string{"acme/", "issuer/default/acme/", "roles/test/acme/", "issuer/default/roles/test/acme/"} + entAcmePrefixes := getEntAcmePrefixes() + for _, acmePrefix := range append(ossAcmePrefixes, entAcmePrefixes...) 
{ + paths[acmePrefix+"directory"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-nonce"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-account"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"revoke-cert"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"new-order"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"orders"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly + + // Make sure this new-eab path is auth'd + paths[acmePrefix+"new-eab"] = shouldBeAuthed + } + + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "pki/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + validatedPath := false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/pki/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + // Substitute values in from our testing map. + raw_path := openapi_path[5:] + if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") { + raw_path = strings.ReplaceAll(raw_path, "{name}", "test") + } + if strings.Contains(raw_path, "{role}") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test") + } + if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") { + raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=") + } + if strings.Contains(raw_path, "{issuer_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default") + } + if strings.Contains(raw_path, "{key_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default") + } + if strings.Contains(raw_path, "{exported}") { + raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal") + } + if strings.Contains(raw_path, "{serial}") { + raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) + } + if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") { + raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") { + raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") { + raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") { + raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90") + } + if strings.Contains(raw_path, "eab") && strings.Contains(raw_path, "{key_id}") { + raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid) + } + if strings.Contains(raw_path, 
"external-policy/") && strings.Contains(raw_path, "{policy}") { + raw_path = strings.ReplaceAll(raw_path, "{policy}", "a-policy") + } + + raw_path = entProperAuthingPathReplacer(raw_path) + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports PKI mount contains %v -> %v but was not tested to be authed or not authed.", + openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is exclusive from GET on the same endpoint usually. + hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedWriteOnly { + if hasGet || hasList { + t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} + +func TestPatchIssuer(t *testing.T) { + t.Parallel() + + type TestCase struct { + Field string + Before interface{} + Patched interface{} + } + testCases := []TestCase{ + { + Field: "issuer_name", + Before: "root", + Patched: "root-new", + }, + { + Field: "leaf_not_after_behavior", + Before: "err", + Patched: "permit", + }, + { + Field: "usage", + Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", + Patched: "issuing-certificates,read-only", + }, + { + Field: "revocation_signature_algorithm", + Before: "ECDSAWithSHA256", + Patched: "ECDSAWithSHA384", + }, + { + Field: "issuing_certificates", + Before: []string{"http://localhost/v1/pki-1/ca"}, + Patched: []string{"http://localhost/v1/pki/ca"}, + }, + { + Field: "crl_distribution_points", + Before: []string{"http://localhost/v1/pki-1/crl"}, + Patched: []string{"http://localhost/v1/pki/crl"}, + }, + { + Field: "ocsp_servers", + Before: []string{"http://localhost/v1/pki-1/ocsp"}, + Patched: []string{"http://localhost/v1/pki/ocsp"}, + }, + { + Field: "enable_aia_url_templating", + Before: false, + Patched: true, + }, + { + Field: "manual_chain", + Before: []string(nil), + Patched: []string{"self"}, + }, + } + + for index, testCase := range testCases { + t.Logf("index: %v / tc: %v", index, testCase) + + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + "issuer_name": "root", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + id := string(resp.Data["issuer_id"].(issuing.IssuerID)) + + // 2. Enable Cluster paths + resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "path": "https://localhost/v1/pki", + "aia_path": "http://localhost/v1/pki", + }) + requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") + + // 3. 
Add AIA information + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": "http://localhost/v1/pki-1/ca", + "crl_distribution_points": "http://localhost/v1/pki-1/crl", + "ocsp_servers": "http://localhost/v1/pki-1/ocsp", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") + + // 4. Read the issuer before. + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") + require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") + + // 5. Perform modification. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + testCase.Field: testCase.Patched, + }) + requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + + // 6. Ensure it stuck + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + } +} + +func TestGenerateRootCAWithAIA(t *testing.T) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + // Setup templated AIA information + _, err := CBWrite(b_root, s_root, "config/cluster", map[string]interface{}{ + "path": "https://localhost:8200", + "aia_path": "https://localhost:8200", + }) + require.NoError(t, err, "failed to write AIA settings") + + _, err = CBWrite(b_root, s_root, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err, "failed to write AIA settings") + + // Write a root issuer, this should succeed. + resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "expected root generation to succeed") +} + var ( initTest sync.Once rsaCAKey string diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go index 9dc418c86ba7..4517604f8a0d 100644 --- a/builtin/logical/pki/ca_test.go +++ b/builtin/logical/pki/ca_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go index d2a59f34bdd2..4ad1887853f7 100644 --- a/builtin/logical/pki/ca_util.go +++ b/builtin/logical/pki/ca_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -15,9 +18,12 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" ) -func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *roleEntry, errorResp *logical.Response) { +func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *issuing.RoleEntry, errorResp *logical.Response) { exportedStr := data.Get("exported").(string) switch exportedStr { case "exported": @@ -44,7 +50,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte return } - role = &roleEntry{ + role = &issuing.RoleEntry{ TTL: time.Duration(data.Get("ttl").(int)) * time.Second, KeyType: keyType, KeyBits: keyBits, @@ -58,6 +64,7 @@ func getGenerationParams(sc *storageContext, data *framework.FieldData) (exporte AllowedURISANs: []string{"*"}, AllowedOtherSANs: []string{"*"}, AllowedSerialNumbers: []string{"*"}, + AllowedUserIDs: []string{"*"}, OU: data.Get("ou").([]string), Organization: data.Get("organization").([]string), Country: data.Get("country").([]string), @@ -86,7 +93,7 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre if err != nil { return nil, err } - return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + return managed_key.GenerateManagedKeyCABundle(ctx, b, keyId, data, randomSource) } if existingKeyRequested(input) { @@ -100,12 +107,12 @@ func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.Cre return nil, err } - if keyEntry.isManagedPrivateKey() { - keyId, err := keyEntry.getManagedKeyUUID() + if keyEntry.IsManagedPrivateKey() { + keyId, err := issuing.GetManagedKeyUUID(keyEntry) if err != nil { return nil, err } - return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + return managed_key.GenerateManagedKeyCABundle(ctx, b, keyId, data, randomSource) } return certutil.CreateCertificateWithKeyGenerator(data, randomSource, existingKeyGeneratorFromBytes(keyEntry)) @@ -124,7 +131,7 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return nil, err } - return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + return managed_key.GenerateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) } if existingKeyRequested(input) { @@ -138,12 +145,12 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr return nil, err } - if key.isManagedPrivateKey() { - keyId, err := key.getManagedKeyUUID() + if key.IsManagedPrivateKey() { + keyId, err := issuing.GetManagedKeyUUID(key) if err != nil { return nil, err } - return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + return managed_key.GenerateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) } return certutil.CreateCSRWithKeyGenerator(data, addBasicConstraints, randomSource, existingKeyGeneratorFromBytes(key)) @@ -153,10 +160,7 @@ func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.Cr } func parseCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { - if bundle.PrivateKeyType == certutil.ManagedPrivateKey { - return 
parseManagedKeyCABundle(ctx, b, bundle) - } - return bundle.ToParsedCertBundle() + return issuing.ParseCABundle(ctx, b, bundle) } func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (string, int, error) { @@ -185,10 +189,10 @@ func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (s if kmsRequestedFromFieldData(data) { keyId, err := getManagedKeyId(data) if err != nil { - return "", 0, errors.New("unable to determine managed key id" + err.Error()) + return "", 0, errors.New("unable to determine managed key id: " + err.Error()) } - pubKeyManagedKey, err := getManagedKeyPublicKey(sc.Context, sc.Backend, keyId) + pubKeyManagedKey, err := managed_key.GetManagedKeyPublicKey(sc.Context, sc.Backend, keyId) if err != nil { return "", 0, errors.New("failed to lookup public key from managed key: " + err.Error()) } @@ -233,7 +237,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr keyBits = certutil.GetPublicKeySize(pubKey) case *ecdsa.PublicKey: keyType = certutil.ECPrivateKey - case *ed25519.PublicKey: + case ed25519.PublicKey: keyType = certutil.Ed25519PrivateKey default: return certutil.UnknownPrivateKey, 0, fmt.Errorf("unsupported public key: %#v", pubKey) @@ -241,7 +245,7 @@ func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.Pr return keyType, keyBits, nil } -func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error) { +func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*issuing.KeyEntry, error) { keyId, err := sc.resolveKeyReference(keyRef) if err != nil { return nil, err @@ -249,7 +253,7 @@ func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error return sc.fetchKeyById(keyId) } -func existingKeyGeneratorFromBytes(key *keyEntry) certutil.KeyGenerator { +func existingKeyGeneratorFromBytes(key *issuing.KeyEntry) certutil.KeyGenerator { return func(_ string, _ int, container certutil.ParsedPrivateKeyContainer, _ io.Reader) error { signer, _, pemBytes, err := getSignerFromKeyEntryBytes(key) if err != nil { diff --git a/builtin/logical/pki/ca_util_test.go b/builtin/logical/pki/ca_util_test.go new file mode 100644 index 000000000000..d4ef64e68fe1 --- /dev/null +++ b/builtin/logical/pki/ca_util_test.go @@ -0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "testing" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +func TestGetKeyTypeAndBitsFromPublicKeyForRole(t *testing.T) { + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("error generating rsa key: %s", err) + } + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + t.Fatalf("error generating ecdsa key: %s", err) + } + + publicKey, _, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatalf("error generating ed25519 key: %s", err) + } + + testCases := map[string]struct { + publicKey crypto.PublicKey + expectedKeyType certutil.PrivateKeyType + expectedKeyBits int + expectError bool + }{ + "rsa": { + publicKey: rsaKey.Public(), + expectedKeyType: certutil.RSAPrivateKey, + expectedKeyBits: 2048, + }, + "ecdsa": { + publicKey: ecdsaKey.Public(), + expectedKeyType: certutil.ECPrivateKey, + expectedKeyBits: 0, + }, + "ed25519": { + publicKey: publicKey, + expectedKeyType: certutil.Ed25519PrivateKey, + expectedKeyBits: 0, + }, + "bad key type": { + publicKey: []byte{}, + expectedKeyType: certutil.UnknownPrivateKey, + expectedKeyBits: 0, + expectError: true, + }, + } + + for name, tt := range testCases { + t.Run(name, func(t *testing.T) { + keyType, keyBits, err := getKeyTypeAndBitsFromPublicKeyForRole(tt.publicKey) + if err != nil && !tt.expectError { + t.Fatalf("unexpected error: %s", err) + } + if err == nil && tt.expectError { + t.Fatal("expected error, got nil") + } + + if keyType != tt.expectedKeyType { + t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType) + } + + if keyBits != tt.expectedKeyBits { + t.Fatalf("key bits mismatch: expected %d, got %d", tt.expectedKeyBits, keyBits) + } + }) + } +} diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index 82b451393699..fa0651fbb8ba 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -1,17 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "encoding/base64" - "encoding/hex" "encoding/pem" - "errors" "fmt" "io" "math/big" @@ -22,19 +20,18 @@ import ( "strings" "time" - "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" - "github.com/ryanuber/go-glob" "golang.org/x/crypto/cryptobyte" cbbasn1 "golang.org/x/crypto/cryptobyte/asn1" - "golang.org/x/net/idna" ) type inputBundle struct { - role *roleEntry + role *issuing.RoleEntry req *logical.Request apiData *framework.FieldData } @@ -65,10 +62,60 @@ var ( middleWildRegex = labelRegex + `\*` + labelRegex leftWildLabelRegex = regexp.MustCompile(`^(` + allWildRegex + `|` + startWildRegex + `|` + endWildRegex + `|` + middleWildRegex + `)$`) - // OIDs for X.509 certificate extensions used below. 
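The ed25519 case in the new test pins down a one-character-class fix from the ca_util.go hunk above (`*ed25519.PublicKey` becoming `ed25519.PublicKey`): crypto/ed25519 defines its public key as a named byte-slice type and always hands it back by value, so the pointer case arm could never match. A standalone sketch of the distinction, not part of the diff:

package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, _, _ := ed25519.GenerateKey(rand.Reader)
	var key crypto.PublicKey = pub // dynamic type is ed25519.PublicKey

	_, ptrOk := key.(*ed25519.PublicKey) // old case arm: never matches
	_, valOk := key.(ed25519.PublicKey)  // corrected case arm: matches
	fmt.Println(ptrOk, valOk)            // false true
}
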
- oidExtensionSubjectAltName = []int{2, 5, 29, 17} + // Cloned from https://github.com/golang/go/blob/82c713feb05da594567631972082af2fcba0ee4f/src/crypto/x509/x509.go#L327-L379 + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + oidSignatureEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112} + oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} + + signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + name string + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash + }{ + {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384}, + {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, + {x509.PureEd25519, "Ed25519", oidSignatureEd25519, x509.Ed25519, crypto.Hash(0) /* no pre-hashing */}, + } ) +func doesPublicKeyAlgoMatchSignatureAlgo(pubKey x509.PublicKeyAlgorithm, algo x509.SignatureAlgorithm) bool { + for _, detail := range signatureAlgorithmDetails { + if detail.algo == algo { + return pubKey == detail.pubKeyAlgo + } + } + + return false +} + func getFormat(data *framework.FieldData) string { format := data.Get("format").(string) switch format { @@ -84,10 +131,15 @@ func getFormat(data *framework.FieldData) string { // fetchCAInfo will fetch the CA info, will return 
an error if no ca info exists, this does NOT support // loading using the legacyBundleShimID and should be used with care. This should be called only once // within the request path otherwise you run the risk of a race condition with the issuer migration on perf-secondaries. -func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) { - var issuerId issuerID +func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, error) { + bundle, _, err := sc.fetchCAInfoWithIssuer(issuerRef, usage) + return bundle, err +} + +func (sc *storageContext) fetchCAInfoWithIssuer(issuerRef string, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, issuing.IssuerID, error) { + var issuerId issuing.IssuerID - if sc.Backend.useLegacyBundleCaStorage() { + if sc.Backend.UseLegacyBundleCaStorage() { // We have not completed the migration so attempt to load the bundle from the legacy location sc.Backend.Logger().Info("Using legacy CA bundle as PKI migration has not completed.") issuerId = legacyBundleShimID @@ -96,58 +148,22 @@ func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*cer issuerId, err = sc.resolveIssuerReference(issuerRef) if err != nil { // Usually a bad label from the user or mis-configured default. - return nil, errutil.UserError{Err: err.Error()} - } - } - - return sc.fetchCAInfoByIssuerId(issuerId, usage) -} - -// fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. -// This does support the loading using the legacyBundleShimID -func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerUsage) (*certutil.CAInfoBundle, error) { - entry, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) - if err != nil { - switch err.(type) { - case errutil.UserError: - return nil, err - case errutil.InternalError: - return nil, err - default: - return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA info: %v", err)} + return nil, issuing.IssuerRefNotFound, errutil.UserError{Err: err.Error()} } } - if err := entry.EnsureUsage(usage); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)} - } - - parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + bundle, err := sc.fetchCAInfoByIssuerId(issuerId, usage) if err != nil { - return nil, errutil.InternalError{Err: err.Error()} + return nil, issuing.IssuerRefNotFound, err } - if parsedBundle.Certificate == nil { - return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} - } - if parsedBundle.PrivateKey == nil { - return nil, errutil.UserError{Err: fmt.Sprintf("unable to fetch corresponding key for issuer %v; unable to use this issuer for signing", issuerId)} - } - - caInfo := &certutil.CAInfoBundle{ - ParsedCertBundle: *parsedBundle, - URLs: nil, - LeafNotAfterBehavior: entry.LeafNotAfterBehavior, - RevocationSigAlg: entry.RevocationSigAlg, - } - - entries, err := entry.GetAIAURLs(sc) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} - } - caInfo.URLs = entries + return bundle, issuerId, nil +} - return caInfo, nil +// fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. 
+// This does support the loading using the legacyBundleShimID +func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuing.IssuerID, usage issuing.IssuerUsage) (*certutil.CAInfoBundle, error) { + return issuing.FetchCAInfoByIssuerId(sc.Context, sc.Storage, sc.Backend, issuerId, usage) } func fetchCertBySerialBigInt(sc *storageContext, prefix string, serial *big.Int) (*logical.StorageEntry, error) { @@ -173,17 +189,27 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor case strings.HasPrefix(prefix, "revoked/"): legacyPath = "revoked/" + colonSerial path = "revoked/" + hyphenSerial - case serial == legacyCRLPath || serial == deltaCRLPath: - if err = sc.Backend.crlBuilder.rebuildIfForced(sc); err != nil { + case serial == legacyCRLPath || serial == deltaCRLPath || serial == unifiedCRLPath || serial == unifiedDeltaCRLPath: + warnings, err := sc.Backend.CrlBuilder().rebuildIfForced(sc) + if err != nil { return nil, err } - path, err = sc.resolveIssuerCRLPath(defaultRef) + if len(warnings) > 0 { + msg := "During rebuild of CRL for cert fetch, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + sc.Backend.Logger().Warn(msg) + } + + unified := serial == unifiedCRLPath || serial == unifiedDeltaCRLPath + path, err = sc.resolveIssuerCRLPath(defaultRef, unified) if err != nil { return nil, err } - if serial == deltaCRLPath { - if sc.Backend.useLegacyBundleCaStorage() { + if serial == deltaCRLPath || serial == unifiedDeltaCRLPath { + if sc.Backend.UseLegacyBundleCaStorage() { return nil, fmt.Errorf("refusing to serve delta CRL with legacy CA bundle") } @@ -224,7 +250,8 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // Update old-style paths to new-style paths certEntry.Key = path - certsCounted := sc.Backend.certsCounted.Load() + certCounter := sc.Backend.GetCertificateCounter() + certsCounted := certCounter.IsInitialized() if err = sc.Storage.Put(sc.Context, certEntry); err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)} } @@ -232,9 +259,9 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // If we fail here, we have an extra (copy) of a cert in storage, add to metrics: switch { case strings.HasPrefix(prefix, "revoked/"): - sc.Backend.incrementTotalRevokedCertificatesCount(certsCounted, path) + certCounter.IncrementTotalRevokedCertificatesCount(certsCounted, path) default: - sc.Backend.incrementTotalCertificatesCount(certsCounted, path) + certCounter.IncrementTotalCertificatesCount(certsCounted, path) } return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)} } @@ -244,339 +271,32 @@ func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.Stor // Given a URI SAN, verify that it is allowed. 
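validateURISAN is now a thin wrapper over issuing.ValidateURISAN, but the check itself (visible in the removed body below) is glob matching after optional identity templating. A reduced sketch of the core match, assuming the github.com/ryanuber/go-glob import this file previously carried:

// uriSANAllowed reports whether a requested URI SAN matches any pattern
// allowed by the role, e.g. "spiffe://example.test/*". When the role
// enables templating, each pattern is first expanded against the
// requester's entity before this comparison.
func uriSANAllowed(allowedURISANs []string, uri string) bool {
	for _, allowed := range allowedURISANs {
		if glob.Glob(allowed, uri) {
			return true
		}
	}
	return false
}
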
func validateURISAN(b *backend, data *inputBundle, uri string) bool { - valid := false - for _, allowed := range data.role.AllowedURISANs { - if data.role.AllowedURISANsTemplate { - isTemplate, _ := framework.ValidateIdentityTemplate(allowed) - if isTemplate && data.req.EntityID != "" { - tmpAllowed, err := framework.PopulateIdentityTemplate(allowed, data.req.EntityID, b.System()) - if err != nil { - continue - } - allowed = tmpAllowed - } - } - validURI := glob.Glob(allowed, uri) - if validURI { - valid = true - break - } - } - return valid + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateURISAN(b.System(), data.role, entityInfo, uri) } // Validates a given common name, ensuring it's either an email or a hostname // after validating it according to the role parameters, or disables // validation altogether. func validateCommonName(b *backend, data *inputBundle, name string) string { - isDisabled := len(data.role.CNValidations) == 1 && data.role.CNValidations[0] == "disabled" - if isDisabled { - return "" - } - - if validateNames(b, data, []string{name}) != "" { - return name - } - - // Validations weren't disabled, but the role lacked CN Validations, so - // don't restrict types. This case is hit in certain existing tests. - if len(data.role.CNValidations) == 0 { - return "" - } + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateCommonName(b.System(), data.role, entityInfo, name) +} - // If there's an at in the data, ensure email type validation is allowed. - // Otherwise, ensure hostname is allowed. - if strings.Contains(name, "@") { - var allowsEmails bool - for _, validation := range data.role.CNValidations { - if validation == "email" { - allowsEmails = true - break - } - } - if !allowsEmails { - return name - } - } else { - var allowsHostnames bool - for _, validation := range data.role.CNValidations { - if validation == "hostname" { - allowsHostnames = true - break - } - } - if !allowsHostnames { - return name - } - } +func isWildcardDomain(name string) bool { + return issuing.IsWildcardDomain(name) +} - return "" +func validateWildcardDomain(name string) (string, string, error) { + return issuing.ValidateWildcardDomain(name) } // Given a set of requested names for a certificate, verifies that all of them // match the various toggles set in the role for controlling issuance. // If one does not pass, it is returned in the string argument. func validateNames(b *backend, data *inputBundle, names []string) string { - for _, name := range names { - // Previously, reducedName was called sanitizedName but this made - // little sense under the previous interpretation of wildcards, - // leading to two bugs in this implementation. We presently call it - // "reduced" to indicate that it is still untrusted input (potentially - // different from the bare Common Name entry we're validating), it - // might have been modified such as by the removal of wildcard labels - // or the email prefix. - reducedName := name - emailDomain := reducedName - wildcardLabel := "" - isEmail := false - isWildcard := false - - // If it has an @, assume it is an email address and separate out the - // user from the hostname portion so that we can act on the hostname. - // Note that this matches behavior from the alt_names parameter. If it - // ends up being problematic for users, I guess that could be separated - // into dns_names and email_names in the future to be explicit, but I - // don't think this is likely. 
- if strings.Contains(reducedName, "@") { - splitEmail := strings.Split(reducedName, "@") - if len(splitEmail) != 2 { - return name - } - reducedName = splitEmail[1] - emailDomain = splitEmail[1] - isEmail = true - } - - // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier - // RFC 2818 which no modern client will validate against, there are two - // main types of wildcards, each with a single wildcard specifier (`*`, - // functionally different from the `*` used as a glob from the - // AllowGlobDomains parsing path) in the left-most label: - // - // 1. Entire label is a single wildcard character (most common and - // well-supported), - // 2. Part of the label contains a single wildcard character (e.g. per - /// RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). - // - // We permit issuance of both but not the older RFC 2818 style under - // the new AllowWildcardCertificates option. However, anything with a - // glob character is technically a wildcard. - if strings.Contains(reducedName, "*") { - // Regardless of later rejections below, this common name contains - // a wildcard character and is thus technically a wildcard name. - isWildcard = true - - // Additionally, if AllowWildcardCertificates is explicitly - // forbidden, it takes precedence over AllowAnyName, thus we should - // reject the name now. - // - // We expect the role to have been correctly migrated but guard for - // safety. - if data.role.AllowWildcardCertificates != nil && !*data.role.AllowWildcardCertificates { - return name - } - - if strings.Count(reducedName, "*") > 1 { - // As mentioned above, only one wildcard character is permitted - // under RFC 6125 semantics. - return name - } - - // Split the Common Name into two parts: a left-most label and the - // remaining segments (if present). - splitLabels := strings.SplitN(reducedName, ".", 2) - if len(splitLabels) != 2 { - // We've been given a single-part domain name that consists - // entirely of a wildcard. This is a little tricky to handle, - // but EnforceHostnames validates both the wildcard-containing - // label and the reduced name, but _only_ the latter if it is - // non-empty. This allows us to still validate the only label - // component matches hostname expectations still. - wildcardLabel = splitLabels[0] - reducedName = "" - } else { - // We have a (at least) two label domain name. But before we can - // update our names, we need to validate the wildcard ended up - // in the segment we expected it to. While this is (kinda) - // validated under EnforceHostnames's leftWildLabelRegex, we - // still need to validate it in the non-enforced mode. - // - // By validated assumption above, we know there's strictly one - // wildcard in this domain so we only need to check the wildcard - // label or the reduced name (as one is equivalent to the other). - // Because we later assume reducedName _lacks_ wildcard segments, - // we validate that. - wildcardLabel = splitLabels[0] - reducedName = splitLabels[1] - if strings.Contains(reducedName, "*") { - return name - } - } - } - - // Email addresses using wildcard domain names do not make sense - // in a Common Name field. - if isEmail && isWildcard { - return name - } - - // AllowAnyName is checked after this because EnforceHostnames still - // applies when allowing any name. Also, we check the reduced name to - // ensure that we are not either checking a full email address or a - // wildcard prefix. 
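The wildcard handling above reduces to the RFC 6125 rule that the new issuing.ValidateWildcardDomain wrapper exposes: exactly one `*`, confined to the left-most label. A standalone sketch of that reduction (helper name hypothetical; callers pass only names already known to contain a wildcard):

// splitWildcard returns the wildcard label and the remaining "reduced"
// name, or ok=false when the name violates RFC 6125 wildcard rules.
func splitWildcard(name string) (wildcardLabel, reducedName string, ok bool) {
	if strings.Count(name, "*") != 1 {
		return "", "", false // only a single wildcard character is permitted
	}
	labels := strings.SplitN(name, ".", 2)
	if len(labels) == 1 {
		return labels[0], "", true // single-label name such as "*"
	}
	if strings.Contains(labels[1], "*") {
		return "", "", false // the wildcard must sit in the left-most label
	}
	return labels[0], labels[1], true
}
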
- if data.role.EnforceHostnames { - if reducedName != "" { - // See note above about splitLabels having only one segment - // and setting reducedName to the empty string. - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToASCII(reducedName) - if err != nil { - return name - } - if !hostnameRegex.MatchString(converted) { - return name - } - } - - // When a wildcard is specified, we additionally need to validate - // the label with the wildcard is correctly formed. - if isWildcard && !leftWildLabelRegex.MatchString(wildcardLabel) { - return name - } - } - - // Self-explanatory, but validations from EnforceHostnames and - // AllowWildcardCertificates take precedence. - if data.role.AllowAnyName { - continue - } - - // The following blocks all work the same basic way: - // 1) If a role allows a certain class of base (localhost, token - // display name, role-configured domains), perform further tests - // - // 2) If there is a perfect match on either the sanitized name or it's an - // email address with a perfect match on the hostname portion, allow it - // - // 3) If subdomains are allowed, we check based on the sanitized name; - // note that if not a wildcard, will be equivalent to the email domain - // for email checks, and we already checked above for both a wildcard - // and email address being present in the same name - // 3a) First we check for a non-wildcard subdomain, as in . - // 3b) Then we check if it's a wildcard and the base domain is a match - // - // Variances are noted in-line - - if data.role.AllowLocalhost { - if reducedName == "localhost" || - reducedName == "localdomain" || - (isEmail && emailDomain == "localhost") || - (isEmail && emailDomain == "localdomain") { - continue - } - - if data.role.AllowSubdomains { - // It is possible, if unlikely, to have a subdomain of "localhost" - if strings.HasSuffix(reducedName, ".localhost") || - (isWildcard && reducedName == "localhost") { - continue - } - - // A subdomain of "localdomain" is also not entirely uncommon - if strings.HasSuffix(reducedName, ".localdomain") || - (isWildcard && reducedName == "localdomain") { - continue - } - } - } - - if data.role.AllowTokenDisplayName { - if name == data.req.DisplayName { - continue - } - - if data.role.AllowSubdomains { - if isEmail { - // If it's an email address, we need to parse the token - // display name in order to do a proper comparison of the - // subdomain - if strings.Contains(data.req.DisplayName, "@") { - splitDisplay := strings.Split(data.req.DisplayName, "@") - if len(splitDisplay) == 2 { - // Compare the sanitized name against the hostname - // portion of the email address in the broken - // display name - if strings.HasSuffix(reducedName, "."+splitDisplay[1]) { - continue - } - } - } - } - - if strings.HasSuffix(reducedName, "."+data.req.DisplayName) || - (isWildcard && reducedName == data.req.DisplayName) { - continue - } - } - } - - if len(data.role.AllowedDomains) > 0 { - valid := false - for _, currDomain := range data.role.AllowedDomains { - // If there is, say, a trailing comma, ignore it - if currDomain == "" { - continue - } - - if data.role.AllowedDomainsTemplate { - isTemplate, _ := framework.ValidateIdentityTemplate(currDomain) - if isTemplate && data.req.EntityID != "" { - tmpCurrDomain, err := framework.PopulateIdentityTemplate(currDomain, data.req.EntityID, b.System()) - if err != nil { - continue - } - - currDomain = tmpCurrDomain - } - } - - // First, allow an exact match of the base domain if that 
role flag - // is enabled - if data.role.AllowBareDomains && - (strings.EqualFold(name, currDomain) || - (isEmail && strings.EqualFold(emailDomain, currDomain))) { - valid = true - break - } - - if data.role.AllowSubdomains { - if strings.HasSuffix(reducedName, "."+currDomain) || - (isWildcard && strings.EqualFold(reducedName, currDomain)) { - valid = true - break - } - } - - if data.role.AllowGlobDomains && - strings.Contains(currDomain, "*") && - glob.Glob(currDomain, name) { - valid = true - break - } - } - - if valid { - continue - } - } - - return name - } - - return "" + entityInfo := issuing.NewEntityInfoFromReq(data.req) + return issuing.ValidateNames(b.System(), data.role, entityInfo, names) } // validateOtherSANs checks if the values requested are allowed. If an OID @@ -584,83 +304,20 @@ func validateNames(b *backend, data *inputBundle, names []string) string { // allowed, it will be returned as the second string. Empty strings + error // means everything is okay. func validateOtherSANs(data *inputBundle, requested map[string][]string) (string, string, error) { - if len(data.role.AllowedOtherSANs) == 1 && data.role.AllowedOtherSANs[0] == "*" { - // Anything is allowed - return "", "", nil - } - - allowed, err := parseOtherSANs(data.role.AllowedOtherSANs) - if err != nil { - return "", "", fmt.Errorf("error parsing role's allowed SANs: %w", err) - } - for oid, names := range requested { - for _, name := range names { - allowedNames, ok := allowed[oid] - if !ok { - return oid, "", nil - } - - valid := false - for _, allowedName := range allowedNames { - if glob.Glob(allowedName, name) { - valid = true - break - } - } - - if !valid { - return oid, name, nil - } - } - } - - return "", "", nil + return issuing.ValidateOtherSANs(data.role, requested) } func parseOtherSANs(others []string) (map[string][]string, error) { - result := map[string][]string{} - for _, other := range others { - splitOther := strings.SplitN(other, ";", 2) - if len(splitOther) != 2 { - return nil, fmt.Errorf("expected a semicolon in other SAN %q", other) - } - splitType := strings.SplitN(splitOther[1], ":", 2) - if len(splitType) != 2 { - return nil, fmt.Errorf("expected a colon in other SAN %q", other) - } - switch { - case strings.EqualFold(splitType[0], "utf8"): - case strings.EqualFold(splitType[0], "utf-8"): - default: - return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other) - } - result[splitOther[0]] = append(result[splitOther[0]], splitType[1]) - } + return issuing.ParseOtherSANs(others) +} - return result, nil +// Returns bool stating whether the given UserId is Valid +func validateUserId(data *inputBundle, userId string) bool { + return issuing.ValidateUserId(data.role, userId) } func validateSerialNumber(data *inputBundle, serialNumber string) string { - valid := false - if len(data.role.AllowedSerialNumbers) > 0 { - for _, currSerialNumber := range data.role.AllowedSerialNumbers { - if currSerialNumber == "" { - continue - } - - if (strings.Contains(currSerialNumber, "*") && - glob.Glob(currSerialNumber, serialNumber)) || - currSerialNumber == serialNumber { - valid = true - break - } - } - } - if !valid { - return serialNumber - } else { - return "" - } + return issuing.ValidateSerialNumber(data.role, serialNumber) } func generateCert(sc *storageContext, @@ -700,9 +357,28 @@ func generateCert(sc *storageContext, return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} } - uris, err := 
entries.toURLEntries(sc, issuerID("")) + uris, err := ToURLEntries(sc, issuing.IssuerID(""), entries) if err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + // When generating root issuers, don't err on missing issuer + // ID; there is little value in including AIA info on a root, + // as this info would point back to itself; though RFC 5280 is + // a touch vague on this point, this seems to be consensus + // from public CAs such as DigiCert Global Root G3, ISRG Root + // X1, and others. + // + // This is a UX bug if we do err here, as it requires AIA + // templating to not include issuer id (a best practice for + // child certs issued from root and intermediate mounts + // however), and setting this before root generation (or, on + // root renewal) could cause problems. + if _, nonEmptyIssuerErr := ToURLEntries(sc, issuing.IssuerID("empty-issuer-id"), entries); nonEmptyIssuerErr != nil { + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + } + + uris = &certutil.URLEntries{} + + msg := "When generating root CA, found global AIA configuration with issuer_id template unsuitable for root generation. This AIA configuration has been ignored. To include AIA on this root CA, set the global AIA configuration to not include issuer_id and instead to refer to a static issuer name." + warnings = append(warnings, msg) } data.Params.URLs = uris @@ -726,9 +402,7 @@ func generateCert(sc *storageContext, // N.B.: This is only meant to be used for generating intermediate CAs. // It skips some sanity checks. 
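The root-generation fallback above hinges on a two-pass render: if templating the AIA entries fails with an empty issuer ID but succeeds with a placeholder, the only missing input was {{issuer_id}}, which a self-referential root does not need; any other failure remains fatal. The decision, condensed (shape only, mirroring the calls in the hunk above):

uris, err := ToURLEntries(sc, issuing.IssuerID(""), entries)
if err != nil {
	// Probe with a placeholder: if this also fails, the template is broken
	// for reasons beyond {{issuer_id}} and must still be treated as fatal.
	if _, probeErr := ToURLEntries(sc, issuing.IssuerID("empty-issuer-id"), entries); probeErr != nil {
		return nil, nil, err
	}
	uris = &certutil.URLEntries{} // only {{issuer_id}} was missing: omit AIA, warn
}
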
func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, []string, error) { - b := sc.Backend - - creation, warnings, err := generateCreationBundle(b, input, nil, nil) + creation, warnings, err := generateCreationBundle(sc.Backend, input, nil, nil) if err != nil { return nil, nil, err } @@ -745,706 +419,147 @@ func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSourc return parsedBundle, warnings, nil } -func signCert(b *backend, - data *inputBundle, - caSign *certutil.CAInfoBundle, - isCA bool, - useCSRValues bool) (*certutil.ParsedCertBundle, []string, error, -) { - if data.role == nil { - return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} +func NewSignCertInputFromDataFields(data *framework.FieldData, isCA bool, useCSRValues bool) SignCertInputFromDataFields { + certBundle := NewCreationBundleInputFromFieldData(data) + return SignCertInputFromDataFields{ + CreationBundleInputFromFieldData: certBundle, + data: data, + isCA: isCA, + useCSRValues: useCSRValues, } +} + +type SignCertInputFromDataFields struct { + CreationBundleInputFromFieldData + data *framework.FieldData + isCA bool + useCSRValues bool +} - csrString := data.apiData.Get("csr").(string) +var _ issuing.SignCertInput = SignCertInputFromDataFields{} + +func (i SignCertInputFromDataFields) GetCSR() (*x509.CertificateRequest, error) { + csrString := i.data.Get("csr").(string) if csrString == "" { - return nil, nil, errutil.UserError{Err: "\"csr\" is empty"} + return nil, errutil.UserError{Err: "\"csr\" is empty"} } pemBlock, _ := pem.Decode([]byte(csrString)) if pemBlock == nil { - return nil, nil, errutil.UserError{Err: "csr contains no data"} + return nil, errutil.UserError{Err: "csr contains no data"} } csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} - } - - if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { - return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} + return nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} } - // This switch validates that the CSR key type matches the role and sets - // the value in the actualKeyType/actualKeyBits values. 
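On the key-inspection side, the long role-keyed switch in the removed block below reduces to a single type switch over the CSR's public key; the removed code additionally validates the role's expectations at each arm. A sketch of that reduction (helper name hypothetical):

func csrKeyTypeAndBits(csr *x509.CertificateRequest) (string, int, error) {
	switch pub := csr.PublicKey.(type) {
	case *rsa.PublicKey:
		return "rsa", pub.N.BitLen(), nil
	case *ecdsa.PublicKey:
		return "ec", pub.Params().BitSize, nil
	case ed25519.PublicKey:
		return "ed25519", 0, nil // bit size is not meaningful for Ed25519
	default:
		return "", 0, fmt.Errorf("unknown key type in CSR: %s", csr.PublicKeyAlgorithm)
	}
}
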
- actualKeyType := "" - actualKeyBits := 0 - - switch data.role.KeyType { - case "rsa": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.RSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - - pubKey, ok := csr.PublicKey.(*rsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "rsa" - actualKeyBits = pubKey.N.BitLen() - case "ec": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.ECDSA { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "ec" - actualKeyBits = pubKey.Params().BitSize - case "ed25519": - // Verify that the key matches the role type - if csr.PublicKeyAlgorithm != x509.Ed25519 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires keys of type %s", - data.role.KeyType)} - } - - _, ok := csr.PublicKey.(ed25519.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - - actualKeyType = "ed25519" - actualKeyBits = 0 - case "any": - // We need to compute the actual key type and key bits, to correctly - // validate minimums and SignatureBits below. - switch csr.PublicKeyAlgorithm { - case x509.RSA: - pubKey, ok := csr.PublicKey.(*rsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } - if pubKey.N.BitLen() < 2048 { - return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} - } - - actualKeyType = "rsa" - actualKeyBits = pubKey.N.BitLen() - case x509.ECDSA: - pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } + return csr, nil +} - actualKeyType = "ec" - actualKeyBits = pubKey.Params().BitSize - case x509.Ed25519: - _, ok := csr.PublicKey.(ed25519.PublicKey) - if !ok { - return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} - } +func (i SignCertInputFromDataFields) IsCA() bool { + return i.isCA +} - actualKeyType = "ed25519" - actualKeyBits = 0 - default: - return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} - } - default: - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", data.role.KeyType)} - } +func (i SignCertInputFromDataFields) UseCSRValues() bool { + return i.useCSRValues +} - // Before validating key lengths, update our KeyBits/SignatureBits based - // on the actual CSR key type. - if data.role.KeyType == "any" { - // We update the value of KeyBits and SignatureBits here (from the - // role), using the specified key type. This allows us to convert - // the default value (0) for SignatureBits and KeyBits to a - // meaningful value. - // - // We ignore the role's original KeyBits value if the KeyType is any - // as legacy (pre-1.10) roles had default values that made sense only - // for RSA keys (key_bits=2048) and the older code paths ignored the role value - // set for KeyBits when KeyType was set to any. 
This also enforces the - // docs saying when key_type=any, we only enforce our specified minimums - // for signing operations - if data.role.KeyBits, data.role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength( - actualKeyType, 0, data.role.SignatureBits); err != nil { - return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)} - } +func (i SignCertInputFromDataFields) GetPermittedDomains() []string { + return i.data.Get("permitted_dns_domains").([]string) +} - // We're using the KeyBits field as a minimum value below, and P-224 is safe - // and a previously allowed value. However, the above call defaults - // to P-256 as that's a saner default than P-224 (w.r.t. generation), so - // override it here to allow 224 as the smallest size we permit. - if actualKeyType == "ec" { - data.role.KeyBits = 224 - } +func signCert(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, isCA bool, useCSRValues bool) (*certutil.ParsedCertBundle, []string, error) { + if data.role == nil { + return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} } - // At this point, data.role.KeyBits and data.role.SignatureBits should both - // be non-zero, for RSA and ECDSA keys. Validate the actualKeyBits based on - // the role's values. If the KeyType was any, and KeyBits was set to 0, - // KeyBits should be updated to 2048 unless some other value was chosen - // explicitly. - // - // This validation needs to occur regardless of the role's key type, so - // that we always validate both RSA and ECDSA key sizes. - if actualKeyType == "rsa" { - if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires a minimum of a %d-bit key, but CSR's key is %d bits", - data.role.KeyBits, actualKeyBits)} - } - - if actualKeyBits < 2048 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits", - actualKeyBits)} - } - } else if actualKeyType == "ec" { - if actualKeyBits < data.role.KeyBits { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "role requires a minimum of a %d-bit key, but CSR's key is %d bits", - data.role.KeyBits, - actualKeyBits)} - } - } + entityInfo := issuing.NewEntityInfoFromReq(data.req) + signCertInput := NewSignCertInputFromDataFields(data.apiData, isCA, useCSRValues) - creation, warnings, err := generateCreationBundle(b, data, caSign, csr) - if err != nil { - return nil, nil, err - } - if creation.Params == nil { - return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} - } + return issuing.SignCert(b.System(), data.role, entityInfo, caSign, signCertInput) +} - creation.Params.IsCA = isCA - creation.Params.UseCSRValues = useCSRValues +func getOtherSANsFromX509Extensions(exts []pkix.Extension) ([]certutil.OtherNameUtf8, error) { + return certutil.GetOtherSANsFromX509Extensions(exts) +} - if isCA { - creation.Params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) - } +var _ issuing.CreationBundleInput = CreationBundleInputFromFieldData{} - parsedBundle, err := certutil.SignCertificate(creation) - if err != nil { - return nil, nil, err +func NewCreationBundleInputFromFieldData(data *framework.FieldData) CreationBundleInputFromFieldData { + certNotAfter := NewCertNotAfterInputFromFieldData(data) + return CreationBundleInputFromFieldData{ + CertNotAfterInputFromFieldData: certNotAfter, + data: data, } - - 
return parsedBundle, warnings, nil } -// otherNameRaw describes a name related to a certificate which is not in one -// of the standard name formats. RFC 5280, 4.2.1.6: -// -// OtherName ::= SEQUENCE { -// type-id OBJECT IDENTIFIER, -// value [0] EXPLICIT ANY DEFINED BY type-id } -type otherNameRaw struct { - TypeID asn1.ObjectIdentifier - Value asn1.RawValue +type CreationBundleInputFromFieldData struct { + CertNotAfterInputFromFieldData + data *framework.FieldData } -type otherNameUtf8 struct { - oid string - value string +func (cb CreationBundleInputFromFieldData) GetCommonName() string { + return cb.data.Get("common_name").(string) } -// ExtractUTF8String returns the UTF8 string contained in the Value, or an error -// if none is present. -func (oraw *otherNameRaw) extractUTF8String() (*otherNameUtf8, error) { - svalue := cryptobyte.String(oraw.Value.Bytes) - var outTag cbbasn1.Tag - var val cryptobyte.String - read := svalue.ReadAnyASN1(&val, &outTag) - - if read && outTag == asn1.TagUTF8String { - return &otherNameUtf8{oid: oraw.TypeID.String(), value: string(val)}, nil - } - return nil, fmt.Errorf("no UTF-8 string found in OtherName") +func (cb CreationBundleInputFromFieldData) GetSerialNumber() string { + return cb.data.Get("serial_number").(string) } -func (o otherNameUtf8) String() string { - return fmt.Sprintf("%s;%s:%s", o.oid, "UTF-8", o.value) +func (cb CreationBundleInputFromFieldData) GetExcludeCnFromSans() bool { + return cb.data.Get("exclude_cn_from_sans").(bool) } -func getOtherSANsFromX509Extensions(exts []pkix.Extension) ([]otherNameUtf8, error) { - var ret []otherNameUtf8 - for _, ext := range exts { - if !ext.Id.Equal(oidExtensionSubjectAltName) { - continue - } - err := forEachSAN(ext.Value, func(tag int, data []byte) error { - if tag != 0 { - return nil - } - - var other otherNameRaw - _, err := asn1.UnmarshalWithParams(data, &other, "tag:0") - if err != nil { - return fmt.Errorf("could not parse requested other SAN: %w", err) - } - val, err := other.extractUTF8String() - if err != nil { - return err - } - ret = append(ret, *val) - return nil - }) - if err != nil { - return nil, err - } - } +func (cb CreationBundleInputFromFieldData) GetOptionalAltNames() (interface{}, bool) { + return cb.data.GetOk("alt_names") +} - return ret, nil +func (cb CreationBundleInputFromFieldData) GetOtherSans() []string { + return cb.data.Get("other_sans").([]string) } -func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { - // RFC 5280, 4.2.1.6 +func (cb CreationBundleInputFromFieldData) GetIpSans() []string { + return cb.data.Get("ip_sans").([]string) +} - // SubjectAltName ::= GeneralNames - // - // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName - // - // GeneralName ::= CHOICE { - // otherName [0] OtherName, - // rfc822Name [1] IA5String, - // dNSName [2] IA5String, - // x400Address [3] ORAddress, - // directoryName [4] Name, - // ediPartyName [5] EDIPartyName, - // uniformResourceIdentifier [6] IA5String, - // iPAddress [7] OCTET STRING, - // registeredID [8] OBJECT IDENTIFIER } - var seq asn1.RawValue - rest, err := asn1.Unmarshal(extension, &seq) - if err != nil { - return err - } else if len(rest) != 0 { - return fmt.Errorf("x509: trailing data after X.509 extension") - } - if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { - return asn1.StructuralError{Msg: "bad SAN sequence"} - } +func (cb CreationBundleInputFromFieldData) GetURISans() []string { + return cb.data.Get("uri_sans").([]string) +} - rest = seq.Bytes - for len(rest) > 0 { 
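	// Each pass peels one GeneralName TLV off the SEQUENCE; v.Tag selects
	// the CHOICE arm (0 = otherName, 1 = rfc822Name, 2 = dNSName, ...),
	// which is what the otherName-only callback in
	// getOtherSANsFromX509Extensions above filters on.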
- var v asn1.RawValue - rest, err = asn1.Unmarshal(rest, &v) - if err != nil { - return err - } +func (cb CreationBundleInputFromFieldData) GetOptionalSkid() (interface{}, bool) { + return cb.data.GetOk("skid") +} - if err := callback(v.Tag, v.FullBytes); err != nil { - return err - } - } +func (cb CreationBundleInputFromFieldData) IsUserIdInSchema() (interface{}, bool) { + val, present := cb.data.Schema["user_ids"] + return val, present +} - return nil +func (cb CreationBundleInputFromFieldData) GetUserIds() []string { + return cb.data.Get("user_ids").([]string) } // generateCreationBundle is a shared function that reads parameters supplied // from the various endpoints and generates a CreationParameters with the // parameters that can be used to issue or sign func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { - // Read in names -- CN, DNS and email addresses - var cn string - var ridSerialNumber string - var warnings []string - dnsNames := []string{} - emailAddresses := []string{} - { - if csr != nil && data.role.UseCSRCommonName { - cn = csr.Subject.CommonName - } - if cn == "" { - cn = data.apiData.Get("common_name").(string) - if cn == "" && data.role.RequireCN { - return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} - } - } - - ridSerialNumber = data.apiData.Get("serial_number").(string) - - // only take serial number from CSR if one was not supplied via API - if ridSerialNumber == "" && csr != nil { - ridSerialNumber = csr.Subject.SerialNumber - } - - if csr != nil && data.role.UseCSRSANs { - dnsNames = csr.DNSNames - emailAddresses = csr.EmailAddresses - } - - if cn != "" && !data.apiData.Get("exclude_cn_from_sans").(bool) { - if strings.Contains(cn, "@") { - // Note: emails are not disallowed if the role's email protection - // flag is false, because they may well be included for - // informational purposes; it is up to the verifying party to - // ensure that email addresses in a subject alternate name can be - // used for the purpose for which they are presented - emailAddresses = append(emailAddresses, cn) - } else { - // Only add to dnsNames if it's actually a DNS name but convert - // idn first - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToASCII(cn) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - if hostnameRegex.MatchString(converted) { - dnsNames = append(dnsNames, converted) - } - } - } - - if csr == nil || !data.role.UseCSRSANs { - cnAltRaw, ok := data.apiData.GetOk("alt_names") - if ok { - cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",") - for _, v := range cnAlt { - if strings.Contains(v, "@") { - emailAddresses = append(emailAddresses, v) - } else { - // Only add to dnsNames if it's actually a DNS name but - // convert idn first - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToASCII(v) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - if hostnameRegex.MatchString(converted) { - dnsNames = append(dnsNames, converted) - } - } - } - } - } - - // Check the CN. This ensures that the CN is checked even if it's - // excluded from SANs. 
- if cn != "" { - badName := validateCommonName(b, data, cn) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "common name %s not allowed by this role", badName)} - } - } + entityInfo := issuing.NewEntityInfoFromReq(data.req) + creationBundleInput := NewCreationBundleInputFromFieldData(data.apiData) - if ridSerialNumber != "" { - badName := validateSerialNumber(data, ridSerialNumber) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "serial_number %s not allowed by this role", badName)} - } - } - - // Check for bad email and/or DNS names - badName := validateNames(b, data, dnsNames) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "subject alternate name %s not allowed by this role", badName)} - } - - badName = validateNames(b, data, emailAddresses) - if len(badName) != 0 { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "email address %s not allowed by this role", badName)} - } - } - - // otherSANsInput has the same format as the other_sans HTTP param in the - // Vault PKI API: it is a list of strings of the form ;: - // where must be UTF8/UTF-8. - var otherSANsInput []string - // otherSANs is the output of parseOtherSANs(otherSANsInput): its keys are - // the value, its values are of the form [, ] - var otherSANs map[string][]string - if sans := data.apiData.Get("other_sans").([]string); len(sans) > 0 { - otherSANsInput = sans - } - if data.role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 { - others, err := getOtherSANsFromX509Extensions(csr.Extensions) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} - } - for _, other := range others { - otherSANsInput = append(otherSANsInput, other.String()) - } - } - if len(otherSANsInput) > 0 { - requested, err := parseOtherSANs(otherSANsInput) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} - } - badOID, badName, err := validateOtherSANs(data, requested) - switch { - case err != nil: - return nil, nil, errutil.UserError{Err: err.Error()} - case len(badName) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "other SAN %s not allowed for OID %s by this role", badName, badOID)} - case len(badOID) > 0: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "other SAN OID %s not allowed by this role", badOID)} - default: - otherSANs = requested - } - } - - // Get and verify any IP SANs - ipAddresses := []net.IP{} - { - if csr != nil && data.role.UseCSRSANs { - if len(csr.IPAddresses) > 0 { - if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR"} - } - ipAddresses = csr.IPAddresses - } - } else { - ipAlt := data.apiData.Get("ip_sans").([]string) - if len(ipAlt) > 0 { - if !data.role.AllowIPSANs { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)} - } - for _, v := range ipAlt { - parsedIP := net.ParseIP(v) - if parsedIP == nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "the value %q is not a valid IP address", v)} - } - ipAddresses = append(ipAddresses, parsedIP) - } - } - } - } - - URIs := []*url.URL{} - { - if csr != nil && data.role.UseCSRSANs { - if len(csr.URIs) > 0 { - if len(data.role.AllowedURISANs) == 0 { - return nil, nil, errutil.UserError{ - 
Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", - } - } - - // validate uri sans - for _, uri := range csr.URIs { - valid := validateURISAN(b, data, uri.String()) - if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", - } - } - - URIs = append(URIs, uri) - } - } - } else { - uriAlt := data.apiData.Get("uri_sans").([]string) - if len(uriAlt) > 0 { - if len(data.role.AllowedURISANs) == 0 { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", - } - } - - for _, uri := range uriAlt { - valid := validateURISAN(b, data, uri) - if !valid { - return nil, nil, errutil.UserError{ - Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", - } - } - - parsedURI, err := url.Parse(uri) - if parsedURI == nil || err != nil { - return nil, nil, errutil.UserError{ - Err: fmt.Sprintf( - "the provided URI Subject Alternative Name %q is not a valid URI", uri), - } - } - - URIs = append(URIs, parsedURI) - } - } - } - } - - // Most of these could also be RemoveDuplicateStable, or even - // leave duplicates in, but OU is the one most likely to be duplicated. - subject := pkix.Name{ - CommonName: cn, - SerialNumber: ridSerialNumber, - Country: strutil.RemoveDuplicatesStable(data.role.Country, false), - Organization: strutil.RemoveDuplicatesStable(data.role.Organization, false), - OrganizationalUnit: strutil.RemoveDuplicatesStable(data.role.OU, false), - Locality: strutil.RemoveDuplicatesStable(data.role.Locality, false), - Province: strutil.RemoveDuplicatesStable(data.role.Province, false), - StreetAddress: strutil.RemoveDuplicatesStable(data.role.StreetAddress, false), - PostalCode: strutil.RemoveDuplicatesStable(data.role.PostalCode, false), - } - - // Get the TTL and verify it against the max allowed - var ttl time.Duration - var maxTTL time.Duration - var notAfter time.Time - var err error - { - ttl = time.Duration(data.apiData.Get("ttl").(int)) * time.Second - notAfterAlt := data.role.NotAfter - if notAfterAlt == "" { - notAfterAltRaw, ok := data.apiData.GetOk("not_after") - if ok { - notAfterAlt = notAfterAltRaw.(string) - } - - } - if ttl > 0 && notAfterAlt != "" { - return nil, nil, errutil.UserError{ - Err: "Either ttl or not_after should be provided. Both should not be provided in the same request.", - } - } - - if ttl == 0 && data.role.TTL > 0 { - ttl = data.role.TTL - } - - if data.role.MaxTTL > 0 { - maxTTL = data.role.MaxTTL - } - - if ttl == 0 { - ttl = b.System().DefaultLeaseTTL() - } - if maxTTL == 0 { - maxTTL = b.System().MaxLeaseTTL() - } - if ttl > maxTTL { - warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) - ttl = maxTTL - } - - if notAfterAlt != "" { - notAfter, err = time.Parse(time.RFC3339, notAfterAlt) - if err != nil { - return nil, nil, errutil.UserError{Err: err.Error()} - } - } else { - notAfter = time.Now().Add(ttl) - } - if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { - // If it's not self-signed, verify that the issued certificate - // won't be valid past the lifetime of the CA certificate, and - // act accordingly. This is dependent based on the issuer's - // LeafNotAfterBehavior argument. - switch caSign.LeafNotAfterBehavior { - case certutil.PermitNotAfterBehavior: - // Explicitly do nothing. 
- case certutil.TruncateNotAfterBehavior: - notAfter = caSign.Certificate.NotAfter - case certutil.ErrNotAfterBehavior: - fallthrough - default: - return nil, nil, errutil.UserError{Err: fmt.Sprintf( - "cannot satisfy request, as TTL would result in notAfter %s that is beyond the expiration of the CA certificate at %s", notAfter.Format(time.RFC3339Nano), caSign.Certificate.NotAfter.Format(time.RFC3339Nano))} - } - } - } - - // Parse SKID from the request for cross-signing. - var skid []byte - { - if rawSKIDValue, ok := data.apiData.GetOk("skid"); ok { - // Handle removing common separators to make copy/paste from tool - // output easier. Chromium uses space, OpenSSL uses colons, and at - // one point, Vault had preferred dash as a separator for hex - // strings. - var err error - skidValue := rawSKIDValue.(string) - for _, separator := range []string{":", "-", " "} { - skidValue = strings.ReplaceAll(skidValue, separator, "") - } - - skid, err = hex.DecodeString(skidValue) - if err != nil { - return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} - } - } - } - - creation := &certutil.CreationBundle{ - Params: &certutil.CreationParameters{ - Subject: subject, - DNSNames: strutil.RemoveDuplicates(dnsNames, false), - EmailAddresses: strutil.RemoveDuplicates(emailAddresses, false), - IPAddresses: ipAddresses, - URIs: URIs, - OtherSANs: otherSANs, - KeyType: data.role.KeyType, - KeyBits: data.role.KeyBits, - SignatureBits: data.role.SignatureBits, - UsePSS: data.role.UsePSS, - NotAfter: notAfter, - KeyUsage: x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)), - ExtKeyUsage: parseExtKeyUsages(data.role), - ExtKeyUsageOIDs: data.role.ExtKeyUsageOIDs, - PolicyIdentifiers: data.role.PolicyIdentifiers, - BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA, - NotBeforeDuration: data.role.NotBeforeDuration, - ForceAppendCaChain: caSign != nil, - SKID: skid, - }, - SigningBundle: caSign, - CSR: csr, - } - - // Don't deal with URLs or max path length if it's self-signed, as these - // normally come from the signing bundle - if caSign == nil { - return creation, warnings, nil - } - - // This will have been read in from the getGlobalAIAURLs function - creation.Params.URLs = caSign.URLs + return issuing.GenerateCreationBundle(b.System(), data.role, entityInfo, creationBundleInput, caSign, csr) +} - // If the max path length in the role is not nil, it was specified at - // generation time with the max_path_length parameter; otherwise derive it - // from the signing certificate - if data.role.MaxPathLength != nil { - creation.Params.MaxPathLength = *data.role.MaxPathLength - } else { - switch { - case caSign.Certificate.MaxPathLen < 0: - creation.Params.MaxPathLength = -1 - case caSign.Certificate.MaxPathLen == 0 && - caSign.Certificate.MaxPathLenZero: - // The signing function will ensure that we do not issue a CA cert - creation.Params.MaxPathLength = 0 - default: - // If this takes it to zero, we handle this case later if - // necessary - creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1 - } - } +// getCertificateNotAfter compute a certificate's NotAfter date based on the mount ttl, role, signing bundle and input +// api data being sent. Returns a NotAfter time, a set of warnings or an error. 
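The removed TTL block above and the new issuing.GetCertificateNotAfter implement the same precedence ladder; condensed into a standalone sketch (helper and parameter names hypothetical; time and errors imports assumed):

func resolveNotAfter(reqTTL, roleTTL, roleMaxTTL, mountDefault, mountMax time.Duration, notAfterRaw string) (time.Time, error) {
	if reqTTL > 0 && notAfterRaw != "" {
		return time.Time{}, errors.New("either ttl or not_after should be provided, not both")
	}
	if notAfterRaw != "" {
		// An explicit not_after wins outright and must be RFC 3339.
		return time.Parse(time.RFC3339, notAfterRaw)
	}
	ttl := reqTTL
	if ttl == 0 {
		ttl = roleTTL // request TTL, else role TTL, else mount default
	}
	if ttl == 0 {
		ttl = mountDefault
	}
	maxTTL := roleMaxTTL
	if maxTTL == 0 {
		maxTTL = mountMax
	}
	if ttl > maxTTL {
		ttl = maxTTL // the real code warns rather than erroring here
	}
	return time.Now().Add(ttl), nil
}

Truncation against the issuer's own notAfter is a separate, later step: applyIssuerLeafNotAfterBehavior below either permits, truncates, or rejects per the issuer's LeafNotAfterBehavior.
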
+func getCertificateNotAfter(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle) (time.Time, []string, error) { + input := NewCertNotAfterInputFromFieldData(data.apiData) + return issuing.GetCertificateNotAfter(b.System(), data.role, input, caSign) +} - return creation, warnings, nil +// applyIssuerLeafNotAfterBehavior resets a certificate's notAfter time or errors out based on the +// issuer's notAfter date along with the LeafNotAfterBehavior configuration +func applyIssuerLeafNotAfterBehavior(caSign *certutil.CAInfoBundle, notAfter time.Time) (time.Time, error) { + return issuing.ApplyIssuerLeafNotAfterBehavior(caSign, notAfter) } func convertRespToPKCS8(resp *logical.Response) error { @@ -1581,7 +696,7 @@ func handleOtherSANs(in *x509.Certificate, sans map[string][]string) error { // Marshal and add to ExtraExtensions ext := pkix.Extension{ // This is the defined OID for subjectAltName - Id: asn1.ObjectIdentifier(oidExtensionSubjectAltName), + Id: certutil.OidExtensionSubjectAltName, } var err error ext.Value, err = asn1.Marshal(rawValues) @@ -1642,13 +757,23 @@ func stringToOid(in string) (asn1.ObjectIdentifier, error) { } func parseCertificateFromBytes(certBytes []byte) (*x509.Certificate, error) { - block, extra := pem.Decode(certBytes) - if block == nil { - return nil, errors.New("unable to parse certificate: invalid PEM") - } - if len(strings.TrimSpace(string(extra))) > 0 { - return nil, errors.New("unable to parse certificate: trailing PEM data") - } + return parsing.ParseCertificateFromBytes(certBytes) +} + +func NewCertNotAfterInputFromFieldData(data *framework.FieldData) CertNotAfterInputFromFieldData { + return CertNotAfterInputFromFieldData{data: data} +} + +var _ issuing.CertNotAfterInput = CertNotAfterInputFromFieldData{} + +type CertNotAfterInputFromFieldData struct { + data *framework.FieldData +} + +func (i CertNotAfterInputFromFieldData) GetTTL() int { + return i.data.Get("ttl").(int) +} - return x509.ParseCertificate(block.Bytes) +func (i CertNotAfterInputFromFieldData) GetOptionalNotAfter() (interface{}, bool) { + return i.data.GetOk("not_after") } diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index de9c70ee0597..c3fa0bde2927 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -1,12 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" + "crypto/x509" + "crypto/x509/pkix" "fmt" + "net" + "net/url" "reflect" "strings" "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/stretchr/testify/require" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -94,7 +109,7 @@ func TestPki_FetchCertBySerial(t *testing.T) { // order-preserving way. 
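[Editor's note] The `CertNotAfterInputFromFieldData` adapter above is what lets `issuing.GetCertificateNotAfter` stay decoupled from `framework.FieldData`: the issuing package consumes only the two-method interface. Here is a minimal sketch of the same pattern with a hypothetical map-backed implementation (the `mapNotAfterInput` type below is illustrative only, not part of this change):

```go
package main

import "fmt"

// CertNotAfterInput mirrors the two methods the issuing package consumes,
// as satisfied by CertNotAfterInputFromFieldData in the diff above.
type CertNotAfterInput interface {
	GetTTL() int
	GetOptionalNotAfter() (interface{}, bool)
}

// mapNotAfterInput is a hypothetical adapter over a plain map, for callers
// (or tests) that have no *framework.FieldData in hand.
type mapNotAfterInput struct {
	raw map[string]interface{}
}

func (m mapNotAfterInput) GetTTL() int {
	ttl, _ := m.raw["ttl"].(int)
	return ttl
}

func (m mapNotAfterInput) GetOptionalNotAfter() (interface{}, bool) {
	v, ok := m.raw["not_after"]
	return v, ok
}

func main() {
	var in CertNotAfterInput = mapNotAfterInput{raw: map[string]interface{}{"ttl": 3600}}
	fmt.Println(in.GetTTL()) // 3600
	_, ok := in.GetOptionalNotAfter()
	fmt.Println(ok) // false: not_after was not supplied
}
```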
func TestPki_MultipleOUs(t *testing.T) { t.Parallel() - var b backend + b, _ := CreateBackendWithStorage(t) fields := addCACommonFields(map[string]*framework.FieldSchema{}) apiData := &framework.FieldData{ @@ -106,12 +121,12 @@ func TestPki_MultipleOUs(t *testing.T) { } input := &inputBundle{ apiData: apiData, - role: &roleEntry{ + role: &issuing.RoleEntry{ MaxTTL: 3600, OU: []string{"Z", "E", "V"}, }, } - cb, _, err := generateCreationBundle(&b, input, nil, nil) + cb, _, err := generateCreationBundle(b, input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } @@ -126,7 +141,7 @@ func TestPki_MultipleOUs(t *testing.T) { func TestPki_PermitFQDNs(t *testing.T) { t.Parallel() - var b backend + b, _ := CreateBackendWithStorage(t) fields := addCACommonFields(map[string]*framework.FieldSchema{}) cases := map[string]struct { @@ -143,7 +158,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowAnyName: true, MaxTTL: 3600, EnforceHostnames: true, @@ -162,7 +177,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"example.net", "EXAMPLE.COM"}, AllowBareDomains: true, MaxTTL: 3600, @@ -171,6 +186,24 @@ func TestPki_PermitFQDNs(t *testing.T) { expectedDnsNames: []string{"Example.Net", "eXaMPLe.COM"}, expectedEmails: []string{}, }, + "case insensitivity subdomain validation": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "SUB.EXAMPLE.COM", + "ttl": 3600, + }, + }, + role: &issuing.RoleEntry{ + AllowedDomains: []string{"example.com", "*.Example.com"}, + AllowGlobDomains: true, + MaxTTL: 3600, + }, + }, + expectedDnsNames: []string{"SUB.EXAMPLE.COM"}, + expectedEmails: []string{}, + }, "case email as AllowedDomain with bare domains": { input: &inputBundle{ apiData: &framework.FieldData{ @@ -180,7 +213,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"test@testemail.com"}, AllowBareDomains: true, MaxTTL: 3600, @@ -198,7 +231,7 @@ func TestPki_PermitFQDNs(t *testing.T) { "ttl": 3600, }, }, - role: &roleEntry{ + role: &issuing.RoleEntry{ AllowedDomains: []string{"testemail.com"}, AllowBareDomains: true, MaxTTL: 3600, @@ -213,7 +246,7 @@ func TestPki_PermitFQDNs(t *testing.T) { name := name testCase := testCase t.Run(name, func(t *testing.T) { - cb, _, err := generateCreationBundle(&b, testCase.input, nil, nil) + cb, _, err := generateCreationBundle(b, testCase.input, nil, nil) if err != nil { t.Fatalf("Error: %v", err) } @@ -232,3 +265,688 @@ func TestPki_PermitFQDNs(t *testing.T) { }) } } + +type parseCertificateTestCase struct { + name string + data map[string]interface{} + roleData map[string]interface{} // if a role is to be created + ttl time.Duration + wantParams certutil.CreationParameters + wantFields map[string]interface{} + wantErr bool +} + +func TestParseCertificate(t *testing.T) { + t.Parallel() + + parseURL := func(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + t.Fatal(err) + } + return u + } + + tests := []*parseCertificateTestCase{ + { + name: "simple CA", + data: map[string]interface{}{ + "common_name": "the common name", + "key_type": "ec", + "key_bits": 384, + "ttl": "1h", + "not_before_duration": "30s", + "street_address": "", + }, + ttl: 1 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + 
}, + DNSNames: nil, + EmailAddresses: nil, + IPAddresses: nil, + URIs: nil, + OtherSANs: make(map[string][]string), + IsCA: true, + KeyType: "ec", + KeyBits: 384, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: -1, + NotBeforeDuration: 30, + SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "alt_names": "", + "ip_sans": "", + "uri_sans": "", + "other_sans": "", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "1h0m30s", + "max_path_length": -1, + "permitted_dns_domains": "", + "use_pss": false, + "key_type": "ec", + "key_bits": 384, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + { + // Note that this test's data is used to create the internal CA used by test "full non CA cert" + name: "full CA", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#sign-certificate + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "ttl": "2h", + "max_path_length": 2, + "permitted_dns_domains": ".example.com,.www.example.com", + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "US, CA", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + "postal_code": "postal_code1, postal_code2", + "not_before_duration": "45s", + "key_type": "rsa", + "use_pss": true, + "key_bits": 2048, + "signature_bits": 384, + // TODO(kitography): Specify key usage + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: true, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: true, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: []string{".example.com", ".www.example.com"}, + URLs: nil, + MaxPathLength: 2, + NotBeforeDuration: 45 * time.Second, 
+ SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 2, + "permitted_dns_domains": ".example.com,.www.example.com", + "use_pss": true, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + { + // Note that we use the data of test "full CA" to create the internal CA needed for this test + name: "full non CA cert", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-certificate-and-key + "common_name": "the common name non ca", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "ttl": "2h", + // format + // private_key_format + "exclude_cn_from_sans": true, + // not_after + // remove_roots_from_chain + "user_ids": "humanoid,robot", + }, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + "allow_ip_sans": true, + "allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:*@example.com", + "allowed_uri_sans": "https://example.com,https://www.example.com", + "allowed_user_ids": "*", + "not_before_duration": "45s", + "signature_bits": 384, + "key_usage": "KeyAgreement", + "ext_key_usage": "ServerAuth", + "ext_key_usage_oids": "1.3.6.1.5.5.7.3.67,1.3.6.1.5.5.7.3.68", + "client_flag": false, + "server_flag": false, + "policy_identifiers": "1.2.3.4.5.6.7.8.9.0", + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name non ca", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: false, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: x509.KeyUsageKeyAgreement, + ExtKeyUsage: 0, // Please Ignore + ExtKeyUsageOIDs: []string{"1.3.6.1.5.5.7.3.1", "1.3.6.1.5.5.7.3.67", "1.3.6.1.5.5.7.3.68"}, + PolicyIdentifiers: []string{"1.2.3.4.5.6.7.8.9.0"}, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 45, + SKID: []byte("We'll assert that it is not nil as an special case"), + }, + wantFields: map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", 
+ "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 0, + "permitted_dns_domains": "", + "use_pss": false, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", + }, + wantErr: false, + }, + } + for _, tt := range tests { + + b, s := CreateBackendWithStorage(t) + + var cert *x509.Certificate + issueTime := time.Now() + if tt.wantParams.IsCA { + resp, err := CBWrite(b, s, "root/generate/internal", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + certData := resp.Data["certificate"].(string) + cert, err = parsing.ParseCertificateFromString(certData) + require.NoError(t, err) + require.NotNil(t, cert) + } else { + // use the "simple CA" data to create the internal CA + caData := tests[1].data + caData["ttl"] = "3h" + resp, err := CBWrite(b, s, "root/generate/internal", caData) + require.NoError(t, err) + require.NotNil(t, resp) + + // create a role + resp, err = CBWrite(b, s, "roles/test", tt.roleData) + require.NoError(t, err) + require.NotNil(t, resp) + + // create the cert + resp, err = CBWrite(b, s, "issue/test", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + certData := resp.Data["certificate"].(string) + cert, err = parsing.ParseCertificateFromString(certData) + require.NoError(t, err) + require.NotNil(t, cert) + } + + t.Run(tt.name+" parameters", func(t *testing.T) { + testParseCertificateToCreationParameters(t, issueTime, tt, cert) + }) + t.Run(tt.name+" fields", func(t *testing.T) { + testParseCertificateToFields(t, issueTime, tt, cert) + }) + } +} + +func testParseCertificateToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { + params, err := certutil.ParseCertificateToCreationParameters(*cert) + + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + ignoreBasicConstraintsValidForNonCA := tt.wantParams.IsCA + + var diff []string + for _, d := range deep.Equal(tt.wantParams, params) { + switch { + case strings.HasPrefix(d, "SKID"): + continue + case strings.HasPrefix(d, "BasicConstraintsValidForNonCA") && ignoreBasicConstraintsValidForNonCA: + continue + case strings.HasPrefix(d, "NotBeforeDuration"): + continue + case strings.HasPrefix(d, "NotAfter"): + continue + } + diff = append(diff, d) + } + if diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.Join(diff, "\n")) + } + + require.NotNil(t, params.SKID) + require.GreaterOrEqual(t, params.NotBeforeDuration, tt.wantParams.NotBeforeDuration, + "NotBeforeDuration want: %s got: %s", tt.wantParams.NotBeforeDuration, params.NotBeforeDuration) + + require.GreaterOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(-1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) + require.LessOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) + } +} + +func testParseCertificateToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { + fields, err := certutil.ParseCertificateToFields(*cert) + if tt.wantErr { + require.Error(t, err) + } else { + 
require.NoError(t, err) + + require.NotNil(t, fields["skid"]) + delete(fields, "skid") + delete(tt.wantFields, "skid") + + { + // Sometimes TTL comes back as 1s off, so we'll allow that + expectedTTL, err := parseutil.ParseDurationSecond(tt.wantFields["ttl"].(string)) + require.NoError(t, err) + actualTTL, err := parseutil.ParseDurationSecond(fields["ttl"].(string)) + require.NoError(t, err) + + diff := expectedTTL - actualTTL + require.LessOrEqual(t, actualTTL, expectedTTL, // NotAfter is generated before NotBefore so the time.Now of notBefore may be later, shrinking our calculated TTL during very slow tests + "ttl should be, if off, smaller than expected want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + require.LessOrEqual(t, diff, 30*time.Second, // Test can be slow, allow more off in the other direction + "ttl must be at most 30s off, want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + delete(fields, "ttl") + delete(tt.wantFields, "ttl") + } + + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) + } + } +} + +func TestParseCsr(t *testing.T) { + t.Parallel() + + parseURL := func(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + t.Fatal(err) + } + return u + } + + tests := []*parseCertificateTestCase{ + { + name: "simple CSR", + data: map[string]interface{}{ + "common_name": "the common name", + "key_type": "ec", + "key_bits": 384, + "ttl": "1h", + "not_before_duration": "30s", + "street_address": "", + }, + ttl: 1 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + }, + DNSNames: nil, + EmailAddresses: nil, + IPAddresses: nil, + URIs: nil, + OtherSANs: make(map[string][]string), + IsCA: false, + KeyType: "ec", + KeyBits: 384, + NotAfter: time.Time{}, + KeyUsage: 0, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "alt_names": "", + "ip_sans": "", + "uri_sans": "", + "other_sans": "", + "exclude_cn_from_sans": true, + "key_type": "ec", + "key_bits": 384, + "signature_bits": 384, + "use_pss": false, + "serial_number": "", + "add_basic_constraints": false, + }, + wantErr: false, + }, + { + name: "full CSR with basic constraints", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-intermediate-csr + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + // format + // private_key_format + "key_type": "rsa", + "key_bits": 2048, + "key_name": "the-key-name", + // key_ref + "signature_bits": 384, + // exclude_cn_from_sans + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "US, CA", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + 
"postal_code": "postal_code1, postal_code2", + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": true, + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ + CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + SerialNumber: "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: true, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: 0, // TODO(kitography): Verify with Kit + ExtKeyUsage: 0, // TODO(kitography): Verify with Kit + ExtKeyUsageOIDs: nil, // TODO(kitography): Verify with Kit + PolicyIdentifiers: nil, // TODO(kitography): Verify with Kit + BasicConstraintsValidForNonCA: true, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: -1, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "exclude_cn_from_sans": true, + "key_type": "rsa", + "key_bits": 2048, + "signature_bits": 384, + "use_pss": false, + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": true, + }, + wantErr: false, + }, + { + name: "full CSR without basic constraints", + data: map[string]interface{}{ + // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-intermediate-csr + "common_name": "the common name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + // format + // private_key_format + "key_type": "rsa", + "key_bits": 2048, + "key_name": "the-key-name", + // key_ref + "signature_bits": 384, + // exclude_cn_from_sans + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "CA,US", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + "postal_code": "postal_code1, postal_code2", + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": false, + }, + ttl: 2 * time.Hour, + wantParams: certutil.CreationParameters{ + Subject: pkix.Name{ 
+ CommonName: "the common name", + OrganizationalUnit: []string{"unit1", "unit2"}, + Organization: []string{"org1", "org2"}, + Country: []string{"CA", "US"}, + Locality: []string{"locality1", "locality2"}, + Province: []string{"province1", "province2"}, + StreetAddress: []string{"street_address1", "street_address2"}, + PostalCode: []string{"postal_code1", "postal_code2"}, + SerialNumber: "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + }, + DNSNames: []string{"example.com", "www.example.com"}, + EmailAddresses: []string{"admin@example.com", "user@example.com"}, + IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, + OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, + IsCA: false, + KeyType: "rsa", + KeyBits: 2048, + NotAfter: time.Time{}, + KeyUsage: 0, + ExtKeyUsage: 0, + ExtKeyUsageOIDs: nil, + PolicyIdentifiers: nil, + BasicConstraintsValidForNonCA: false, + SignatureBits: 384, + UsePSS: false, + ForceAppendCaChain: false, + UseCSRValues: false, + PermittedDNSDomains: nil, + URLs: nil, + MaxPathLength: 0, + NotBeforeDuration: 0, + SKID: nil, + }, + wantFields: map[string]interface{}{ + "common_name": "the common name", + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "exclude_cn_from_sans": true, + "key_type": "rsa", + "key_bits": 2048, + "signature_bits": 384, + "use_pss": false, + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "add_basic_constraints": false, + }, + wantErr: false, + }, + } + for _, tt := range tests { + + b, s := CreateBackendWithStorage(t) + + issueTime := time.Now() + resp, err := CBWrite(b, s, "intermediate/generate/internal", tt.data) + require.NoError(t, err) + require.NotNil(t, resp) + + csrData := resp.Data["csr"].(string) + csr, err := parsing.ParseCertificateRequestFromString(csrData) + require.NoError(t, err) + require.NotNil(t, csr) + + t.Run(tt.name+" parameters", func(t *testing.T) { + testParseCsrToCreationParameters(t, issueTime, tt, csr) + }) + t.Run(tt.name+" fields", func(t *testing.T) { + testParseCsrToFields(t, issueTime, tt, csr) + }) + } +} + +func testParseCsrToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { + params, err := certutil.ParseCsrToCreationParameters(*csr) + + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + if diff := deep.Equal(tt.wantParams, params); diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) + } + } +} + +func testParseCsrToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { + fields, err := certutil.ParseCsrToFields(*csr) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), 
"map", "\nmap")) + } + } +} diff --git a/builtin/logical/pki/chain_test.go b/builtin/logical/pki/chain_test.go index 8af047cffcfb..0dba2cd282ee 100644 --- a/builtin/logical/pki/chain_test.go +++ b/builtin/logical/pki/chain_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -13,6 +16,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/logical" ) @@ -309,6 +313,8 @@ func (c CBValidateChain) PrettyChain(t testing.TB, chain []string, knownCerts ma } func ToCertificate(t testing.TB, cert string) *x509.Certificate { + t.Helper() + block, _ := pem.Decode([]byte(cert)) if block == nil { t.Fatalf("Unable to parse certificate: nil PEM block\n[%v]\n", cert) @@ -323,6 +329,8 @@ func ToCertificate(t testing.TB, cert string) *x509.Certificate { } func ToCRL(t testing.TB, crl string, issuer *x509.Certificate) *pkix.CertificateList { + t.Helper() + block, _ := pem.Decode([]byte(crl)) if block == nil { t.Fatalf("Unable to parse CRL: nil PEM block\n[%v]\n", crl) @@ -568,7 +576,7 @@ func (c CBIssueLeaf) RevokeLeaf(t testing.TB, b *backend, s logical.Storage, kno if resp == nil { t.Fatalf("failed to read default issuer config: nil response") } - defaultID := resp.Data["default"].(issuerID).String() + defaultID := resp.Data["default"].(issuing.IssuerID).String() c.Issuer = defaultID issuer = nil } @@ -630,7 +638,7 @@ func (c CBIssueLeaf) Run(t testing.TB, b *backend, s logical.Storage, knownKeys if resp == nil { t.Fatalf("failed to read default issuer config: nil response") } - defaultID := resp.Data["default"].(issuerID).String() + defaultID := resp.Data["default"].(issuing.IssuerID).String() resp, err = CBRead(b, s, "issuer/"+c.Issuer) if err != nil { @@ -639,7 +647,7 @@ func (c CBIssueLeaf) Run(t testing.TB, b *backend, s logical.Storage, knownKeys if resp == nil { t.Fatalf("failed to read issuer %v: nil response", c.Issuer) } - ourID := resp.Data["issuer_id"].(issuerID).String() + ourID := resp.Data["issuer_id"].(issuing.IssuerID).String() areDefault := ourID == defaultID for _, usage := range []string{"read-only", "crl-signing", "issuing-certificates", "issuing-certificates,crl-signing"} { diff --git a/builtin/logical/pki/chain_util.go b/builtin/logical/pki/chain_util.go index 6b7a6a5c7beb..594319f7adae 100644 --- a/builtin/logical/pki/chain_util.go +++ b/builtin/logical/pki/chain_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -6,10 +9,11 @@ import ( "fmt" "sort" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/errutil" ) -func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) string { +func prettyIssuer(issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, issuer issuing.IssuerID) string { if entry, ok := issuerIdEntryMap[issuer]; ok && len(entry.Name) > 0 { return "[id:" + string(issuer) + "/name:" + entry.Name + "]" } @@ -17,7 +21,7 @@ func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) s return "[" + string(issuer) + "]" } -func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* optional */) error { +func (sc *storageContext) rebuildIssuersChains(referenceCert *issuing.IssuerEntry /* optional */) error { // This function rebuilds the CAChain field of all known issuers. This // function should usually be invoked when a new issuer is added to the // pool of issuers. 
@@ -113,22 +117,22 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // fourth maps that certificate back to the other issuers with that // subject (note the keyword _other_: we'll exclude self-loops here) -- // either via a parent or child relationship. - issuerIdEntryMap := make(map[issuerID]*issuerEntry, len(issuers)) - issuerIdCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) - issuerIdParentsMap := make(map[issuerID][]issuerID, len(issuers)) - issuerIdChildrenMap := make(map[issuerID][]issuerID, len(issuers)) + issuerIdEntryMap := make(map[issuing.IssuerID]*issuing.IssuerEntry, len(issuers)) + issuerIdCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) + issuerIdParentsMap := make(map[issuing.IssuerID][]issuing.IssuerID, len(issuers)) + issuerIdChildrenMap := make(map[issuing.IssuerID][]issuing.IssuerID, len(issuers)) // For every known issuer, we map that subject back to the id of issuers - // containing that subject. This lets us build our issuerID -> parents + // containing that subject. This lets us build our IssuerID -> parents // mapping efficiently. Worst case we'll have a single linear chain where // every entry has a distinct subject. - subjectIssuerIdsMap := make(map[string][]issuerID, len(issuers)) + subjectIssuerIdsMap := make(map[string][]issuing.IssuerID, len(issuers)) // First, read every issuer entry from storage. We'll propagate entries // to three of the maps here: all but issuerIdParentsMap and // issuerIdChildrenMap, which we'll do in a second pass. for _, identifier := range issuers { - var stored *issuerEntry + var stored *issuing.IssuerEntry // When the reference issuer is provided and matches this identifier, // prefer the updated reference copy instead. @@ -258,8 +262,8 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // manually building their chain prior to starting the topographical sort. // // This thus runs in O(|V| + |E|) -> O(n^2) in the number of issuers. - processedIssuers := make(map[issuerID]bool, len(issuers)) - toVisit := make([]issuerID, 0, len(issuers)) + processedIssuers := make(map[issuing.IssuerID]bool, len(issuers)) + toVisit := make([]issuing.IssuerID, 0, len(issuers)) // Handle any explicitly constructed certificate chains. Here, we don't // validate much what the user provides; if they provide since-deleted @@ -320,7 +324,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // ensure we don't accidentally infinite-loop (if we introduce a bug). maxVisitCount := len(issuers)*len(issuers)*len(issuers) + 100 for len(toVisit) > 0 && maxVisitCount >= 0 { - var issuer issuerID + var issuer issuing.IssuerID issuer, toVisit = toVisit[0], toVisit[1:] // If (and only if) we're presently starved for next nodes to visit, @@ -384,8 +388,8 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt // However, if you directly step onto the cross-signed, now you're // taken in an alternative direction (via its chain), and must // revisit any roots later. 
- var roots []issuerID - var intermediates []issuerID + var roots []issuing.IssuerID + var intermediates []issuing.IssuerID for _, parentCertId := range parentCerts { if bytes.Equal(issuerIdCertMap[parentCertId].RawSubject, issuerIdCertMap[parentCertId].RawIssuer) { roots = append(roots, parentCertId) @@ -467,7 +471,7 @@ func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* opt return nil } -func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuerEntry, certToAdd string) { +func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuing.IssuerEntry, certToAdd string) { included, ok := includedParentCerts[certToAdd] if ok && included { return @@ -478,15 +482,15 @@ func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuerE } func processAnyCliqueOrCycle( - issuers []issuerID, - processedIssuers map[issuerID]bool, - toVisit []issuerID, - issuerIdEntryMap map[issuerID]*issuerEntry, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdParentsMap map[issuerID][]issuerID, - issuerIdChildrenMap map[issuerID][]issuerID, - subjectIssuerIdsMap map[string][]issuerID, -) ([]issuerID /* toVisit */, error) { + issuers []issuing.IssuerID, + processedIssuers map[issuing.IssuerID]bool, + toVisit []issuing.IssuerID, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdParentsMap map[issuing.IssuerID][]issuing.IssuerID, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + subjectIssuerIdsMap map[string][]issuing.IssuerID, +) ([]issuing.IssuerID /* toVisit */, error) { // Topological sort really only works on directed acyclic graphs (DAGs). // But a pool of arbitrary (issuer) certificates are actually neither! // This pool could contain both cliques and cycles. Because this could @@ -547,15 +551,15 @@ func processAnyCliqueOrCycle( // Finally -- it isn't enough to consider this chain in isolation // either. We need to consider _all_ parents and ensure they've been // processed before processing this closure. 
- var cliques [][]issuerID - var cycles [][]issuerID - closure := make(map[issuerID]bool) + var cliques [][]issuing.IssuerID + var cycles [][]issuing.IssuerID + closure := make(map[issuing.IssuerID]bool) - var cliquesToProcess []issuerID + var cliquesToProcess []issuing.IssuerID cliquesToProcess = append(cliquesToProcess, issuer) for len(cliquesToProcess) > 0 { - var node issuerID + var node issuing.IssuerID node, cliquesToProcess = cliquesToProcess[0], cliquesToProcess[1:] // Skip potential clique nodes which have already been processed @@ -750,7 +754,7 @@ func processAnyCliqueOrCycle( return nil, err } - closure := make(map[issuerID]bool) + closure := make(map[issuing.IssuerID]bool) for _, cycle := range cycles { for _, node := range cycle { closure[node] = true @@ -808,14 +812,14 @@ func processAnyCliqueOrCycle( } func findAllCliques( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - subjectIssuerIdsMap map[string][]issuerID, - issuers []issuerID, -) ([][]issuerID, map[issuerID]int, []issuerID, error) { - var allCliques [][]issuerID - issuerIdCliqueMap := make(map[issuerID]int) - var allCliqueNodes []issuerID + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + subjectIssuerIdsMap map[string][]issuing.IssuerID, + issuers []issuing.IssuerID, +) ([][]issuing.IssuerID, map[issuing.IssuerID]int, []issuing.IssuerID, error) { + var allCliques [][]issuing.IssuerID + issuerIdCliqueMap := make(map[issuing.IssuerID]int) + var allCliqueNodes []issuing.IssuerID for _, node := range issuers { // Check if the node has already been visited... @@ -856,11 +860,11 @@ func findAllCliques( } func isOnReissuedClique( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - subjectIssuerIdsMap map[string][]issuerID, - node issuerID, -) ([]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + subjectIssuerIdsMap map[string][]issuing.IssuerID, + node issuing.IssuerID, +) ([]issuing.IssuerID, error) { // Finding max cliques in arbitrary graphs is a nearly pathological // problem, usually left to the realm of SAT solvers and NP-Complete // theoretical. @@ -888,7 +892,7 @@ func isOnReissuedClique( // under this reissued clique detection code). // // What does this mean for our algorithm? A simple greedy search is - // sufficient. If we index our certificates by subject -> issuerID + // sufficient. If we index our certificates by subject -> IssuerID // (and cache its value across calls, which we've already done for // building the parent/child relationship), we can find all other issuers // with the same public key and subject as the existing node fairly @@ -922,7 +926,7 @@ func isOnReissuedClique( // condition (the subject half), so validate they match the other half // (the issuer half) and the second condition. For node (which is // included in candidates), the condition should vacuously hold. - var clique []issuerID + var clique []issuing.IssuerID for _, candidate := range candidates { // Skip already processed nodes, even if they could be clique // candidates. 
We'll treat them as any other (already processed) @@ -954,7 +958,7 @@ func isOnReissuedClique( return clique, nil } -func containsIssuer(collection []issuerID, target issuerID) bool { +func containsIssuer(collection []issuing.IssuerID, target issuing.IssuerID) bool { if len(collection) == 0 { return false } @@ -968,7 +972,7 @@ func containsIssuer(collection []issuerID, target issuerID) bool { return false } -func appendCycleIfNotExisting(knownCycles [][]issuerID, candidate []issuerID) [][]issuerID { +func appendCycleIfNotExisting(knownCycles [][]issuing.IssuerID, candidate []issuing.IssuerID) [][]issuing.IssuerID { // There's two ways to do cycle detection: canonicalize the cycles, // rewriting them to have the least (or max) element first or just // brute force the detection. @@ -1004,7 +1008,7 @@ func appendCycleIfNotExisting(knownCycles [][]issuerID, candidate []issuerID) [] return knownCycles } -func canonicalizeCycle(cycle []issuerID) []issuerID { +func canonicalizeCycle(cycle []issuing.IssuerID) []issuing.IssuerID { // Find the minimum value and put it at the head, keeping the relative // ordering the same. minIndex := 0 @@ -1023,11 +1027,11 @@ func canonicalizeCycle(cycle []issuerID) []issuerID { } func findCyclesNearClique( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdChildrenMap map[issuerID][]issuerID, - cliqueNodes []issuerID, -) ([][]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + cliqueNodes []issuing.IssuerID, +) ([][]issuing.IssuerID, error) { // When we have a reissued clique, we need to find all cycles next to it. // Presumably, because they all have non-empty parents, they should not // have been visited yet. We further know that (because we're exploring @@ -1043,7 +1047,7 @@ func findCyclesNearClique( // Copy the clique nodes as excluded nodes; we'll avoid exploring cycles // which have parents that have been already explored. excludeNodes := cliqueNodes[:] - var knownCycles [][]issuerID + var knownCycles [][]issuing.IssuerID // We know the node has at least one child, since the clique is non-empty. for _, child := range issuerIdChildrenMap[cliqueNode] { @@ -1078,12 +1082,12 @@ func findCyclesNearClique( } func findAllCyclesWithNode( - processedIssuers map[issuerID]bool, - issuerIdCertMap map[issuerID]*x509.Certificate, - issuerIdChildrenMap map[issuerID][]issuerID, - source issuerID, - exclude []issuerID, -) ([][]issuerID, error) { + processedIssuers map[issuing.IssuerID]bool, + issuerIdCertMap map[issuing.IssuerID]*x509.Certificate, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, + source issuing.IssuerID, + exclude []issuing.IssuerID, +) ([][]issuing.IssuerID, error) { // We wish to find all cycles involving this particular node and report // the corresponding paths. This is a full-graph traversal (excluding // certain paths) as we're not just checking if a cycle occurred, but @@ -1093,28 +1097,28 @@ func findAllCyclesWithNode( maxCycleSize := 8 // Whether we've visited any given node. - cycleVisited := make(map[issuerID]bool) - visitCounts := make(map[issuerID]int) - parentCounts := make(map[issuerID]map[issuerID]bool) + cycleVisited := make(map[issuing.IssuerID]bool) + visitCounts := make(map[issuing.IssuerID]int) + parentCounts := make(map[issuing.IssuerID]map[issuing.IssuerID]bool) // Paths to the specified node. Some of these might be cycles. 
- pathsTo := make(map[issuerID][][]issuerID) + pathsTo := make(map[issuing.IssuerID][][]issuing.IssuerID) // Nodes to visit. - var visitQueue []issuerID + var visitQueue []issuing.IssuerID // Add the source node to start. In order to set up the paths to a // given node, we seed pathsTo with the single path involving just // this node visitQueue = append(visitQueue, source) - pathsTo[source] = [][]issuerID{{source}} + pathsTo[source] = [][]issuing.IssuerID{{source}} // Begin building paths. // // Loop invariant: // pathTo[x] contains valid paths to reach this node, from source. for len(visitQueue) > 0 { - var current issuerID + var current issuing.IssuerID current, visitQueue = visitQueue[0], visitQueue[1:] // If we've already processed this node, we have a cycle. Skip this @@ -1159,7 +1163,7 @@ func findAllCyclesWithNode( // Track this parent->child relationship to know when to exit. setOfParents, ok := parentCounts[child] if !ok { - setOfParents = make(map[issuerID]bool) + setOfParents = make(map[issuing.IssuerID]bool) parentCounts[child] = setOfParents } _, existingParent := setOfParents[current] @@ -1176,7 +1180,7 @@ func findAllCyclesWithNode( // externally with an existing path). addedPath := false if _, ok := pathsTo[child]; !ok { - pathsTo[child] = make([][]issuerID, 0) + pathsTo[child] = make([][]issuing.IssuerID, 0) } for _, path := range pathsTo[current] { @@ -1201,7 +1205,7 @@ func findAllCyclesWithNode( return nil, errutil.InternalError{Err: fmt.Sprintf("Error updating certificate path: path of length %d is too long", len(path))} } // Make sure to deep copy the path. - newPath := make([]issuerID, 0, len(path)+1) + newPath := make([]issuing.IssuerID, 0, len(path)+1) newPath = append(newPath, path...) newPath = append(newPath, child) @@ -1246,7 +1250,7 @@ func findAllCyclesWithNode( // Ok, we've now exited from our loop. Any cycles would've been detected // and their paths recorded in pathsTo. Now we can iterate over these // (starting a source), clean them up and validate them. - var cycles [][]issuerID + var cycles [][]issuing.IssuerID for _, cycle := range pathsTo[source] { // Skip the trivial cycle. 
if len(cycle) == 1 && cycle[0] == source { @@ -1284,8 +1288,8 @@ func findAllCyclesWithNode( return cycles, nil } -func reversedCycle(cycle []issuerID) []issuerID { - var result []issuerID +func reversedCycle(cycle []issuing.IssuerID) []issuing.IssuerID { + var result []issuing.IssuerID for index := len(cycle) - 1; index >= 0; index-- { result = append(result, cycle[index]) } @@ -1294,11 +1298,11 @@ func reversedCycle(cycle []issuerID) []issuerID { } func computeParentsFromClosure( - processedIssuers map[issuerID]bool, - issuerIdParentsMap map[issuerID][]issuerID, - closure map[issuerID]bool, -) (map[issuerID]bool, bool) { - parents := make(map[issuerID]bool) + processedIssuers map[issuing.IssuerID]bool, + issuerIdParentsMap map[issuing.IssuerID][]issuing.IssuerID, + closure map[issuing.IssuerID]bool, +) (map[issuing.IssuerID]bool, bool) { + parents := make(map[issuing.IssuerID]bool) for node := range closure { nodeParents, ok := issuerIdParentsMap[node] if !ok { @@ -1323,11 +1327,11 @@ func computeParentsFromClosure( } func addNodeCertsToEntry( - issuerIdEntryMap map[issuerID]*issuerEntry, - issuerIdChildrenMap map[issuerID][]issuerID, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIdChildrenMap map[issuing.IssuerID][]issuing.IssuerID, includedParentCerts map[string]bool, - entry *issuerEntry, - issuersCollection ...[]issuerID, + entry *issuing.IssuerEntry, + issuersCollection ...[]issuing.IssuerID, ) { for _, collection := range issuersCollection { // Find a starting point into this collection such that it verifies @@ -1366,10 +1370,10 @@ func addNodeCertsToEntry( } func addParentChainsToEntry( - issuerIdEntryMap map[issuerID]*issuerEntry, + issuerIdEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, includedParentCerts map[string]bool, - entry *issuerEntry, - parents map[issuerID]bool, + entry *issuing.IssuerEntry, + parents map[issuing.IssuerID]bool, ) { for parent := range parents { nodeEntry := issuerIdEntryMap[parent] diff --git a/builtin/logical/pki/cieps_util_oss.go b/builtin/logical/pki/cieps_util_oss.go new file mode 100644 index 000000000000..52524b9d4f58 --- /dev/null +++ b/builtin/logical/pki/cieps_util_oss.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package pki + +import ( + "crypto/x509" + "fmt" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// issueAcmeCertUsingCieps based on the passed in ACME information, perform a CIEPS request/response +func issueAcmeCertUsingCieps(_ *backend, _ *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, _ *acmeAccount, _ *acmeOrder, _ *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuing.IssuerID, error) { + return nil, "", fmt.Errorf("cieps is an enterprise only feature") +} + +func getCiepsAcmeSettings(sc *storageContext, opts acmeWrapperOpts, config *acmeConfigEntry, data *framework.FieldData) (bool, string, error) { + return false, "", nil +} diff --git a/builtin/logical/pki/cmd/pki/main.go b/builtin/logical/pki/cmd/pki/main.go index ffcb4521c895..49bbe146e750 100644 --- a/builtin/logical/pki/cmd/pki/main.go +++ b/builtin/logical/pki/cmd/pki/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: pki.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/pki/config_util.go b/builtin/logical/pki/config_util.go index e4592f68843a..87feab78cadd 100644 --- a/builtin/logical/pki/config_util.go +++ b/builtin/logical/pki/config_util.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( - "fmt" "strings" - "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func (sc *storageContext) isDefaultKeySet() (bool, error) { @@ -24,14 +27,14 @@ func (sc *storageContext) isDefaultIssuerSet() (bool, error) { return strings.TrimSpace(config.DefaultIssuerId.String()) != "", nil } -func (sc *storageContext) updateDefaultKeyId(id keyID) error { +func (sc *storageContext) updateDefaultKeyId(id issuing.KeyID) error { config, err := sc.getKeysConfig() if err != nil { return err } if config.DefaultKeyId != id { - return sc.setKeysConfig(&keyConfigEntry{ + return sc.setKeysConfig(&issuing.KeyConfigEntry{ DefaultKeyId: id, }) } @@ -39,7 +42,7 @@ func (sc *storageContext) updateDefaultKeyId(id keyID) error { return nil } -func (sc *storageContext) updateDefaultIssuerId(id issuerID) error { +func (sc *storageContext) updateDefaultIssuerId(id issuing.IssuerID) error { config, err := sc.getIssuersConfig() if err != nil { return err @@ -52,67 +55,3 @@ func (sc *storageContext) updateDefaultIssuerId(id issuerID) error { return nil } - -func (sc *storageContext) changeDefaultIssuerTimestamps(oldDefault issuerID, newDefault issuerID) error { - if newDefault == oldDefault { - return nil - } - - now := time.Now().UTC() - - // When the default issuer changes, we need to modify four - // pieces of information: - // - // 1. The old default issuer's modification time, as it no - // longer works for the /cert/ca path. - // 2. The new default issuer's modification time, as it now - // works for the /cert/ca path. - // 3. & 4. Both issuer's CRLs, as they behave the same, under - // the /cert/crl path! - for _, thisId := range []issuerID{oldDefault, newDefault} { - if len(thisId) == 0 { - continue - } - - // 1 & 2 above. - issuer, err := sc.fetchIssuerById(thisId) - if err != nil { - // Due to the lack of transactions, if we deleted the default - // issuer (successfully), but the subsequent issuer config write - // (to clear the default issuer's old id) failed, we might have - // an inconsistent config. If we later hit this loop (and flush - // these timestamps again -- perhaps because the operator - // selected a new default), we'd have erred out here, because - // the since-deleted default issuer doesn't exist. In this case, - // skip the issuer instead of bailing. 
- err := fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %w", thisId, err) - if strings.Contains(err.Error(), "does not exist") { - sc.Backend.Logger().Warn(err.Error()) - continue - } - - return err - } - - issuer.LastModified = now - err = sc.writeIssuer(issuer) - if err != nil { - return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err) - } - } - - // Fetch and update the localCRLConfigEntry (3&4). - cfg, err := sc.getLocalCRLConfig() - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err) - } - - cfg.LastModified = now - cfg.DeltaLastModified = now - err = sc.setLocalCRLConfig(cfg) - if err != nil { - return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err) - } - - return nil -} diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go index 46740b7c0adb..fdda768a8bc0 100644 --- a/builtin/logical/pki/crl_test.go +++ b/builtin/logical/pki/crl_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -10,10 +13,15 @@ import ( "time" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/helper/constants" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/stretchr/testify/require" ) @@ -90,8 +98,11 @@ func TestBackend_CRLConfig(t *testing.T) { "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, }) requireSuccessNonNilResponse(t, resp, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.UpdateOperation), resp, true) resp, err = CBRead(b, s, "config/crl") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.ReadOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err) requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") @@ -407,15 +418,18 @@ func TestCrlRebuilder(t *testing.T) { cb := newCRLBuilder(true /* can rebuild and write CRLs */) // Force an initial build - err = cb.rebuild(sc, true) + warnings, err := cb.rebuild(sc, true) require.NoError(t, err, "Failed to rebuild CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp := requestCrlFromBackend(t, s, b) crl1 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) // We shouldn't rebuild within this call. 
- err = cb.rebuildIfForced(sc) + warnings, err = cb.rebuildIfForced(sc) require.NoError(t, err, "Failed to rebuild if forced CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") + resp = requestCrlFromBackend(t, s, b) crl2 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) require.Equal(t, crl1.ThisUpdate, crl2.ThisUpdate, "According to the update field, we rebuilt the CRL") @@ -431,9 +445,12 @@ func TestCrlRebuilder(t *testing.T) { // This should rebuild the CRL cb.requestRebuildIfActiveNode(b) - err = cb.rebuildIfForced(sc) + warnings, err = cb.rebuildIfForced(sc) require.NoError(t, err, "Failed to rebuild if forced CRL") + require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL") resp = requestCrlFromBackend(t, s, b) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/pem"), logical.ReadOperation), resp, true) + crl3 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) require.True(t, crl1.ThisUpdate.Before(crl3.ThisUpdate), "initial crl time: %#v not before next crl rebuild time: %#v", crl1.ThisUpdate, crl3.ThisUpdate) @@ -593,10 +610,11 @@ func TestPoP(t *testing.T) { require.NotNil(t, resp) require.NotEmpty(t, resp.Data["certificate"]) - _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + resp, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ "certificate": resp.Data["certificate"], "private_key": resp.Data["private_key"], }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke-with-key"), logical.UpdateOperation), resp, true) require.NoError(t, err) // Issue a second leaf, but hold onto it for now. @@ -766,12 +784,16 @@ func TestIssuerRevocation(t *testing.T) { // Revoke it. resp, err = CBWrite(b, s, "issuer/root2/revoke", map[string]interface{}{}) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/root2/revoke"), logical.UpdateOperation), resp, true) + require.NoError(t, err) require.NotNil(t, resp) require.NotZero(t, resp.Data["revocation_time"]) // Regenerate the CRLs - _, err = CBRead(b, s, "crl/rotate") + resp, err = CBRead(b, s, "crl/rotate") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/rotate"), logical.ReadOperation), resp, true) + require.NoError(t, err) // Ensure the old cert isn't on its own CRL. @@ -796,7 +818,7 @@ func TestIssuerRevocation(t *testing.T) { require.NoError(t, err) // Issue a leaf cert and ensure it fails (because the issuer is revoked). 
- _, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ + resp, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ "common_name": "testing", }) require.Error(t, err) @@ -822,6 +844,8 @@ func TestIssuerRevocation(t *testing.T) { resp, err = CBWrite(b, s, "intermediate/set-signed", map[string]interface{}{ "certificate": intCert, }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("intermediate/set-signed"), logical.UpdateOperation), resp, true) + require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data["imported_issuers"]) @@ -837,6 +861,8 @@ func TestIssuerRevocation(t *testing.T) { resp, err = CBWrite(b, s, "issuer/int1/issue/local-testing", map[string]interface{}{ "common_name": "testing", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/int1/issue/local-testing"), logical.UpdateOperation), resp, true) + require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data["certificate"]) @@ -979,8 +1005,9 @@ func TestAutoRebuild(t *testing.T) { }) require.NoError(t, err) - crl := getCrlCertificateList(t, client, "pki") - lastCRLNumber := crl.Version + defaultCrlPath := "/v1/pki/crl" + crl := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList + lastCRLNumber := getCRLNumber(t, crl) lastCRLExpiry := crl.NextUpdate requireSerialNumberInCRL(t, crl, leafSerial) @@ -994,6 +1021,12 @@ func TestAutoRebuild(t *testing.T) { }) require.NoError(t, err) + // Wait for the CRL to update based on the configuration change we just did + // so that it doesn't grab the revocation we are going to do afterwards. + crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(time.Now())) + lastCRLNumber = getCRLNumber(t, crl) + lastCRLExpiry = crl.NextUpdate + // Issue a cert and revoke it. resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ "common_name": "example.com", @@ -1014,13 +1047,7 @@ func TestAutoRebuild(t *testing.T) { // each revocation. Pull the storage from the cluster (via the sys/raw // endpoint which requires the mount UUID) and verify the revInfo contains // a matching issuer. - resp, err = client.Logical().Read("sys/mounts/pki") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["uuid"]) - pkiMount := resp.Data["uuid"].(string) - require.NotEmpty(t, pkiMount) + pkiMount := findStorageMountUuid(t, client, "pki") revEntryPath := "logical/" + pkiMount + "/" + revokedPath + normalizeSerial(newLeafSerial) // storage from cluster.Core[0] is a physical storage copy, not a logical @@ -1037,14 +1064,14 @@ func TestAutoRebuild(t *testing.T) { var revInfo revocationInfo err = json.Unmarshal([]byte(revEntryValue), &revInfo) require.NoError(t, err) - require.Equal(t, revInfo.CertificateIssuer, issuerID(rootIssuer)) + require.Equal(t, revInfo.CertificateIssuer, issuing.IssuerID(rootIssuer)) // New serial should not appear on CRL. crl = getCrlCertificateList(t, client, "pki") - thisCRLNumber := crl.Version + thisCRLNumber := getCRLNumber(t, crl) requireSerialNumberInCRL(t, crl, leafSerial) // But the old one should. 
now := time.Now() - graceInterval, _ := time.ParseDuration(gracePeriod) + graceInterval, _ := parseutil.ParseDurationSecond(gracePeriod) expectedUpdate := lastCRLExpiry.Add(-1 * graceInterval) if requireSerialNumberInCRL(nil, crl, newLeafSerial) { // If we somehow lagged and we ended up needing to rebuild @@ -1062,7 +1089,7 @@ func TestAutoRebuild(t *testing.T) { } // This serial should exist in the delta WAL section for the mount... - resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) + resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -1082,7 +1109,7 @@ func TestAutoRebuild(t *testing.T) { default: // Check and see if there's a storage entry for the last rebuild // serial. If so, validate the delta CRL contains this entry. - resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + deltaWALPath) + resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath) require.NoError(t, err) require.NotNil(t, resp) require.NotEmpty(t, resp.Data) @@ -1103,7 +1130,7 @@ func TestAutoRebuild(t *testing.T) { } // Read the marker and see if its correct. - resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + deltaWALLastBuildSerial) + resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + localDeltaWALLastBuildSerial) require.NoError(t, err) if resp == nil { time.Sleep(1 * time.Second) @@ -1126,8 +1153,13 @@ func TestAutoRebuild(t *testing.T) { deltaCrl := getParsedCrlAtPath(t, client, "/v1/pki/crl/delta").TBSCertList if !requireSerialNumberInCRL(nil, deltaCrl, newLeafSerial) { // Check if it is on the main CRL because its already regenerated. - mainCRL := getParsedCrlAtPath(t, client, "/v1/pki/crl").TBSCertList + mainCRL := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList requireSerialNumberInCRL(t, mainCRL, newLeafSerial) + } else { + referenceCrlNum := getCrlReferenceFromDelta(t, deltaCrl) + if lastCRLNumber < referenceCrlNum { + lastCRLNumber = referenceCrlNum + } } } } @@ -1138,32 +1170,20 @@ func TestAutoRebuild(t *testing.T) { time.Sleep(expectedUpdate.Sub(now)) } - // Otherwise, the absolute latest we're willing to wait is some delta - // after CRL expiry (to let stuff regenerate &c). - interruptChan = time.After(lastCRLExpiry.Sub(now) + delta) - for { - select { - case <-interruptChan: - t.Fatalf("expected CRL to regenerate prior to CRL expiry (plus %v grace period)", delta) - default: - crl = getCrlCertificateList(t, client, "pki") - if crl.NextUpdate.Equal(lastCRLExpiry) { - // Hack to ensure we got a net-new CRL. If we didn't, we can - // exit this default conditional and wait for the next - // go-round. When the timer fires, it'll populate the channel - // and we'll exit correctly. 
- time.Sleep(1 * time.Second) - break - } + crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(now)+delta) + requireSerialNumberInCRL(t, crl, leafSerial) + requireSerialNumberInCRL(t, crl, newLeafSerial) +} - now := time.Now() - require.True(t, crl.ThisUpdate.Before(now)) - require.True(t, crl.NextUpdate.After(now)) - requireSerialNumberInCRL(t, crl, leafSerial) - requireSerialNumberInCRL(t, crl, newLeafSerial) - return - } - } +func findStorageMountUuid(t *testing.T, client *api.Client, mount string) string { + resp, err := client.Logical().Read("sys/mounts/" + mount) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["uuid"]) + pkiMount := resp.Data["uuid"].(string) + require.NotEmpty(t, pkiMount) + return pkiMount } func TestTidyIssuerAssociation(t *testing.T) { @@ -1182,7 +1202,7 @@ func TestTidyIssuerAssociation(t *testing.T) { require.NotEmpty(t, resp.Data["certificate"]) require.NotEmpty(t, resp.Data["issuer_id"]) rootCert := resp.Data["certificate"].(string) - rootID := resp.Data["issuer_id"].(issuerID) + rootID := resp.Data["issuer_id"].(issuing.IssuerID) // Create a role for issuance. _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ @@ -1317,3 +1337,193 @@ func requestCrlFromBackend(t *testing.T, s logical.Storage, b *backend) *logical require.False(t, resp.IsError(), "crl error response: %v", resp) return resp } + +func TestCRLWarningsEmptyKeyUsage(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Generated using OpenSSL with a configuration lacking KeyUsage on + // the CA certificate. + cert := `-----BEGIN CERTIFICATE----- +MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 +LW9sZDAeFw0yMDAxMDEwMTAxMDFaFw0yMTAxMDEwMTAxMDFaMBMxETAPBgNVBAMM +CHJvb3Qtb2xkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqhSZxAL +PwFhCIPL1jFPq6jxp1wFgo6YNSfVI13gfaGIjfErxsQUbosmlEuTeOc50zXXN3kb +SDufy5Yi1OeSkFZRdJ78zdKzsEDIVR1ukUngVsSrt05gdNMJlh8XOPbcrJo78jYG +lRgtkkFSc/wCu+ue6JqkfKrbUY/G9WK0UM8ppHm1Ux67ZGoypyEgaqqxKHBRC4Yl +D+lAs1vP4C6cavqdUMKgAPTKmMBzlbpCuYPLHSzWh9Com3WQSqCbrlo3uH5RT3V9 +5Gjuk3mMUhY1l6fRL7wG3f+4x+DS+ICQNT0o4lnMxpIsiTh0cEHUFgY7G0iHWYPj +CIN8UDhpZIpoCQIDAQABo2UwYzAdBgNVHQ4EFgQUJlHk3PN7pfC22FGxAb0rWgQt +L4cwHwYDVR0jBBgwFoAUJlHk3PN7pfC22FGxAb0rWgQtL4cwDAYDVR0TBAUwAwEB +/zATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAcaU0FbXb +FfXluBrjKfOzVKz+kvQ1CVv3xe3MBkS6wvqybBjJCFChnqCPxEe57BdSbBXNU5LZ +zCR/OqYas4Csv9+msSn9BI2FSMAmfMDTsp5/6iIQJqlJx9L8a7bjzVMGX6QJm/3x +S/EgGsMETAgewQXeu4jhI6StgJ2V/4Ofe498hYw4LAiBapJmkU/nHezWodNBZJ7h +LcLOzVj0Hu5MZplGBgJFgRqBCVVkqXA0q7tORuhNzYtNdJFpv3pZIhvVFFu3HUPf +wYQPhLye5WNtosz5xKe8X0Q9qp8g6azMTk+5Qe7u1d8MYAA2AIlGuKUvPHRruOmN +NC+gQnS7AK1lCw== +-----END CERTIFICATE-----` + privKey := `-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOqFJnEAs/AWEI +g8vWMU+rqPGnXAWCjpg1J9UjXeB9oYiN8SvGxBRuiyaUS5N45znTNdc3eRtIO5/L +liLU55KQVlF0nvzN0rOwQMhVHW6RSeBWxKu3TmB00wmWHxc49tysmjvyNgaVGC2S +QVJz/AK7657omqR8qttRj8b1YrRQzymkebVTHrtkajKnISBqqrEocFELhiUP6UCz +W8/gLpxq+p1QwqAA9MqYwHOVukK5g8sdLNaH0KibdZBKoJuuWje4flFPdX3kaO6T +eYxSFjWXp9EvvAbd/7jH4NL4gJA1PSjiWczGkiyJOHRwQdQWBjsbSIdZg+MIg3xQ +OGlkimgJAgMBAAECggEABKmCdmXDwy+eR0ll41aoc/hzPzHRxADAiU51Pf+DrYHj +6UPcF3db+KR2Adl0ocEhqlSoHs3CIk6KC9c+wOvagBwaaVWe4WvT9vF3M4he8rMm +dv6n2xJPFcOfDz5zUSssjk5KdOvoGRv7BzYnDIvOafvmUVwPwuo92Wizddy8saf4 +Xuea0Cupz1PELPKkbXcAqb+TzbAZrwdPj1Y7vTe/KGE4+aoDqCW/sFB1E0UsMGlt 
+/yfGwFP48b7kdkqSpcEQW5H8+WL3TfqRcolCD9To4vo2J+1Po0S/8qPNRvkNQDDX +AypHtrXFBOWHpJgXT4rKyH+ZGJchrCRDblt9s/sNQwKBgQD7NytvYET3pWemYiX+ +MB9uc6cPuMFONvlzjA9T6dbOSi/HLaeDoW027aMUZqb7QeaQCoWcUwh13dI2SZq0 +5+l9hei4JkWjoDhbWmPe7zDuQr3UMl0CSk3egz3BSHkjAhRAuUxK0QLKGB23zWxz +k8mUWYZaZRA39C6aqMt/jbJjDwKBgQDSl+eO+DjpwPzrjPSphpF4xYo4XDje9ovK +9q4KTHye7Flc3cMCX3WZBmzdt0zbqu6gWZjJH0XbWX/+SkJBGh77XWD0FeQnU7Vk +ipoeb8zTsCVxD9EytQuXti3cqBgClcCMvLKgLOJIcNYTnygojwg3t+jboQqbtV7p +VpQfAC6jZwKBgQCxJ46x1CnOmg4l/0DbqAQCV/yP0bI//fSbz0Ff459fimF3DHL9 +GHF0MtC2Kk3HEgoNud3PB58Hv43mSrGWsZSuuCgM9LBXWz1i7rNPG05eNyK26W09 +mDihmduK2hjS3zx5CDMM76gP7EHIxEyelLGqtBdS18JAMypKVo5rPPl3cQKBgQCG +ueXLImQOr4dfDntLpSqV0BLAQcekZKhEPZJURmCHr37wGXNzpixurJyjL2w9MFqf +PRKwwJAJZ3Wp8kn2qkZd23x2Szb+LeBjJQS6Kh4o44zgixTz0r1K3qLygptxs+pO +Xz4LmQte+skKHo0rfW3tb3vKXnmR6fOBZgE23//2SwKBgHck44hoE1Ex2gDEfIq1 +04OBoS1cpuc9ge4uHEmv+8uANjzwlsYf8hY1qae513MGixRBOkxcI5xX/fYPQV9F +t3Jfh8QX85JjnGntuXuraYZJMUjpwXr3QHPx0jpvAM3Au5j6qD3biC9Vrwq9Chkg +hbiiPARizZA/Tsna/9ox1qDT +-----END PRIVATE KEY-----` + resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert + "\n" + privKey, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + originalWarnings := resp.Warnings + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + + // All CRL-specific warnings should've already occurred earlier on the + // import's CRL rebuild. + for _, warning := range resp.Warnings { + require.Contains(t, originalWarnings, warning) + } + + // Deleting the issuer and key should remove the warning. + _, err = CBDelete(b, s, "root") + require.NoError(t, err) + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) + + // Adding back just the cert shouldn't cause CRL rebuild warnings. + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["mapping"]) + require.NotEmpty(t, resp.Data["mapping"]) + require.Equal(t, len(resp.Data["mapping"].(map[string]string)), 1) + for key, value := range resp.Data["mapping"].(map[string]string) { + require.NotEmpty(t, key) + require.Empty(t, value) + } + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) +} + +func TestCRLIssuerRemoval(t *testing.T) { + t.Parallel() + + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + + if constants.IsEnterprise { + // We don't really care about the whole cross cluster replication + // stuff, but we do want to enable unified CRLs if we can, so that + // unified CRLs get built. + _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "cross_cluster_revocation": true, + "auto_rebuild": true, + }) + require.NoError(t, err, "failed enabling unified CRLs on enterprise") + } + + // Create a single root, configure delta CRLs, and rotate CRLs to prep a + // starting state. 
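An editorial aside before the setup code that follows: the same `config/crl` settings this test writes through the `CBWrite` helpers can be expressed against a running server with the public API client. A sketch, assuming a mounted `pki` backend and environment-based client config; `cross_cluster_revocation` is Enterprise-only, as the `constants.IsEnterprise` guard above indicates:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Reads VAULT_ADDR / VAULT_TOKEN from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mirror the test's config/crl writes: delta CRLs plus auto-rebuild,
	// and (Enterprise only) unified cross-cluster revocation.
	_, err = client.Logical().Write("pki/config/crl", map[string]interface{}{
		"enable_delta":             true,
		"auto_rebuild":             true,
		"cross_cluster_revocation": true,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```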
+ _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + _, err = CBWrite(b, s, "config/crl", map[string]interface{}{ + "enable_delta": true, + "auto_rebuild": true, + }) + require.NoError(t, err) + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + + // List items in storage under both CRL paths so we know what is there in + // the "good" state. + crlList, err := s.List(ctx, "crls/") + require.NoError(t, err) + require.Contains(t, crlList, "config") + require.Greater(t, len(crlList), 1) + + unifiedCRLList, err := s.List(ctx, "unified-crls/") + require.NoError(t, err) + require.Contains(t, unifiedCRLList, "config") + require.Greater(t, len(unifiedCRLList), 1) + + // Now, create a bunch of issuers, generate CRLs, and remove them. + var keyIDs []string + var issuerIDs []string + for i := 1; i <= 25; i++ { + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": fmt.Sprintf("Root X%v", i), + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + key := string(resp.Data["key_id"].(issuing.KeyID)) + keyIDs = append(keyIDs, key) + issuer := string(resp.Data["issuer_id"].(issuing.IssuerID)) + issuerIDs = append(issuerIDs, issuer) + } + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + for _, issuer := range issuerIDs { + _, err := CBDelete(b, s, "issuer/"+issuer) + require.NoError(t, err) + } + for _, key := range keyIDs { + _, err := CBDelete(b, s, "key/"+key) + require.NoError(t, err) + } + + // Finally list storage entries again to ensure they are cleaned up. + afterCRLList, err := s.List(ctx, "crls/") + require.NoError(t, err) + for _, entry := range crlList { + require.Contains(t, afterCRLList, entry) + } + require.Equal(t, len(afterCRLList), len(crlList)) + + afterUnifiedCRLList, err := s.List(ctx, "unified-crls/") + require.NoError(t, err) + for _, entry := range unifiedCRLList { + require.Contains(t, afterUnifiedCRLList, entry) + } + require.Equal(t, len(afterUnifiedCRLList), len(unifiedCRLList)) +} diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go index 2ee12b50301d..78d6a5d74813 100644 --- a/builtin/logical/pki/crl_util.go +++ b/builtin/logical/pki/crl_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -9,29 +12,51 @@ import ( "math/big" "strings" "sync" + "sync/atomic" "time" - atomic2 "go.uber.org/atomic" - + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) const ( - revokedPath = "revoked/" - deltaWALPath = "delta-wal/" - deltaWALLastBuildSerialName = "last-build-serial" - deltaWALLastBuildSerial = deltaWALPath + deltaWALLastBuildSerialName - deltaWALLastRevokedSerialName = "last-revoked-serial" - deltaWALLastRevokedSerial = deltaWALPath + deltaWALLastRevokedSerialName + revokedPath = "revoked/" + crossRevocationPrefix = "cross-revocation-queue/" + crossRevocationPath = crossRevocationPrefix + "{{clusterId}}/" + deltaWALLastBuildSerialName = "last-build-serial" + deltaWALLastRevokedSerialName = "last-revoked-serial" + localDeltaWALPath = "delta-wal/" + localDeltaWALLastBuildSerial = localDeltaWALPath + deltaWALLastBuildSerialName + localDeltaWALLastRevokedSerial = localDeltaWALPath + deltaWALLastRevokedSerialName + unifiedDeltaWALPrefix = "unified-delta-wal/" + unifiedDeltaWALPath = "unified-delta-wal/{{clusterId}}/" + unifiedDeltaWALLastBuildSerial = unifiedDeltaWALPath + deltaWALLastBuildSerialName + unifiedDeltaWALLastRevokedSerial = unifiedDeltaWALPath + deltaWALLastRevokedSerialName ) type revocationInfo struct { - CertificateBytes []byte `json:"certificate_bytes"` - RevocationTime int64 `json:"revocation_time"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - CertificateIssuer issuerID `json:"issuer_id"` + CertificateBytes []byte `json:"certificate_bytes"` + RevocationTime int64 `json:"revocation_time"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + CertificateIssuer issuing.IssuerID `json:"issuer_id"` +} + +type revocationRequest struct { + RequestedAt time.Time `json:"requested_at"` +} + +type revocationConfirmed struct { + RevokedAt string `json:"revoked_at"` + Source string `json:"source"` +} + +type revocationQueueEntry struct { + Cluster string + Serial string } type ( @@ -57,26 +82,34 @@ type ( } ) -// crlBuilder is gatekeeper for controlling various read/write operations to the storage of the CRL. +// CrlBuilder is gatekeeper for controlling various read/write operations to the storage of the CRL. // The extra complexity arises from secondary performance clusters seeing various writes to its storage // without the actual API calls. During the storage invalidation process, we do not have the required state // to actually rebuild the CRLs, so we need to schedule it in a deferred fashion. This allows either // read or write calls to perform the operation if required, or have the flag reset upon a write operation // // The CRL builder also tracks the revocation configuration. -type crlBuilder struct { +type CrlBuilder struct { _builder sync.Mutex - forceRebuild *atomic2.Bool + forceRebuild *atomic.Bool canRebuild bool lastDeltaRebuildCheck time.Time - _config sync.RWMutex - dirty *atomic2.Bool - config crlConfig + _config sync.RWMutex + dirty *atomic.Bool + config crlConfig + haveInitializedConfig bool // Whether to invalidate our LastModifiedTime due to write on the // global issuance config. 
- invalidate *atomic2.Bool + invalidate *atomic.Bool + + // Global revocation queue entries get accepted by the invalidate func + // and passed to the CrlBuilder for processing. + haveInitializedQueue *atomic.Bool + revQueue *revocationQueue + removalQueue *revocationQueue + crossQueue *revocationQueue } const ( @@ -84,25 +117,31 @@ const ( _enforceForceFlag = false ) -func newCRLBuilder(canRebuild bool) *crlBuilder { - return &crlBuilder{ - forceRebuild: atomic2.NewBool(false), +func newCRLBuilder(canRebuild bool) *CrlBuilder { + builder := &CrlBuilder{ + forceRebuild: &atomic.Bool{}, canRebuild: canRebuild, // Set the last delta rebuild window to now, delaying the first delta // rebuild by the first rebuild period to give us some time on startup // to stabilize. lastDeltaRebuildCheck: time.Now(), - dirty: atomic2.NewBool(true), + dirty: &atomic.Bool{}, config: defaultCrlConfig, - invalidate: atomic2.NewBool(false), - } + invalidate: &atomic.Bool{}, + haveInitializedQueue: &atomic.Bool{}, + revQueue: newRevocationQueue(), + removalQueue: newRevocationQueue(), + crossQueue: newRevocationQueue(), + } + builder.dirty.Store(true) + return builder } -func (cb *crlBuilder) markConfigDirty() { +func (cb *CrlBuilder) markConfigDirty() { cb.dirty.Store(true) } -func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { +func (cb *CrlBuilder) reloadConfigIfRequired(sc *storageContext) error { if cb.dirty.Load() { // Acquire a write lock. cb._config.Lock() @@ -119,6 +158,7 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { return err } + previousConfig := cb.config // Set the default config if none was returned to us. if config != nil { cb.config = *config @@ -128,12 +168,35 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { // Updated the config; unset dirty. cb.dirty.Store(false) + triggerChangeNotification := true + if !cb.haveInitializedConfig { + cb.haveInitializedConfig = true + triggerChangeNotification = false // do not trigger on the initial loading of configuration. + } + + // Certain things need to be triggered on all server types when crlConfig is loaded. + if triggerChangeNotification { + cb.notifyOnConfigChange(sc, previousConfig, cb.config) + } } return nil } -func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { +func (cb *CrlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig crlConfig, newConfig crlConfig) { + // If you need to hook into a CRL configuration change across different server types + // such as primary clusters as well as performance replicas, it is easier to do here than + // in two places (API layer and in invalidateFunc) + if priorConfig.UnifiedCRL != newConfig.UnifiedCRL && newConfig.UnifiedCRL { + sc.Backend.GetUnifiedTransferStatus().forceRun() + } + + if priorConfig.UseGlobalQueue != newConfig.UseGlobalQueue && newConfig.UseGlobalQueue { + cb.haveInitializedQueue.Store(false) + } +} + +func (cb *CrlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { // Config may mutate immediately after accessing, but will be freshly // fetched if necessary. 
if err := cb.reloadConfigIfRequired(sc); err != nil { @@ -147,7 +210,42 @@ func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error return &configCopy, nil } -func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { +func (cb *CrlBuilder) getConfigWithForcedUpdate(sc *storageContext) (*crlConfig, error) { + cb.markConfigDirty() + return cb.getConfigWithUpdate(sc) +} + +func (cb *CrlBuilder) writeConfig(sc *storageContext, config *crlConfig) (*crlConfig, error) { + cb._config.Lock() + defer cb._config.Unlock() + + if err := sc.setRevocationConfig(config); err != nil { + cb.markConfigDirty() + return nil, fmt.Errorf("failed writing CRL config: %w", err) + } + + previousConfig := cb.config + if config != nil { + cb.config = *config + } else { + cb.config = defaultCrlConfig + } + + triggerChangeNotification := true + if !cb.haveInitializedConfig { + cb.haveInitializedConfig = true + triggerChangeNotification = false // do not trigger on the initial loading of configuration. + } + + // Certain things need to be triggered on all server types when crlConfig is loaded. + if triggerChangeNotification { + cb.notifyOnConfigChange(sc, previousConfig, cb.config) + } + + return config, nil +} + +func (cb *CrlBuilder) checkForAutoRebuild(sc *storageContext) error { cfg, err := cb.getConfigWithUpdate(sc) if err != nil { return err @@ -165,21 +263,21 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { // // We store a list of all (unique) CRLs in the cluster-local CRL // configuration along with their expiration dates. - crlConfig, err := sc.getLocalCRLConfig() + internalCRLConfig, err := sc.getLocalCRLConfig() if err != nil { return fmt.Errorf("error checking for auto-rebuild status: unable to fetch cluster-local CRL configuration: %w", err) } // If there's no config, assume we've gotta rebuild it to get this // information. - if crlConfig == nil { + if internalCRLConfig == nil { cb.forceRebuild.Store(true) return nil } // If the map is empty, assume we need to upgrade and schedule a // rebuild. - if len(crlConfig.CRLExpirationMap) == 0 { + if len(internalCRLConfig.CRLExpirationMap) == 0 { cb.forceRebuild.Store(true) return nil } @@ -188,12 +286,12 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { // the grace period and act accordingly. now := time.Now() - period, err := time.ParseDuration(cfg.AutoRebuildGracePeriod) + period, err := parseutil.ParseDurationSecond(cfg.AutoRebuildGracePeriod) if err != nil { // This may occur if the duration is empty; in that case // assume the default. The default should be valid and shouldn't // error. 
- defaultPeriod, defaultErr := time.ParseDuration(defaultCrlConfig.AutoRebuildGracePeriod) + defaultPeriod, defaultErr := parseutil.ParseDurationSecond(defaultCrlConfig.AutoRebuildGracePeriod) if defaultErr != nil { return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %w\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr) } @@ -201,7 +299,7 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { period = defaultPeriod } - for _, value := range crlConfig.CRLExpirationMap { + for _, value := range internalCRLConfig.CRLExpirationMap { if value.IsZero() || now.After(value.Add(-1*period)) { cb.forceRebuild.Store(true) return nil @@ -212,14 +310,14 @@ func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error { } // Mark the internal LastModifiedTime tracker invalid. -func (cb *crlBuilder) invalidateCRLBuildTime() { +func (cb *CrlBuilder) invalidateCRLBuildTime() { cb.invalidate.Store(true) } // Update the config to mark the modified CRL. See note in // updateDefaultIssuerId about why this is necessary. -func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { - if cb.invalidate.CAS(true, false) { +func (cb *CrlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { + if cb.invalidate.CompareAndSwap(true, false) { // Flush out our invalidation. cfg, err := sc.getLocalCRLConfig() if err != nil { @@ -241,21 +339,21 @@ func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error { // rebuildIfForced is to be called by readers or periodic functions that might need to trigger // a refresh of the CRL before the read occurs. -func (cb *crlBuilder) rebuildIfForced(sc *storageContext) error { +func (cb *CrlBuilder) rebuildIfForced(sc *storageContext) ([]string, error) { if cb.forceRebuild.Load() { return cb._doRebuild(sc, true, _enforceForceFlag) } - return nil + return nil, nil } // rebuild is to be called by various write apis that know the CRL is to be updated and can be now. -func (cb *crlBuilder) rebuild(sc *storageContext, forceNew bool) error { +func (cb *CrlBuilder) rebuild(sc *storageContext, forceNew bool) ([]string, error) { return cb._doRebuild(sc, forceNew, _ignoreForceFlag) } // requestRebuildIfActiveNode will schedule a rebuild of the CRL from the next read or write api call assuming we are the active node of a cluster -func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { +func (cb *CrlBuilder) requestRebuildIfActiveNode(b *backend) { // Only schedule us on active nodes, as the active node is the only node that can rebuild/write the CRL. // Note 1: The CRL is cluster specific, so this does need to run on the active node of a performance secondary cluster. // Note 2: This is called by the storage invalidation function, so it should not block. @@ -269,7 +367,7 @@ func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) { cb.forceRebuild.Store(true) } -func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) error { +func (cb *CrlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) ([]string, error) { cb._builder.Lock() defer cb._builder.Unlock() // Re-read the lock in case someone beat us to the punch between the previous load op. 
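On the `time.ParseDuration` to `parseutil.ParseDurationSecond` swap running through these hunks: the parseutil variant additionally accepts bare integers and treats them as seconds, which matches how duration fields like `auto_rebuild_grace_period` may arrive through the API. A standalone illustration (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// "72h" parses under both parsers; "300" fails time.ParseDuration
	// but is accepted here and interpreted as 300 seconds.
	for _, raw := range []string{"72h", "300"} {
		d, err := parseutil.ParseDurationSecond(raw)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", raw, d) // 72h -> 72h0m0s, 300 -> 5m0s
	}
}
```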
@@ -286,14 +384,14 @@ func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceF return buildCRLs(sc, myForceNew) } - return nil + return nil, nil } -func (cb *crlBuilder) getPresentDeltaWALForClearing(sc *storageContext) ([]string, error) { +func (cb *CrlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path string) ([]string, error) { // Clearing of the delta WAL occurs after a new complete CRL has been built. - walSerials, err := sc.Storage.List(sc.Context, deltaWALPath) + walSerials, err := sc.Storage.List(sc.Context, path) if err != nil { - return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %s", err) + return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %w", err) } // We _should_ remove the special WAL entries here, but we don't really @@ -302,23 +400,60 @@ func (cb *crlBuilder) getPresentDeltaWALForClearing(sc *storageContext) ([]strin return walSerials, nil } -func (cb *crlBuilder) clearDeltaWAL(sc *storageContext, walSerials []string) error { +func (cb *CrlBuilder) getPresentLocalDeltaWALForClearing(sc *storageContext) ([]string, error) { + return cb._getPresentDeltaWALForClearing(sc, localDeltaWALPath) +} + +func (cb *CrlBuilder) getPresentUnifiedDeltaWALForClearing(sc *storageContext) ([]string, error) { + walClusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) + if err != nil { + return nil, fmt.Errorf("error fetching list of clusters with delta WAL entries: %w", err) + } + + var allPaths []string + for index, cluster := range walClusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterPaths, err := cb._getPresentDeltaWALForClearing(sc, prefix) + if err != nil { + return nil, fmt.Errorf("error fetching delta WAL entries for cluster (%v / %v): %w", index, cluster, err) + } + + // Here, we don't want to include the unifiedDeltaWALPrefix because + // clearUnifiedDeltaWAL handles that for us. Instead, just include + // the cluster identifier. + for _, clusterPath := range clusterPaths { + allPaths = append(allPaths, cluster+clusterPath) + } + } + + return allPaths, nil +} + +func (cb *CrlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, path string) error { // Clearing of the delta WAL occurs after a new complete CRL has been built. for _, serial := range walSerials { // Don't remove our special entries! - if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { + if strings.HasSuffix(serial, deltaWALLastBuildSerialName) || strings.HasSuffix(serial, deltaWALLastRevokedSerialName) { continue } - if err := sc.Storage.Delete(sc.Context, deltaWALPath+serial); err != nil { - return fmt.Errorf("error clearing delta WAL certificate: %s", err) + if err := sc.Storage.Delete(sc.Context, path+serial); err != nil { + return fmt.Errorf("error clearing delta WAL certificate: %w", err) } } return nil } -func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) error { +func (cb *CrlBuilder) clearLocalDeltaWAL(sc *storageContext, walSerials []string) error { + return cb._clearDeltaWAL(sc, walSerials, localDeltaWALPath) +} + +func (cb *CrlBuilder) clearUnifiedDeltaWAL(sc *storageContext, walSerials []string) error { + return cb._clearDeltaWAL(sc, walSerials, unifiedDeltaWALPrefix) +} + +func (cb *CrlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) ([]string, error) { // Delta CRLs use the same expiry duration as the complete CRL. 
Because // we always rebuild the complete CRL and then the delta CRL, we can // be assured that the delta CRL always expires after a complete CRL, @@ -330,18 +465,18 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // within our time window for updating it. cfg, err := cb.getConfigWithUpdate(sc) if err != nil { - return err + return nil, err } if !cfg.EnableDelta { // We explicitly do not update the last check time here, as we // want to persist the last rebuild window if it hasn't been set. - return nil + return nil, nil } - deltaRebuildDuration, err := time.ParseDuration(cfg.DeltaRebuildInterval) + deltaRebuildDuration, err := parseutil.ParseDurationSecond(cfg.DeltaRebuildInterval) if err != nil { - return err + return nil, err } // Acquire CRL building locks before we get too much further. @@ -357,7 +492,7 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // If we're still before the time of our next rebuild check, we can // safely return here even if we have certs. We'll wait for a bit, // retrigger this check, and then do the rebuild. - return nil + return nil, nil } // Update our check time. If we bail out below (due to storage errors @@ -366,20 +501,39 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // until our next complete CRL build. cb.lastDeltaRebuildCheck = now + rebuildLocal, err := cb._shouldRebuildLocalCRLs(sc, override) + if err != nil { + return nil, fmt.Errorf("error determining if local CRLs should be rebuilt: %w", err) + } + + rebuildUnified, err := cb._shouldRebuildUnifiedCRLs(sc, override) + if err != nil { + return nil, fmt.Errorf("error determining if unified CRLs should be rebuilt: %w", err) + } + + if !rebuildLocal && !rebuildUnified { + return nil, nil + } + + // Finally, we must've needed to do the rebuild. Execute! + return cb.rebuildDeltaCRLsHoldingLock(sc, false) +} + +func (cb *CrlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) (bool, error) { // Fetch two storage entries to see if we actually need to do this // rebuild, given we're within the window. - lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) + lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial) if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) { // If this entry does not exist, we don't need to rebuild the // delta WAL due to the expiration assumption above. There must // not have been any new revocations. Since err should be nil // in this case, we can safely return it. - return err + return false, err } - lastBuildEntry, err := sc.Storage.Get(sc.Context, deltaWALLastBuildSerial) + lastBuildEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastBuildSerial) if err != nil { - return err + return false, err } if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil { @@ -392,56 +546,388 @@ func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool // guard. var walInfo lastWALInfo if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return err + return false, err } var deltaInfo lastDeltaInfo if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return err + return false, err } // Here, everything decoded properly and we know that no new certs // have been revoked since we built this last delta CRL. We can exit // without rebuilding then. 
 		if walInfo.Serial == deltaInfo.Serial {
-			return nil
+			return false, nil
 		}
 	}
 
-	// Finally, we must've needed to do the rebuild. Execute!
-	return cb.rebuildDeltaCRLsHoldingLock(sc, false)
+	return true, nil
+}
+
+func (cb *CrlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override bool) (bool, error) {
+	// Unified CRL can only be built by the main cluster.
+	b := sc.Backend
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		return false, nil
+	}
+
+	// If we're overriding whether we should build Delta CRLs, always return
+	// true, even if storage errors might've happened.
+	if override {
+		return true, nil
+	}
+
+	// Fetch two storage entries to see if we actually need to do this
+	// rebuild, given we're within the window. We need to fetch these
+	// two entries per cluster.
+	clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix)
+	if err != nil {
+		return false, fmt.Errorf("failed to get the list of clusters having written Delta WALs: %w", err)
+	}
+
+	// If any cluster tells us to rebuild, we should rebuild.
+	shouldRebuild := false
+	for index, cluster := range clusters {
+		prefix := unifiedDeltaWALPrefix + cluster
+		clusterUnifiedLastRevokedWALEntry := prefix + deltaWALLastRevokedSerialName
+		clusterUnifiedLastBuiltWALEntry := prefix + deltaWALLastBuildSerialName
+
+		lastWALEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastRevokedWALEntry)
+		if err != nil {
+			return false, fmt.Errorf("failed fetching last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err)
+		}
+
+		if lastWALEntry == nil || lastWALEntry.Value == nil {
+			continue
+		}
+
+		lastBuildEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastBuiltWALEntry)
+		if err != nil {
+			return false, fmt.Errorf("failed fetching last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err)
+		}
+
+		if lastBuildEntry == nil || lastBuildEntry.Value == nil {
+			// If the last build entry doesn't exist, we still want to build a
+			// new delta WAL, since this could be our very first time doing so.
+			shouldRebuild = true
+			break
+		}
+
+		// Otherwise, here, now that we know it exists, we want to check this
+		// value against the other value. Since we previously guarded the WAL
+		// entry being non-empty, we're good to decode everything within this
+		// guard.
+		var walInfo lastWALInfo
+		if err := lastWALEntry.DecodeJSON(&walInfo); err != nil {
+			return false, fmt.Errorf("failed decoding last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err)
+		}
+
+		var deltaInfo lastDeltaInfo
+		if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil {
+			return false, fmt.Errorf("failed decoding last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err)
+		}
+
+		if walInfo.Serial != deltaInfo.Serial {
+			shouldRebuild = true
+			break
+		}
+	}
+
+	// No errors occurred, so return the result.
+ return shouldRebuild, nil } -func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) error { +func (cb *CrlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) ([]string, error) { cb._builder.Lock() defer cb._builder.Unlock() return cb.rebuildDeltaCRLsHoldingLock(sc, forceNew) } -func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) error { +func (cb *CrlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) ([]string, error) { return buildAnyCRLs(sc, forceNew, true /* building delta */) } -// Helper function to fetch a map of issuerID->parsed cert for revocation +func (cb *CrlBuilder) addCertForRevocationCheck(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.revQueue.Add(entry) +} + +func (cb *CrlBuilder) addCertForRevocationRemoval(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.removalQueue.Add(entry) +} + +func (cb *CrlBuilder) addCertFromCrossRevocation(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.crossQueue.Add(entry) +} + +func (cb *CrlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotPerfPrimary bool) error { + // Assume holding lock. + if cb.haveInitializedQueue.Load() { + return nil + } + + sc.Backend.Logger().Debug(fmt.Sprintf("gathering first time existing revocations")) + + clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) + } + + sc.Backend.Logger().Debug(fmt.Sprintf("found %v clusters: %v", len(clusters), clusters)) + + for cIndex, cluster := range clusters { + cluster = cluster[0 : len(cluster)-1] + cPath := crossRevocationPrefix + cluster + "/" + serials, err := sc.Storage.List(sc.Context, cPath) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) + } + + sc.Backend.Logger().Debug(fmt.Sprintf("found %v serials for cluster %v: %v", len(serials), cluster, serials)) + + for _, serial := range serials { + if serial[len(serial)-1] == '/' { + serial = serial[0 : len(serial)-1] + } + + ePath := cPath + serial + eConfirmPath := ePath + "/confirmed" + removalEntry, err := sc.Storage.Get(sc.Context, eConfirmPath) + + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + + // No removal entry yet; add to regular queue. Otherwise, slate it + // for removal if we're a perfPrimary. + if err != nil || removalEntry == nil { + cb.revQueue.Add(entry) + } else if !isNotPerfPrimary { + cb.removalQueue.Add(entry) + } // Else, this is a confirmation but we're on a perf secondary so ignore it. + + // Overwrite the error; we don't really care about its contents + // at this step. 
+ err = nil + } + } + + return nil +} + +func (cb *CrlBuilder) processRevocationQueue(sc *storageContext) error { + sc.Backend.Logger().Debug(fmt.Sprintf("starting to process revocation requests")) + + isNotPerfPrimary := sc.Backend.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!sc.Backend.System().LocalMount() && sc.Backend.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) + + if err := cb.maybeGatherQueueForFirstProcess(sc, isNotPerfPrimary); err != nil { + return fmt.Errorf("failed to gather first queue: %w", err) + } + + revQueue := cb.revQueue.Iterate() + removalQueue := cb.removalQueue.Iterate() + + sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v revocations and %v confirmation entries", len(revQueue), len(removalQueue))) + + crlConfig, err := cb.getConfigWithUpdate(sc) + if err != nil { + return err + } + + ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) + if err != nil { + return fmt.Errorf("unable to fetch clusterID to ignore local revocation entries: %w", err) + } + + for _, req := range revQueue { + // Regardless of whether we're on the perf primary or a secondary + // cluster, we can safely ignore revocation requests originating + // from our node, because we've already checked them once (when + // they were created). + if ourClusterId != "" && ourClusterId == req.Cluster { + continue + } + + // Fetch the revocation entry to ensure it exists. + rPath := crossRevocationPrefix + req.Cluster + "/" + req.Serial + entry, err := sc.Storage.Get(sc.Context, rPath) + if err != nil { + return fmt.Errorf("failed to read cross-cluster revocation queue entry: %w", err) + } + if entry == nil { + // Skipping this entry; it was likely an incorrect invalidation + // caused by the primary cluster removing the confirmation. + cb.revQueue.Remove(req) + continue + } + + resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial) + if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" { + if isNotPerfPrimary { + // Write a revocation queue removal entry. + confirmed := revocationConfirmed{ + RevokedAt: resp.Data["revocation_time_rfc3339"].(string), + Source: req.Cluster, + } + path := crossRevocationPath + req.Serial + "/confirmed" + confirmedEntry, err := logical.StorageEntryJSON(path, confirmed) + if err != nil { + return fmt.Errorf("failed to create storage entry for cross-cluster revocation confirmed response: %w", err) + } + + if err := sc.Storage.Put(sc.Context, confirmedEntry); err != nil { + return fmt.Errorf("error persisting cross-cluster revocation confirmation: %w", err) + } + } else { + // Since we're the active node of the primary cluster, go ahead + // and just remove it. + path := crossRevocationPrefix + req.Cluster + "/" + req.Serial + if err := sc.Storage.Delete(sc.Context, path); err != nil { + return fmt.Errorf("failed to delete processed revocation request: %w", err) + } + } + } else if err != nil { + // Because we fake being from a lease, we get the guarantee that + // err == nil == resp if the cert was already revoked; this means + // this err should actually be fatal. + return err + } + cb.revQueue.Remove(req) + } + + if isNotPerfPrimary { + sc.Backend.Logger().Debug(fmt.Sprintf("not on perf primary so ignoring any revocation confirmations")) + + // See note in pki/backend.go; this should be empty. 
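`revocationQueue` itself is defined elsewhere in this change; judging solely by the call sites here (`Add`, `Remove`, `RemoveAll`, `Iterate`), it behaves as a concurrency-safe set of queue entries. A stand-in with the same surface, for readers following along (illustrative, not the repository's implementation; assumes a `sync` import, and relies on `revocationQueueEntry` being comparable since it holds two strings):

```go
// exampleRevocationQueue is a hypothetical stand-in for revocationQueue.
type exampleRevocationQueue struct {
	mu      sync.Mutex
	entries map[revocationQueueEntry]struct{}
}

func newExampleRevocationQueue() *exampleRevocationQueue {
	return &exampleRevocationQueue{entries: make(map[revocationQueueEntry]struct{})}
}

func (q *exampleRevocationQueue) Add(e *revocationQueueEntry) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.entries[*e] = struct{}{}
}

func (q *exampleRevocationQueue) Remove(e *revocationQueueEntry) {
	q.mu.Lock()
	defer q.mu.Unlock()
	delete(q.entries, *e)
}

func (q *exampleRevocationQueue) RemoveAll() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.entries = make(map[revocationQueueEntry]struct{})
}

func (q *exampleRevocationQueue) Iterate() []*revocationQueueEntry {
	q.mu.Lock()
	defer q.mu.Unlock()
	out := make([]*revocationQueueEntry, 0, len(q.entries))
	for e := range q.entries {
		e := e // copy before taking the address
		out = append(out, &e)
	}
	return out
}
```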
+ cb.removalQueue.RemoveAll() + cb.haveInitializedQueue.Store(true) + return nil + } + + clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) + if err != nil { + return err + } + + for _, entry := range removalQueue { + // First remove the revocation request. + for cIndex, cluster := range clusters { + eEntry := crossRevocationPrefix + cluster + entry.Serial + if err := sc.Storage.Delete(sc.Context, eEntry); err != nil { + return fmt.Errorf("failed to delete potential cross-cluster revocation entry for cluster %v (%v) and serial %v: %w", cluster, cIndex, entry.Serial, err) + } + } + + // Then remove the confirmation. + if err := sc.Storage.Delete(sc.Context, crossRevocationPrefix+entry.Cluster+"/"+entry.Serial+"/confirmed"); err != nil { + return fmt.Errorf("failed to delete cross-cluster revocation confirmation entry for cluster %v and serial %v: %w", entry.Cluster, entry.Serial, err) + } + + cb.removalQueue.Remove(entry) + } + + cb.haveInitializedQueue.Store(true) + + return nil +} + +func (cb *CrlBuilder) processCrossClusterRevocations(sc *storageContext) error { + sc.Backend.Logger().Debug(fmt.Sprintf("starting to process unified revocations")) + + crlConfig, err := cb.getConfigWithUpdate(sc) + if err != nil { + return err + } + + if !crlConfig.UnifiedCRL { + cb.crossQueue.RemoveAll() + return nil + } + + crossQueue := cb.crossQueue.Iterate() + sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v unified revocations entries", len(crossQueue))) + + ourClusterId, err := sc.Backend.System().ClusterID(sc.Context) + if err != nil { + return fmt.Errorf("unable to fetch clusterID to ignore local unified revocation entries: %w", err) + } + + for _, req := range crossQueue { + // Regardless of whether we're on the perf primary or a secondary + // cluster, we can safely ignore revocation requests originating + // from our node, because we've already checked them once (when + // they were created). + if ourClusterId != "" && ourClusterId == req.Cluster { + continue + } + + // Fetch the revocation entry to ensure it exists and this wasn't + // a delete. + rPath := unifiedRevocationReadPathPrefix + req.Cluster + "/" + req.Serial + entry, err := sc.Storage.Get(sc.Context, rPath) + if err != nil { + return fmt.Errorf("failed to read unified revocation entry: %w", err) + } + if entry == nil { + // Skip this entry: it was likely caused by the deletion of this + // record during tidy. + cb.crossQueue.Remove(req) + continue + } + + resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial) + if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" { + // We could theoretically save ourselves from writing a global + // revocation entry during the above certificate revocation, as + // we don't really need it to appear on either the unified CRL + // or its delta CRL, but this would require more plumbing. + cb.crossQueue.Remove(req) + } else if err != nil { + // Because we fake being from a lease, we get the guarantee that + // err == nil == resp if the cert was already revoked; this means + // this err should actually be fatal. + return err + } + } + + return nil +} + +// Helper function to fetch a map of IssuerID->parsed cert for revocation // usage. Unlike other paths, this needs to handle the legacy bundle // more gracefully than rejecting it outright. 
-func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509.Certificate, error) { +func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuing.IssuerID]*x509.Certificate, error) { var err error - var issuers []issuerID + var issuers []issuing.IssuerID - if !sc.Backend.useLegacyBundleCaStorage() { + if !sc.Backend.UseLegacyBundleCaStorage() { issuers, err = sc.listIssuers() if err != nil { return nil, fmt.Errorf("could not fetch issuers list: %w", err) } } else { - // Hack: this isn't a real issuerID, but it works for fetchCAInfo + // Hack: this isn't a real IssuerID, but it works for fetchCAInfo // since it resolves the reference. - issuers = []issuerID{legacyBundleShimID} + issuers = []issuing.IssuerID{legacyBundleShimID} } - issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) + issuerIDCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) for _, issuer := range issuers { _, bundle, caErr := sc.fetchCertBundleByIssuerId(issuer, false) if caErr != nil { @@ -467,8 +953,37 @@ func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509 return issuerIDCertMap, nil } +// Revoke a certificate from a given serial number if it is present in local +// storage. +func tryRevokeCertBySerial(sc *storageContext, config *crlConfig, serial string) (*logical.Response, error) { + // revokeCert requires us to hold these locks before calling it. + sc.Backend.GetRevokeStorageLock().Lock() + defer sc.Backend.GetRevokeStorageLock().Unlock() + + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + + if certEntry == nil { + return nil, nil + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } + + return revokeCert(sc, config, cert) +} + // Revokes a cert, and tries to be smart about error recovery -func revokeCert(sc *storageContext, serial string, fromLease bool) (*logical.Response, error) { +func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) (*logical.Response, error) { // As this backend is self-contained and this function does not hook into // third parties to manage users or resources, if the mount is tainted, // revocation doesn't matter anyways -- the CRL that would be written will @@ -478,6 +993,9 @@ func revokeCert(sc *storageContext, serial string, fromLease bool) (*logical.Res return nil, nil } + colonSerial := serialFromCert(cert) + hyphenSerial := normalizeSerial(colonSerial) + // Validate that no issuers match the serial number to be revoked. We need // to gracefully degrade to the legacy cert bundle when it is required, as // secondary PR clusters might not have been upgraded, but still need to @@ -490,103 +1008,94 @@ func revokeCert(sc *storageContext, serial string, fromLease bool) (*logical.Res // Ensure we don't revoke an issuer via this API; use /issuer/:issuer_ref/revoke // instead. 
for issuer, certificate := range issuerIDCertMap { - colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":") if colonSerial == serialFromCert(certificate) { return logical.ErrorResponse(fmt.Sprintf("adding issuer (id: %v) to its own CRL is not allowed", issuer)), nil } } - alreadyRevoked := false - var revInfo revocationInfo - - revEntry, err := fetchCertBySerial(sc, revokedPath, serial) + curRevInfo, err := sc.fetchRevocationInfo(colonSerial) if err != nil { - switch err.(type) { - case errutil.UserError: - return logical.ErrorResponse(err.Error()), nil - default: - return nil, err - } + return nil, err } - if revEntry != nil { - // Set the revocation info to the existing values - alreadyRevoked = true - err = revEntry.DecodeJSON(&revInfo) - if err != nil { - return nil, fmt.Errorf("error decoding existing revocation info") + if curRevInfo != nil { + resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": curRevInfo.RevocationTime, + "state": "revoked", + }, + } + if !curRevInfo.RevocationTimeUTC.IsZero() { + resp.Data["revocation_time_rfc3339"] = curRevInfo.RevocationTimeUTC.Format(time.RFC3339Nano) } + + return resp, nil } - if !alreadyRevoked { - certEntry, err := fetchCertBySerial(sc, "certs/", serial) - if err != nil { - switch err.(type) { - case errutil.UserError: - return logical.ErrorResponse(err.Error()), nil - default: - return nil, err - } - } - if certEntry == nil { - if fromLease { - // We can't write to revoked/ or update the CRL anyway because we don't have the cert, - // and there's no reason to expect this will work on a subsequent - // retry. Just give up and let the lease get deleted. - return nil, nil - } - return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found", serial)), nil - } + // Add a little wiggle room because leases are stored with a second + // granularity + if cert.NotAfter.Before(time.Now().Add(2 * time.Second)) { + response := &logical.Response{} + response.AddWarning(fmt.Sprintf("certificate with serial %s already expired; refusing to add to CRL", colonSerial)) + return response, nil + } - cert, err := x509.ParseCertificate(certEntry.Value) - if err != nil { - return nil, fmt.Errorf("error parsing certificate: %w", err) - } - if cert == nil { - return nil, fmt.Errorf("got a nil certificate") - } + currTime := time.Now() + revInfo := revocationInfo{ + CertificateBytes: cert.Raw, + RevocationTime: currTime.Unix(), + RevocationTimeUTC: currTime.UTC(), + } - // Add a little wiggle room because leases are stored with a second - // granularity - if cert.NotAfter.Before(time.Now().Add(2 * time.Second)) { - response := &logical.Response{} - response.AddWarning(fmt.Sprintf("certificate with serial %s already expired; refusing to add to CRL", serial)) - return response, nil - } + // We may not find an issuer with this certificate; that's fine so + // ignore the return value. + associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) - // Compatibility: Don't revoke CAs if they had leases. New CAs going - // forward aren't issued leases. 
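A note on the two serial spellings introduced above: `colonSerial` is the lowercase colon-delimited form used in API responses, while `hyphenSerial` keys storage under `revoked/`. Assuming the in-repo helpers behave like these stand-ins (the removed legacy line nearby, which mapped hyphens back to colons, supports this reading):

```go
// Hypothetical stand-ins for serialFromCert / normalizeSerial.
func exampleSerialFromCert(cert *x509.Certificate) string {
	// e.g. "3a:bc:0d:...": colon-separated hex of the serial number.
	return certutil.GetHexFormatted(cert.SerialNumber.Bytes(), ":")
}

func exampleNormalizeSerial(colonSerial string) string {
	// "3a:bc:0d:..." becomes "3a-bc-0d-...", suitable for a storage
	// key such as revoked/3a-bc-0d-...
	return strings.ReplaceAll(strings.ToLower(colonSerial), ":", "-")
}
```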
- if cert.IsCA && fromLease { - return nil, nil - } + revEntry, err := logical.StorageEntryJSON(revokedPath+hyphenSerial, revInfo) + if err != nil { + return nil, fmt.Errorf("error creating revocation entry: %w", err) + } - currTime := time.Now() - revInfo.CertificateBytes = certEntry.Value - revInfo.RevocationTime = currTime.Unix() - revInfo.RevocationTimeUTC = currTime.UTC() + certCounter := sc.Backend.GetCertificateCounter() + certsCounted := certCounter.IsInitialized() + err = sc.Storage.Put(sc.Context, revEntry) + if err != nil { + return nil, fmt.Errorf("error saving revoked certificate to new location: %w", err) + } + certCounter.IncrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) - // We may not find an issuer with this certificate; that's fine so - // ignore the return value. - associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) + // From here on out, the certificate has been revoked locally. Any other + // persistence issues might still err, but any other failure messages + // should be added as warnings to the revocation. + resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": revInfo.RevocationTime, + "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), + "state": "revoked", + }, + } - revEntry, err = logical.StorageEntryJSON(revokedPath+normalizeSerial(serial), revInfo) - if err != nil { - return nil, fmt.Errorf("error creating revocation entry") + // If this flag is enabled after the fact, existing local entries will be published to + // the unified storage space through a periodic function. + failedWritingUnifiedCRL := false + if config.UnifiedCRL { + entry := &unifiedRevocationEntry{ + SerialNumber: colonSerial, + CertExpiration: cert.NotAfter, + RevocationTimeUTC: revInfo.RevocationTimeUTC, + CertificateIssuer: revInfo.CertificateIssuer, } - certsCounted := sc.Backend.certsCounted.Load() - err = sc.Storage.Put(sc.Context, revEntry) - if err != nil { - return nil, fmt.Errorf("error saving revoked certificate to new location") - } - sc.Backend.incrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) - } + ignoreErr := writeUnifiedRevocationEntry(sc, entry) + if ignoreErr != nil { + // Just log the error if we fail to write across clusters, a separate background + // thread will reattempt it later on as we have the local write done. + sc.Backend.Logger().Error("Failed to write unified revocation entry, will re-attempt later", + "serial_number", colonSerial, "error", ignoreErr) + sc.Backend.GetUnifiedTransferStatus().forceRun() - // Fetch the config and see if we need to rebuild the CRL. If we have - // auto building enabled, we will wait for the next rebuild period to - // actually rebuild it. - config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) - if err != nil { - return nil, fmt.Errorf("error building CRL: while updating config: %w", err) + resp.AddWarning(fmt.Sprintf("Failed to write unified revocation entry, will re-attempt later: %v", err)) + failedWritingUnifiedCRL = true + } } if !config.AutoRebuild { @@ -594,7 +1103,7 @@ func revokeCert(sc *storageContext, serial string, fromLease bool) (*logical.Res // already rebuilt the full CRL so the Delta WAL will be cleared // afterwards. Writing an entry only to immediately remove it // isn't necessary. 
- crlErr := sc.Backend.crlBuilder.rebuild(sc, false) + warnings, crlErr := sc.Backend.CrlBuilder().rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -603,60 +1112,106 @@ func revokeCert(sc *storageContext, serial string, fromLease bool) (*logical.Res return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } - } else if !alreadyRevoked { - // Regardless of whether or not we've presently enabled Delta CRLs, - // we should always write the Delta WAL in case it is enabled in the - // future. We could trigger another full CRL rebuild instead (to avoid - // inconsistent state between the CRL and missing Delta WAL entries), - // but writing extra (unused?) WAL entries versus an expensive full - // CRL rebuild is probably a net wash. - /// - // We should only do this when the cert hasn't already been revoked. - // Otherwise, the re-revocation may appear on both an existing CRL and - // on a delta CRL, or a serial may be skipped from the delta CRL if - // there's an A->B->A revocation pattern and the delta was rebuilt - // after the first cert. + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + } else if config.EnableDelta { + if err := writeRevocationDeltaWALs(sc, config, resp, failedWritingUnifiedCRL, hyphenSerial, colonSerial); err != nil { + return nil, fmt.Errorf("failed to write WAL entries for Delta CRLs: %w", err) + } + } + + return resp, nil +} + +func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, resp *logical.Response, failedWritingUnifiedCRL bool, hyphenSerial string, colonSerial string) error { + if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, localDeltaWALPath); err != nil { + return fmt.Errorf("failed to write local delta WAL entry: %w", err) + } + + if config.UnifiedCRL && !failedWritingUnifiedCRL { + // We only need to write cross-cluster unified Delta WAL entries when + // it is enabled; in particular, because we rebuild CRLs when enabling + // this flag, any revocations that happened prior to enabling unified + // revocation will appear on the complete CRL (+/- synchronization: + // in particular, if a perf replica revokes a cert prior to seeing + // unified revocation enabled, but after the main node has done the + // listing for the unified CRL rebuild, this revocation will not + // appear on either the main or the next delta CRL, but will need to + // wait for a subsequent complete CRL rebuild). // - // Currently we don't store any data in the WAL entry. - var walInfo deltaWALInfo - walEntry, err := logical.StorageEntryJSON(deltaWALPath+normalizeSerial(serial), walInfo) - if err != nil { - return nil, fmt.Errorf("unable to create delta CRL WAL entry") + // Lastly, we don't attempt this if the unified CRL entry failed to + // write, as we need that entry before the delta WAL entry will make + // sense. + if ignoredErr := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); ignoredErr != nil { + // Just log the error if we fail to write across clusters, a separate background + // thread will reattempt it later on as we have the local write done. 
+ sc.Backend.Logger().Error("Failed to write cross-cluster delta WAL entry, will re-attempt later", + "serial_number", colonSerial, "error", ignoredErr) + sc.Backend.GetUnifiedTransferStatus().forceRun() + + resp.AddWarning(fmt.Sprintf("Failed to write cross-cluster delta WAL entry, will re-attempt later: %v", ignoredErr)) } + } else if failedWritingUnifiedCRL { + resp.AddWarning("Skipping cross-cluster delta WAL entry as cross-cluster revocation failed to write; will re-attempt later.") + } - if err = sc.Storage.Put(sc.Context, walEntry); err != nil { - return nil, fmt.Errorf("error saving delta CRL WAL entry") - } + return nil +} - // In order for periodic delta rebuild to be mildly efficient, we - // should write the last revoked delta WAL entry so we know if we - // have new revocations that we should rebuild the delta WAL for. - lastRevSerial := lastWALInfo{Serial: serial} - lastWALEntry, err := logical.StorageEntryJSON(deltaWALLastRevokedSerial, lastRevSerial) - if err != nil { - return nil, fmt.Errorf("unable to create last delta CRL WAL entry") - } - if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { - return nil, fmt.Errorf("error saving last delta CRL WAL entry") - } +func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, colonSerial string, pathPrefix string) error { + // Previously, regardless of whether or not we've presently enabled + // Delta CRLs, we would always write the Delta WAL in case it is + // enabled in the future. We thought we could trigger another full CRL + // rebuild instead (to avoid inconsistent state between the CRL and + // missing Delta WAL entries), but writing extra (unused?) WAL entries + // versus an expensive full CRL rebuild was judged to be roughly a + // net wash. + // + // However, we've now added unified CRL building, adding cross-cluster + // writes to the revocation path. Because this is relatively expensive, + // we've opted to rebuild the complete+delta CRLs when toggling the + // delta-enabled state, instead of always writing delta CRL entries. + // + // Thus Delta WAL building happens **only** when Delta CRLs are enabled. + // + // We should only do this when the cert hasn't already been revoked. + // Otherwise, the re-revocation may appear on both an existing CRL and + // on a delta CRL, or a serial may be skipped from the delta CRL if + // there's an A->B->A revocation pattern and the delta was rebuilt + // after the first cert. + // + // Currently we don't store any data in the WAL entry. + var walInfo deltaWALInfo + walEntry, err := logical.StorageEntryJSON(pathPrefix+hyphenSerial, walInfo) + if err != nil { + return fmt.Errorf("unable to create delta CRL WAL entry: %w", err) } - resp := &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": revInfo.RevocationTime, - }, + if err = sc.Storage.Put(sc.Context, walEntry); err != nil { + return fmt.Errorf("error saving delta CRL WAL entry: %w", err) + } + + // In order for periodic delta rebuild to be mildly efficient, we + // should write the last revoked delta WAL entry so we know if we + // have new revocations that we should rebuild the delta WAL for. 
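// --- Illustrative sketch, not part of the diff ---
// The "last revoked serial" sentinel written below lets the periodic
// function decide cheaply whether a delta CRL rebuild is needed: it
// compares the serial recorded at revocation time with the serial recorded
// at the last delta build. A hedged restatement as a hypothetical helper:
func deltaRebuildNeeded(lastRevokedSerial, lastBuiltSerial string) bool {
	if lastRevokedSerial == "" {
		// No revocations recorded since the WAL was last cleared.
		return false
	}
	// Something was revoked after (or without) the last delta build.
	return lastRevokedSerial != lastBuiltSerial
}
// --- end sketch ---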
+ lastRevSerial := lastWALInfo{Serial: colonSerial} + lastWALEntry, err := logical.StorageEntryJSON(pathPrefix+deltaWALLastRevokedSerialName, lastRevSerial) + if err != nil { + return fmt.Errorf("unable to create last delta CRL WAL entry: %w", err) } - if !revInfo.RevocationTimeUTC.IsZero() { - resp.Data["revocation_time_rfc3339"] = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano) + if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { + return fmt.Errorf("error saving last delta CRL WAL entry: %w", err) } - return resp, nil + + return nil } -func buildCRLs(sc *storageContext, forceNew bool) error { +func buildCRLs(sc *storageContext, forceNew bool) ([]string, error) { return buildAnyCRLs(sc, forceNew, false) } -func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { +func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, error) { // In order to build all CRLs, we need knowledge of all issuers. Any two // issuers with the same keys _and_ subject should have the same CRL since // they're functionally equivalent. @@ -684,38 +1239,38 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // See the message in revokedCert about rebuilding CRLs: we need to // gracefully handle revoking entries with the legacy cert bundle. var err error - var issuers []issuerID + var issuers []issuing.IssuerID var wasLegacy bool - // First, fetch an updated copy of the CRL config. We'll pass this into - // buildCRL. - globalCRLConfig, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + // First, fetch an updated copy of the CRL config. We'll pass this into buildCRL. + crlBuilder := sc.Backend.CrlBuilder() + globalCRLConfig, err := crlBuilder.getConfigWithUpdate(sc) if err != nil { - return fmt.Errorf("error building CRL: while updating config: %w", err) + return nil, fmt.Errorf("error building CRL: while updating config: %w", err) } if globalCRLConfig.Disable && !forceNew { - // We build a single long-lived empty CRL in the event that we disable - // the CRL, but we don't keep updating it with newer, more-valid empty - // CRLs in the event that we later re-enable it. This is a historical - // behavior. + // We build a single long-lived (but regular validity) empty CRL in + // the event that we disable the CRL, but we don't keep updating it + // with newer, more-valid empty CRLs in the event that we later + // re-enable it. This is a historical behavior. // // So, since tidy can now associate issuers on revocation entries, we // can skip the rest of this function and exit early without updating // anything. - return nil + return nil, nil } - if !sc.Backend.useLegacyBundleCaStorage() { + if !sc.Backend.UseLegacyBundleCaStorage() { issuers, err = sc.listIssuers() if err != nil { - return fmt.Errorf("error building CRL: while listing issuers: %w", err) + return nil, fmt.Errorf("error building CRL: while listing issuers: %w", err) } } else { // Here, we hard-code the legacy issuer entry instead of using the // default ref. This is because we need to hack some of the logic // below for revocation to handle the legacy bundle. - issuers = []issuerID{legacyBundleShimID} + issuers = []issuing.IssuerID{legacyBundleShimID} wasLegacy = true // Here, we avoid building a delta CRL with the legacy CRL bundle. @@ -723,30 +1278,30 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Users should upgrade symmetrically, rather than attempting // backward compatibility for new features across disparate versions. 
if isDelta { - return nil + return []string{"refusing to rebuild delta CRL with legacy bundle; finish migrating to newer issuer storage layout"}, nil } } - config, err := sc.getIssuersConfig() + issuersConfig, err := sc.getIssuersConfig() if err != nil { - return fmt.Errorf("error building CRLs: while getting the default config: %w", err) + return nil, fmt.Errorf("error building CRLs: while getting the default config: %w", err) } - // We map issuerID->entry for fast lookup and also issuerID->Cert for + // We map IssuerID->entry for fast lookup and also IssuerID->Cert for // signature verification and correlation of revoked certs. - issuerIDEntryMap := make(map[issuerID]*issuerEntry, len(issuers)) - issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) + issuerIDEntryMap := make(map[issuing.IssuerID]*issuing.IssuerEntry, len(issuers)) + issuerIDCertMap := make(map[issuing.IssuerID]*x509.Certificate, len(issuers)) - // We use a double map (keyID->subject->issuerID) to store whether or not this + // We use a double map (KeyID->subject->IssuerID) to store whether or not this // key+subject pairing has been seen before. We can then iterate over each // key/subject and choose any representative issuer for that combination. - keySubjectIssuersMap := make(map[keyID]map[string][]issuerID) + keySubjectIssuersMap := make(map[issuing.KeyID]map[string][]issuing.IssuerID) for _, issuer := range issuers { // We don't strictly need this call, but by requesting the bundle, the // legacy path is automatically ignored. thisEntry, _, err := sc.fetchCertBundleByIssuerId(issuer, false) if err != nil { - return fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) + return nil, fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) } if len(thisEntry.KeyID) == 0 { @@ -771,23 +1326,230 @@ thisCert, err := thisEntry.GetCertificate() if err != nil { - return fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) + return nil, fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) } issuerIDCertMap[issuer] = thisCert subject := string(thisCert.RawSubject) if _, ok := keySubjectIssuersMap[thisEntry.KeyID]; !ok { - keySubjectIssuersMap[thisEntry.KeyID] = make(map[string][]issuerID) + keySubjectIssuersMap[thisEntry.KeyID] = make(map[string][]issuing.IssuerID) } keySubjectIssuersMap[thisEntry.KeyID][subject] = append(keySubjectIssuersMap[thisEntry.KeyID][subject], issuer) } + // Now we do two calls: building the cluster-local CRL, and potentially + // building the global CRL if we're on the active node of the performance + // primary. 
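// --- Illustrative sketch, not part of the diff ---
// keySubjectIssuersMap groups issuers sharing both key and subject, since
// such issuers are functionally equivalent for CRL purposes and should
// share a single CRL. The grouping step, restated with plain strings in
// place of the issuing.* types (a simplification, not the real signature):
func groupEquivalentIssuers(issuers map[string]struct{ KeyID, Subject string }) map[string]map[string][]string {
	groups := make(map[string]map[string][]string) // keyID -> subject -> issuer IDs
	for id, info := range issuers {
		if _, ok := groups[info.KeyID]; !ok {
			groups[info.KeyID] = make(map[string][]string)
		}
		groups[info.KeyID][info.Subject] = append(groups[info.KeyID][info.Subject], id)
	}
	return groups
}
// --- end sketch ---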
+ currLocalDeltaSerials, localWarnings, err := buildAnyLocalCRLs(sc, issuersConfig, globalCRLConfig, + issuers, issuerIDEntryMap, + issuerIDCertMap, keySubjectIssuersMap, + wasLegacy, forceNew, isDelta) + if err != nil { + return nil, err + } + currUnifiedDeltaSerials, unifiedWarnings, err := buildAnyUnifiedCRLs(sc, issuersConfig, globalCRLConfig, + issuers, issuerIDEntryMap, + issuerIDCertMap, keySubjectIssuersMap, + wasLegacy, forceNew, isDelta) + if err != nil { + return nil, err + } + + var warnings []string + for _, warning := range localWarnings { + warnings = append(warnings, fmt.Sprintf("warning from local CRL rebuild: %v", warning)) + } + for _, warning := range unifiedWarnings { + warnings = append(warnings, fmt.Sprintf("warning from unified CRL rebuild: %v", warning)) + } + + // Finally, we decide if we need to rebuild the Delta CRLs again, for both + // global and local CRLs if necessary. + if !isDelta { + // After we've confirmed the primary CRLs have built OK, go ahead and + // clear the delta CRL WAL and rebuild it. + if err := crlBuilder.clearLocalDeltaWAL(sc, currLocalDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + } + if err := crlBuilder.clearUnifiedDeltaWAL(sc, currUnifiedDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + } + deltaWarnings, err := crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew) + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) + } + for _, warning := range deltaWarnings { + warnings = append(warnings, fmt.Sprintf("warning from delta CRL rebuild: %v", warning)) + } + } + + return warnings, nil +} + +func getLastWALSerial(sc *storageContext, path string) (string, error) { + lastWALEntry, err := sc.Storage.Get(sc.Context, path) + if err != nil { + return "", err + } + + if lastWALEntry != nil && lastWALEntry.Value != nil { + var walInfo lastWALInfo + if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { + return "", err + } + + return walInfo.Serial, nil + } + + // No serial to return. + return "", nil +} + +func buildAnyLocalCRLs( + sc *storageContext, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *crlConfig, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, + wasLegacy bool, + forceNew bool, + isDelta bool, +) ([]string, []string, error) { + var err error + var warnings []string + + // Before we load cert entries, we want to store the last seen delta WAL + // serial number. The subsequent List will have at LEAST that certificate + // (and potentially more) in it; when we're done writing the delta CRL, + // we'll write this serial as a sentinel to see if we need to rebuild it + // in the future. + var lastDeltaSerial string + if isDelta { + lastDeltaSerial, err = getLastWALSerial(sc, localDeltaWALLastRevokedSerial) + if err != nil { + return nil, nil, err + } + } + + // We fetch a list of delta WAL entries prior to generating the complete + // CRL. This allows us to avoid a lock (to clear such storage): anything + // visible now, should also be visible on the complete CRL we're writing. 
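// --- Illustrative sketch, not part of the diff ---
// Listing the delta WAL *before* building the complete CRL is what makes
// the later clear safe without holding a lock: only serials captured in
// that listing are deleted afterwards, so entries written concurrently
// (which may not be on the complete CRL) survive until the next build.
// Sketch of the clear step, assuming the sdk/logical Storage interface:
func clearCapturedWAL(ctx context.Context, s logical.Storage, prefix string, captured []string) error {
	for _, serial := range captured {
		if err := s.Delete(ctx, prefix+serial); err != nil {
			return fmt.Errorf("failed clearing WAL entry %v: %w", serial, err)
		}
	}
	return nil
}
// --- end sketch ---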
+ var currDeltaCerts []string + if !isDelta { + currDeltaCerts, err = sc.Backend.CrlBuilder().getPresentLocalDeltaWALForClearing(sc) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + } + } + + var unassignedCerts []pkix.RevokedCertificate + var revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate + + // If the CRL is disabled do not bother reading in all the revoked certificates. + if !globalCRLConfig.Disable { + // Next, we load and parse all revoked certificates. We need to assign + // these certificates to an issuer. Some certificates will not be + // assignable (if they were issued by a since-deleted issuer), so we need + // a separate pool for those. + unassignedCerts, revokedCertsMap, err = getLocalRevokedCertEntries(sc, issuerIDCertMap, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + } + + if !isDelta { + // Revoking an issuer forces us to rebuild our complete CRL, + // regardless of whether or not we've enabled auto rebuilding or + // delta CRLs. If we elide the above isDelta check, this results + // in a non-empty delta CRL, containing the serial of the + // now-revoked issuer, even though it was generated _after_ the + // complete CRL with the issuer on it. There's no reason to + // duplicate this serial number on the delta, hence the above + // guard for isDelta. + if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + } + } + } + // Fetch the cluster-local CRL mapping so we know where to write the // CRLs. - crlConfig, err := sc.getLocalCRLConfig() + internalCRLConfig, err := sc.getLocalCRLConfig() if err != nil { - return fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + } + + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + issuers, issuerIDEntryMap, keySubjectIssuersMap, + unassignedCerts, revokedCertsMap, + forceNew, false /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) + } + + // Finally, persist our potentially updated local CRL config. Only do this + // if we didn't have a legacy CRL bundle. + if !wasLegacy { + if err := sc.setLocalCRLConfig(internalCRLConfig); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + } + } + + if isDelta { + // Update our last build time here so we avoid checking for new certs + // for a while. + sc.Backend.CrlBuilder().lastDeltaRebuildCheck = time.Now() + + if len(lastDeltaSerial) > 0 { + // When we have a last delta serial, write out the relevant info + // so we can skip extra CRL rebuilds. 
+ deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} + + lastDeltaBuildEntry, err := logical.StorageEntryJSON(localDeltaWALLastBuildSerial, deltaInfo) + if err != nil { + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + } + + err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) + if err != nil { + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + } + } + } + + return currDeltaCerts, warnings, nil +} + +func buildAnyUnifiedCRLs( + sc *storageContext, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *crlConfig, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, + wasLegacy bool, + forceNew bool, + isDelta bool, +) ([]string, []string, error) { + var err error + var warnings []string + + // Unified CRL can only be built by the main cluster. + b := sc.Backend + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + return nil, nil, nil + } + + // Unified CRL should only be built if enabled. + if !globalCRLConfig.UnifiedCRL && !forceNew { + return nil, nil, nil } // Before we load cert entries, we want to store the last seen delta WAL @@ -795,20 +1557,23 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // (and potentially more) in it; when we're done writing the delta CRL, // we'll write this serial as a sentinel to see if we need to rebuild it // in the future. - var lastDeltaSerial string + // + // We need to do this per-cluster. + lastDeltaSerial := map[string]string{} if isDelta { - lastWALEntry, err := sc.Storage.Get(sc.Context, deltaWALLastRevokedSerial) + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) if err != nil { - return err + return nil, nil, fmt.Errorf("error listing clusters for unified delta WAL building: %w", err) } - if lastWALEntry != nil && lastWALEntry.Value != nil { - var walInfo lastWALInfo - if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return err + for index, cluster := range clusters { + path := unifiedDeltaWALPrefix + cluster + deltaWALLastRevokedSerialName + serial, err := getLastWALSerial(sc, path) + if err != nil { + return nil, nil, fmt.Errorf("error getting last written Delta WAL serial for cluster (%v / %v): %w", index, cluster, err) } - lastDeltaSerial = walInfo.Serial + lastDeltaSerial[cluster] = serial } } @@ -817,14 +1582,14 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // visible now, should also be visible on the complete CRL we're writing. 
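// --- Illustrative sketch, not part of the diff ---
// The replication gate above means unified CRLs are assembled only where
// the cross-cluster storage view is complete and writable: the active node
// of the performance primary (or a local mount). Restated as a helper with
// the same condition as the diff; the helper name is hypothetical:
func canBuildUnifiedCRLs(b *backend) bool {
	state := b.System().ReplicationState()
	if state.HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) {
		return false
	}
	// Local mounts on a performance secondary still own their storage.
	if !b.System().LocalMount() && state.HasState(consts.ReplicationPerformanceSecondary) {
		return false
	}
	return true
}
// --- end sketch ---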
var currDeltaCerts []string if !isDelta { - currDeltaCerts, err = sc.Backend.crlBuilder.getPresentDeltaWALForClearing(sc) + currDeltaCerts, err = sc.Backend.CrlBuilder().getPresentUnifiedDeltaWALForClearing(sc) if err != nil { - return fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) } } var unassignedCerts []pkix.RevokedCertificate - var revokedCertsMap map[issuerID][]pkix.RevokedCertificate + var revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate // If the CRL is disabled do not bother reading in all the revoked certificates. if !globalCRLConfig.Disable { @@ -832,9 +1597,9 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // these certificates to an issuer. Some certificates will not be // assignable (if they were issued by a since-deleted issuer), so we need // a separate pool for those. - unassignedCerts, revokedCertsMap, err = getRevokedCertEntries(sc, issuerIDCertMap, isDelta) + unassignedCerts, revokedCertsMap, err = getUnifiedRevokedCertEntries(sc, issuerIDCertMap, isDelta) if err != nil { - return fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) } if !isDelta { @@ -847,13 +1612,87 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // duplicate this serial number on the delta, hence the above // guard for isDelta. if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { - return fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + } + } + } + + // Fetch the cluster-local CRL mapping so we know where to write the + // CRLs. + internalCRLConfig, err := sc.getUnifiedCRLConfig() + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + } + + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + issuers, issuerIDEntryMap, keySubjectIssuersMap, + unassignedCerts, revokedCertsMap, + forceNew, true /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) + } + + // Finally, persist our potentially updated local CRL config. Only do this + // if we didn't have a legacy CRL bundle. + if !wasLegacy { + if err := sc.setUnifiedCRLConfig(internalCRLConfig); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + } + } + + if isDelta { + // Update our last build time here so we avoid checking for new certs + // for a while. + sc.Backend.CrlBuilder().lastDeltaRebuildCheck = time.Now() + + // Persist all of our known last revoked serial numbers here, as the + // last seen serial during build. This will allow us to detect if any + // new revocations have occurred, forcing us to rebuild the delta CRL. + for cluster, serial := range lastDeltaSerial { + if len(serial) == 0 { + continue + } + + // Make sure to use the cluster-specific path. Since we're on the + // active node of the primary cluster, we own this entry and can + // safely write it. 
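// --- Illustrative sketch, not part of the diff ---
// Cross-cluster WAL entries are namespaced by the writing cluster, so the
// primary reads (and, for its own sentinels, writes) per-cluster paths of
// the form <unifiedDeltaWALPrefix><clusterId><name>. A tiny sketch of the
// path construction, with placeholder constant values:
const (
	exampleUnifiedDeltaWALPrefix = "unified-delta-wal/" // placeholder, not the real prefix
	exampleLastBuildName         = "last-build-serial"  // placeholder, not the real name
)

func clusterSentinelPath(clusterId string) string {
	// clusterId comes back from a storage List() with a trailing "/".
	return exampleUnifiedDeltaWALPrefix + clusterId + exampleLastBuildName
}
// --- end sketch ---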
+ path := unifiedDeltaWALPrefix + cluster + deltaWALLastBuildSerialName + deltaInfo := lastDeltaInfo{Serial: serial} + lastDeltaBuildEntry, err := logical.StorageEntryJSON(path, deltaInfo) + if err != nil { + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + } + + err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) + if err != nil { + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) } } } + return currDeltaCerts, warnings, nil +} + +func buildAnyCRLsWithCerts( + sc *storageContext, + issuersConfig *issuing.IssuerConfigEntry, + globalCRLConfig *crlConfig, + internalCRLConfig *issuing.InternalCRLConfigEntry, + issuers []issuing.IssuerID, + issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, + keySubjectIssuersMap map[issuing.KeyID]map[string][]issuing.IssuerID, + unassignedCerts []pkix.RevokedCertificate, + revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate, + forceNew bool, + isUnified bool, + isDelta bool, +) ([]string, error) { // Now we can call buildCRL once, on an arbitrary/representative issuer - // from each of these (keyID, subject) sets. + // from each of these (KeyID, subject) sets. + var warnings []string for _, subjectIssuersMap := range keySubjectIssuersMap { for _, issuersSet := range subjectIssuersMap { if len(issuersSet) == 0 { @@ -861,15 +1700,15 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } var revokedCerts []pkix.RevokedCertificate - representative := issuerID("") - var crlIdentifier crlID - var crlIdIssuer issuerID + representative := issuing.IssuerID("") + var crlIdentifier issuing.CrlID + var crlIdIssuer issuing.IssuerID for _, issuerId := range issuersSet { // Skip entries which aren't enabled for CRL signing. We don't // particularly care which issuer is ultimately chosen as the // set representative for signing at this point, other than // that it has crl-signing usage. - if err := issuerIDEntryMap[issuerId].EnsureUsage(CRLSigningUsage); err != nil { + if err := issuerIDEntryMap[issuerId].EnsureUsage(issuing.CRLSigningUsage); err != nil { continue } @@ -879,7 +1718,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // If it is, we'll also pull in the unassigned certs to remain // compatible with Vault's earlier, potentially questionable // behavior. - if issuerId == config.DefaultIssuerId { + if issuerId == issuersConfig.DefaultIssuerId { if len(unassignedCerts) > 0 { revokedCerts = append(revokedCerts, unassignedCerts...) } @@ -889,7 +1728,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Otherwise, use any other random issuer if we've not yet // chosen one. - if representative == issuerID("") { + if representative == issuing.IssuerID("") { representative = issuerId } @@ -899,9 +1738,9 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } // Finally, check our crlIdentifier. 
- if thisCRLId, ok := crlConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { + if thisCRLId, ok := internalCRLConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { if len(crlIdentifier) > 0 && crlIdentifier != thisCRLId { - return fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) + return nil, fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) } crlIdentifier = thisCRLId @@ -913,30 +1752,48 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Skip this set for the time being; while we have valid // issuers and associated keys, this occurred because we lack // crl-signing usage on all issuers in this set. + // + // But, tell the user about this, so they can either correct + // this by reissuing the CA certificate or adding an equivalent + // version with KU bits if the CA cert lacks KU altogether. + // + // See also: https://github.com/hashicorp/vault/issues/20137 + warning := "Issuer equivalency set with associated keys lacked an issuer with CRL Signing KeyUsage; refusing to rebuild CRL for this group of issuers: " + var issuers []string + for _, issuerId := range issuersSet { + issuers = append(issuers, issuerId.String()) + } + warning += strings.Join(issuers, ",") + + // We only need this warning once. :-) + if !isUnified && !isDelta { + warnings = append(warnings, warning) + } + continue } if len(crlIdentifier) == 0 { // Create a new random UUID for this CRL if none exists. crlIdentifier = genCRLId() - crlConfig.CRLNumberMap[crlIdentifier] = 1 + internalCRLConfig.CRLNumberMap[crlIdentifier] = 1 } // Update all issuers in this group to set the CRL Issuer for _, issuerId := range issuersSet { - crlConfig.IssuerIDCRLMap[issuerId] = crlIdentifier + internalCRLConfig.IssuerIDCRLMap[issuerId] = crlIdentifier } // We always update the CRL Number since we never want to // duplicate numbers and missing numbers is fine. - crlNumber := crlConfig.CRLNumberMap[crlIdentifier] - crlConfig.CRLNumberMap[crlIdentifier] += 1 + crlNumber := internalCRLConfig.CRLNumberMap[crlIdentifier] + internalCRLConfig.CRLNumberMap[crlIdentifier] += 1 // CRLs (regardless of complete vs delta) are incrementally // numbered. But delta CRLs need to know the number of the // last complete CRL. We assume that's the previous identifier // if no value presently exists. - lastCompleteNumber, haveLast := crlConfig.LastCompleteNumberMap[crlIdentifier] + lastCompleteNumber, haveLast := internalCRLConfig.LastCompleteNumberMap[crlIdentifier] if !haveLast { // We use the value of crlNumber for the current CRL, so // decrement it by one to find the last one. @@ -945,24 +1802,24 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // Update `LastModified` if isDelta { - crlConfig.DeltaLastModified = time.Now().UTC() + internalCRLConfig.DeltaLastModified = time.Now().UTC() } else { - crlConfig.LastModified = time.Now().UTC() + internalCRLConfig.LastModified = time.Now().UTC() } // Lastly, build the CRL. 
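// --- Illustrative sketch, not part of the diff ---
// CRL numbers are allocated monotonically per CRL identifier and shared by
// complete and delta CRLs; a delta CRL additionally needs the number of the
// last complete CRL, guessed as (current - 1) when no record exists yet.
// A hedged restatement of that arithmetic; the names are hypothetical:
func allocateCRLNumbers(numberMap, lastCompleteMap map[string]int64, id string) (crlNumber, lastComplete int64) {
	crlNumber = numberMap[id]
	numberMap[id] = crlNumber + 1 // never reuse a number; gaps are fine
	var haveLast bool
	if lastComplete, haveLast = lastCompleteMap[id]; !haveLast {
		// No record of the last complete CRL: assume it was the previous one.
		lastComplete = crlNumber - 1
	}
	return crlNumber, lastComplete
}
// --- end sketch ---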
- nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isDelta, lastCompleteNumber) + nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isUnified, isDelta, lastCompleteNumber) if err != nil { - return fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) + return nil, fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) } - crlConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate + internalCRLConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate if !isDelta { - crlConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber + internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber } else if !haveLast { // Since we're writing this config anyways, save our guess // as to the last CRL number. - crlConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber + internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber } } } @@ -978,7 +1835,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { // root deletion behavior, but using soft issuer deletes. If there is an // alternate, equivalent issuer however, we'll keep updating the shared // CRL; all equivalent issuers must have their CRLs disabled. - for mapIssuerId := range crlConfig.IssuerIDCRLMap { + for mapIssuerId := range internalCRLConfig.IssuerIDCRLMap { stillHaveIssuer := false for _, listedIssuerId := range issuers { if mapIssuerId == listedIssuerId { @@ -988,12 +1845,12 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } if !stillHaveIssuer { - delete(crlConfig.IssuerIDCRLMap, mapIssuerId) + delete(internalCRLConfig.IssuerIDCRLMap, mapIssuerId) } } - for crlId := range crlConfig.CRLNumberMap { + for crlId := range internalCRLConfig.CRLNumberMap { stillHaveIssuerForID := false - for _, remainingCRL := range crlConfig.IssuerIDCRLMap { + for _, remainingCRL := range internalCRLConfig.IssuerIDCRLMap { if remainingCRL == crlId { stillHaveIssuerForID = true break @@ -1002,55 +1859,16 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { if !stillHaveIssuerForID { if err := sc.Storage.Delete(sc.Context, "crls/"+crlId.String()); err != nil { - return fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err) - } - } - } - - // Finally, persist our potentially updated local CRL config. Only do this - // if we didn't have a legacy CRL bundle. - if !wasLegacy { - if err := sc.setLocalCRLConfig(crlConfig); err != nil { - return fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) - } - } - - if !isDelta { - // After we've confirmed the primary CRLs have built OK, go ahead and - // clear the delta CRL WAL and rebuild it. - if err := sc.Backend.crlBuilder.clearDeltaWAL(sc, currDeltaCerts); err != nil { - return fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) - } - if err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew); err != nil { - return fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) - } - } else { - // Update our last build time here so we avoid checking for new certs - // for a while. - sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() - - if len(lastDeltaSerial) > 0 { - // When we have a last delta serial, write out the relevant info - // so we can skip extra CRL rebuilds. 
- deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} - - lastDeltaBuildEntry, err := logical.StorageEntryJSON(deltaWALLastBuildSerial, deltaInfo) - if err != nil { - return fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) - } - - err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) - if err != nil { - return fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + return nil, fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err) } } } // All good :-) - return nil + return warnings, nil } -func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID]*x509.Certificate) bool { +func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate) bool { if len(revInfo.CertificateIssuer) > 0 { issuerId := revInfo.CertificateIssuer if _, issuerExists := issuerIDCertMap[issuerId]; issuerExists { @@ -1061,7 +1879,7 @@ func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID] return false } -func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509.Certificate, issuerIDCertMap map[issuerID]*x509.Certificate) bool { +func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509.Certificate, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate) bool { for issuerId, issuerCert := range issuerIDCertMap { if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) { if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil { @@ -1075,13 +1893,13 @@ func associateRevokedCertWithIsssuer(revInfo *revocationInfo, revokedCert *x509. return false } -func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) { +func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuing.IssuerID][]pkix.RevokedCertificate, error) { var unassignedCerts []pkix.RevokedCertificate - revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate) + revokedCertsMap := make(map[issuing.IssuerID][]pkix.RevokedCertificate) listingPath := revokedPath if isDelta { - listingPath = deltaWALPath + listingPath = localDeltaWALPath } revokedSerials, err := sc.Storage.List(sc.Context, listingPath) @@ -1191,12 +2009,108 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 // we should update the entry to make future CRL builds faster. revokedEntry, err = logical.StorageEntryJSON(revokedPath+serial, revInfo) if err != nil { - return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v", serial) + return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v: %w", serial, err) } err = sc.Storage.Put(sc.Context, revokedEntry) if err != nil { - return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v", serial) + return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v: %w", serial, err) + } + } + } + + return unassignedCerts, revokedCertsMap, nil +} + +func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuing.IssuerID][]pkix.RevokedCertificate, error) { + // Getting unified revocation entries is a bit different than getting + // the local ones. 
In particular, the full copy of the certificate is + // unavailable, so we'll be able to avoid parsing the stored certificate, + // at the expense of potentially having incorrect issuer mappings. + var unassignedCerts []pkix.RevokedCertificate + revokedCertsMap := make(map[issuing.IssuerID][]pkix.RevokedCertificate) + + listingPath := unifiedRevocationReadPathPrefix + if isDelta { + listingPath = unifiedDeltaWALPrefix + } + + // First, we find all clusters that have written certificates. + clusterIds, err := sc.Storage.List(sc.Context, listingPath) + if err != nil { + return nil, nil, fmt.Errorf("failed to list clusters for unified CRL building: %w", err) + } + + // We wish to prevent duplicate revocations on separate clusters from + // being added multiple times to the CRL. While we can't guarantee these + // are the same certificate, it doesn't matter: as long as they have the + // same issuer, a duplicate would imply issuance of two certs with the + // same serial, which would be an intentional violation of RFC 5280 prior + // to importing an issuer into Vault, and is highly unlikely within Vault + // due to 120-bit random serial numbers. + foundSerials := make(map[string]bool) + + // Then for every cluster, we find its revoked certificates... + for _, clusterId := range clusterIds { + if !strings.HasSuffix(clusterId, "/") { + // No entries + continue + } + + clusterPath := listingPath + clusterId + serials, err := sc.Storage.List(sc.Context, clusterPath) + if err != nil { + return nil, nil, fmt.Errorf("failed to list serials in cluster (%v) for unified CRL building: %w", clusterId, err) + } + + // At this point, we need the storage entry. Rather than using the + // clusterPath and adding the serial, we need to use the true + // cross-cluster revocation entry (as our above listing might have + // used delta WAL entries without the full revocation info). + serialPrefix := unifiedRevocationReadPathPrefix + clusterId + for _, serial := range serials { + if isDelta && (serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName) { + // Skip our placeholder entries... + continue + } + + serialPath := serialPrefix + serial + entryRaw, err := sc.Storage.Get(sc.Context, serialPath) + if err != nil { + return nil, nil, fmt.Errorf("failed to read unified revocation entry in cluster (%v) for unified CRL building: %w", clusterId, err) + } + if entryRaw == nil { + // Skip empty entries. We'll eventually tidy them. + continue + } + + var xRevEntry unifiedRevocationEntry + if err := entryRaw.DecodeJSON(&xRevEntry); err != nil { + return nil, nil, fmt.Errorf("failed json decoding of unified revocation entry at path %v: %w", serialPath, err) + } + + // Convert to pkix.RevokedCertificate entries. + var revEntry pkix.RevokedCertificate + var ok bool + revEntry.SerialNumber, ok = serialToBigInt(serial) + if !ok { + return nil, nil, fmt.Errorf("failed to encode serial for CRL building: %v", serial) + } + + revEntry.RevocationTime = xRevEntry.RevocationTimeUTC + + if found, inFoundMap := foundSerials[normalizeSerial(serial)]; found && inFoundMap { + // Serial has already been added to the CRL. + continue + } + foundSerials[normalizeSerial(serial)] = true + + // Finally, add it to the correct mapping. 
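// --- Illustrative sketch, not part of the diff ---
// serialToBigInt (used above) must turn the storage form of a serial --
// lowercase hex bytes joined by ':' or '-' -- back into the *big.Int that
// pkix.RevokedCertificate expects. A hedged sketch of one way to do that,
// assuming the "math/big" and "strings" imports:
func serialStringToBigInt(serial string) (*big.Int, bool) {
	hexSerial := strings.ReplaceAll(strings.ReplaceAll(serial, ":", ""), "-", "")
	return new(big.Int).SetString(hexSerial, 16)
}
// --- end sketch ---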
+ _, present := issuerIDCertMap[xRevEntry.CertificateIssuer] + if !present { + unassignedCerts = append(unassignedCerts, revEntry) + } else { + revokedCertsMap[xRevEntry.CertificateIssuer] = append(revokedCertsMap[xRevEntry.CertificateIssuer], revEntry) } } } @@ -1204,7 +2118,7 @@ func getRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x50 return unassignedCerts, revokedCertsMap, nil } -func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issuerIDCertMap map[issuerID]*x509.Certificate, revokedCertsMap map[issuerID][]pkix.RevokedCertificate) error { +func augmentWithRevokedIssuers(issuerIDEntryMap map[issuing.IssuerID]*issuing.IssuerEntry, issuerIDCertMap map[issuing.IssuerID]*x509.Certificate, revokedCertsMap map[issuing.IssuerID][]pkix.RevokedCertificate) error { // When we set up our maps with the legacy CA bundle, we only have a // single entry here. This entry is never revoked, so the outer loop // will exit quickly. @@ -1240,10 +2154,10 @@ func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issue // Builds a CRL by going through the list of revoked certificates and building // a new CRL with the stored revocation times and serial numbers. -func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { +func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuing.IssuerID, revoked []pkix.RevokedCertificate, identifier issuing.CrlID, crlNumber int64, isUnified bool, isDelta bool, lastCompleteNumber int64) (*time.Time, error) { var revokedCerts []pkix.RevokedCertificate - crlLifetime, err := time.ParseDuration(crlInfo.Expiry) + crlLifetime, err := parseutil.ParseDurationSecond(crlInfo.Expiry) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("error parsing CRL duration of %s", crlInfo.Expiry)} } @@ -1267,7 +2181,7 @@ func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerI revokedCerts = revoked WRITE: - signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, CRLSigningUsage) + signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, issuing.CRLSigningUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -1308,9 +2222,15 @@ WRITE: // Ignore the CRL ID as it won't be persisted anyways; hard-code the // old legacy path and allow it to be updated. writePath = legacyCRLPath - } else if isDelta { - // Write the delta CRL to a unique storage location. - writePath += deltaCRLPathSuffix + } else { + if isUnified { + writePath = unifiedCRLPathPrefix + writePath + } + + if isDelta { + // Write the delta CRL to a unique storage location. 
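// --- Illustrative sketch, not part of the diff ---
// Swapping time.ParseDuration for parseutil.ParseDurationSecond in buildCRL
// (above) widens the accepted expiry formats: bare integers are treated as
// seconds in addition to Go duration strings. Hedged usage example:
//
//	d1, _ := parseutil.ParseDurationSecond("72h")   // 72h0m0s
//	d2, _ := parseutil.ParseDurationSecond("86400") // 24h0m0s; time.ParseDuration would reject this
// --- end sketch ---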
+ writePath += deltaCRLPathSuffix + } } err = sc.Storage.Put(sc.Context, &logical.StorageEntry{ @@ -1323,3 +2243,9 @@ WRITE: return &nextUpdate, nil } + +// shouldLocalPathsUseUnified determines, given a legacy path for a CRL/OCSP request, whether our +// configuration says we should return the unified response instead of the local one. +func shouldLocalPathsUseUnified(cfg *crlConfig) bool { + return cfg.UnifiedCRL && cfg.UnifiedCRLOnExistingPaths +} diff --git a/builtin/logical/pki/defaultdirectorypolicytype_enumer.go b/builtin/logical/pki/defaultdirectorypolicytype_enumer.go new file mode 100644 index 000000000000..917225ff834a --- /dev/null +++ b/builtin/logical/pki/defaultdirectorypolicytype_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=DefaultDirectoryPolicyType"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _DefaultDirectoryPolicyTypeName = "ForbidSignVerbatimRoleExternalPolicy" + +var _DefaultDirectoryPolicyTypeIndex = [...]uint8{0, 6, 18, 22, 36} + +func (i DefaultDirectoryPolicyType) String() string { + if i < 0 || i >= DefaultDirectoryPolicyType(len(_DefaultDirectoryPolicyTypeIndex)-1) { + return fmt.Sprintf("DefaultDirectoryPolicyType(%d)", i) + } + return _DefaultDirectoryPolicyTypeName[_DefaultDirectoryPolicyTypeIndex[i]:_DefaultDirectoryPolicyTypeIndex[i+1]] +} + +var _DefaultDirectoryPolicyTypeValues = []DefaultDirectoryPolicyType{0, 1, 2, 3} + +var _DefaultDirectoryPolicyTypeNameToValueMap = map[string]DefaultDirectoryPolicyType{ + _DefaultDirectoryPolicyTypeName[0:6]: 0, + _DefaultDirectoryPolicyTypeName[6:18]: 1, + _DefaultDirectoryPolicyTypeName[18:22]: 2, + _DefaultDirectoryPolicyTypeName[22:36]: 3, +} + +// DefaultDirectoryPolicyTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func DefaultDirectoryPolicyTypeString(s string) (DefaultDirectoryPolicyType, error) { + if val, ok := _DefaultDirectoryPolicyTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to DefaultDirectoryPolicyType values", s) +} + +// DefaultDirectoryPolicyTypeValues returns all values of the enum +func DefaultDirectoryPolicyTypeValues() []DefaultDirectoryPolicyType { + return _DefaultDirectoryPolicyTypeValues +} + +// IsADefaultDirectoryPolicyType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i DefaultDirectoryPolicyType) IsADefaultDirectoryPolicyType() bool { + for _, v := range _DefaultDirectoryPolicyTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/dnstest/server.go b/builtin/logical/pki/dnstest/server.go new file mode 100644 index 000000000000..751c0ae873fd --- /dev/null +++ b/builtin/logical/pki/dnstest/server.go @@ -0,0 +1,428 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package dnstest + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" +) + +type TestServer struct { + t *testing.T + ctx context.Context + log hclog.Logger + + runner *docker.Runner + network string + startup *docker.Service + + lock sync.Mutex + serial int + forwarders []string + domains []string + records map[string]map[string][]string // domain -> record -> value(s). 
+ + cleanup func() +} + +func SetupResolver(t *testing.T, domain string) *TestServer { + return SetupResolverOnNetwork(t, domain, "") +} + +func SetupResolverOnNetwork(t *testing.T, domain string, network string) *TestServer { + var ts TestServer + ts.t = t + ts.ctx = context.Background() + ts.domains = []string{domain} + ts.records = map[string]map[string][]string{} + ts.network = network + ts.log = hclog.L() + + ts.setupRunner(domain, network) + ts.startContainer(network) + ts.PushConfig() + + return &ts +} + +func (ts *TestServer) setupRunner(domain string, network string) { + var err error + ts.runner, err = docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "ubuntu/bind9", + ImageTag: "latest", + ContainerName: "bind9-dns-" + strings.ReplaceAll(domain, ".", "-"), + NetworkName: network, + Ports: []string{"53/udp"}, + // DNS container logging was disabled to reduce content within CI logs. + //LogConsumer: func(s string) { + // ts.log.Info(s) + //}, + }) + require.NoError(ts.t, err) +} + +func (ts *TestServer) startContainer(network string) { + connUpFunc := func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + // Perform a simple connection to this resolver, even though the + // default configuration doesn't do anything useful. + peer, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return nil, fmt.Errorf("failed to resolve peer: %v / %v: %w", host, port, err) + } + + conn, err := net.DialUDP("udp", nil, peer) + if err != nil { + return nil, fmt.Errorf("failed to dial peer: %v / %v / %v: %w", host, port, peer, err) + } + defer conn.Close() + + _, err = conn.Write([]byte("garbage-in")) + if err != nil { + return nil, fmt.Errorf("failed to write to peer: %v / %v / %v: %w", host, port, peer, err) + } + + // Connection worked. + return docker.NewServiceHostPort(host, port), nil + } + + result, _, err := ts.runner.StartNewService(ts.ctx, true, true, connUpFunc) + require.NoError(ts.t, err, "failed to start dns resolver for "+ts.domains[0]) + ts.startup = result + + if ts.startup.StartResult.RealIP == "" { + mapping, err := ts.runner.GetNetworkAndAddresses(ts.startup.Container.ID) + require.NoError(ts.t, err, "failed to fetch network addresses to correct missing real IP address") + if len(network) == 0 { + require.Equal(ts.t, 1, len(mapping), "expected exactly one network address") + for network = range mapping { + // Because mapping is a map of network name->ip, we need + // to use the above range's assignment to get the name, + // as there is no other way of getting the keys of a map. + } + } + require.Contains(ts.t, mapping, network, "expected network to be part of the mapping") + ts.startup.StartResult.RealIP = mapping[network] + } + + ts.log.Info(fmt.Sprintf("[dnsserv] Addresses of DNS resolver: local=%v / container=%v", ts.GetLocalAddr(), ts.GetRemoteAddr())) +} + +func (ts *TestServer) buildNamedConf() string { + forwarders := "\n" + if len(ts.forwarders) > 0 { + forwarders = "\tforwarders {\n" + for _, forwarder := range ts.forwarders { + forwarders += "\t\t" + forwarder + ";\n" + } + forwarders += "\t};\n" + } + + zones := "\n" + for _, domain := range ts.domains { + zones += fmt.Sprintf("zone \"%s\" {\n", domain) + zones += "\ttype primary;\n" + zones += fmt.Sprintf("\tfile \"%s.zone\";\n", domain) + zones += "\tallow-update {\n\t\tnone;\n\t};\n" + zones += "\tnotify no;\n" + zones += "};\n\n" + } + + // Reverse lookups are not handled as they're not presently necessary. 
+ + cfg := `options { + directory "/var/cache/bind"; + + dnssec-validation no; + + ` + forwarders + ` +}; + +` + zones + + return cfg +} + +func (ts *TestServer) buildZoneFile(target string) string { + // One second TTL by default to allow quick refreshes. + zone := "$TTL 1;\n" + + ts.serial += 1 + zone += fmt.Sprintf("@\tIN\tSOA\tns.%v.\troot.%v.\t(\n", target, target) + zone += fmt.Sprintf("\t\t\t%d;\n\t\t\t1;\n\t\t\t1;\n\t\t\t2;\n\t\t\t1;\n\t\t\t)\n\n", ts.serial) + zone += fmt.Sprintf("@\tIN\tNS\tns%d.%v.\n", ts.serial, target) + zone += fmt.Sprintf("ns%d.%v.\tIN\tA\t%v\n", ts.serial, target, "127.0.0.1") + + for domain, records := range ts.records { + if !strings.HasSuffix(domain, target) { + continue + } + + for recordType, values := range records { + for _, value := range values { + zone += fmt.Sprintf("%s.\tIN\t%s\t%s\n", domain, recordType, value) + } + } + } + + return zone +} + +func (ts *TestServer) pushNamedConf() { + contents := docker.NewBuildContext() + cfgPath := "/etc/bind/named.conf.options" + namedCfg := ts.buildNamedConf() + contents[cfgPath] = docker.PathContentsFromString(namedCfg) + contents[cfgPath].SetOwners(0, 142) // root, bind + + ts.log.Info(fmt.Sprintf("Generated bind9 config (%s):\n%v\n", cfgPath, namedCfg)) + + err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) + require.NoError(ts.t, err, "failed pushing updated named.conf.options to container") +} + +func (ts *TestServer) pushZoneFiles() { + contents := docker.NewBuildContext() + + for _, domain := range ts.domains { + path := "/var/cache/bind/" + domain + ".zone" + zoneFile := ts.buildZoneFile(domain) + contents[path] = docker.PathContentsFromString(zoneFile) + contents[path].SetOwners(0, 142) // root, bind + + ts.log.Info(fmt.Sprintf("Generated bind9 zone file for %v (%s):\n%v\n", domain, path, zoneFile)) + } + + err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents) + require.NoError(ts.t, err, "failed pushing updated zone files to container") +} + +func (ts *TestServer) PushConfig() { + ts.lock.Lock() + defer ts.lock.Unlock() + + _, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "freeze"}) + require.NoError(ts.t, err, "failed to freeze DNS config") + + // There are two cases here: + // + // 1. We've added a new top-level domain name. Here, we want to make + // sure the new zone file is pushed before we push the reference + // to it. + // 2. We've just added a new record to an existing zone. Here, the + // order doesn't matter, and most likely the second push will be + // a no-op. + ts.pushZoneFiles() + ts.pushNamedConf() + + _, _, _, err = ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "thaw"}) + require.NoError(ts.t, err, "failed to thaw DNS config") + + // Wait until our config has taken. + corehelpers.RetryUntil(ts.t, 15*time.Second, func() error { + // bind reloads based on file mtime, so touch the zone files before + // reloading to make sure they have been updated more recently than + // when the last update was written. Then issue a new SIGHUP. + for _, domain := range ts.domains { + path := "/var/cache/bind/" + domain + ".zone" + touchCmd := []string{"touch", path} + + _, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, touchCmd) + if err != nil { + return fmt.Errorf("failed to update zone mtime: %w", err) + } + } + ts.runner.DockerAPI.ContainerKill(ts.ctx, ts.startup.Container.ID, "SIGHUP") + + // Connect to our bind resolver. 
+ resolver := &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, ts.GetLocalAddr()) + }, + } + + // last domain has the given serial number, which also appears in the + // NS record so we can fetch it via Go. + lastDomain := ts.domains[len(ts.domains)-1] + records, err := resolver.LookupNS(ts.ctx, lastDomain) + if err != nil { + return fmt.Errorf("failed to lookup NS record for %v: %w", lastDomain, err) + } + + if len(records) != 1 { + return fmt.Errorf("expected only 1 NS record for %v, got %v/%v", lastDomain, len(records), records) + } + + expectedNS := fmt.Sprintf("ns%d.%v.", ts.serial, lastDomain) + if records[0].Host != expectedNS { + return fmt.Errorf("expected to find NS %v, got %v indicating reload hadn't completed", expectedNS, records[0]) + } + + return nil + }) +} + +func (ts *TestServer) GetLocalAddr() string { + return ts.startup.Config.Address() +} + +func (ts *TestServer) GetRemoteAddr() string { + return fmt.Sprintf("%s:%d", ts.startup.StartResult.RealIP, 53) +} + +func (ts *TestServer) AddDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + for _, existing := range ts.domains { + if existing == domain { + return + } + } + + ts.domains = append(ts.domains, domain) +} + +func (ts *TestServer) AddRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + ts.t.Fatalf("cannot add record %v/%v :: [%v] -- no domain zone matching (%v)", record, domain, value, ts.domains) + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + ts.records[domain] = map[string][]string{} + } + + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate == value { + // Already present; skip adding. + return + } + } + } + + ts.records[domain][record] = append(ts.records[domain][record], value) +} + +func (ts *TestServer) RemoveRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + var remaining []string + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate != value { + remaining = append(remaining, candidate) + } + } + } + + ts.records[domain][record] = remaining +} + +func (ts *TestServer) RemoveRecordsOfTypeForDomain(domain string, record string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. 
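// --- Illustrative sketch, not part of the diff ---
// Typical use of this DNS test harness from a PKI/ACME test, assuming a
// working Docker daemon; the domain and token values are hypothetical:
//
//	ts := dnstest.SetupResolver(t, "dadgarcorp.com")
//	defer ts.Cleanup()
//
//	ts.AddRecord("_acme-challenge.www.dadgarcorp.com", "TXT", token)
//	ts.PushConfig() // freeze, regenerate zone files, thaw, wait for reload
//
//	// Point the system under test at ts.GetLocalAddr() or ts.GetRemoteAddr().
// --- end sketch ---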
+ return + } + + delete(ts.records[domain], record) +} + +func (ts *TestServer) RemoveRecordsForDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + ts.records[domain] = map[string][]string{} +} + +func (ts *TestServer) RemoveAllRecords() { + ts.lock.Lock() + defer ts.lock.Unlock() + + ts.records = map[string]map[string][]string{} +} + +func (ts *TestServer) Cleanup() { + if ts.cleanup != nil { + ts.cleanup() + } + if ts.startup != nil && ts.startup.Cleanup != nil { + ts.startup.Cleanup() + } +} diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index 78cdd67e8c93..abf0ac4cd2cf 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" ) @@ -13,6 +17,7 @@ const ( keyIdParam = "key_id" keyTypeParam = "key_type" keyBitsParam = "key_bits" + skidParam = "subject_key_id" ) // addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing @@ -152,6 +157,16 @@ The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, of the ca_chain field.`, } + fields["user_ids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `The requested user_ids value to place in the subject, +if any, in a comma-delimited list. Restricted by allowed_user_ids. +Any values are added with OID 0.9.2342.19200300.100.1.1.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User ID(s)", + }, + } + fields = addIssuerRefField(fields) return fields @@ -310,9 +325,8 @@ is required. Ignored for other types.`, Type: framework.TypeInt, Default: 0, Description: `The number of bits to use. Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, +0 (universal default); with rsa key_type: 2048 (default), 3072, 4096 or 8192; +with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.`, DisplayAttrs: &framework.DisplayAttributes{ Value: 0, }, @@ -461,6 +475,40 @@ past the issuer_safety_buffer. No keys will be removed as part of this operation.`, } + fields["tidy_move_legacy_ca_bundle"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to move the legacy ca_bundle from +/config/ca_bundle to /config/ca_bundle.bak. This prevents downgrades +to pre-Vault 1.11 versions (as older PKI engines do not know about +the new multi-issuer storage layout), but improves the performance +on seal wrapped PKI mounts. This will only occur if at least +issuer_safety_buffer time has occurred after the initial storage +migration. + +This backup is saved in case of an issue in future migrations. +Operators may consider removing it via sys/raw if they desire. +The backup will be removed via a DELETE /root call, but note that +this removes ALL issuers within the mount (and is thus not desirable +in most operational scenarios).`, + } + + fields["tidy_acme"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to enable tidying ACME accounts, +orders and authorizations. 
ACME orders are tidied (deleted)
+safety_buffer after the certificate associated with them expires,
+or after the order and relevant authorizations have expired if no
+certificate was produced. Authorizations are tidied with the
+corresponding order.
+
+When a valid ACME Account is at least acme_account_safety_buffer
+old, and has no remaining orders associated with it, the account is
+marked as revoked. After another acme_account_safety_buffer has
+passed from the revocation or deactivation date, a revoked or
+deactivated ACME account is deleted.`,
+		Default: false,
+	}
+
 	fields["safety_buffer"] = &framework.FieldSchema{
 		Type: framework.TypeDurationSecond,
 		Description: `The amount of extra time that must have passed
@@ -479,6 +527,14 @@ Defaults to 8760 hours (1 year).`,
 		Default: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
 	}
 
+	fields["acme_account_safety_buffer"] = &framework.FieldSchema{
+		Type: framework.TypeDurationSecond,
+		Description: `The amount of time that must pass after creation
+that an account with no orders is marked revoked, and the time that must
+pass after revocation or deactivation before the account is deleted.`,
+		Default: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
+	}
+
 	fields["pause_duration"] = &framework.FieldSchema{
 		Type: framework.TypeString,
 		Description: `The amount of time to wait between processing
@@ -491,5 +547,100 @@ greater period of time. By default this is zero seconds.`,
 		Default: "0s",
 	}
 
+	fields["tidy_revocation_queue"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to remove stale revocation queue entries
+that haven't been confirmed by any active cluster. Only runs on the
+active primary node.`,
+		Default: defaultTidyConfig.RevocationQueue,
+	}
+
+	fields["revocation_queue_safety_buffer"] = &framework.FieldSchema{
+		Type: framework.TypeDurationSecond,
+		Description: `The amount of time that must pass from the
+cross-cluster revocation request being initiated to when it will be
+slated for removal. Setting this too low may remove valid revocation
+requests before the owning cluster has a chance to process them,
+especially if the cluster is offline.`,
+		Default: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
+	}
+
+	fields["tidy_cross_cluster_revoked_certs"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to enable tidying up
+the cross-cluster revoked certificate store. Only runs on the active
+primary node.`,
+	}
+
+	return fields
+}
+
+// getCsrSignVerbatimSchemaFields generates the entire list of schema fields we
+// need for CSR sign verbatim; this is also leveraged by ACME internally.
+func getCsrSignVerbatimSchemaFields() map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{}
+	fields = addNonCACommonFields(fields)
+	fields = addSignVerbatimRoleFields(fields)
+
+	fields["csr"] = &framework.FieldSchema{
+		Type:    framework.TypeString,
+		Default: "",
+		Description: `PEM-format CSR to be signed. Values will be
+taken verbatim from the CSR, except for
+basic constraints.`,
+	}
+
+	return fields
+}
+
+// addSignVerbatimRoleFields provides the fields and defaults to be used by anything that is building up the fields
+// and their corresponding default values when generating/using a sign-verbatim type role such as buildSignVerbatimRole.
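+//
+// As a usage sketch (using only what is defined in this file), a caller
+// can seed an empty schema and get the five sign-verbatim fields back:
+//
+//	fields := addSignVerbatimRoleFields(map[string]*framework.FieldSchema{})
+//	// fields now holds key_usage, ext_key_usage, ext_key_usage_oids,
+//	// signature_bits and use_pss, each carrying its issuing.* default.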
+func addSignVerbatimRoleFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleKeyUsages, + Description: `A comma-separated string or list of key usages (not extended +key usages). Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage +-- simply drop the "KeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + fields["ext_key_usage"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleEstKeyUsages, + Description: `A comma-separated string or list of extended key usages. Valid values can be found at +https://golang.org/pkg/crypto/x509/#ExtKeyUsage +-- simply drop the "ExtKeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list.`, + } + + fields["ext_key_usage_oids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Default: issuing.DefaultRoleEstKeyUsageOids, + Description: `A comma-separated string or list of extended key usage oids.`, + } + + fields["signature_bits"] = &framework.FieldSchema{ + Type: framework.TypeInt, + Default: issuing.DefaultRoleSignatureBits, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: issuing.DefaultRoleSignatureBits, + }, + } + + fields["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: issuing.DefaultRoleUsePss, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + } + return fields } diff --git a/builtin/logical/pki/ifmodifiedreqtype_enumer.go b/builtin/logical/pki/ifmodifiedreqtype_enumer.go new file mode 100644 index 000000000000..b366fd825fc1 --- /dev/null +++ b/builtin/logical/pki/ifmodifiedreqtype_enumer.go @@ -0,0 +1,53 @@ +// Code generated by "enumer -type=ifModifiedReqType -trimprefix=ifModified"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _ifModifiedReqTypeName = "UnknownCACRLDeltaCRLUnifiedCRLUnifiedDeltaCRL" + +var _ifModifiedReqTypeIndex = [...]uint8{0, 7, 9, 12, 20, 30, 45} + +func (i ifModifiedReqType) String() string { + if i < 0 || i >= ifModifiedReqType(len(_ifModifiedReqTypeIndex)-1) { + return fmt.Sprintf("ifModifiedReqType(%d)", i) + } + return _ifModifiedReqTypeName[_ifModifiedReqTypeIndex[i]:_ifModifiedReqTypeIndex[i+1]] +} + +var _ifModifiedReqTypeValues = []ifModifiedReqType{0, 1, 2, 3, 4, 5} + +var _ifModifiedReqTypeNameToValueMap = map[string]ifModifiedReqType{ + _ifModifiedReqTypeName[0:7]: 0, + _ifModifiedReqTypeName[7:9]: 1, + _ifModifiedReqTypeName[9:12]: 2, + _ifModifiedReqTypeName[12:20]: 3, + _ifModifiedReqTypeName[20:30]: 4, + _ifModifiedReqTypeName[30:45]: 5, +} + +// ifModifiedReqTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func ifModifiedReqTypeString(s string) (ifModifiedReqType, error) { + if val, ok := _ifModifiedReqTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ifModifiedReqType values", s) +} + +// ifModifiedReqTypeValues returns all values of the enum +func ifModifiedReqTypeValues() []ifModifiedReqType { + return _ifModifiedReqTypeValues +} + +// IsAifModifiedReqType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ifModifiedReqType) IsAifModifiedReqType() bool { + for _, v := range _ifModifiedReqTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/integration_test.go b/builtin/logical/pki/integration_test.go index c2bdffbde3fb..5ad534ac902c 100644 --- a/builtin/logical/pki/integration_test.go +++ b/builtin/logical/pki/integration_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -11,7 +14,15 @@ import ( "fmt" "testing" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + vaulthttp "github.com/hashicorp/vault/http" + vaultocsp "github.com/hashicorp/vault/sdk/helper/ocsp" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" ) @@ -31,7 +42,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId1 := resp.Data["issuer_id"].(issuerID) + issuerId1 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName1 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId1, "issuer id was empty on initial rotate root command") @@ -51,7 +62,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId2 := resp.Data["issuer_id"].(issuerID) + issuerId2 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName2 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId2, "issuer id was empty on second rotate root command") @@ -73,7 +84,7 @@ func TestIntegration_RotateRootUsesNext(t *testing.T) { require.NotNil(t, resp, "got nil response from rotate root") require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) - issuerId3 := resp.Data["issuer_id"].(issuerID) + issuerId3 := resp.Data["issuer_id"].(issuing.IssuerID) issuerName3 := resp.Data["issuer_name"] require.NotEmpty(t, issuerId3, "issuer id was empty on third rotate root command") @@ -227,6 +238,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) rootCert := resp.Data["certificate"].(string) + schema.ValidateResponse(t, schema.GetResponseSchema(t, rootBackend.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) + // generate intermediate resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -288,6 +301,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.NoError(t, err, "failed setting up role example") require.NotNil(t, resp, "got nil response from setting up role example: %#v", resp) + schema.ValidateResponse(t, 
schema.GetResponseSchema(t, intBackend.Route("roles/example"), logical.UpdateOperation), resp, true) + // Issue cert resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -302,6 +317,8 @@ func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { require.NoError(t, err, "failed issuing a leaf cert from int ca") require.NotNil(t, resp, "got nil response issuing a leaf cert from int ca") require.False(t, resp.IsError(), "got an error issuing a leaf cert from int ca: %#v", resp) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, intBackend.Route("issue/example"), logical.UpdateOperation), resp, true) } func TestIntegration_CSRGeneration(t *testing.T) { @@ -371,6 +388,7 @@ func TestIntegration_AutoIssuer(t *testing.T) { "issuer_name": "root-1", "key_type": "ec", }) + requireSuccessNonNilResponse(t, resp, err) issuerIdOne := resp.Data["issuer_id"] require.NotEmpty(t, issuerIdOne) @@ -381,12 +399,15 @@ func TestIntegration_AutoIssuer(t *testing.T) { requireSuccessNonNilResponse(t, resp, err) require.Equal(t, issuerIdOne, resp.Data["default"]) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.ReadOperation), resp, true) + // Enable the new config option. - _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + resp, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ "default": issuerIdOne, "default_follows_latest_issuer": true, }) require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.UpdateOperation), resp, true) // Now generate the second root; it should become default. resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ @@ -416,7 +437,7 @@ func TestIntegration_AutoIssuer(t *testing.T) { "pem_bundle": certOne, }) requireSuccessNonNilResponse(t, resp, err) - issuerIdOneReimported := issuerID(resp.Data["imported_issuers"].([]string)[0]) + issuerIdOneReimported := issuing.IssuerID(resp.Data["imported_issuers"].([]string)[0]) resp, err = CBRead(b, s, "config/issuers") requireSuccessNonNilResponse(t, resp, err) @@ -460,11 +481,252 @@ func TestIntegration_AutoIssuer(t *testing.T) { require.Equal(t, issuerIdOneReimported, resp.Data["default"]) } -func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuerID, keyID) { +// TestLDAPAiaCrlUrls validates we can properly handle CRL urls that are ldap based. 
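+//
+// Such URLs follow the RFC 4516 LDAP URL form
+// ldap://<host>/<distinguished-name>?<attribute>, here with the ";binary"
+// attribute option; the test's concern is simply that these values survive
+// the config/urls round trip and appear verbatim in issued certificates.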
+func TestLDAPAiaCrlUrls(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + singleCore := cluster.Cores[0] + vault.TestWaitActive(t, singleCore.Core) + client := singleCore.Client + + mountPKIEndpoint(t, client, "pki") + + // Attempt multiple urls + crls := []string{ + "ldap://ldap.example.com/cn=example%20CA,dc=example,dc=com?certificateRevocationList;binary", + "ldap://ldap.example.com/cn=CA,dc=example,dc=com?authorityRevocationList;binary", + } + + _, err := client.Logical().Write("pki/config/urls", map[string]interface{}{ + "crl_distribution_points": crls, + }) + require.NoError(t, err) + + resp, err := client.Logical().Read("pki/config/urls") + require.NoError(t, err, "failed reading config/urls") + require.NotNil(t, resp, "resp was nil") + require.NotNil(t, resp.Data, "data within response was nil") + require.NotEmpty(t, resp.Data["crl_distribution_points"], "crl_distribution_points was nil within data") + require.Len(t, resp.Data["crl_distribution_points"], len(crls)) + + for _, crlVal := range crls { + require.Contains(t, resp.Data["crl_distribution_points"], crlVal) + } + + resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + resp, err = client.Logical().Write("pki/issue/example-root", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + + require.EqualValues(t, crls, cert.CRLDistributionPoints) +} + +func TestIntegrationOCSPClientWithPKI(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err) + + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + // Set URLs pointing to the issuer. 
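+	// The config/cluster write below supplies the values that back the
+	// {{cluster_path}} and {{cluster_aia_path}} templating tokens used in
+	// config/urls once enable_templating is set; templated AIA generation
+	// fails at issuance time if a referenced token has no backing value.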
+ _, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": client.Address() + "/v1/pki", + "aia_path": client.Address() + "/v1/pki", + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/config/urls", map[string]interface{}{ + "enable_templating": true, + "crl_distribution_points": "{{cluster_aia_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_aia_path}}/ocsp", + }) + require.NoError(t, err) + + // Build an intermediate CA + resp, err = client.Logical().Write("pki/intermediate/generate/internal", map[string]interface{}{ + "common_name": "Int X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["csr"]) + intermediateCSR := resp.Data["csr"].(string) + + resp, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "20h", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + intermediateCert := resp.Data["certificate"] + + resp, err = client.Logical().Write("pki/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + rawImportedIssuers := resp.Data["imported_issuers"].([]interface{}) + require.Equal(t, len(rawImportedIssuers), 1) + importedIssuer := rawImportedIssuers[0].(string) + require.NotEmpty(t, importedIssuer) + + // Set intermediate as default. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": importedIssuer, + }) + require.NoError(t, err) + + // Setup roles for root, intermediate. + _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/roles/example-int", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue certs and validate them against OCSP. 
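+	// The loop below exercises the full round trip for each role: issue a
+	// leaf, parse the returned certificate and issuing CA, revoke the leaf
+	// by serial, then expect the OCSP client (configured fail-closed via
+	// FailOpenFalse) to reject the now-revoked certificate.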
+ for _, path := range []string{"pki/issue/example-int", "pki/issue/example-root"} { + t.Logf("Validating against path: %v", path) + resp, err = client.Logical().Write(path, map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["issuing_ca"]) + require.NotEmpty(t, resp.Data["serial_number"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, cert) + + issuerPEM := resp.Data["issuing_ca"].(string) + issuerBlock, _ := pem.Decode([]byte(issuerPEM)) + require.NotNil(t, issuerBlock) + issuer, err := x509.ParseCertificate(issuerBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, issuer) + + serialNumber := resp.Data["serial_number"].(string) + + testLogger := hclog.New(hclog.DefaultOptions) + + conf := &vaultocsp.VerifyConfig{ + OcspFailureMode: vaultocsp.FailOpenFalse, + ExtraCas: []*x509.Certificate{cluster.CACert}, + } + ocspClient := vaultocsp.New(func() hclog.Logger { + return testLogger + }, 10) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNumber, + }) + require.NoError(t, err) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.Error(t, err) + } +} + +func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuing.IssuerID, issuing.KeyID) { return genTestRootCaWithIssuerName(t, b, s, "") } -func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuerID, keyID) { +func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuing.IssuerID, issuing.KeyID) { data := map[string]interface{}{ "common_name": "test.com", } @@ -482,8 +744,8 @@ func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, is require.NotNil(t, resp, "got nil response from generating root ca") require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) + issuerId := resp.Data["issuer_id"].(issuing.IssuerID) + keyId := resp.Data["key_id"].(issuing.KeyID) require.NotEmpty(t, issuerId, "returned issuer id was empty") require.NotEmpty(t, keyId, "returned key id was empty") diff --git a/builtin/logical/pki/issuing/aia.go b/builtin/logical/pki/issuing/aia.go new file mode 100644 index 000000000000..0f2e76b99f4a --- /dev/null +++ b/builtin/logical/pki/issuing/aia.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "fmt" + "net/url" + "strings" + "unicode/utf8" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ClusterConfigPath = "config/cluster" + +type AiaConfigEntry struct { + IssuingCertificates []string `json:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers"` + EnableTemplating bool `json:"enable_templating"` +} + +type ClusterConfigEntry struct { + Path string `json:"path"` + AIAPath string `json:"aia_path"` +} + +func GetAIAURLs(ctx context.Context, s logical.Storage, i *IssuerEntry) (*certutil.URLEntries, error) { + // Default to the per-issuer AIA URLs. + entries := i.AIAURIs + + // If none are set (either due to a nil entry or because no URLs have + // been provided), fall back to the global AIA URL config. + if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { + var err error + + entries, err = GetGlobalAIAURLs(ctx, s) + if err != nil { + return nil, err + } + } + + if entries == nil { + return &certutil.URLEntries{}, nil + } + + return ToURLEntries(ctx, s, i.ID, entries) +} + +func GetGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*AiaConfigEntry, error) { + entry, err := storage.Get(ctx, "urls") + if err != nil { + return nil, err + } + + entries := &AiaConfigEntry{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + EnableTemplating: false, + } + + if entry == nil { + return entries, nil + } + + if err := entry.DecodeJSON(entries); err != nil { + return nil, err + } + + return entries, nil +} + +func ToURLEntries(ctx context.Context, s logical.Storage, issuer IssuerID, c *AiaConfigEntry) (*certutil.URLEntries, error) { + if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { + return &certutil.URLEntries{}, nil + } + + result := certutil.URLEntries{ + IssuingCertificates: c.IssuingCertificates[:], + CRLDistributionPoints: c.CRLDistributionPoints[:], + OCSPServers: c.OCSPServers[:], + } + + if c.EnableTemplating { + cfg, err := GetClusterConfig(ctx, s) + if err != nil { + return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) + } + + for name, source := range map[string]*[]string{ + "issuing_certificates": &result.IssuingCertificates, + "crl_distribution_points": &result.CRLDistributionPoints, + "ocsp_servers": &result.OCSPServers, + } { + templated := make([]string, len(*source)) + for index, uri := range *source { + if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (path)") + } + if strings.Contains(uri, "{{cluster_aia_path}}") && len(cfg.AIAPath) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (aia_path)") + } + if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { + // Elide issuer AIA info as we lack an issuer_id. 
+					return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation")
+				}
+
+				uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path)
+				uri = strings.ReplaceAll(uri, "{{cluster_aia_path}}", cfg.AIAPath)
+				uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String())
+				templated[index] = uri
+			}
+
+			if uri := ValidateURLs(templated); uri != "" {
+				return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri)
+			}
+
+			*source = templated
+		}
+	}
+
+	return &result, nil
+}
+
+func GetClusterConfig(ctx context.Context, s logical.Storage) (*ClusterConfigEntry, error) {
+	entry, err := s.Get(ctx, ClusterConfigPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var result ClusterConfigEntry
+	if entry == nil {
+		return &result, nil
+	}
+
+	if err = entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+func ValidateURLs(urls []string) string {
+	for _, curr := range urls {
+		if !isURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") || strings.Contains(curr, "{{cluster_aia_path}}") {
+			return curr
+		}
+	}
+
+	return ""
+}
+
+const (
+	maxURLRuneCount = 2083
+	minURLRuneCount = 3
+)
+
+// isURL checks whether the string is a URL.
+func isURL(str string) bool {
+	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+		return false
+	}
+	strTemp := str
+	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+		// Support a URL with no indicated scheme but with a colon for the
+		// port number: http:// is prepended so url.Parse will succeed, and
+		// strTemp is used so the original str is left unmodified.
+		strTemp = "http://" + str
+	}
+	u, err := url.ParseRequestURI(strTemp)
+	if err != nil {
+		return false
+	}
+	if strings.HasPrefix(u.Host, ".") {
+		return false
+	}
+	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+		return false
+	}
+	return true
+}
diff --git a/builtin/logical/pki/issuing/config_issuer.go b/builtin/logical/pki/issuing/config_issuer.go
new file mode 100644
index 000000000000..aa9e10ec739b
--- /dev/null
+++ b/builtin/logical/pki/issuing/config_issuer.go
@@ -0,0 +1,124 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const StorageIssuerConfig = "config/issuers"
+
+type IssuerConfigEntry struct {
+	// This new fetchedDefault field allows us to detect if the default
+	// issuer was modified, in turn dispatching the timestamp updater
+	// if necessary.
+ fetchedDefault IssuerID `json:"-"` + DefaultIssuerId IssuerID `json:"default"` + DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` +} + +func GetIssuersConfig(ctx context.Context, s logical.Storage) (*IssuerConfigEntry, error) { + entry, err := s.Get(ctx, StorageIssuerConfig) + if err != nil { + return nil, err + } + + issuerConfig := &IssuerConfigEntry{} + if entry != nil { + if err := entry.DecodeJSON(issuerConfig); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode issuer configuration: %v", err)} + } + } + issuerConfig.fetchedDefault = issuerConfig.DefaultIssuerId + + return issuerConfig, nil +} + +func SetIssuersConfig(ctx context.Context, s logical.Storage, config *IssuerConfigEntry) error { + json, err := logical.StorageEntryJSON(StorageIssuerConfig, config) + if err != nil { + return err + } + + if err := s.Put(ctx, json); err != nil { + return err + } + + if err := changeDefaultIssuerTimestamps(ctx, s, config.fetchedDefault, config.DefaultIssuerId); err != nil { + return err + } + + return nil +} + +func changeDefaultIssuerTimestamps(ctx context.Context, s logical.Storage, oldDefault IssuerID, newDefault IssuerID) error { + if newDefault == oldDefault { + return nil + } + + now := time.Now().UTC() + + // When the default issuer changes, we need to modify four + // pieces of information: + // + // 1. The old default issuer's modification time, as it no + // longer works for the /cert/ca path. + // 2. The new default issuer's modification time, as it now + // works for the /cert/ca path. + // 3. & 4. Both issuer's CRLs, as they behave the same, under + // the /cert/crl path! + for _, thisId := range []IssuerID{oldDefault, newDefault} { + if len(thisId) == 0 { + continue + } + + // 1 & 2 above. + issuer, err := FetchIssuerById(ctx, s, thisId) + if err != nil { + // Due to the lack of transactions, if we deleted the default + // issuer (successfully), but the subsequent issuer config write + // (to clear the default issuer's old id) failed, we might have + // an inconsistent config. If we later hit this loop (and flush + // these timestamps again -- perhaps because the operator + // selected a new default), we'd have erred out here, because + // the since-deleted default issuer doesn't exist. In this case, + // skip the issuer instead of bailing. + err := fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %w", thisId, err) + if strings.Contains(err.Error(), "does not exist") { + hclog.L().Warn(err.Error()) + continue + } + + return err + } + + issuer.LastModified = now + err = WriteIssuer(ctx, s, issuer) + if err != nil { + return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err) + } + } + + // Fetch and update the internalCRLConfigEntry (3&4). + cfg, err := GetLocalCRLConfig(ctx, s) + if err != nil { + return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err) + } + + cfg.LastModified = now + cfg.DeltaLastModified = now + err = SetLocalCRLConfig(ctx, s, cfg) + if err != nil { + return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err) + } + + return nil +} diff --git a/builtin/logical/pki/issuing/config_key.go b/builtin/logical/pki/issuing/config_key.go new file mode 100644 index 000000000000..b0526a25c6fc --- /dev/null +++ b/builtin/logical/pki/issuing/config_key.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	StorageKeyConfig = "config/keys"
+)
+
+type KeyConfigEntry struct {
+	DefaultKeyId KeyID `json:"default"`
+}
+
+func SetKeysConfig(ctx context.Context, s logical.Storage, config *KeyConfigEntry) error {
+	json, err := logical.StorageEntryJSON(StorageKeyConfig, config)
+	if err != nil {
+		return err
+	}
+
+	return s.Put(ctx, json)
+}
+
+func GetKeysConfig(ctx context.Context, s logical.Storage) (*KeyConfigEntry, error) {
+	entry, err := s.Get(ctx, StorageKeyConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	keyConfig := &KeyConfigEntry{}
+	if entry != nil {
+		if err := entry.DecodeJSON(keyConfig); err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode key configuration: %v", err)}
+		}
+	}
+
+	return keyConfig, nil
+}
diff --git a/builtin/logical/pki/issuing/config_revocation.go b/builtin/logical/pki/issuing/config_revocation.go
new file mode 100644
index 000000000000..c1f8ff0ee079
--- /dev/null
+++ b/builtin/logical/pki/issuing/config_revocation.go
@@ -0,0 +1,190 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	StorageLocalCRLConfig   = "crls/config"
+	StorageUnifiedCRLConfig = "unified-crls/config"
+)
+
+type InternalCRLConfigEntry struct {
+	IssuerIDCRLMap        map[IssuerID]CrlID  `json:"issuer_id_crl_map"`
+	CRLNumberMap          map[CrlID]int64     `json:"crl_number_map"`
+	LastCompleteNumberMap map[CrlID]int64     `json:"last_complete_number_map"`
+	CRLExpirationMap      map[CrlID]time.Time `json:"crl_expiration_map"`
+	LastModified          time.Time           `json:"last_modified"`
+	DeltaLastModified     time.Time           `json:"delta_last_modified"`
+	UseGlobalQueue        bool                `json:"cross_cluster_revocation"`
+}
+
+type CrlID string
+
+func (p CrlID) String() string {
+	return string(p)
+}
+
+func GetLocalCRLConfig(ctx context.Context, s logical.Storage) (*InternalCRLConfigEntry, error) {
+	return _getInternalCRLConfig(ctx, s, StorageLocalCRLConfig)
+}
+
+func GetUnifiedCRLConfig(ctx context.Context, s logical.Storage) (*InternalCRLConfigEntry, error) {
+	return _getInternalCRLConfig(ctx, s, StorageUnifiedCRLConfig)
+}
+
+func _getInternalCRLConfig(ctx context.Context, s logical.Storage, path string) (*InternalCRLConfigEntry, error) {
+	entry, err := s.Get(ctx, path)
+	if err != nil {
+		return nil, err
+	}
+
+	mapping := &InternalCRLConfigEntry{}
+	if entry != nil {
+		if err := entry.DecodeJSON(mapping); err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)}
+		}
+	}
+
+	if len(mapping.IssuerIDCRLMap) == 0 {
+		mapping.IssuerIDCRLMap = make(map[IssuerID]CrlID)
+	}
+
+	if len(mapping.CRLNumberMap) == 0 {
+		mapping.CRLNumberMap = make(map[CrlID]int64)
+	}
+
+	if len(mapping.LastCompleteNumberMap) == 0 {
+		mapping.LastCompleteNumberMap = make(map[CrlID]int64)
+
+		// Since this might not exist on migration, we want to guess what
+		// the last full CRL number was. This was likely the last value
+		// from CRLNumberMap if it existed, since we're just adding the
+		// mapping here in this block.
+		//
+		// After the next full CRL build, we will have set this value
+		// correctly, so it doesn't really matter in the long term if
+		// we're off here.
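+		// For example, if CRLNumberMap held {X: 5}, the next CRL built
+		// for X would be number 5, so the last complete CRL was
+		// presumably number 4; hence the decrement below.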
+ for id, number := range mapping.CRLNumberMap { + // Decrement by one, since CRLNumberMap is the future number, + // not the last built number. + mapping.LastCompleteNumberMap[id] = number - 1 + } + } + + if len(mapping.CRLExpirationMap) == 0 { + mapping.CRLExpirationMap = make(map[CrlID]time.Time) + } + + return mapping, nil +} + +func SetLocalCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry) error { + return _setInternalCRLConfig(ctx, s, mapping, StorageLocalCRLConfig) +} + +func SetUnifiedCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry) error { + return _setInternalCRLConfig(ctx, s, mapping, StorageUnifiedCRLConfig) +} + +func _setInternalCRLConfig(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry, path string) error { + if err := _cleanupInternalCRLMapping(ctx, s, mapping, path); err != nil { + return fmt.Errorf("failed to clean up internal CRL mapping: %w", err) + } + + json, err := logical.StorageEntryJSON(path, mapping) + if err != nil { + return err + } + + return s.Put(ctx, json) +} + +func _cleanupInternalCRLMapping(ctx context.Context, s logical.Storage, mapping *InternalCRLConfigEntry, path string) error { + // Track which CRL IDs are presently referred to by issuers; any other CRL + // IDs are subject to cleanup. + // + // Unused IDs both need to be removed from this map (cleaning up the size + // of this storage entry) but also the full CRLs removed from disk. + presentMap := make(map[CrlID]bool) + for _, id := range mapping.IssuerIDCRLMap { + presentMap[id] = true + } + + // Identify which CRL IDs exist and are candidates for removal; + // theoretically these three maps should be in sync, but were added + // at different times. + toRemove := make(map[CrlID]bool) + for id := range mapping.CRLNumberMap { + if !presentMap[id] { + toRemove[id] = true + } + } + for id := range mapping.LastCompleteNumberMap { + if !presentMap[id] { + toRemove[id] = true + } + } + for id := range mapping.CRLExpirationMap { + if !presentMap[id] { + toRemove[id] = true + } + } + + // Depending on which path we're writing this config to, we need to + // remove CRLs from the relevant folder too. + isLocal := path == StorageLocalCRLConfig + baseCRLPath := "crls/" + if !isLocal { + baseCRLPath = "unified-crls/" + } + + for id := range toRemove { + // Clean up space in this mapping... + delete(mapping.CRLNumberMap, id) + delete(mapping.LastCompleteNumberMap, id) + delete(mapping.CRLExpirationMap, id) + + // And clean up space on disk from the fat CRL mapping. + crlPath := baseCRLPath + string(id) + deltaCRLPath := crlPath + "-delta" + if err := s.Delete(ctx, crlPath); err != nil { + return fmt.Errorf("failed to delete unreferenced CRL %v: %w", id, err) + } + if err := s.Delete(ctx, deltaCRLPath); err != nil { + return fmt.Errorf("failed to delete unreferenced delta CRL %v: %w", id, err) + } + } + + // Lastly, some CRLs could've been partially removed from the map but + // not from disk. Check to see if we have any dangling CRLs and remove + // them too. 
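+	// Recall the layout used above: full CRLs live at <base>/<crl id>,
+	// delta CRLs at <base>/<crl id>-delta, and the config entry itself at
+	// <base>/config, which is why the loop below skips the "config" key.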
+ list, err := s.List(ctx, baseCRLPath) + if err != nil { + return fmt.Errorf("failed listing all CRLs: %w", err) + } + for _, crl := range list { + if crl == "config" || strings.HasSuffix(crl, "/") { + continue + } + + if presentMap[CrlID(crl)] { + continue + } + + if err := s.Delete(ctx, baseCRLPath+"/"+crl); err != nil { + return fmt.Errorf("failed cleaning up orphaned CRL %v: %w", crl, err) + } + } + + return nil +} diff --git a/builtin/logical/pki/issuing/issue_common.go b/builtin/logical/pki/issuing/issue_common.go new file mode 100644 index 000000000000..af1c9f4e8fda --- /dev/null +++ b/builtin/logical/pki/issuing/issue_common.go @@ -0,0 +1,1023 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "fmt" + "net" + "net/url" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/ryanuber/go-glob" + "golang.org/x/net/idna" + + "github.com/hashicorp/vault/builtin/logical/pki/parsing" + "github.com/hashicorp/vault/builtin/logical/pki/pki_backend" +) + +var ( + // labelRegex is a single label from a valid domain name and was extracted + // from hostnameRegex below for use in leftWildLabelRegex, without any + // label separators (`.`). + labelRegex = `([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])` + + // A note on hostnameRegex: although we set the StrictDomainName option + // when doing the idna conversion, this appears to only affect output, not + // input, so it will allow e.g. host^123.example.com straight through. So + // we still need to use this to check the output. + hostnameRegex = regexp.MustCompile(`^(\*\.)?(` + labelRegex + `\.)*` + labelRegex + `\.?$`) + + // Left Wildcard Label Regex is equivalent to a single domain label + // component from hostnameRegex above, but with additional wildcard + // characters added. There are four possibilities here: + // + // 1. Entire label is a wildcard, + // 2. Wildcard exists at the start, + // 3. Wildcard exists at the end, + // 4. Wildcard exists in the middle. 
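+	//
+	// Illustrative labels only: "*" matches case 1, "*bar" case 2,
+	// "foo*" case 3, and "f*o" case 4; with a domain suffix attached
+	// these mirror the RFC 6125 forms *.example.net, *baz.example.net,
+	// baz*.example.net and b*z.example.net.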
+ allWildRegex = `\*` + startWildRegex = `\*` + labelRegex + endWildRegex = labelRegex + `\*` + middleWildRegex = labelRegex + `\*` + labelRegex + leftWildLabelRegex = regexp.MustCompile(`^(` + allWildRegex + `|` + startWildRegex + `|` + endWildRegex + `|` + middleWildRegex + `)$`) +) + +type EntityInfo struct { + DisplayName string + EntityID string +} + +func NewEntityInfoFromReq(req *logical.Request) EntityInfo { + if req == nil { + return EntityInfo{} + } + return EntityInfo{ + DisplayName: req.DisplayName, + EntityID: req.EntityID, + } +} + +type CreationBundleInput interface { + CertNotAfterInput + GetCommonName() string + GetSerialNumber() string + GetExcludeCnFromSans() bool + GetOptionalAltNames() (interface{}, bool) + GetOtherSans() []string + GetIpSans() []string + GetURISans() []string + GetOptionalSkid() (interface{}, bool) + IsUserIdInSchema() (interface{}, bool) + GetUserIds() []string +} + +// GenerateCreationBundle is a shared function that reads parameters supplied +// from the various endpoints and generates a CreationParameters with the +// parameters that can be used to issue or sign +func GenerateCreationBundle(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, cb CreationBundleInput, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { + // Read in names -- CN, DNS and email addresses + var cn string + var ridSerialNumber string + var warnings []string + dnsNames := []string{} + emailAddresses := []string{} + { + if csr != nil && role.UseCSRCommonName { + cn = csr.Subject.CommonName + } + if cn == "" { + cn = cb.GetCommonName() + if cn == "" && role.RequireCN { + return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} + } + } + + ridSerialNumber = cb.GetSerialNumber() + + // only take serial number from CSR if one was not supplied via API + if ridSerialNumber == "" && csr != nil { + ridSerialNumber = csr.Subject.SerialNumber + } + + if csr != nil && role.UseCSRSANs { + dnsNames = csr.DNSNames + emailAddresses = csr.EmailAddresses + } + + if cn != "" && !cb.GetExcludeCnFromSans() { + if strings.Contains(cn, "@") { + // Note: emails are not disallowed if the role's email protection + // flag is false, because they may well be included for + // informational purposes; it is up to the verifying party to + // ensure that email addresses in a subject alternate name can be + // used for the purpose for which they are presented + emailAddresses = append(emailAddresses, cn) + } else { + // Only add to dnsNames if it's actually a DNS name but convert + // idn first + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(cn) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } + if hostnameRegex.MatchString(converted) { + dnsNames = append(dnsNames, converted) + } + } + } + + if csr == nil || !role.UseCSRSANs { + cnAltRaw, ok := cb.GetOptionalAltNames() + if ok { + cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",") + for _, v := range cnAlt { + if strings.Contains(v, "@") { + emailAddresses = append(emailAddresses, v) + } else { + // Only add to dnsNames if it's actually a DNS name but + // convert idn first + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(v) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } 
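+						// At this point converted holds the IDNA (punycode)
+						// ASCII form of the name: for instance,
+						// "münchen.example.com" converts to
+						// "xn--mnchen-3ya.example.com", and that converted
+						// form is what hostnameRegex is matched against.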
+						if hostnameRegex.MatchString(converted) {
+							dnsNames = append(dnsNames, converted)
+						}
+					}
+				}
+			}
+		}
+
+		// Check the CN. This ensures that the CN is checked even if it's
+		// excluded from SANs.
+		if cn != "" {
+			badName := ValidateCommonName(b, role, entityInfo, cn)
+			if len(badName) != 0 {
+				return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+					"common name %s not allowed by this role", badName)}
+			}
+		}
+
+		if ridSerialNumber != "" {
+			badName := ValidateSerialNumber(role, ridSerialNumber)
+			if len(badName) != 0 {
+				return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+					"serial_number %s not allowed by this role", badName)}
+			}
+		}
+
+		// Check for bad email and/or DNS names
+		badName := ValidateNames(b, role, entityInfo, dnsNames)
+		if len(badName) != 0 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"subject alternate name %s not allowed by this role", badName)}
+		}
+
+		badName = ValidateNames(b, role, entityInfo, emailAddresses)
+		if len(badName) != 0 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"email address %s not allowed by this role", badName)}
+		}
+	}
+
+	// otherSANsInput has the same format as the other_sans HTTP param in the
+	// Vault PKI API: it is a list of strings of the form <oid>;<type>:<value>
+	// where <type> must be UTF8/UTF-8.
+	var otherSANsInput []string
+	// otherSANs is the output of parseOtherSANs(otherSANsInput): its keys are
+	// the <oid> value, its values are of the form [<type>, <value>]
+	var otherSANs map[string][]string
+	if sans := cb.GetOtherSans(); len(sans) > 0 {
+		otherSANsInput = sans
+	}
+	if role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 {
+		others, err := certutil.GetOtherSANsFromX509Extensions(csr.Extensions)
+		if err != nil {
+			return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()}
+		}
+		for _, other := range others {
+			otherSANsInput = append(otherSANsInput, other.String())
+		}
+	}
+	if len(otherSANsInput) > 0 {
+		requested, err := ParseOtherSANs(otherSANsInput)
+		if err != nil {
+			return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()}
+		}
+		badOID, badName, err := ValidateOtherSANs(role, requested)
+		switch {
+		case err != nil:
+			return nil, nil, errutil.UserError{Err: err.Error()}
+		case len(badName) > 0:
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"other SAN %s not allowed for OID %s by this role", badName, badOID)}
+		case len(badOID) > 0:
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"other SAN OID %s not allowed by this role", badOID)}
+		default:
+			otherSANs = requested
+		}
+	}
+
+	// Get and verify any IP SANs
+	ipAddresses := []net.IP{}
+	{
+		if csr != nil && role.UseCSRSANs {
+			if len(csr.IPAddresses) > 0 {
+				if !role.AllowIPSANs {
+					return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR"}
+				}
+				ipAddresses = csr.IPAddresses
+			}
+		} else {
+			ipAlt := cb.GetIpSans()
+			if len(ipAlt) > 0 {
+				if !role.AllowIPSANs {
+					return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+						"IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)}
+				}
+				for _, v := range ipAlt {
+					parsedIP := net.ParseIP(v)
+					if parsedIP == nil {
+						return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+							"the value %q is not a valid IP address", v)}
+					}
+					ipAddresses = append(ipAddresses, parsedIP)
+				}
+			}
+		}
+	}
+
+	URIs := []*url.URL{}
+	{
+		if csr != nil && role.UseCSRSANs {
+			if len(csr.URIs) > 0 {
+				if len(role.AllowedURISANs) == 0 {
+
return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", + } + } + + // validate uri sans + for _, uri := range csr.URIs { + valid := ValidateURISAN(b, role, entityInfo, uri.String()) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", + } + } + + URIs = append(URIs, uri) + } + } + } else { + uriAlt := cb.GetURISans() + if len(uriAlt) > 0 { + if len(role.AllowedURISANs) == 0 { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", + } + } + + for _, uri := range uriAlt { + valid := ValidateURISAN(b, role, entityInfo, uri) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", + } + } + + parsedURI, err := url.Parse(uri) + if parsedURI == nil || err != nil { + return nil, nil, errutil.UserError{ + Err: fmt.Sprintf( + "the provided URI Subject Alternative Name %q is not a valid URI", uri), + } + } + + URIs = append(URIs, parsedURI) + } + } + } + } + + // Most of these could also be RemoveDuplicateStable, or even + // leave duplicates in, but OU is the one most likely to be duplicated. + subject := pkix.Name{ + CommonName: cn, + SerialNumber: ridSerialNumber, + Country: strutil.RemoveDuplicatesStable(role.Country, false), + Organization: strutil.RemoveDuplicatesStable(role.Organization, false), + OrganizationalUnit: strutil.RemoveDuplicatesStable(role.OU, false), + Locality: strutil.RemoveDuplicatesStable(role.Locality, false), + Province: strutil.RemoveDuplicatesStable(role.Province, false), + StreetAddress: strutil.RemoveDuplicatesStable(role.StreetAddress, false), + PostalCode: strutil.RemoveDuplicatesStable(role.PostalCode, false), + } + + // Get the TTL and verify it against the max allowed + notAfter, ttlWarnings, err := GetCertificateNotAfter(b, role, cb, caSign) + if err != nil { + return nil, warnings, err + } + warnings = append(warnings, ttlWarnings...) + + // Parse SKID from the request for cross-signing. + var skid []byte + { + if rawSKIDValue, ok := cb.GetOptionalSkid(); ok { + // Handle removing common separators to make copy/paste from tool + // output easier. Chromium uses space, OpenSSL uses colons, and at + // one point, Vault had preferred dash as a separator for hex + // strings. + var err error + skidValue := rawSKIDValue.(string) + for _, separator := range []string{":", "-", " "} { + skidValue = strings.ReplaceAll(skidValue, separator, "") + } + + skid, err = hex.DecodeString(skidValue) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} + } + } + } + + // Add UserIDs into the Subject, if the request type supports it. + if _, present := cb.IsUserIdInSchema(); present { + rawUserIDs := cb.GetUserIds() + + // Only take UserIDs from CSR if one was not supplied via API. + if len(rawUserIDs) == 0 && csr != nil { + for _, attr := range csr.Subject.Names { + if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { + switch aValue := attr.Value.(type) { + case string: + rawUserIDs = append(rawUserIDs, aValue) + case []byte: + rawUserIDs = append(rawUserIDs, string(aValue)) + default: + return nil, nil, errutil.UserError{Err: "unknown type for user_id attribute in CSR's Subject"} + } + } + } + } + + // Check for bad userIDs and add to the subject. 
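+		// The OID written below, certutil.SubjectPilotUserIDAttributeOID,
+		// is the pilot userID attribute 0.9.2342.19200300.100.1.1 noted in
+		// the user_ids field description; appending it via
+		// subject.ExtraNames is what places the UID values into the issued
+		// certificate's Subject.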
+ if len(rawUserIDs) > 0 { + for _, value := range rawUserIDs { + if !ValidateUserId(role, value) { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("user_id %v is not allowed by this role", value)} + } + + subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{ + Type: certutil.SubjectPilotUserIDAttributeOID, + Value: value, + }) + } + } + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + DNSNames: strutil.RemoveDuplicates(dnsNames, false), + EmailAddresses: strutil.RemoveDuplicates(emailAddresses, false), + IPAddresses: ipAddresses, + URIs: URIs, + OtherSANs: otherSANs, + KeyType: role.KeyType, + KeyBits: role.KeyBits, + SignatureBits: role.SignatureBits, + UsePSS: role.UsePSS, + NotAfter: notAfter, + KeyUsage: x509.KeyUsage(parsing.ParseKeyUsages(role.KeyUsage)), + ExtKeyUsage: ParseExtKeyUsagesFromRole(role), + ExtKeyUsageOIDs: role.ExtKeyUsageOIDs, + PolicyIdentifiers: role.PolicyIdentifiers, + BasicConstraintsValidForNonCA: role.BasicConstraintsValidForNonCA, + NotBeforeDuration: role.NotBeforeDuration, + ForceAppendCaChain: caSign != nil, + SKID: skid, + }, + SigningBundle: caSign, + CSR: csr, + } + + // Don't deal with URLs or max path length if it's self-signed, as these + // normally come from the signing bundle + if caSign == nil { + return creation, warnings, nil + } + + // This will have been read in from the getGlobalAIAURLs function + creation.Params.URLs = caSign.URLs + + // If the max path length in the role is not nil, it was specified at + // generation time with the max_path_length parameter; otherwise derive it + // from the signing certificate + if role.MaxPathLength != nil { + creation.Params.MaxPathLength = *role.MaxPathLength + } else { + switch { + case caSign.Certificate.MaxPathLen < 0: + creation.Params.MaxPathLength = -1 + case caSign.Certificate.MaxPathLen == 0 && + caSign.Certificate.MaxPathLenZero: + // The signing function will ensure that we do not issue a CA cert + creation.Params.MaxPathLength = 0 + default: + // If this takes it to zero, we handle this case later if + // necessary + creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1 + } + } + + return creation, warnings, nil +} + +// Given a set of requested names for a certificate, verifies that all of them +// match the various toggles set in the role for controlling issuance. +// If one does not pass, it is returned in the string argument. +func ValidateNames(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, names []string) string { + for _, name := range names { + // Previously, reducedName was called sanitizedName but this made + // little sense under the previous interpretation of wildcards, + // leading to two bugs in this implementation. We presently call it + // "reduced" to indicate that it is still untrusted input (potentially + // different from the bare Common Name entry we're validating), it + // might have been modified such as by the removal of wildcard labels + // or the email prefix. + reducedName := name + emailDomain := reducedName + wildcardLabel := "" + isEmail := false + isWildcard := false + + // If it has an @, assume it is an email address and separate out the + // user from the hostname portion so that we can act on the hostname. + // Note that this matches behavior from the alt_names parameter. 
If it
+		// ends up being problematic for users, I guess that could be separated
+		// into dns_names and email_names in the future to be explicit, but I
+		// don't think this is likely.
+		if strings.Contains(reducedName, "@") {
+			splitEmail := strings.Split(reducedName, "@")
+			if len(splitEmail) != 2 {
+				return name
+			}
+			reducedName = splitEmail[1]
+			emailDomain = splitEmail[1]
+			isEmail = true
+		}
+
+		if IsWildcardDomain(reducedName) {
+			// Regardless of later rejections below, this common name contains
+			// a wildcard character and is thus technically a wildcard name.
+			isWildcard = true
+
+			// Additionally, if AllowWildcardCertificates is explicitly
+			// forbidden, it takes precedence over AllowAnyName, thus we should
+			// reject the name now.
+			//
+			// We expect the role to have been correctly migrated but guard for
+			// safety.
+			if role.AllowWildcardCertificates != nil && !*role.AllowWildcardCertificates {
+				return name
+			}
+
+			// Check that this domain is well-formatted per RFC 6125.
+			var err error
+			wildcardLabel, reducedName, err = ValidateWildcardDomain(reducedName)
+			if err != nil {
+				return name
+			}
+		}
+
+		// Email addresses using wildcard domain names do not make sense
+		// in a Common Name field.
+		if isEmail && isWildcard {
+			return name
+		}
+
+		// AllowAnyName is checked after this because EnforceHostnames still
+		// applies when allowing any name. Also, we check the reduced name to
+		// ensure that we are not either checking a full email address or a
+		// wildcard prefix.
+		if role.EnforceHostnames {
+			if reducedName != "" {
+				// See the note in ValidateWildcardDomain about splitLabels
+				// having only one segment and setting reducedName to the
+				// empty string.
+				p := idna.New(
+					idna.StrictDomainName(true),
+					idna.VerifyDNSLength(true),
+				)
+				converted, err := p.ToASCII(reducedName)
+				if err != nil {
+					return name
+				}
+				if !hostnameRegex.MatchString(converted) {
+					return name
+				}
+			}
+
+			// When a wildcard is specified, we additionally need to validate
+			// that the label with the wildcard is correctly formed.
+			if isWildcard && !leftWildLabelRegex.MatchString(wildcardLabel) {
+				return name
+			}
+		}
+
+		// Self-explanatory, but validations from EnforceHostnames and
+		// AllowWildcardCertificates take precedence.
+		if role.AllowAnyName {
+			continue
+		}
+
+		// The following blocks all work the same basic way:
+		// 1) If a role allows a certain class of base (localhost, token
+		//    display name, role-configured domains), perform further tests
+		//
+		// 2) If there is a perfect match on either the sanitized name or it's an
+		//    email address with a perfect match on the hostname portion, allow it
+		//
+		// 3) If subdomains are allowed, we check based on the sanitized name;
+		//    note that if not a wildcard, will be equivalent to the email domain
+		//    for email checks, and we already checked above for both a wildcard
+		//    and email address being present in the same name
+		//    3a) First we check for a non-wildcard subdomain, as in <name>.<domain>.
+ // 3b) Then we check if it's a wildcard and the base domain is a match + // + // Variances are noted in-line + + if role.AllowLocalhost { + if reducedName == "localhost" || + reducedName == "localdomain" || + (isEmail && emailDomain == "localhost") || + (isEmail && emailDomain == "localdomain") { + continue + } + + if role.AllowSubdomains { + // It is possible, if unlikely, to have a subdomain of "localhost" + if strings.HasSuffix(reducedName, ".localhost") || + (isWildcard && reducedName == "localhost") { + continue + } + + // A subdomain of "localdomain" is also not entirely uncommon + if strings.HasSuffix(reducedName, ".localdomain") || + (isWildcard && reducedName == "localdomain") { + continue + } + } + } + + if role.AllowTokenDisplayName { + if name == entityInfo.DisplayName { + continue + } + + if role.AllowSubdomains { + if isEmail { + // If it's an email address, we need to parse the token + // display name in order to do a proper comparison of the + // subdomain + if strings.Contains(entityInfo.DisplayName, "@") { + splitDisplay := strings.Split(entityInfo.DisplayName, "@") + if len(splitDisplay) == 2 { + // Compare the sanitized name against the hostname + // portion of the email address in the broken + // display name + if strings.HasSuffix(reducedName, "."+splitDisplay[1]) { + continue + } + } + } + } + + if strings.HasSuffix(reducedName, "."+entityInfo.DisplayName) || + (isWildcard && reducedName == entityInfo.DisplayName) { + continue + } + } + } + + if len(role.AllowedDomains) > 0 { + valid := false + for _, currDomain := range role.AllowedDomains { + // If there is, say, a trailing comma, ignore it + if currDomain == "" { + continue + } + + if role.AllowedDomainsTemplate { + isTemplate, _ := framework.ValidateIdentityTemplate(currDomain) + if isTemplate && entityInfo.EntityID != "" { + tmpCurrDomain, err := framework.PopulateIdentityTemplate(currDomain, entityInfo.EntityID, b) + if err != nil { + continue + } + + currDomain = tmpCurrDomain + } + } + + // First, allow an exact match of the base domain if that role flag + // is enabled + if role.AllowBareDomains && + (strings.EqualFold(name, currDomain) || + (isEmail && strings.EqualFold(emailDomain, currDomain))) { + valid = true + break + } + + if role.AllowSubdomains { + if strings.HasSuffix(reducedName, "."+currDomain) || + (isWildcard && strings.EqualFold(reducedName, currDomain)) { + valid = true + break + } + } + + if role.AllowGlobDomains && + strings.Contains(currDomain, "*") && + glob.Glob(strings.ToLower(currDomain), strings.ToLower(name)) { + valid = true + break + } + } + + if valid { + continue + } + } + + return name + } + + return "" +} + +func IsWildcardDomain(name string) bool { + // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier + // RFC 2818 which no modern client will validate against, there are two + // main types of wildcards, each with a single wildcard specifier (`*`, + // functionally different from the `*` used as a glob from the + // AllowGlobDomains parsing path) in the left-most label: + // + // 1. Entire label is a single wildcard character (most common and + // well-supported), + // 2. Part of the label contains a single wildcard character (e.g. per + // RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). + // + // We permit issuance of both but not the older RFC 2818 style under + // the new AllowWildcardCertificates option. However, anything with a + // glob character is technically a wildcard, though not a valid one. 
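To make the RFC 6125 cases above concrete, here is a small sketch of how a wildcard name splits into a wildcard label and a reduced name, mirroring the ValidateWildcardDomain function that follows (this is a hypothetical standalone version, not the diff's code):

```go
package main

import (
	"fmt"
	"strings"
)

// splitWildcard mirrors ValidateWildcardDomain below: at most one "*",
// and it must live in the left-most label.
func splitWildcard(name string) (label, reduced string, err error) {
	if strings.Count(name, "*") > 1 {
		return "", "", fmt.Errorf("expected only one wildcard identifier")
	}
	parts := strings.SplitN(name, ".", 2)
	if len(parts) != 2 {
		// Single-label name consisting entirely of the wildcard, e.g. "*".
		return parts[0], "", nil
	}
	if strings.Contains(parts[1], "*") {
		return "", "", fmt.Errorf("wildcard must be in the left-most label")
	}
	return parts[0], parts[1], nil
}

func main() {
	for _, n := range []string{"*.example.com", "b*z.example.net", "foo.*.example.com"} {
		label, reduced, err := splitWildcard(n)
		fmt.Printf("%-20s label=%-6q reduced=%-14q err=%v\n", n, label, reduced, err)
	}
}
```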
+
+	return strings.Contains(name, "*")
+}
+
+func ValidateWildcardDomain(name string) (string, string, error) {
+	// See the note in IsWildcardDomain(...) about the definition of a
+	// wildcard domain.
+	var wildcardLabel string
+	var reducedName string
+
+	if strings.Count(name, "*") > 1 {
+		// As mentioned above, only one wildcard character is permitted
+		// under RFC 6125 semantics.
+		return wildcardLabel, reducedName, fmt.Errorf("expected only one wildcard identifier in the given domain name")
+	}
+
+	// Split the Common Name into two parts: a left-most label and the
+	// remaining segments (if present).
+	splitLabels := strings.SplitN(name, ".", 2)
+	if len(splitLabels) != 2 {
+		// We've been given a single-part domain name that consists
+		// entirely of a wildcard. This is a little tricky to handle,
+		// but EnforceHostnames validates both the wildcard-containing
+		// label and the reduced name, but _only_ the latter if it is
+		// non-empty. This allows us to still validate that the only label
+		// component matches hostname expectations.
+		wildcardLabel = splitLabels[0]
+		reducedName = ""
+	} else {
+		// We have an (at least) two-label domain name. But before we can
+		// update our names, we need to validate that the wildcard ended up
+		// in the segment we expected it to. While this is (kinda)
+		// validated under EnforceHostnames's leftWildLabelRegex, we
+		// still need to validate it in the non-enforced mode.
+		//
+		// By the validated assumption above, we know there's strictly one
+		// wildcard in this domain so we only need to check the wildcard
+		// label or the reduced name (as one is equivalent to the other).
+		// Because we later assume reducedName _lacks_ wildcard segments,
+		// we validate that.
+		wildcardLabel = splitLabels[0]
+		reducedName = splitLabels[1]
+		if strings.Contains(reducedName, "*") {
+			return wildcardLabel, reducedName, fmt.Errorf("expected wildcard to only be present in left-most domain label")
+		}
+	}
+
+	return wildcardLabel, reducedName, nil
+}
+
+// ValidateCommonName validates a given common name, ensuring it's either an
+// email or a hostname after validating it according to the role parameters,
+// unless validation is disabled entirely.
+func ValidateCommonName(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, name string) string {
+	isDisabled := len(role.CNValidations) == 1 && role.CNValidations[0] == "disabled"
+	if isDisabled {
+		return ""
+	}
+
+	if ValidateNames(b, role, entityInfo, []string{name}) != "" {
+		return name
+	}
+
+	// Validations weren't disabled, but the role lacked CN Validations, so
+	// don't restrict types. This case is hit in certain existing tests.
+	if len(role.CNValidations) == 0 {
+		return ""
+	}
+
+	// If there's an at sign in the data, ensure email type validation is
+	// allowed. Otherwise, ensure hostname is allowed.
+	if strings.Contains(name, "@") {
+		var allowsEmails bool
+		for _, validation := range role.CNValidations {
+			if validation == "email" {
+				allowsEmails = true
+				break
+			}
+		}
+		if !allowsEmails {
+			return name
+		}
+	} else {
+		var allowsHostnames bool
+		for _, validation := range role.CNValidations {
+			if validation == "hostname" {
+				allowsHostnames = true
+				break
+			}
+		}
+		if !allowsHostnames {
+			return name
+		}
+	}
+
+	return ""
+}
+
+// ValidateOtherSANs checks if the values requested are allowed. If an OID
+// isn't allowed, it will be returned as the first string. If a value isn't
+// allowed, it will be returned as the second string. Empty strings and a
+// nil error mean everything is okay.
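The allowed entries that ValidateOtherSANs and ParseOtherSANs below operate on take an `<oid>;<type>:<value>` shape, where only UTF-8 types are supported and the value side may be a glob. A short sketch of pulling one such entry apart, using a hypothetical sample value (the UPN OID here is purely illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// An allowed_other_sans entry pairs an OID with a typed value,
	// separated by ";" and ":". This mirrors the format ParseOtherSANs
	// below expects; the concrete OID and glob are made-up examples.
	entry := "1.3.6.1.4.1.311.20.2.3;UTF8:*@example.com"

	oidAndRest := strings.SplitN(entry, ";", 2)
	typeAndValue := strings.SplitN(oidAndRest[1], ":", 2)
	fmt.Println("oid:  ", oidAndRest[0])   // 1.3.6.1.4.1.311.20.2.3
	fmt.Println("type: ", typeAndValue[0]) // UTF8
	fmt.Println("value:", typeAndValue[1]) // *@example.com
}
```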
+func ValidateOtherSANs(role *RoleEntry, requested map[string][]string) (string, string, error) {
+	if len(role.AllowedOtherSANs) == 1 && role.AllowedOtherSANs[0] == "*" {
+		// Anything is allowed
+		return "", "", nil
+	}
+
+	allowed, err := ParseOtherSANs(role.AllowedOtherSANs)
+	if err != nil {
+		return "", "", fmt.Errorf("error parsing role's allowed SANs: %w", err)
+	}
+	for oid, names := range requested {
+		for _, name := range names {
+			allowedNames, ok := allowed[oid]
+			if !ok {
+				return oid, "", nil
+			}
+
+			valid := false
+			for _, allowedName := range allowedNames {
+				if glob.Glob(allowedName, name) {
+					valid = true
+					break
+				}
+			}
+
+			if !valid {
+				return oid, name, nil
+			}
+		}
+	}
+
+	return "", "", nil
+}
+
+func ParseOtherSANs(others []string) (map[string][]string, error) {
+	result := map[string][]string{}
+	for _, other := range others {
+		splitOther := strings.SplitN(other, ";", 2)
+		if len(splitOther) != 2 {
+			return nil, fmt.Errorf("expected a semicolon in other SAN %q", other)
+		}
+		splitType := strings.SplitN(splitOther[1], ":", 2)
+		if len(splitType) != 2 {
+			return nil, fmt.Errorf("expected a colon in other SAN %q", other)
+		}
+		switch {
+		case strings.EqualFold(splitType[0], "utf8"):
+		case strings.EqualFold(splitType[0], "utf-8"):
+		default:
+			return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other)
+		}
+		result[splitOther[0]] = append(result[splitOther[0]], splitType[1])
+	}
+
+	return result, nil
+}
+
+// Given a URI SAN, verify that it is allowed.
+func ValidateURISAN(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, uri string) bool {
+	valid := false
+	for _, allowed := range role.AllowedURISANs {
+		if role.AllowedURISANsTemplate {
+			isTemplate, _ := framework.ValidateIdentityTemplate(allowed)
+			if isTemplate && entityInfo.EntityID != "" {
+				tmpAllowed, err := framework.PopulateIdentityTemplate(allowed, entityInfo.EntityID, b)
+				if err != nil {
+					continue
+				}
+				allowed = tmpAllowed
+			}
+		}
+		validURI := glob.Glob(allowed, uri)
+		if validURI {
+			valid = true
+			break
+		}
+	}
+	return valid
+}
+
+// ValidateUserId returns a bool stating whether the given userId is valid
+// for this role.
+func ValidateUserId(role *RoleEntry, userId string) bool {
+	allowedList := role.AllowedUserIDs
+
+	if len(allowedList) == 0 {
+		// Nothing is allowed.
+		return false
+	}
+
+	if strutil.StrListContainsCaseInsensitive(allowedList, userId) {
+		return true
+	}
+
+	for _, rolePattern := range allowedList {
+		if rolePattern == "" {
+			continue
+		}
+
+		if strings.Contains(rolePattern, "*") && glob.Glob(rolePattern, userId) {
+			return true
+		}
+	}
+
+	// No matches.
+	return false
+}
+
+func ValidateSerialNumber(role *RoleEntry, serialNumber string) string {
+	valid := false
+	if len(role.AllowedSerialNumbers) > 0 {
+		for _, currSerialNumber := range role.AllowedSerialNumbers {
+			if currSerialNumber == "" {
+				continue
+			}
+
+			if (strings.Contains(currSerialNumber, "*") &&
+				glob.Glob(currSerialNumber, serialNumber)) ||
+				currSerialNumber == serialNumber {
+				valid = true
+				break
+			}
+		}
+	}
+	if !valid {
+		return serialNumber
+	}
+	return ""
+}
+
+type CertNotAfterInput interface {
+	GetTTL() int
+	GetOptionalNotAfter() (interface{}, bool)
+}
+
+// GetCertificateNotAfter computes a certificate's NotAfter date based on the
+// mount TTL, role, signing bundle, and input API data being sent. Returns a
+// NotAfter time, a set of warnings, or an error.
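Before the implementation below, it may help to see the precedence it applies in miniature: an explicit request TTL beats the role TTL, which beats the mount default, and the result is capped by the role maximum (or, failing that, the mount maximum) with a warning. A condensed, hypothetical sketch of just that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

// resolveTTL is a hypothetical condensation of the TTL logic below: it is
// not the diff's code, only the precedence and capping behavior.
func resolveTTL(reqTTL, roleTTL, roleMax, mountDefault, mountMax time.Duration) (time.Duration, []string) {
	var warnings []string
	ttl := reqTTL
	if ttl == 0 {
		ttl = roleTTL
	}
	maxTTL := roleMax
	if ttl == 0 {
		ttl = mountDefault
	}
	if maxTTL == 0 {
		maxTTL = mountMax
	}
	if ttl > maxTTL {
		warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL))
		ttl = maxTTL
	}
	return ttl, warnings
}

func main() {
	// Role asks for 90 days but caps at 30; the request supplies no TTL.
	ttl, warnings := resolveTTL(0, 90*24*time.Hour, 30*24*time.Hour, 768*time.Hour, 32*24*time.Hour)
	fmt.Println(ttl, warnings) // capped at the role's 30-day max, with a warning
}
```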
+func GetCertificateNotAfter(b logical.SystemView, role *RoleEntry, input CertNotAfterInput, caSign *certutil.CAInfoBundle) (time.Time, []string, error) { + var warnings []string + var maxTTL time.Duration + var notAfter time.Time + var err error + + ttl := time.Duration(input.GetTTL()) * time.Second + notAfterAlt := role.NotAfter + if notAfterAlt == "" { + notAfterAltRaw, ok := input.GetOptionalNotAfter() + if ok { + notAfterAlt = notAfterAltRaw.(string) + } + } + if ttl > 0 && notAfterAlt != "" { + return time.Time{}, warnings, errutil.UserError{Err: "Either ttl or not_after should be provided. Both should not be provided in the same request."} + } + + if ttl == 0 && role.TTL > 0 { + ttl = role.TTL + } + + if role.MaxTTL > 0 { + maxTTL = role.MaxTTL + } + + if ttl == 0 { + ttl = b.DefaultLeaseTTL() + } + if maxTTL == 0 { + maxTTL = b.MaxLeaseTTL() + } + if ttl > maxTTL { + warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL)) + ttl = maxTTL + } + + if notAfterAlt != "" { + notAfter, err = time.Parse(time.RFC3339, notAfterAlt) + if err != nil { + return notAfter, warnings, errutil.UserError{Err: err.Error()} + } + } else { + notAfter = time.Now().Add(ttl) + } + notAfter, err = ApplyIssuerLeafNotAfterBehavior(caSign, notAfter) + if err != nil { + return time.Time{}, warnings, err + } + return notAfter, warnings, nil +} + +// ApplyIssuerLeafNotAfterBehavior resets a certificate's notAfter time or errors out based on the +// issuer's notAfter date along with the LeafNotAfterBehavior configuration +func ApplyIssuerLeafNotAfterBehavior(caSign *certutil.CAInfoBundle, notAfter time.Time) (time.Time, error) { + if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) { + // If it's not self-signed, verify that the issued certificate + // won't be valid past the lifetime of the CA certificate, and + // act accordingly. This is dependent based on the issuer's + // LeafNotAfterBehavior argument. + switch caSign.LeafNotAfterBehavior { + case certutil.PermitNotAfterBehavior: + // Explicitly do nothing. + case certutil.TruncateNotAfterBehavior: + notAfter = caSign.Certificate.NotAfter + case certutil.ErrNotAfterBehavior: + fallthrough + default: + return time.Time{}, errutil.UserError{Err: fmt.Sprintf( + "cannot satisfy request, as TTL would result in notAfter of %s that is beyond the expiration of the CA certificate at %s", notAfter.UTC().Format(time.RFC3339Nano), caSign.Certificate.NotAfter.UTC().Format(time.RFC3339Nano))} + } + } + return notAfter, nil +} + +// StoreCertificate given a certificate bundle that was signed, persist the certificate to storage +func StoreCertificate(ctx context.Context, s logical.Storage, certCounter pki_backend.CertificateCounter, certBundle *certutil.ParsedCertBundle) error { + hyphenSerialNumber := parsing.NormalizeSerialForStorageFromBigInt(certBundle.Certificate.SerialNumber) + key := "certs/" + hyphenSerialNumber + certsCounted := certCounter.IsInitialized() + err := s.Put(ctx, &logical.StorageEntry{ + Key: key, + Value: certBundle.CertificateBytes, + }) + if err != nil { + return fmt.Errorf("unable to store certificate locally: %w", err) + } + certCounter.IncrementTotalCertificatesCount(certsCounted, key) + return nil +} diff --git a/builtin/logical/pki/issuing/issuers.go b/builtin/logical/pki/issuing/issuers.go new file mode 100644 index 000000000000..24343aec7e56 --- /dev/null +++ b/builtin/logical/pki/issuing/issuers.go @@ -0,0 +1,495 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package issuing
+
+import (
+	"context"
+	"crypto/x509"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+
+	"github.com/hashicorp/vault/builtin/logical/pki/managed_key"
+	"github.com/hashicorp/vault/builtin/logical/pki/parsing"
+)
+
+const (
+	ReadOnlyUsage    IssuerUsage = iota
+	IssuanceUsage    IssuerUsage = 1 << iota
+	CRLSigningUsage  IssuerUsage = 1 << iota
+	OCSPSigningUsage IssuerUsage = 1 << iota
+)
+
+const (
+	// When adding a new usage in the future, we'll need to create a usage
+	// mask field on the IssuerEntry and handle migrations to a newer mask,
+	// inferring a value for the new bits.
+	AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage
+
+	DefaultRef   = "default"
+	IssuerPrefix = "config/issuer/"
+
+	// Used as a quick sanity check for reference ID lookups...
+	uuidLength = 36
+
+	IssuerRefNotFound   = IssuerID("not-found")
+	LatestIssuerVersion = 1
+
+	LegacyCertBundlePath  = "config/ca_bundle"
+	LegacyBundleShimID    = IssuerID("legacy-entry-shim-id")
+	LegacyBundleShimKeyID = KeyID("legacy-entry-shim-key-id")
+)
+
+type IssuerID string
+
+func (p IssuerID) String() string {
+	return string(p)
+}
+
+type IssuerUsage uint
+
+var namedIssuerUsages = map[string]IssuerUsage{
+	"read-only":            ReadOnlyUsage,
+	"issuing-certificates": IssuanceUsage,
+	"crl-signing":          CRLSigningUsage,
+	"ocsp-signing":         OCSPSigningUsage,
+}
+
+func (i *IssuerUsage) ToggleUsage(usages ...IssuerUsage) {
+	for _, usage := range usages {
+		*i ^= usage
+	}
+}
+
+func (i IssuerUsage) HasUsage(usage IssuerUsage) bool {
+	return (i & usage) == usage
+}
+
+func (i IssuerUsage) Names() string {
+	var names []string
+	var builtUsage IssuerUsage
+
+	// Return the known set of usages in sorted order, so that Terraform
+	// state files don't flip-flop, reporting values as different when it's
+	// the same list in a different order.
+	keys := make([]string, 0, len(namedIssuerUsages))
+	for k := range namedIssuerUsages {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, name := range keys {
+		usage := namedIssuerUsages[name]
+		if i.HasUsage(usage) {
+			names = append(names, name)
+			builtUsage.ToggleUsage(usage)
+		}
+	}
+
+	if i != builtUsage {
+		// Found some unknown usage; we should indicate this in the names.
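Since Usage fields combine these flags as a bitmask, here is a quick, self-contained illustration of the ToggleUsage and HasUsage semantics defined above (the type and methods are copied for demonstration only; note that toggling uses XOR, so toggling an already-set flag clears it):

```go
package main

import "fmt"

type IssuerUsage uint

const (
	ReadOnlyUsage    IssuerUsage = iota // 0
	IssuanceUsage    IssuerUsage = 1 << iota
	CRLSigningUsage  IssuerUsage = 1 << iota
	OCSPSigningUsage IssuerUsage = 1 << iota
)

// Minimal copies of the methods above, for illustration only.
func (i *IssuerUsage) ToggleUsage(usages ...IssuerUsage) {
	for _, u := range usages {
		*i ^= u
	}
}

func (i IssuerUsage) HasUsage(u IssuerUsage) bool { return (i & u) == u }

func main() {
	var usage IssuerUsage
	usage.ToggleUsage(IssuanceUsage, CRLSigningUsage)
	fmt.Println(usage.HasUsage(IssuanceUsage))    // true
	fmt.Println(usage.HasUsage(OCSPSigningUsage)) // false

	// ToggleUsage is XOR, so toggling a set flag clears it.
	usage.ToggleUsage(CRLSigningUsage)
	fmt.Println(usage.HasUsage(CRLSigningUsage)) // false
}
```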
+ names = append(names, fmt.Sprintf("unknown:%v", i^builtUsage)) + } + + return strings.Join(names, ",") +} + +func NewIssuerUsageFromNames(names []string) (IssuerUsage, error) { + var result IssuerUsage + for index, name := range names { + usage, ok := namedIssuerUsages[name] + if !ok { + return ReadOnlyUsage, fmt.Errorf("unknown name for usage at index %v: %v", index, name) + } + + result.ToggleUsage(usage) + } + + return result, nil +} + +type IssuerEntry struct { + ID IssuerID `json:"id"` + Name string `json:"name"` + KeyID KeyID `json:"key_id"` + Certificate string `json:"certificate"` + CAChain []string `json:"ca_chain"` + ManualChain []IssuerID `json:"manual_chain"` + SerialNumber string `json:"serial_number"` + LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"` + Usage IssuerUsage `json:"usage"` + RevocationSigAlg x509.SignatureAlgorithm `json:"revocation_signature_algorithm"` + Revoked bool `json:"revoked"` + RevocationTime int64 `json:"revocation_time"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + AIAURIs *AiaConfigEntry `json:"aia_uris,omitempty"` + LastModified time.Time `json:"last_modified"` + Version uint `json:"version"` +} + +func (i IssuerEntry) GetCertificate() (*x509.Certificate, error) { + cert, err := parsing.ParseCertificateFromBytes([]byte(i.Certificate)) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse certificate from issuer: %s: %v", err.Error(), i.ID)} + } + + return cert, nil +} + +func (i IssuerEntry) EnsureUsage(usage IssuerUsage) error { + // We want to spit out a nice error message about missing usages. + if i.Usage.HasUsage(usage) { + return nil + } + + issuerRef := fmt.Sprintf("id:%v", i.ID) + if len(i.Name) > 0 { + issuerRef = fmt.Sprintf("%v / name:%v", issuerRef, i.Name) + } + + // These usages differ at some point in time. We've gotta find the first + // usage that differs and return a logical-sounding error message around + // that difference. + for name, candidate := range namedIssuerUsages { + if usage.HasUsage(candidate) && !i.Usage.HasUsage(candidate) { + return fmt.Errorf("requested usage %v for issuer [%v] but only had usage %v", name, issuerRef, i.Usage.Names()) + } + } + + // Maybe we have an unnamed usage that's requested. + return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef) +} + +func (i IssuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { + // Hack: Go isn't kind enough expose its lovely signatureAlgorithmDetails + // informational struct for our usage. However, we don't want to actually + // fetch the private key and attempt a signature with this algo (as we'll + // mint new, previously unsigned material in the process that could maybe + // be potentially abused if it leaks). + // + // So... + // + // ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we + // exclude DSA support as the PKI engine has never supported DSA keys. + if algo == x509.UnknownSignatureAlgorithm { + // Special cased to indicate upgrade and letting Go automatically + // chose the correct value. 
+ return nil + } + + cert, err := i.GetCertificate() + if err != nil { + return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err) + } + + switch cert.PublicKeyAlgorithm { + case x509.RSA: + switch algo { + case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, + x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, + x509.SHA512WithRSAPSS: + return nil + } + case x509.ECDSA: + switch algo { + case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: + return nil + } + case x509.Ed25519: + switch algo { + case x509.PureEd25519: + return nil + } + } + + return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) +} + +func ResolveIssuerReference(ctx context.Context, s logical.Storage, reference string) (IssuerID, error) { + if reference == DefaultRef { + // Handle fetching the default issuer. + config, err := GetIssuersConfig(ctx, s) + if err != nil { + return IssuerID("config-error"), err + } + if len(config.DefaultIssuerId) == 0 { + return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured") + } + + return config.DefaultIssuerId, nil + } + + // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. + if len(reference) == uuidLength { + entry, err := s.Get(ctx, IssuerPrefix+reference) + if err != nil { + return IssuerID("issuer-read"), err + } + if entry != nil { + return IssuerID(reference), nil + } + } + + // ... than to pull all issuers from storage. + issuers, err := ListIssuers(ctx, s) + if err != nil { + return IssuerID("list-error"), err + } + + for _, issuerId := range issuers { + issuer, err := FetchIssuerById(ctx, s, issuerId) + if err != nil { + return IssuerID("issuer-read"), err + } + + if issuer.Name == reference { + return issuer.ID, nil + } + } + + // Otherwise, we must not have found the issuer. + return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)} +} + +func ListIssuers(ctx context.Context, s logical.Storage) ([]IssuerID, error) { + strList, err := s.List(ctx, IssuerPrefix) + if err != nil { + return nil, err + } + + issuerIds := make([]IssuerID, 0, len(strList)) + for _, entry := range strList { + issuerIds = append(issuerIds, IssuerID(entry)) + } + + return issuerIds, nil +} + +// FetchIssuerById returns an IssuerEntry based on issuerId, if none found an error is returned. 
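To summarize the resolution order ResolveIssuerReference implements above: the `default` sentinel is handled first, a 36-character reference is tried as a direct ID read, and only then does it fall back to listing all issuers and scanning by name. A hypothetical sketch of just that decision order, with storage elided:

```go
package main

import "fmt"

const uuidLength = 36

// classifyReference is a hypothetical illustration of the lookup order in
// ResolveIssuerReference; it returns a description rather than doing I/O.
func classifyReference(ref string) string {
	switch {
	case ref == "default":
		return "resolve via the issuer config's default entry"
	case len(ref) == uuidLength:
		return "try a direct storage read as an ID, then fall back to a name scan"
	default:
		return "scan all issuers comparing names"
	}
}

func main() {
	for _, ref := range []string{
		"default",
		"9f3f50a4-0017-4b3e-ab3f-b3b4a5d1e0a2", // 36 chars: shaped like a UUID
		"my-intermediate",
	} {
		fmt.Printf("%-38s -> %s\n", ref, classifyReference(ref))
	}
}
```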
+func FetchIssuerById(ctx context.Context, s logical.Storage, issuerId IssuerID) (*IssuerEntry, error) { + if len(issuerId) == 0 { + return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"} + } + + entry, err := s.Get(ctx, IssuerPrefix+issuerId.String()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)} + } + if entry == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())} + } + + var issuer IssuerEntry + if err := entry.DecodeJSON(&issuer); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)} + } + + return upgradeIssuerIfRequired(&issuer), nil +} + +func WriteIssuer(ctx context.Context, s logical.Storage, issuer *IssuerEntry) error { + issuerId := issuer.ID + if issuer.LastModified.IsZero() { + issuer.LastModified = time.Now().UTC() + } + + json, err := logical.StorageEntryJSON(IssuerPrefix+issuerId.String(), issuer) + if err != nil { + return err + } + + return s.Put(ctx, json) +} + +func DeleteIssuer(ctx context.Context, s logical.Storage, id IssuerID) (bool, error) { + config, err := GetIssuersConfig(ctx, s) + if err != nil { + return false, err + } + + wasDefault := false + if config.DefaultIssuerId == id { + wasDefault = true + // Overwrite the fetched default issuer as we're going to remove this + // entry. + config.fetchedDefault = IssuerID("") + config.DefaultIssuerId = IssuerID("") + if err := SetIssuersConfig(ctx, s, config); err != nil { + return wasDefault, err + } + } + + return wasDefault, s.Delete(ctx, IssuerPrefix+id.String()) +} + +func upgradeIssuerIfRequired(issuer *IssuerEntry) *IssuerEntry { + // *NOTE*: Don't attempt to write out the issuer here as it may cause ErrReadOnly that will direct the + // request all the way up to the primary cluster which would be horrible for local cluster operations such + // as generating a leaf cert or a revoke. + // Also even though we could tell if we are the primary cluster's active node, we can't tell if we have the + // a full rw issuer lock, so it might not be safe to write. + if issuer.Version == LatestIssuerVersion { + return issuer + } + + if issuer.Version == 0 { + // Upgrade at this step requires interrogating the certificate itself; + // if this decode fails, it indicates internal problems and the + // request will subsequently fail elsewhere. However, decoding this + // certificate is mildly expensive, so we only do it in the event of + // a Version 0 certificate. + cert, err := issuer.GetCertificate() + if err != nil { + return issuer + } + + hadCRL := issuer.Usage.HasUsage(CRLSigningUsage) + // Remove CRL signing usage if it exists on the issuer but doesn't + // exist in the KU of the x509 certificate. + if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 { + issuer.Usage.ToggleUsage(CRLSigningUsage) + } + + // Handle our new OCSPSigning usage flag for earlier versions. If we + // had it (prior to removing it in this upgrade), we'll add the OCSP + // flag since EKUs don't matter. + if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) { + issuer.Usage.ToggleUsage(OCSPSigningUsage) + } + } + + issuer.Version = LatestIssuerVersion + return issuer +} + +// FetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. 
+// This does support the loading using the legacyBundleShimID +func FetchCAInfoByIssuerId(ctx context.Context, s logical.Storage, mkv managed_key.PkiManagedKeyView, issuerId IssuerID, usage IssuerUsage) (*certutil.CAInfoBundle, error) { + entry, bundle, err := FetchCertBundleByIssuerId(ctx, s, issuerId, true) + if err != nil { + switch err.(type) { + case errutil.UserError: + return nil, err + case errutil.InternalError: + return nil, err + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA info: %v", err)} + } + } + + if err = entry.EnsureUsage(usage); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)} + } + + parsedBundle, err := ParseCABundle(ctx, mkv, bundle) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + if parsedBundle.Certificate == nil { + return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} + } + if parsedBundle.PrivateKey == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("unable to fetch corresponding key for issuer %v; unable to use this issuer for signing", issuerId)} + } + + caInfo := &certutil.CAInfoBundle{ + ParsedCertBundle: *parsedBundle, + URLs: nil, + LeafNotAfterBehavior: entry.LeafNotAfterBehavior, + RevocationSigAlg: entry.RevocationSigAlg, + } + + entries, err := GetAIAURLs(ctx, s, entry) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} + } + caInfo.URLs = entries + + return caInfo, nil +} + +func ParseCABundle(ctx context.Context, mkv managed_key.PkiManagedKeyView, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + if bundle.PrivateKeyType == certutil.ManagedPrivateKey { + return managed_key.ParseManagedKeyCABundle(ctx, mkv, bundle) + } + return bundle.ToParsedCertBundle() +} + +// FetchCertBundleByIssuerId builds a certutil.CertBundle from the specified issuer identifier, +// optionally loading the key or not. This method supports loading legacy +// bundles using the legacyBundleShimID issuerId, and if no entry is found will return an error. +func FetchCertBundleByIssuerId(ctx context.Context, s logical.Storage, id IssuerID, loadKey bool) (*IssuerEntry, *certutil.CertBundle, error) { + if id == LegacyBundleShimID { + // We have not completed the migration, or started a request in legacy mode, so + // attempt to load the bundle from the legacy location + issuer, bundle, err := GetLegacyCertBundle(ctx, s) + if err != nil { + return nil, nil, err + } + if issuer == nil || bundle == nil { + return nil, nil, errutil.UserError{Err: "no legacy cert bundle exists"} + } + + return issuer, bundle, err + } + + issuer, err := FetchIssuerById(ctx, s, id) + if err != nil { + return nil, nil, err + } + + var bundle certutil.CertBundle + bundle.Certificate = issuer.Certificate + bundle.CAChain = issuer.CAChain + bundle.SerialNumber = issuer.SerialNumber + + // Fetch the key if it exists. Sometimes we don't need the key immediately. 
+ if loadKey && issuer.KeyID != KeyID("") { + key, err := FetchKeyById(ctx, s, issuer.KeyID) + if err != nil { + return nil, nil, err + } + + bundle.PrivateKeyType = key.PrivateKeyType + bundle.PrivateKey = key.PrivateKey + } + + return issuer, &bundle, nil +} + +func GetLegacyCertBundle(ctx context.Context, s logical.Storage) (*IssuerEntry, *certutil.CertBundle, error) { + entry, err := s.Get(ctx, LegacyCertBundlePath) + if err != nil { + return nil, nil, err + } + + if entry == nil { + return nil, nil, nil + } + + cb := &certutil.CertBundle{} + err = entry.DecodeJSON(cb) + if err != nil { + return nil, nil, err + } + + // Fake a storage entry with backwards compatibility in mind. + issuer := &IssuerEntry{ + ID: LegacyBundleShimID, + KeyID: LegacyBundleShimKeyID, + Name: "legacy-entry-shim", + Certificate: cb.Certificate, + CAChain: cb.CAChain, + SerialNumber: cb.SerialNumber, + LeafNotAfterBehavior: certutil.ErrNotAfterBehavior, + } + issuer.Usage.ToggleUsage(AllIssuerUsages) + + return issuer, cb, nil +} diff --git a/builtin/logical/pki/issuing/keys.go b/builtin/logical/pki/issuing/keys.go new file mode 100644 index 000000000000..c4cc62ff24bc --- /dev/null +++ b/builtin/logical/pki/issuing/keys.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" +) + +const ( + KeyPrefix = "config/key/" + KeyRefNotFound = KeyID("not-found") +) + +type KeyID string + +func (p KeyID) String() string { + return string(p) +} + +type KeyEntry struct { + ID KeyID `json:"id"` + Name string `json:"name"` + PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"` + PrivateKey string `json:"private_key"` +} + +func (e KeyEntry) IsManagedPrivateKey() bool { + return e.PrivateKeyType == certutil.ManagedPrivateKey +} + +func ListKeys(ctx context.Context, s logical.Storage) ([]KeyID, error) { + strList, err := s.List(ctx, KeyPrefix) + if err != nil { + return nil, err + } + + keyIds := make([]KeyID, 0, len(strList)) + for _, entry := range strList { + keyIds = append(keyIds, KeyID(entry)) + } + + return keyIds, nil +} + +func FetchKeyById(ctx context.Context, s logical.Storage, keyId KeyID) (*KeyEntry, error) { + if len(keyId) == 0 { + return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} + } + + entry, err := s.Get(ctx, KeyPrefix+keyId.String()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} + } + if entry == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} + } + + var key KeyEntry + if err := entry.DecodeJSON(&key); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki key with id %s: %v", keyId.String(), err)} + } + + return &key, nil +} + +func WriteKey(ctx context.Context, s logical.Storage, key KeyEntry) error { + keyId := key.ID + + json, err := logical.StorageEntryJSON(KeyPrefix+keyId.String(), key) + if err != nil { + return err + } + + return s.Put(ctx, json) +} + +func DeleteKey(ctx context.Context, s logical.Storage, id KeyID) (bool, error) { + config, err := GetKeysConfig(ctx, s) + if err != nil { + return false, err + } + + wasDefault := false + if config.DefaultKeyId == id { + wasDefault = true + config.DefaultKeyId 
= KeyID("")
+		if err := SetKeysConfig(ctx, s, config); err != nil {
+			return wasDefault, err
+		}
+	}
+
+	return wasDefault, s.Delete(ctx, KeyPrefix+id.String())
+}
+
+func ResolveKeyReference(ctx context.Context, s logical.Storage, reference string) (KeyID, error) {
+	if reference == DefaultRef {
+		// Handle fetching the default key.
+		config, err := GetKeysConfig(ctx, s)
+		if err != nil {
+			return KeyID("config-error"), err
+		}
+		if len(config.DefaultKeyId) == 0 {
+			return KeyRefNotFound, fmt.Errorf("no default key currently configured")
+		}
+
+		return config.DefaultKeyId, nil
+	}
+
+	// Lookup by a direct get first to see if our reference is an ID; this is quick and cached.
+	if len(reference) == uuidLength {
+		entry, err := s.Get(ctx, KeyPrefix+reference)
+		if err != nil {
+			return KeyID("key-read"), err
+		}
+		if entry != nil {
+			return KeyID(reference), nil
+		}
+	}
+
+	// Otherwise, fall back to pulling all keys from storage.
+	keys, err := ListKeys(ctx, s)
+	if err != nil {
+		return KeyID("list-error"), err
+	}
+	for _, keyId := range keys {
+		key, err := FetchKeyById(ctx, s, keyId)
+		if err != nil {
+			return KeyID("key-read"), err
+		}
+
+		if key.Name == reference {
+			return key.ID, nil
+		}
+	}
+
+	// Otherwise, we must not have found the key.
+	return KeyRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI key for reference: %v", reference)}
+}
+
+func GetManagedKeyUUID(key *KeyEntry) (managed_key.UUIDKey, error) {
+	if !key.IsManagedPrivateKey() {
+		return "", errutil.InternalError{Err: fmt.Sprintf("GetManagedKeyUUID called on key id %s (%s) that is not a managed key", key.ID, key.Name)}
+	}
+	return managed_key.ExtractManagedKeyId([]byte(key.PrivateKey))
+}
diff --git a/builtin/logical/pki/issuing/roles.go b/builtin/logical/pki/issuing/roles.go
new file mode 100644
index 000000000000..86b15c2df1e7
--- /dev/null
+++ b/builtin/logical/pki/issuing/roles.go
@@ -0,0 +1,452 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + DefaultRoleKeyUsages = []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"} + DefaultRoleEstKeyUsages = []string{} + DefaultRoleEstKeyUsageOids = []string{} +) + +const ( + DefaultRoleSignatureBits = 0 + DefaultRoleUsePss = false +) + +type RoleEntry struct { + LeaseMax string `json:"lease_max"` + Lease string `json:"lease"` + DeprecatedMaxTTL string `json:"max_ttl"` + DeprecatedTTL string `json:"ttl"` + TTL time.Duration `json:"ttl_duration"` + MaxTTL time.Duration `json:"max_ttl_duration"` + AllowLocalhost bool `json:"allow_localhost"` + AllowedBaseDomain string `json:"allowed_base_domain"` + AllowedDomainsOld string `json:"allowed_domains,omitempty"` + AllowedDomains []string `json:"allowed_domains_list"` + AllowedDomainsTemplate bool `json:"allowed_domains_template"` + AllowBaseDomain bool `json:"allow_base_domain"` + AllowBareDomains bool `json:"allow_bare_domains"` + AllowTokenDisplayName bool `json:"allow_token_displayname"` + AllowSubdomains bool `json:"allow_subdomains"` + AllowGlobDomains bool `json:"allow_glob_domains"` + AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` + AllowAnyName bool `json:"allow_any_name"` + EnforceHostnames bool `json:"enforce_hostnames"` + AllowIPSANs bool `json:"allow_ip_sans"` + ServerFlag bool `json:"server_flag"` + ClientFlag bool `json:"client_flag"` + CodeSigningFlag bool `json:"code_signing_flag"` + EmailProtectionFlag bool `json:"email_protection_flag"` + UseCSRCommonName bool `json:"use_csr_common_name"` + UseCSRSANs bool `json:"use_csr_sans"` + KeyType string `json:"key_type"` + KeyBits int `json:"key_bits"` + UsePSS bool `json:"use_pss"` + SignatureBits int `json:"signature_bits"` + MaxPathLength *int `json:",omitempty"` + KeyUsageOld string `json:"key_usage,omitempty"` + KeyUsage []string `json:"key_usage_list"` + ExtKeyUsage []string `json:"extended_key_usage_list"` + OUOld string `json:"ou,omitempty"` + OU []string `json:"ou_list"` + OrganizationOld string `json:"organization,omitempty"` + Organization []string `json:"organization_list"` + Country []string `json:"country"` + Locality []string `json:"locality"` + Province []string `json:"province"` + StreetAddress []string `json:"street_address"` + PostalCode []string `json:"postal_code"` + GenerateLease *bool `json:"generate_lease,omitempty"` + NoStore bool `json:"no_store"` + RequireCN bool `json:"require_cn"` + CNValidations []string `json:"cn_validations"` + AllowedOtherSANs []string `json:"allowed_other_sans"` + AllowedSerialNumbers []string `json:"allowed_serial_numbers"` + AllowedUserIDs []string `json:"allowed_user_ids"` + AllowedURISANs []string `json:"allowed_uri_sans"` + AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` + PolicyIdentifiers []string `json:"policy_identifiers"` + ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` + BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` + NotBeforeDuration time.Duration `json:"not_before_duration"` + NotAfter string `json:"not_after"` + Issuer string `json:"issuer"` + // Name is only set when the role has been stored, on the fly roles have a blank name + Name string `json:"-"` + // WasModified indicates to callers if the returned entry is different than the persisted version + 
WasModified bool `json:"-"`
+}
+
+func (r *RoleEntry) ToResponseData() map[string]interface{} {
+	responseData := map[string]interface{}{
+		"ttl": int64(r.TTL.Seconds()),
+		"max_ttl": int64(r.MaxTTL.Seconds()),
+		"allow_localhost": r.AllowLocalhost,
+		"allowed_domains": r.AllowedDomains,
+		"allowed_domains_template": r.AllowedDomainsTemplate,
+		"allow_bare_domains": r.AllowBareDomains,
+		"allow_token_displayname": r.AllowTokenDisplayName,
+		"allow_subdomains": r.AllowSubdomains,
+		"allow_glob_domains": r.AllowGlobDomains,
+		"allow_wildcard_certificates": r.AllowWildcardCertificates,
+		"allow_any_name": r.AllowAnyName,
+		"allowed_uri_sans_template": r.AllowedURISANsTemplate,
+		"enforce_hostnames": r.EnforceHostnames,
+		"allow_ip_sans": r.AllowIPSANs,
+		"server_flag": r.ServerFlag,
+		"client_flag": r.ClientFlag,
+		"code_signing_flag": r.CodeSigningFlag,
+		"email_protection_flag": r.EmailProtectionFlag,
+		"use_csr_common_name": r.UseCSRCommonName,
+		"use_csr_sans": r.UseCSRSANs,
+		"key_type": r.KeyType,
+		"key_bits": r.KeyBits,
+		"signature_bits": r.SignatureBits,
+		"use_pss": r.UsePSS,
+		"key_usage": r.KeyUsage,
+		"ext_key_usage": r.ExtKeyUsage,
+		"ext_key_usage_oids": r.ExtKeyUsageOIDs,
+		"ou": r.OU,
+		"organization": r.Organization,
+		"country": r.Country,
+		"locality": r.Locality,
+		"province": r.Province,
+		"street_address": r.StreetAddress,
+		"postal_code": r.PostalCode,
+		"no_store": r.NoStore,
+		"allowed_other_sans": r.AllowedOtherSANs,
+		"allowed_serial_numbers": r.AllowedSerialNumbers,
+		"allowed_user_ids": r.AllowedUserIDs,
+		"allowed_uri_sans": r.AllowedURISANs,
+		"require_cn": r.RequireCN,
+		"cn_validations": r.CNValidations,
+		"policy_identifiers": r.PolicyIdentifiers,
+		"basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA,
+		"not_before_duration": int64(r.NotBeforeDuration.Seconds()),
+		"not_after": r.NotAfter,
+		"issuer_ref": r.Issuer,
+	}
+	if r.MaxPathLength != nil {
+		responseData["max_path_length"] = r.MaxPathLength
+	}
+	if r.GenerateLease != nil {
+		responseData["generate_lease"] = r.GenerateLease
+	}
+	return responseData
+}
+
+var ErrRoleNotFound = errors.New("role not found")
+
+// GetRole will load a role from storage based on the provided name and
+// update its contents to the latest version if out of date. The WasModified
+// field will be set to true if modifications were made, indicating that the
+// caller should, if possible, write them back to disk. If the role is not
+// found, an ErrRoleNotFound will be returned as an error.
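As one concrete migration the function below performs: legacy roles stored TTLs as strings, which are parsed via parseutil and moved into the duration fields. A condensed, hypothetical sketch of that single upgrade step, assuming go-secure-stdlib's parseutil (the `legacyRole` type is a stand-in, not the diff's RoleEntry):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

// legacyRole is a hypothetical stand-in for the deprecated fields that
// GetRole migrates below.
type legacyRole struct {
	DeprecatedTTL string        // e.g. "720h", stored by very old roles
	TTL           time.Duration // canonical field on current roles
}

// upgrade moves the deprecated string TTL into the duration field and
// reports whether anything changed, mirroring the "modified" bookkeeping.
func upgrade(r *legacyRole) (bool, error) {
	modified := false
	if r.TTL == 0 && len(r.DeprecatedTTL) != 0 {
		parsed, err := parseutil.ParseDurationSecond(r.DeprecatedTTL)
		if err != nil {
			return false, err
		}
		r.TTL = parsed
		r.DeprecatedTTL = ""
		modified = true
	}
	return modified, nil
}

func main() {
	r := &legacyRole{DeprecatedTTL: "720h"}
	modified, err := upgrade(r)
	fmt.Println(r.TTL, modified, err) // 720h0m0s true <nil>
}
```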
+func GetRole(ctx context.Context, s logical.Storage, n string) (*RoleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, fmt.Errorf("failed to load role %s: %w", n, err) + } + if entry == nil { + return nil, fmt.Errorf("%w: with name %s", ErrRoleNotFound, n) + } + + var result RoleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, fmt.Errorf("failed decoding role %s: %w", n, err) + } + + // Migrate existing saved entries and save back if changed + modified := false + if len(result.DeprecatedTTL) == 0 && len(result.Lease) != 0 { + result.DeprecatedTTL = result.Lease + result.Lease = "" + modified = true + } + if result.TTL == 0 && len(result.DeprecatedTTL) != 0 { + parsed, err := parseutil.ParseDurationSecond(result.DeprecatedTTL) + if err != nil { + return nil, err + } + result.TTL = parsed + result.DeprecatedTTL = "" + modified = true + } + if len(result.DeprecatedMaxTTL) == 0 && len(result.LeaseMax) != 0 { + result.DeprecatedMaxTTL = result.LeaseMax + result.LeaseMax = "" + modified = true + } + if result.MaxTTL == 0 && len(result.DeprecatedMaxTTL) != 0 { + parsed, err := parseutil.ParseDurationSecond(result.DeprecatedMaxTTL) + if err != nil { + return nil, fmt.Errorf("failed parsing max_ttl field in %s: %w", n, err) + } + result.MaxTTL = parsed + result.DeprecatedMaxTTL = "" + modified = true + } + if result.AllowBaseDomain { + result.AllowBaseDomain = false + result.AllowBareDomains = true + modified = true + } + if result.AllowedDomainsOld != "" { + result.AllowedDomains = strings.Split(result.AllowedDomainsOld, ",") + result.AllowedDomainsOld = "" + modified = true + } + if result.AllowedBaseDomain != "" { + found := false + for _, v := range result.AllowedDomains { + if v == result.AllowedBaseDomain { + found = true + break + } + } + if !found { + result.AllowedDomains = append(result.AllowedDomains, result.AllowedBaseDomain) + } + result.AllowedBaseDomain = "" + modified = true + } + if result.AllowWildcardCertificates == nil { + // While not the most secure default, when AllowWildcardCertificates isn't + // explicitly specified in the stored Role, we automatically upgrade it to + // true to preserve compatibility with previous versions of Vault. Once this + // field is set, this logic will not be triggered any more. + result.AllowWildcardCertificates = new(bool) + *result.AllowWildcardCertificates = true + modified = true + } + + // Upgrade generate_lease in role + if result.GenerateLease == nil { + // All the new roles will have GenerateLease always set to a Value. A + // nil Value indicates that this role needs an upgrade. Set it to + // `true` to not alter its current behavior. + result.GenerateLease = new(bool) + *result.GenerateLease = true + modified = true + } + + // Upgrade key usages + if result.KeyUsageOld != "" { + result.KeyUsage = strings.Split(result.KeyUsageOld, ",") + result.KeyUsageOld = "" + modified = true + } + + // Upgrade OU + if result.OUOld != "" { + result.OU = strings.Split(result.OUOld, ",") + result.OUOld = "" + modified = true + } + + // Upgrade Organization + if result.OrganizationOld != "" { + result.Organization = strings.Split(result.OrganizationOld, ",") + result.OrganizationOld = "" + modified = true + } + + // Set the issuer field to default if not set. We want to do this + // unconditionally as we should probably never have an empty issuer + // on a stored roles. 
+ if len(result.Issuer) == 0 { + result.Issuer = DefaultRef + modified = true + } + + // Update CN Validations to be the present default, "email,hostname" + if len(result.CNValidations) == 0 { + result.CNValidations = []string{"email", "hostname"} + modified = true + } + + result.Name = n + result.WasModified = modified + + return &result, nil +} + +type RoleModifier func(r *RoleEntry) + +func WithKeyUsage(keyUsages []string) RoleModifier { + return func(r *RoleEntry) { + r.KeyUsage = keyUsages + } +} + +func WithExtKeyUsage(extKeyUsages []string) RoleModifier { + return func(r *RoleEntry) { + r.ExtKeyUsage = extKeyUsages + } +} + +func WithExtKeyUsageOIDs(extKeyUsageOids []string) RoleModifier { + return func(r *RoleEntry) { + r.ExtKeyUsageOIDs = extKeyUsageOids + } +} + +func WithSignatureBits(signatureBits int) RoleModifier { + return func(r *RoleEntry) { + r.SignatureBits = signatureBits + } +} + +func WithUsePSS(usePss bool) RoleModifier { + return func(r *RoleEntry) { + r.UsePSS = usePss + } +} + +func WithTTL(ttl time.Duration) RoleModifier { + return func(r *RoleEntry) { + r.TTL = ttl + } +} + +func WithMaxTTL(ttl time.Duration) RoleModifier { + return func(r *RoleEntry) { + r.MaxTTL = ttl + } +} + +func WithGenerateLease(genLease bool) RoleModifier { + return func(r *RoleEntry) { + *r.GenerateLease = genLease + } +} + +func WithNotBeforeDuration(ttl time.Duration) RoleModifier { + return func(r *RoleEntry) { + r.NotBeforeDuration = ttl + } +} + +func WithNoStore(noStore bool) RoleModifier { + return func(r *RoleEntry) { + r.NoStore = noStore + } +} + +func WithIssuer(issuer string) RoleModifier { + return func(r *RoleEntry) { + if issuer == "" { + issuer = DefaultRef + } + r.Issuer = issuer + } +} + +// SignVerbatimRole create a sign-verbatim role with no overrides. This will store +// the signed certificate, allowing any key type and Value from a role restriction. +func SignVerbatimRole() *RoleEntry { + return SignVerbatimRoleWithOpts() +} + +// SignVerbatimRoleWithOpts create a sign-verbatim role with the normal defaults, +// but allowing any field to be tweaked based on the consumers needs. 
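Callers compose these RoleModifier options functional-options style. A usage sketch, assuming this diff's package path resolves inside a Vault checkout:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/builtin/logical/pki/issuing"
)

func main() {
	// Build a sign-verbatim role but override a few fields; each With*
	// modifier from above mutates the entry in place.
	role := issuing.SignVerbatimRoleWithOpts(
		issuing.WithTTL(2*time.Hour),
		issuing.WithNoStore(true),
		issuing.WithIssuer("my-intermediate"),
	)
	fmt.Println(role.TTL, role.NoStore, role.Issuer) // 2h0m0s true my-intermediate
}
```

Nil options and a nil slice are tolerated by the constructor, so callers can pass a conditionally built option list without filtering it first.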
+func SignVerbatimRoleWithOpts(opts ...RoleModifier) *RoleEntry { + entry := &RoleEntry{ + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + KeyType: "any", + UseCSRCommonName: true, + UseCSRSANs: true, + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedURISANs: []string{"*"}, + AllowedUserIDs: []string{"*"}, + CNValidations: []string{"disabled"}, + GenerateLease: new(bool), + KeyUsage: DefaultRoleKeyUsages, + ExtKeyUsage: DefaultRoleEstKeyUsages, + ExtKeyUsageOIDs: DefaultRoleEstKeyUsageOids, + SignatureBits: DefaultRoleSignatureBits, + UsePSS: DefaultRoleUsePss, + } + *entry.AllowWildcardCertificates = true + *entry.GenerateLease = false + + if opts != nil { + for _, opt := range opts { + if opt != nil { + opt(entry) + } + } + } + + return entry +} + +func ParseExtKeyUsagesFromRole(role *RoleEntry) certutil.CertExtKeyUsage { + var parsedKeyUsages certutil.CertExtKeyUsage + + if role.ServerFlag { + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + } + + if role.ClientFlag { + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + } + + if role.CodeSigningFlag { + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + } + + if role.EmailProtectionFlag { + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + } + + for _, k := range role.ExtKeyUsage { + switch strings.ToLower(strings.TrimSpace(k)) { + case "any": + parsedKeyUsages |= certutil.AnyExtKeyUsage + case "serverauth": + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + case "clientauth": + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + case "codesigning": + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + case "emailprotection": + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + case "ipsecendsystem": + parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage + case "ipsectunnel": + parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage + case "ipsecuser": + parsedKeyUsages |= certutil.IpsecUserExtKeyUsage + case "timestamping": + parsedKeyUsages |= certutil.TimeStampingExtKeyUsage + case "ocspsigning": + parsedKeyUsages |= certutil.OcspSigningExtKeyUsage + case "microsoftservergatedcrypto": + parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage + case "netscapeservergatedcrypto": + parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage + } + } + + return parsedKeyUsages +} diff --git a/builtin/logical/pki/issuing/sign_cert.go b/builtin/logical/pki/issuing/sign_cert.go new file mode 100644 index 000000000000..773e8b2a96d2 --- /dev/null +++ b/builtin/logical/pki/issuing/sign_cert.go @@ -0,0 +1,291 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package issuing + +import ( + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type SignCertInput interface { + CreationBundleInput + GetCSR() (*x509.CertificateRequest, error) + IsCA() bool + UseCSRValues() bool + GetPermittedDomains() []string +} + +func NewBasicSignCertInput(csr *x509.CertificateRequest, isCA bool, useCSRValues bool) BasicSignCertInput { + return BasicSignCertInput{ + isCA: isCA, + useCSRValues: useCSRValues, + csr: csr, + } +} + +var _ SignCertInput = BasicSignCertInput{} + +type BasicSignCertInput struct { + isCA bool + useCSRValues bool + csr *x509.CertificateRequest +} + +func (b BasicSignCertInput) GetTTL() int { + return 0 +} + +func (b BasicSignCertInput) GetOptionalNotAfter() (interface{}, bool) { + return "", false +} + +func (b BasicSignCertInput) GetCommonName() string { + return "" +} + +func (b BasicSignCertInput) GetSerialNumber() string { + return "" +} + +func (b BasicSignCertInput) GetExcludeCnFromSans() bool { + return false +} + +func (b BasicSignCertInput) GetOptionalAltNames() (interface{}, bool) { + return []string{}, false +} + +func (b BasicSignCertInput) GetOtherSans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetIpSans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetURISans() []string { + return []string{} +} + +func (b BasicSignCertInput) GetOptionalSkid() (interface{}, bool) { + return "", false +} + +func (b BasicSignCertInput) IsUserIdInSchema() (interface{}, bool) { + return []string{}, false +} + +func (b BasicSignCertInput) GetUserIds() []string { + return []string{} +} + +func (b BasicSignCertInput) GetCSR() (*x509.CertificateRequest, error) { + return b.csr, nil +} + +func (b BasicSignCertInput) IsCA() bool { + return b.isCA +} + +func (b BasicSignCertInput) UseCSRValues() bool { + return b.useCSRValues +} + +func (b BasicSignCertInput) GetPermittedDomains() []string { + return []string{} +} + +func SignCert(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, caSign *certutil.CAInfoBundle, signInput SignCertInput) (*certutil.ParsedCertBundle, []string, error) { + if role == nil { + return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} + } + + csr, err := signInput.GetCSR() + if err != nil { + return nil, nil, err + } + + if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { + return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} + } + + // This switch validates that the CSR key type matches the role and sets + // the Value in the actualKeyType/actualKeyBits values. 
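The switch that follows boils down to reading the algorithm and size out of the CSR's public key and recording them in actualKeyType/actualKeyBits. A standalone sketch of just that extraction, minus the role checks (the helper name is hypothetical):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// keyTypeAndBits mirrors the actualKeyType/actualKeyBits bookkeeping in
// the switch below: RSA reports its modulus length, ECDSA its curve size,
// and Ed25519 has a fixed size so bits are reported as zero.
func keyTypeAndBits(pub interface{}) (string, int) {
	switch k := pub.(type) {
	case *rsa.PublicKey:
		return "rsa", k.N.BitLen()
	case *ecdsa.PublicKey:
		return "ec", k.Params().BitSize
	case ed25519.PublicKey:
		return "ed25519", 0
	default:
		return "unknown", 0
	}
}

func main() {
	rsaKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	ecKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	edPub, _, _ := ed25519.GenerateKey(rand.Reader)

	for _, pub := range []interface{}{&rsaKey.PublicKey, &ecKey.PublicKey, edPub} {
		t, bits := keyTypeAndBits(pub)
		fmt.Println(t, bits) // rsa 2048, ec 256, ed25519 0
	}
}
```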
+ actualKeyType := "" + actualKeyBits := 0 + + switch role.KeyType { + case "rsa": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.RSA { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("role requires keys of type %s", role.KeyType)} + } + + pubKey, ok := csr.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "rsa" + actualKeyBits = pubKey.N.BitLen() + case "ec": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.ECDSA { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires keys of type %s", + role.KeyType)} + } + pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ec" + actualKeyBits = pubKey.Params().BitSize + case "ed25519": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.Ed25519 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires keys of type %s", + role.KeyType)} + } + + _, ok := csr.PublicKey.(ed25519.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ed25519" + actualKeyBits = 0 + case "any": + // We need to compute the actual key type and key bits, to correctly + // validate minimums and SignatureBits below. + switch csr.PublicKeyAlgorithm { + case x509.RSA: + pubKey, ok := csr.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + if pubKey.N.BitLen() < 2048 { + return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} + } + + actualKeyType = "rsa" + actualKeyBits = pubKey.N.BitLen() + case x509.ECDSA: + pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ec" + actualKeyBits = pubKey.Params().BitSize + case x509.Ed25519: + _, ok := csr.PublicKey.(ed25519.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ed25519" + actualKeyBits = 0 + default: + return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} + } + default: + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type Value: %s", role.KeyType)} + } + + // Before validating key lengths, update our KeyBits/SignatureBits based + // on the actual CSR key type. + if role.KeyType == "any" { + // We update the Value of KeyBits and SignatureBits here (from the + // role), using the specified key type. This allows us to convert + // the default Value (0) for SignatureBits and KeyBits to a + // meaningful Value. + // + // We ignore the role's original KeyBits Value if the KeyType is any + // as legacy (pre-1.10) roles had default values that made sense only + // for RSA keys (key_bits=2048) and the older code paths ignored the role Value + // set for KeyBits when KeyType was set to any. 
+		var err error
+		if role.KeyBits, role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength(
+			actualKeyType, 0, role.SignatureBits); err != nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)}
+		}
+
+		// We're using the KeyBits field as a minimum value below, and P-224 is
+		// safe and a previously allowed value. However, the above call defaults
+		// to P-256 as that's a saner default than P-224 (w.r.t. generation), so
+		// override it here to allow 224 as the smallest size we permit.
+		if actualKeyType == "ec" {
+			role.KeyBits = 224
+		}
+	}
+
+	// At this point, role.KeyBits and role.SignatureBits should both be
+	// non-zero for RSA and ECDSA keys. Validate the actualKeyBits based on
+	// the role's values. If the KeyType was "any" and KeyBits was set to 0,
+	// KeyBits has been updated to 2048 unless some other value was chosen
+	// explicitly.
+	//
+	// This validation needs to occur regardless of the role's key type, so
+	// that we always validate both RSA and ECDSA key sizes.
+	if actualKeyType == "rsa" {
+		if actualKeyBits < role.KeyBits {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+				role.KeyBits, actualKeyBits)}
+		}
+
+		if actualKeyBits < 2048 {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits",
+				actualKeyBits)}
+		}
+	} else if actualKeyType == "ec" {
+		if actualKeyBits < role.KeyBits {
+			return nil, nil, errutil.UserError{Err: fmt.Sprintf(
+				"role requires a minimum of a %d-bit key, but CSR's key is %d bits",
+				role.KeyBits,
+				actualKeyBits)}
+		}
+	}
+
+	creation, warnings, err := GenerateCreationBundle(b, role, entityInfo, signInput, caSign, csr)
+	if err != nil {
+		return nil, nil, err
+	}
+	if creation.Params == nil {
+		return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"}
+	}
+
+	creation.Params.IsCA = signInput.IsCA()
+	creation.Params.UseCSRValues = signInput.UseCSRValues()
+
+	if signInput.IsCA() {
+		creation.Params.PermittedDNSDomains = signInput.GetPermittedDomains()
+	} else {
+		for _, ext := range csr.Extensions {
+			if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
+				warnings = append(warnings, "specified CSR contained a Basic Constraints extension that was ignored during issuance")
+			}
+		}
+	}
+
+	parsedBundle, err := certutil.SignCertificate(creation)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return parsedBundle, warnings, nil
+}
diff --git a/builtin/logical/pki/key_util.go b/builtin/logical/pki/key_util.go
index c40831714625..ba0f1c956ec4 100644
--- a/builtin/logical/pki/key_util.go
+++ b/builtin/logical/pki/key_util.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -9,9 +12,12 @@ import ( "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" ) -func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.PublicKey) (bool, error) { +func comparePublicKey(sc *storageContext, key *issuing.KeyEntry, publicKey crypto.PublicKey) (bool, error) { publicKeyForKeyEntry, err := getPublicKey(sc.Context, sc.Backend, key) if err != nil { return false, err @@ -20,13 +26,9 @@ func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.Public return certutil.ComparePublicKeysAndType(publicKeyForKeyEntry, publicKey) } -func getPublicKey(ctx context.Context, b *backend, key *keyEntry) (crypto.PublicKey, error) { +func getPublicKey(ctx context.Context, b *backend, key *issuing.KeyEntry) (crypto.PublicKey, error) { if key.PrivateKeyType == certutil.ManagedPrivateKey { - keyId, err := extractManagedKeyId([]byte(key.PrivateKey)) - if err != nil { - return nil, err - } - return getManagedKeyPublicKey(ctx, b, keyId) + return managed_key.GetPublicKeyFromKeyBytes(ctx, b, []byte(key.PrivateKey)) } signer, _, _, err := getSignerFromKeyEntryBytes(key) @@ -36,7 +38,7 @@ func getPublicKey(ctx context.Context, b *backend, key *keyEntry) (crypto.Public return signer.Public(), nil } -func getSignerFromKeyEntryBytes(key *keyEntry) (crypto.Signer, certutil.BlockType, *pem.Block, error) { +func getSignerFromKeyEntryBytes(key *issuing.KeyEntry) (crypto.Signer, certutil.BlockType, *pem.Block, error) { if key.PrivateKeyType == certutil.UnknownPrivateKey { return nil, certutil.UnknownBlock, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported unknown private key type for key: %s (%s)", key.ID, key.Name)} } @@ -75,7 +77,7 @@ func getPublicKeyFromBytes(keyBytes []byte) (crypto.PublicKey, error) { return signer.Public(), nil } -func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*keyEntry, bool, error) { +func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*issuing.KeyEntry, bool, error) { signer, _, _, err := getSignerFromBytes([]byte(keyValue)) if err != nil { return nil, false, err diff --git a/builtin/logical/pki/managed_key/common.go b/builtin/logical/pki/managed_key/common.go new file mode 100644 index 000000000000..4637dadb4602 --- /dev/null +++ b/builtin/logical/pki/managed_key/common.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package managed_key + +import ( + "crypto" + "io" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type ManagedKeyInfo struct { + publicKey crypto.PublicKey + KeyType certutil.PrivateKeyType + Name NameKey + Uuid UUIDKey +} + +type managedKeyId interface { + String() string +} + +type PkiManagedKeyView interface { + BackendUUID() string + IsSecondaryNode() bool + GetManagedKeyView() (logical.ManagedKeySystemView, error) + GetRandomReader() io.Reader +} + +type ( + UUIDKey string + NameKey string +) + +func (u UUIDKey) String() string { + return string(u) +} + +func (n NameKey) String() string { + return string(n) +} diff --git a/builtin/logical/pki/managed_key/managed_key_util_oss.go b/builtin/logical/pki/managed_key/managed_key_util_oss.go new file mode 100644 index 000000000000..ad92b39c6c19 --- /dev/null +++ b/builtin/logical/pki/managed_key/managed_key_util_oss.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package managed_key + +import ( + "context" + "crypto" + "errors" + "io" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func GetPublicKeyFromKeyBytes(ctx context.Context, mkv PkiManagedKeyView, keyBytes []byte) (crypto.PublicKey, error) { + return nil, errEntOnly +} + +func GenerateManagedKeyCABundle(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId, data *certutil.CreationBundle, randomSource io.Reader) (bundle *certutil.ParsedCertBundle, err error) { + return nil, errEntOnly +} + +func GenerateManagedKeyCSRBundle(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (bundle *certutil.ParsedCSRBundle, err error) { + return nil, errEntOnly +} + +func GetManagedKeyPublicKey(ctx context.Context, b PkiManagedKeyView, keyId managedKeyId) (crypto.PublicKey, error) { + return nil, errEntOnly +} + +func ParseManagedKeyCABundle(ctx context.Context, mkv PkiManagedKeyView, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + return nil, errEntOnly +} + +func ExtractManagedKeyId(privateKeyBytes []byte) (UUIDKey, error) { + return "", errEntOnly +} + +func CreateKmsKeyBundle(ctx context.Context, mkv PkiManagedKeyView, keyId managedKeyId) (certutil.KeyBundle, certutil.PrivateKeyType, error) { + return certutil.KeyBundle{}, certutil.UnknownPrivateKey, errEntOnly +} + +func GetManagedKeyInfo(ctx context.Context, mkv PkiManagedKeyView, keyId managedKeyId) (*ManagedKeyInfo, error) { + return nil, errEntOnly +} diff --git a/builtin/logical/pki/managed_key_util.go b/builtin/logical/pki/managed_key_util.go deleted file mode 100644 index 29ab43381329..000000000000 --- a/builtin/logical/pki/managed_key_util.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !enterprise - -package pki - -import ( - "context" - "crypto" - "errors" - "io" - - "github.com/hashicorp/vault/sdk/helper/certutil" -) - -var errEntOnly = errors.New("managed keys are supported within enterprise edition only") - -func generateManagedKeyCABundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, randomSource io.Reader) (bundle *certutil.ParsedCertBundle, err error) { - return nil, errEntOnly -} - -func generateManagedKeyCSRBundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, 
addBasicConstraints bool, randomSource io.Reader) (bundle *certutil.ParsedCSRBundle, err error) { - return nil, errEntOnly -} - -func getManagedKeyPublicKey(ctx context.Context, b *backend, keyId managedKeyId) (crypto.PublicKey, error) { - return nil, errEntOnly -} - -func parseManagedKeyCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { - return nil, errEntOnly -} - -func extractManagedKeyId(privateKeyBytes []byte) (UUIDKey, error) { - return "", errEntOnly -} - -func createKmsKeyBundle(ctx context.Context, b *backend, keyId managedKeyId) (certutil.KeyBundle, certutil.PrivateKeyType, error) { - return certutil.KeyBundle{}, certutil.UnknownPrivateKey, errEntOnly -} - -func getManagedKeyInfo(ctx context.Context, b *backend, keyId managedKeyId) (*managedKeyInfo, error) { - return nil, errEntOnly -} diff --git a/builtin/logical/pki/metrics.go b/builtin/logical/pki/metrics.go new file mode 100644 index 000000000000..c1c8b528c3e1 --- /dev/null +++ b/builtin/logical/pki/metrics.go @@ -0,0 +1,263 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "errors" + "sort" + "strings" + "sync/atomic" + + "github.com/armon/go-metrics" +) + +type CertificateCounter struct { + certCountEnabled *atomic.Bool + publishCertCountMetrics *atomic.Bool + certCount *atomic.Uint32 + revokedCertCount *atomic.Uint32 + certsCounted *atomic.Bool + certCountError error + possibleDoubleCountedSerials []string + possibleDoubleCountedRevokedSerials []string + backendUuid string +} + +func (c *CertificateCounter) IsInitialized() bool { + return c.certsCounted.Load() +} + +func (c *CertificateCounter) IsEnabled() bool { + return c.certCountEnabled.Load() +} + +func (c *CertificateCounter) Error() error { + return c.certCountError +} + +func (c *CertificateCounter) SetError(err error) { + c.certCountError = err +} + +func (c *CertificateCounter) ReconfigureWithTidyConfig(config *tidyConfig) bool { + if config.MaintainCount { + c.enableCertCounting(config.PublishMetrics) + } else { + c.disableCertCounting() + } + + return config.MaintainCount +} + +func (c *CertificateCounter) disableCertCounting() { + c.possibleDoubleCountedRevokedSerials = nil + c.possibleDoubleCountedSerials = nil + c.certsCounted.Store(false) + c.certCount.Store(0) + c.revokedCertCount.Store(0) + c.certCountError = errors.New("Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts") + c.certCountEnabled.Store(false) + c.publishCertCountMetrics.Store(false) +} + +func (c *CertificateCounter) enableCertCounting(publishMetrics bool) { + c.publishCertCountMetrics.Store(publishMetrics) + c.certCountEnabled.Store(true) + + if !c.certsCounted.Load() { + c.certCountError = errors.New("Certificate Counting Has Not Been Initialized, re-initialize this mount") + } +} + +func (c *CertificateCounter) InitializeCountsFromStorage(certs, revoked []string) { + c.certCount.Add(uint32(len(certs))) + c.revokedCertCount.Add(uint32(len(revoked))) + + c.pruneDuplicates(certs, revoked) + c.certCountError = nil + c.certsCounted.Store(true) + + c.emitTotalCertCountMetric() +} + +func (c *CertificateCounter) pruneDuplicates(entries, revokedEntries []string) { + // Now that the metrics are set, we can switch from appending newly-stored certificates to the possible double-count + // list, and instead have them update the counter directly. We need to do this so that we are looking at a static + // slice of possibly double counted serials. 
Note that certsCounted is computed before the storage operation, so + // there may be some delay here. + + // Sort the listed-entries first, to accommodate that delay. + sort.Slice(entries, func(i, j int) bool { + return entries[i] < entries[j] + }) + + sort.Slice(revokedEntries, func(i, j int) bool { + return revokedEntries[i] < revokedEntries[j] + }) + + // We assume here that these lists are now complete. + sort.Slice(c.possibleDoubleCountedSerials, func(i, j int) bool { + return c.possibleDoubleCountedSerials[i] < c.possibleDoubleCountedSerials[j] + }) + + listEntriesIndex := 0 + possibleDoubleCountIndex := 0 + for { + if listEntriesIndex >= len(entries) { + break + } + if possibleDoubleCountIndex >= len(c.possibleDoubleCountedSerials) { + break + } + if entries[listEntriesIndex] == c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + // This represents a double-counted entry + c.decrementTotalCertificatesCountNoReport() + listEntriesIndex = listEntriesIndex + 1 + possibleDoubleCountIndex = possibleDoubleCountIndex + 1 + continue + } + if entries[listEntriesIndex] < c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + listEntriesIndex = listEntriesIndex + 1 + continue + } + if entries[listEntriesIndex] > c.possibleDoubleCountedSerials[possibleDoubleCountIndex] { + possibleDoubleCountIndex = possibleDoubleCountIndex + 1 + continue + } + } + + sort.Slice(c.possibleDoubleCountedRevokedSerials, func(i, j int) bool { + return c.possibleDoubleCountedRevokedSerials[i] < c.possibleDoubleCountedRevokedSerials[j] + }) + + listRevokedEntriesIndex := 0 + possibleRevokedDoubleCountIndex := 0 + for { + if listRevokedEntriesIndex >= len(revokedEntries) { + break + } + if possibleRevokedDoubleCountIndex >= len(c.possibleDoubleCountedRevokedSerials) { + break + } + if revokedEntries[listRevokedEntriesIndex] == c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + // This represents a double-counted revoked entry + c.decrementTotalRevokedCertificatesCountNoReport() + listRevokedEntriesIndex = listRevokedEntriesIndex + 1 + possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 + continue + } + if revokedEntries[listRevokedEntriesIndex] < c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + listRevokedEntriesIndex = listRevokedEntriesIndex + 1 + continue + } + if revokedEntries[listRevokedEntriesIndex] > c.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] { + possibleRevokedDoubleCountIndex = possibleRevokedDoubleCountIndex + 1 + continue + } + } + + c.possibleDoubleCountedRevokedSerials = nil + c.possibleDoubleCountedSerials = nil +} + +func (c *CertificateCounter) decrementTotalCertificatesCountNoReport() uint32 { + newCount := c.certCount.Add(^uint32(0)) + return newCount +} + +func (c *CertificateCounter) decrementTotalRevokedCertificatesCountNoReport() uint32 { + newRevokedCertCount := c.revokedCertCount.Add(^uint32(0)) + return newRevokedCertCount +} + +func (c *CertificateCounter) CertificateCount() uint32 { + return c.certCount.Load() +} + +func (c *CertificateCounter) RevokedCount() uint32 { + return c.revokedCertCount.Load() +} + +func (c *CertificateCounter) IncrementTotalCertificatesCount(certsCounted bool, newSerial string) { + if c.certCountEnabled.Load() { + c.certCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "certs/") { + newSerial = newSerial[6:] + } + c.possibleDoubleCountedSerials = 
append(c.possibleDoubleCountedSerials, newSerial) + default: + c.emitTotalCertCountMetric() + } + } +} + +// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: +// eg. certsCounted := certCounter.IsInitialized() +func (c *CertificateCounter) IncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { + if c.certCountEnabled.Load() { + c.revokedCertCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial + newSerial = newSerial[8:] + } + c.possibleDoubleCountedRevokedSerials = append(c.possibleDoubleCountedRevokedSerials, newSerial) + default: + c.emitTotalRevokedCountMetric() + } + } +} + +func (c *CertificateCounter) DecrementTotalCertificatesCountReport() { + if c.certCountEnabled.Load() { + c.decrementTotalCertificatesCountNoReport() + c.emitTotalCertCountMetric() + } +} + +func (c *CertificateCounter) DecrementTotalRevokedCertificatesCountReport() { + if c.certCountEnabled.Load() { + c.decrementTotalRevokedCertificatesCountNoReport() + c.emitTotalRevokedCountMetric() + } +} + +func (c *CertificateCounter) EmitCertStoreMetrics() { + c.emitTotalCertCountMetric() + c.emitTotalRevokedCountMetric() +} + +func (c *CertificateCounter) emitTotalCertCountMetric() { + if c.publishCertCountMetrics.Load() { + certCount := float32(c.CertificateCount()) + metrics.SetGauge([]string{"secrets", "pki", c.backendUuid, "total_certificates_stored"}, certCount) + } +} + +func (c *CertificateCounter) emitTotalRevokedCountMetric() { + if c.publishCertCountMetrics.Load() { + revokedCount := float32(c.RevokedCount()) + metrics.SetGauge([]string{"secrets", "pki", c.backendUuid, "total_revoked_certificates_stored"}, revokedCount) + } +} + +func NewCertificateCounter(backendUuid string) *CertificateCounter { + counter := &CertificateCounter{ + backendUuid: backendUuid, + certCountEnabled: &atomic.Bool{}, + publishCertCountMetrics: &atomic.Bool{}, + certCount: &atomic.Uint32{}, + revokedCertCount: &atomic.Uint32{}, + certsCounted: &atomic.Bool{}, + certCountError: errors.New("Initialize Not Yet Run, Cert Counts Unavailable"), + possibleDoubleCountedSerials: make([]string, 0, 250), + possibleDoubleCountedRevokedSerials: make([]string, 0, 250), + } + + return counter +} diff --git a/builtin/logical/pki/ocsp.go b/builtin/logical/pki/ocsp.go deleted file mode 100644 index 85d532503b9c..000000000000 --- a/builtin/logical/pki/ocsp.go +++ /dev/null @@ -1,445 +0,0 @@ -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "errors" - "fmt" - "io" - "math/big" - "net/http" - "time" - - "github.com/hashicorp/vault/sdk/helper/errutil" - - "golang.org/x/crypto/ocsp" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - ocspReqParam = "req" - ocspResponseContentType = "application/ocsp-response" - maximumRequestSize = 2048 // A normal simple request is 87 bytes, so give us some buffer -) - -type ocspRespInfo struct { - serialNumber *big.Int - ocspStatus int - revocationTimeUTC *time.Time - issuerID issuerID -} - -// These response variables should not be mutated, instead treat them as constants -var ( - OcspUnauthorizedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: 
ocspResponseContentType, - logical.HTTPStatusCode: http.StatusUnauthorized, - logical.HTTPRawBody: ocsp.UnauthorizedErrorResponse, - }, - } - OcspMalformedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusBadRequest, - logical.HTTPRawBody: ocsp.MalformedRequestErrorResponse, - }, - } - OcspInternalErrorResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusInternalServerError, - logical.HTTPRawBody: ocsp.InternalErrorErrorResponse, - }, - } - - ErrMissingOcspUsage = errors.New("issuer entry did not have the OCSPSigning usage") - ErrIssuerHasNoKey = errors.New("issuer has no key") - ErrUnknownIssuer = errors.New("unknown issuer") -) - -func buildPathOcspGet(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "ocsp/" + framework.MatchAllRegex(ocspReqParam), - Fields: map[string]*framework.FieldSchema{ - ocspReqParam: { - Type: framework.TypeString, - Description: "base-64 encoded ocsp request", - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, - - HelpSynopsis: pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func buildPathOcspPost(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "ocsp", - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, - - HelpSynopsis: pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, request.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil || cfg.OcspDisable { - return OcspUnauthorizedResponse, nil - } - - derReq, err := fetchDerEncodedRequest(request, data) - if err != nil { - return OcspMalformedResponse, nil - } - - ocspReq, err := ocsp.ParseRequest(derReq) - if err != nil { - return OcspMalformedResponse, nil - } - - ocspStatus, err := getOcspStatus(sc, request, ocspReq) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - caBundle, issuer, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) - if err != nil { - if errors.Is(err, ErrUnknownIssuer) { - // Since we were not able to find a matching issuer for the incoming request - // generate an Unknown OCSP response. This might turn into an Unauthorized if - // we find out that we don't have a default issuer or it's missing the proper Usage flags - return generateUnknownResponse(cfg, sc, ocspReq), nil - } - if errors.Is(err, ErrMissingOcspUsage) { - // If we did find a matching issuer but aren't allowed to sign, the spec says - // we should be responding with an Unauthorized response as we don't have the - // ability to sign the response. 
- // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 - return OcspUnauthorizedResponse, nil - } - return logAndReturnInternalError(b, err), nil - } - - byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - }, nil -} - -func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { - // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did - // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there - // isn't much else we can do at this point. - config, err := sc.getIssuersConfig() - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - if config.DefaultIssuerId == "" { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - - caBundle, issuer, err := getOcspIssuerParsedBundle(sc, config.DefaultIssuerId) - if err != nil { - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // We must have raced on a delete/update of the default issuer, anyways - // no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - return logAndReturnInternalError(sc.Backend, err) - } - - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - - info := &ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Unknown, - } - - byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - } -} - -func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) ([]byte, error) { - switch request.Operation { - case logical.ReadOperation: - // The param within the GET request should have a base64 encoded version of a DER request. - base64Req := data.Get(ocspReqParam).(string) - if base64Req == "" { - return nil, errors.New("no base64 encoded ocsp request was found") - } - - if len(base64Req) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - - return base64.StdEncoding.DecodeString(base64Req) - case logical.UpdateOperation: - // POST bodies should contain the binary form of the DER request. - // NOTE: Writing an empty update request to Vault causes a nil request.HTTPRequest, and that object - // says that it is possible for its Body element to be nil as well, so check both just in case. 
- if request.HTTPRequest == nil { - return nil, errors.New("no data in request") - } - rawBody := request.HTTPRequest.Body - if rawBody == nil { - return nil, errors.New("no data in request body") - } - defer rawBody.Close() - - requestBytes, err := io.ReadAll(io.LimitReader(rawBody, maximumRequestSize)) - if err != nil { - return nil, err - } - - if len(requestBytes) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - return requestBytes, nil - default: - return nil, fmt.Errorf("unsupported request method: %s", request.Operation) - } -} - -func logAndReturnInternalError(b *backend, err error) *logical.Response { - // Since OCSP might be a high traffic endpoint, we will log at debug level only - // any internal errors we do get. There is no way for us to return to the end-user - // errors, so we rely on the log statement to help in debugging possible - // issues in the field. - b.Logger().Debug("OCSP internal error", "error", err) - return OcspInternalErrorResponse -} - -func getOcspStatus(sc *storageContext, request *logical.Request, ocspReq *ocsp.Request) (*ocspRespInfo, error) { - revEntryRaw, err := fetchCertBySerialBigInt(sc, revokedPath, ocspReq.SerialNumber) - if err != nil { - return nil, err - } - - info := ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Good, - } - - if revEntryRaw != nil { - var revEntry revocationInfo - if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { - return nil, err - } - - info.ocspStatus = ocsp.Revoked - info.revocationTimeUTC = &revEntry.RevocationTimeUTC - info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt - } - - return &info, nil -} - -func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { - reqHash := req.HashAlgorithm - if !reqHash.Available() { - return nil, nil, x509.ErrUnsupportedAlgorithm - } - - // This will prime up issuerIds, with either the optRevokedIssuer value if set - // or if we are operating in legacy storage mode, the shim bundle id or finally - // a list of all our issuers in this mount. - issuerIds, err := lookupIssuerIds(sc, optRevokedIssuer) - if err != nil { - return nil, nil, err - } - - matchedButNoUsage := false - for _, issuerId := range issuerIds { - parsedBundle, issuer, err := getOcspIssuerParsedBundle(sc, issuerId) - if err != nil { - // A bit touchy here as if we get an ErrUnknownIssuer for an issuer id that we picked up - // from a revocation entry, we still return an ErrUnknownOcspIssuer as we can't validate - // the end-user actually meant this specific issuer's cert with serial X. - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // This skips either bad issuer ids, or root certs with no keys that we can't use. - continue - } - return nil, nil, err - } - - // Make sure the client and Vault are talking about the same issuer, otherwise - // we might have a case of a matching serial number for a different issuer which - // we should not respond back in the affirmative about. - matches, err := doesRequestMatchIssuer(parsedBundle, req) - if err != nil { - return nil, nil, err - } - - if matches { - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - matchedButNoUsage = true - // We found a matching issuer, but it's not allowed to sign the - // response, there might be another issuer that we rotated - // that will match though, so keep iterating. 
- continue - } - - return parsedBundle, issuer, nil - } - } - - if matchedButNoUsage { - // We matched an issuer but it did not have an OCSP signing usage set so bail. - return nil, nil, ErrMissingOcspUsage - } - - return nil, nil, ErrUnknownIssuer -} - -func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { - issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) - if err != nil { - switch err.(type) { - case errutil.UserError: - // Most likely the issuer id no longer exists skip it - return nil, nil, ErrUnknownIssuer - default: - return nil, nil, err - } - } - - if issuer.KeyID == "" { - // No point if the key does not exist from the issuer to use as a signer. - return nil, nil, ErrIssuerHasNoKey - } - - caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) - if err != nil { - return nil, nil, err - } - - return caBundle, issuer, nil -} - -func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuerID) ([]issuerID, error) { - if optRevokedIssuer != "" { - return []issuerID{optRevokedIssuer}, nil - } - - if sc.Backend.useLegacyBundleCaStorage() { - return []issuerID{legacyBundleShimID}, nil - } - - return sc.listIssuers() -} - -func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.Request) (bool, error) { - // issuer name hashing taken from golang.org/x/crypto/ocsp. - var pkInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(parsedBundle.Certificate.RawSubjectPublicKeyInfo, &pkInfo); err != nil { - return false, err - } - - h := req.HashAlgorithm.New() - h.Write(pkInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(parsedBundle.Certificate.RawSubject) - issuerNameHash := h.Sum(nil) - - return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil -} - -func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) { - curTime := time.Now() - duration, err := time.ParseDuration(cfg.OcspExpiry) - if err != nil { - return nil, err - } - - // x/crypto/ocsp lives outside of the standard library's crypto/x509 and includes - // ripped-off variants of many internal structures and functions. These - // lack support for PSS signatures altogether, so if we have revSigAlg - // that uses PSS, downgrade it to PKCS#1v1.5. This fixes the lack of - // support in x/ocsp, at the risk of OCSP requests failing due to lack - // of PKCS#1v1.5 (in say, PKCS#11 HSMs or GCP). - // - // Other restrictions, such as hash function selection, will still work - // however. 
-	switch revSigAlg {
-	case x509.SHA256WithRSAPSS:
-		revSigAlg = x509.SHA256WithRSA
-	case x509.SHA384WithRSAPSS:
-		revSigAlg = x509.SHA384WithRSA
-	case x509.SHA512WithRSAPSS:
-		revSigAlg = x509.SHA512WithRSA
-	}
-
-	template := ocsp.Response{
-		IssuerHash:         reqHash,
-		Status:             info.ocspStatus,
-		SerialNumber:       info.serialNumber,
-		ThisUpdate:         curTime,
-		NextUpdate:         curTime.Add(duration),
-		Certificate:        caBundle.Certificate,
-		ExtraExtensions:    []pkix.Extension{},
-		SignatureAlgorithm: revSigAlg,
-	}
-
-	if info.ocspStatus == ocsp.Revoked {
-		template.RevokedAt = *info.revocationTimeUTC
-		template.RevocationReason = ocsp.Unspecified
-	}
-
-	return ocsp.CreateResponse(caBundle.Certificate, caBundle.Certificate, template, caBundle.PrivateKey)
-}
-
-const pathOcspHelpSyn = `
-Query a certificate's revocation status through OCSP'
-`
-
-const pathOcspHelpDesc = `
-This endpoint expects DER encoded OCSP requests and returns DER encoded OCSP responses
-`
diff --git a/builtin/logical/pki/ocsp_test.go b/builtin/logical/pki/ocsp_test.go
deleted file mode 100644
index f7bf69ac5e95..000000000000
--- a/builtin/logical/pki/ocsp_test.go
+++ /dev/null
@@ -1,616 +0,0 @@
-package pki
-
-import (
-	"bytes"
-	"context"
-	"crypto"
-	"crypto/x509"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/hashicorp/vault/sdk/logical"
-	"github.com/stretchr/testify/require"
-	"golang.org/x/crypto/ocsp"
-)
-
-// If the ocsp_disabled flag is set to true in the crl configuration make sure we always
-// return an Unauthorized error back as we assume an end-user disabling the feature does
-// not want us to act as the OCSP authority and the RFC specifies this is the appropriate response.
-func TestOcsp_Disabled(t *testing.T) {
-	t.Parallel()
-	type testArgs struct {
-		reqType string
-	}
-	var tests []testArgs
-	for _, reqType := range []string{"get", "post"} {
-		tests = append(tests, testArgs{
-			reqType: reqType,
-		})
-	}
-	for _, tt := range tests {
-		localTT := tt
-		t.Run(localTT.reqType, func(t *testing.T) {
-			b, s, testEnv := setupOcspEnv(t, "rsa")
-			resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{
-				"ocsp_disable": "true",
-			})
-			requireSuccessNonNilResponse(t, resp, err)
-			resp, err = SendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1)
-			require.NoError(t, err)
-			requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body")
-			require.Equal(t, 401, resp.Data["http_status_code"])
-			require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"])
-			respDer := resp.Data["http_raw_body"].([]byte)
-
-			require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer)
-		})
-	}
-}
-
-// If we can't find the issuer within the request and have no default issuer to sign an Unknown response
-// with, return an UnauthorizedErrorResponse according to the RFC, similar to if we are disabled (lack of authority).
-// This behavior differs from CRLs when an issuer is removed from a mount.
-func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) { - t.Parallel() - - _, _, testEnv := setupOcspEnv(t, "ec") - // Create another completely empty mount so the created issuer/certificate above is unknown - b, s := CreateBackendWithStorage(t) - - resp, err := SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// If the issuer in the request does exist, but the request coming in associates the serial with the -// wrong issuer return an Unknown response back to the caller. -func TestOcsp_WrongIssuerInRequest(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Verify that requests we can't properly decode result in the correct response of MalformedRequestError -func TestOcsp_MalformedRequests(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - tests = append(tests, testArgs{ - reqType: reqType, - }) - } - for _, tt := range tests { - localTT := tt - t.Run(localTT.reqType, func(t *testing.T) { - b, s, _ := setupOcspEnv(t, "rsa") - badReq := []byte("this is a bad request") - var resp *logical.Response - var err error - switch localTT.reqType { - case "get": - resp, err = sendOcspGetRequest(b, s, badReq) - case "post": - resp, err = sendOcspPostRequest(b, s, badReq) - default: - t.Fatalf("bad request type") - } - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 400, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.MalformedRequestErrorResponse, respDer) - }) - } -} - -// Validate that we properly handle a revocation entry that contains an issuer ID that no longer exists, -// the best we can do in this use case is to respond back with the default issuer that we don't know -// the issuer that they are requesting (we can't guarantee that the client is actually requesting a serial -// from that issuer) -func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - 
"serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. - storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Validate that we properly handle an unknown issuer use-case but that the default issuer -// does not have the OCSP usage flag set, we can't do much else other than reply with an -// Unauthorized response. -func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. 
- storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Update our issuers to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer1") - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId2.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") - - // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify that if we do have a revoked certificate entry for the request, that matches an -// issuer but that issuer does not have the OcspUsage flag set that we return an Unauthorized -// response back to the caller -func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Update our issuer to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - - // Do not assume a specific ordering for usage... 
- usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if our matching issuer for a revocation entry has no key associated with it that -// we bail with an Unauthorized response. -func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Delete the key associated with our issuer - resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) - requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") - requireFieldsSetInResp(t, resp, "key_id") - keyId := resp.Data["key_id"].(keyID) - - // This is a bit naughty but allow me to delete the key... - sc := b.makeStorageContext(context.Background(), s) - issuer, err := sc.fetchIssuerById(testEnv.issuerId1) - require.NoError(t, err, "failed to get issuer from storage") - issuer.KeyID = "" - err = sc.writeIssuer(issuer) - require.NoError(t, err, "failed to write issuer update") - - resp, err = CBDelete(b, s, "key/"+keyId.String()) - requireSuccessNonNilResponse(t, resp, err, "failed deleting key") - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if for some reason an end-user has rotated an existing certificate using the same -// key so our algo matches multiple issuers and one has OCSP usage disabled. We expect that -// even if a prior issuer issued the certificate, the new matching issuer can respond and sign -// the response to the caller on its behalf. -// -// NOTE: This test is a bit at the mercy of iteration order of the issuer ids. -// -// If it becomes flaky, most likely something is wrong in the code -// and not the test. 
-func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Create a matching issuer as issuer1 with the same backing key - resp, err := CBWrite(b, s, "root/rotate/existing", map[string]interface{}{ - "key_ref": testEnv.keyId1, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "rotate issuer failed") - requireFieldsSetInResp(t, resp, "issuer_id") - rotatedCert := parseCert(t, resp.Data["certificate"].(string)) - - // Remove ocsp signing from our issuer - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - // Do not assume a specific ordering for usage... - usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get a Good response back, from the rotated cert - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - require.Equal(t, rotatedCert, ocspResp.Certificate) - - requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, rotatedCert) -} - -func TestOcsp_ValidRequests(t *testing.T) { - type caKeyConf struct { - keyType string - keyBits int - sigBits int - } - t.Parallel() - type testArgs struct { - reqType string - keyConf caKeyConf - reqHash crypto.Hash - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - for _, keyConf := range []caKeyConf{ - {"rsa", 0, 0}, - {"rsa", 0, 384}, - {"rsa", 0, 512}, - {"ec", 0, 0}, - {"ec", 521, 0}, - } { - // "ed25519" is not supported at the moment in x/crypto/ocsp - for _, requestHash := range []crypto.Hash{crypto.SHA1, crypto.SHA256, crypto.SHA384, crypto.SHA512} { - tests = append(tests, testArgs{ - reqType: reqType, - keyConf: keyConf, - reqHash: requestHash, - }) - } - } - } - for _, tt := range tests { - localTT := tt - testName := fmt.Sprintf("%s-%s-keybits-%d-sigbits-%d-reqHash-%s", localTT.reqType, localTT.keyConf.keyType, - localTT.keyConf.keyBits, - localTT.keyConf.sigBits, - localTT.reqHash) - t.Run(testName, func(t *testing.T) { - runOcspRequestTest(t, localTT.reqType, localTT.keyConf.keyType, localTT.keyConf.keyBits, - localTT.keyConf.sigBits, localTT.reqHash) - }) - } -} - -func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKeyBits int, caKeySigBits 
int, requestHash crypto.Hash) { - b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits) - - // Non-revoked cert - resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) - - // Now revoke it - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request with revoked") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response with revoked") - - require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) - - // Request status for our second issuer - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer2) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer2, ocspResp.Certificate) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) - - // Verify that our thisUpdate and nextUpdate fields are updated as expected - thisUpdate := ocspResp.ThisUpdate - nextUpdate := ocspResp.NextUpdate - require.True(t, thisUpdate.Before(nextUpdate), - fmt.Sprintf("thisUpdate %s, should have been 
before nextUpdate: %s", thisUpdate, nextUpdate)) - nextUpdateDiff := nextUpdate.Sub(thisUpdate) - expectedDiff, err := time.ParseDuration(defaultCrlConfig.OcspExpiry) - require.NoError(t, err, "failed to parse default ocsp expiry value") - require.Equal(t, expectedDiff, nextUpdateDiff, - fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", - thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer2.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2) -} - -func requireOcspSignatureAlgoForKey(t *testing.T, expected x509.SignatureAlgorithm, actual x509.SignatureAlgorithm) { - require.Equal(t, expected.String(), actual.String()) -} - -type ocspTestEnv struct { - issuer1 *x509.Certificate - issuer2 *x509.Certificate - - issuerId1 issuerID - issuerId2 issuerID - - leafCertIssuer1 *x509.Certificate - leafCertIssuer2 *x509.Certificate - - keyId1 keyID - keyId2 keyID -} - -func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { - return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 0) -} - -func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int) (*backend, logical.Storage, *ocspTestEnv) { - b, s := CreateBackendWithStorage(t) - var issuerCerts []*x509.Certificate - var leafCerts []*x509.Certificate - var issuerIds []issuerID - var keyIds []keyID - - for i := 0; i < 2; i++ { - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "key_type": keyType, - "key_bits": caKeyBits, - "signature_bits": caKeySigBits, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") - requireFieldsSetInResp(t, resp, "issuer_id", "key_id") - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) - - resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "allow_bare_domains": true, - "allow_subdomains": true, - "allowed_domains": "foobar.com", - "no_store": false, - "generate_lease": false, - "issuer_ref": issuerId, - "key_type": keyType, - }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - - resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "common_name": "test.foobar.com", - }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - requireFieldsSetInResp(t, resp, "certificate", "issuing_ca", "serial_number") - leafCert := parseCert(t, resp.Data["certificate"].(string)) - issuingCa := parseCert(t, resp.Data["issuing_ca"].(string)) - - issuerCerts = append(issuerCerts, issuingCa) - leafCerts = append(leafCerts, leafCert) - issuerIds = append(issuerIds, issuerId) - keyIds = append(keyIds, keyId) - } - - testEnv := &ocspTestEnv{ - issuerId1: issuerIds[0], - issuer1: issuerCerts[0], - leafCertIssuer1: leafCerts[0], - keyId1: keyIds[0], - - issuerId2: issuerIds[1], - issuer2: issuerCerts[1], - leafCertIssuer2: leafCerts[1], - keyId2: keyIds[1], - } - - return b, s, testEnv -} - -func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { - ocspRequest := generateRequest(t, requestHash, cert, issuer) - - switch strings.ToLower(getOrPost) { - case "get": - return 
sendOcspGetRequest(b, s, ocspRequest) - case "post": - return sendOcspPostRequest(b, s, ocspRequest) - default: - t.Fatalf("unsupported value for SendOcspRequest getOrPost arg: %s", getOrPost) - } - return nil, nil -} - -func sendOcspGetRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - urlEncoded := base64.StdEncoding.EncodeToString(ocspRequest) - return CBRead(b, s, "ocsp/"+urlEncoded) -} - -func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - reader := io.NopCloser(bytes.NewReader(ocspRequest)) - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "ocsp", - Storage: s, - MountPoint: "pki/", - HTTPRequest: &http.Request{ - Body: reader, - }, - }) - - return resp, err -} - -func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { - opts := &ocsp.RequestOptions{Hash: requestHash} - ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) - require.NoError(t, err, "Failed generating OCSP request") - return ocspRequestDer -} - -func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { - err := ocspResp.CheckSignatureFrom(issuer) - require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) -} diff --git a/builtin/logical/pki/parsing/certificate.go b/builtin/logical/pki/parsing/certificate.go new file mode 100644 index 000000000000..c8f68b98e933 --- /dev/null +++ b/builtin/logical/pki/parsing/certificate.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "crypto/x509" + "fmt" + "math/big" + "strings" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +// NormalizeSerialForStorageFromBigInt given a serial number, format it as a string +// that is safe to store within a filesystem +func NormalizeSerialForStorageFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), "-")) +} + +// NormalizeSerialForStorage given a serial number with ':' characters, convert +// them to '-' which is safe to store within filesystems +func NormalizeSerialForStorage(serial string) string { + return strings.ReplaceAll(strings.ToLower(serial), ":", "-") +} + +func ParseCertificateFromString(pemCert string) (*x509.Certificate, error) { + return ParseCertificateFromBytes([]byte(pemCert)) +} + +func ParseCertificateFromBytes(certBytes []byte) (*x509.Certificate, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + return cert, nil +} + +func ParseCertificatesFromString(pemCerts string) ([]*x509.Certificate, error) { + return ParseCertificatesFromBytes([]byte(pemCerts)) +} + +func ParseCertificatesFromBytes(certBytes []byte) ([]*x509.Certificate, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + cert, err := x509.ParseCertificates(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate: %w", err) + } + + return cert, nil +} + +func ParseKeyUsages(input []string) int { + var parsedKeyUsages x509.KeyUsage + for _, k := range input { + switch strings.ToLower(strings.TrimSpace(k)) { 
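+		// Editorial note (illustrative, not part of the upstream change): each case
+		// below ORs the matching x509.KeyUsage bit into the accumulator, so e.g.
+		//
+		//	ParseKeyUsages([]string{"DigitalSignature", "CertSign"})
+		//
+		// yields int(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign); names
+		// are lowercased and trimmed first, and unrecognized names are ignored.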
+ case "digitalsignature": + parsedKeyUsages |= x509.KeyUsageDigitalSignature + case "contentcommitment": + parsedKeyUsages |= x509.KeyUsageContentCommitment + case "keyencipherment": + parsedKeyUsages |= x509.KeyUsageKeyEncipherment + case "dataencipherment": + parsedKeyUsages |= x509.KeyUsageDataEncipherment + case "keyagreement": + parsedKeyUsages |= x509.KeyUsageKeyAgreement + case "certsign": + parsedKeyUsages |= x509.KeyUsageCertSign + case "crlsign": + parsedKeyUsages |= x509.KeyUsageCRLSign + case "encipheronly": + parsedKeyUsages |= x509.KeyUsageEncipherOnly + case "decipheronly": + parsedKeyUsages |= x509.KeyUsageDecipherOnly + } + } + + return int(parsedKeyUsages) +} diff --git a/builtin/logical/pki/parsing/csrs.go b/builtin/logical/pki/parsing/csrs.go new file mode 100644 index 000000000000..34d6c11be31a --- /dev/null +++ b/builtin/logical/pki/parsing/csrs.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "crypto/x509" + "fmt" +) + +func ParseCertificateRequestFromString(pemCert string) (*x509.CertificateRequest, error) { + return ParseCertificateRequestFromBytes([]byte(pemCert)) +} + +func ParseCertificateRequestFromBytes(certBytes []byte) (*x509.CertificateRequest, error) { + block, err := DecodePem(certBytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate request: %w", err) + } + + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse certificate request: %w", err) + } + + return csr, nil +} diff --git a/builtin/logical/pki/parsing/pem.go b/builtin/logical/pki/parsing/pem.go new file mode 100644 index 000000000000..aa5513ab17c4 --- /dev/null +++ b/builtin/logical/pki/parsing/pem.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package parsing + +import ( + "encoding/pem" + "errors" + "strings" +) + +func DecodePem(certBytes []byte) (*pem.Block, error) { + block, extra := pem.Decode(certBytes) + if block == nil { + return nil, errors.New("invalid PEM") + } + if len(strings.TrimSpace(string(extra))) > 0 { + return nil, errors.New("trailing PEM data") + } + return block, nil +} diff --git a/builtin/logical/pki/path_acme_account.go b/builtin/logical/pki/path_acme_account.go new file mode 100644 index 000000000000..20804a71735a --- /dev/null +++ b/builtin/logical/pki/path_acme_account.go @@ -0,0 +1,481 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "net/http" + "path" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func uuidNameRegex(name string) string { + return fmt.Sprintf("(?P<%s>[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}?)", name) +} + +func pathAcmeNewAccount(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewAccount(b, baseUrl+"/new-account", opts) +} + +func pathAcmeUpdateAccount(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewAccount(b, baseUrl+"/account/"+uuidNameRegex("kid"), opts) +} + +func addFieldsForACMEPath(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { + if strings.Contains(pattern, framework.GenericNameRegex("role")) { + fields["role"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The desired role for the acme request`, + Required: true, + } + } + if strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) { + fields[issuerRefParam] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Reference to an existing issuer name or issuer id`, + Required: true, + } + } + if strings.Contains(pattern, framework.GenericNameRegex("policy")) { + fields["policy"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The policy name to pass through to the CIEPS service`, + Required: true, + } + } + + return fields +} + +func addFieldsForACMERequest(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["protected"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'protected' value", + Required: false, + } + + fields["payload"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'payload' value", + Required: false, + } + + fields["signature"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'signature' value", + Required: false, + } + + return fields +} + +func addFieldsForACMEKidRequest(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { + if strings.Contains(pattern, uuidNameRegex("kid")) { + fields["kid"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The key identifier provided by the CA`, + Required: true, + } + } + + return fields +} + +func patternAcmeNewAccount(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEKidRequest(fields, pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeParsedWrapper(opts, b.acmeNewAccountHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeNewAccountHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) { + // Parameters + var ok bool + var onlyReturnExisting bool + var contacts []string + var termsOfServiceAgreed bool + 
var status string
+	var eabData map[string]interface{}
+
+	rawContact, present := data["contact"]
+	if present {
+		listContact, ok := rawContact.([]interface{})
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'contact': %w", rawContact, ErrMalformed)
+		}
+
+		for index, singleContact := range listContact {
+			contact, ok := singleContact.(string)
+			if !ok {
+				return nil, fmt.Errorf("invalid type (%T) for field 'contact' item %d: %w", singleContact, index, ErrMalformed)
+			}
+
+			contacts = append(contacts, contact)
+		}
+	}
+
+	rawTermsOfServiceAgreed, present := data["termsOfServiceAgreed"]
+	if present {
+		termsOfServiceAgreed, ok = rawTermsOfServiceAgreed.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'termsOfServiceAgreed': %w", rawTermsOfServiceAgreed, ErrMalformed)
+		}
+	}
+
+	rawOnlyReturnExisting, present := data["onlyReturnExisting"]
+	if present {
+		onlyReturnExisting, ok = rawOnlyReturnExisting.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'onlyReturnExisting': %w", rawOnlyReturnExisting, ErrMalformed)
+		}
+	}
+
+	// Per RFC 8555 Section 7.3.6 (account deactivation), we handle deactivation within our update API.
+	rawStatus, present := data["status"]
+	if present {
+		status, ok = rawStatus.(string)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'status': %w", rawStatus, ErrMalformed)
+		}
+	}
+
+	if eabDataRaw, ok := data["externalAccountBinding"]; ok {
+		eabData, ok = eabDataRaw.(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf("%w: externalAccountBinding field was unparseable", ErrMalformed)
+		}
+	}
+
+	// We have two paths here: search or create.
+	if onlyReturnExisting {
+		return b.acmeAccountSearchHandler(acmeCtx, userCtx)
+	}
+
+	// Pass /new-account API calls through to this specific handler, as its requirements differ
+	// from those of the account update handler.
+	if strings.HasSuffix(r.Path, "/new-account") {
+		return b.acmeNewAccountCreateHandler(acmeCtx, userCtx, contacts, termsOfServiceAgreed, r, eabData)
+	}
+
+	return b.acmeNewAccountUpdateHandler(acmeCtx, userCtx, contacts, status, eabData)
+}
+
+func formatNewAccountResponse(acmeCtx *acmeContext, acct *acmeAccount, eabData map[string]interface{}) *logical.Response {
+	resp := formatAccountResponse(acmeCtx, acct)
+
+	// Per RFC 8555 Section 7.1.2. 
Account Objects + // Including this field in a newAccount request indicates approval by + // the holder of an existing non-ACME account to bind that account to + // this ACME account + if acct.Eab != nil && len(eabData) != 0 { + resp.Data["externalAccountBinding"] = eabData + } + + return resp +} + +func formatAccountResponse(acmeCtx *acmeContext, acct *acmeAccount) *logical.Response { + location := acmeCtx.baseUrl.String() + "account/" + acct.KeyId + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": acct.Status, + "orders": location + "/orders", + }, + Headers: map[string][]string{ + "Location": {location}, + }, + } + + if len(acct.Contact) > 0 { + resp.Data["contact"] = acct.Contact + } + + return resp +} + +func (b *backend) acmeAccountSearchHandler(acmeCtx *acmeContext, userCtx *jwsCtx) (*logical.Response, error) { + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) + } + + account, err := b.GetAcmeState().LoadAccountByKey(acmeCtx, thumbprint) + if err != nil { + return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) + } + + if account != nil { + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { + return nil, err + } + return formatAccountResponse(acmeCtx, account), nil + } + + // Per RFC 8555 Section 7.3.1. Finding an Account URL Given a Key: + // + // > If a client sends such a request and an account does not exist, + // > then the server MUST return an error response with status code + // > 400 (Bad Request) and type "urn:ietf:params:acme:error:accountDoesNotExist". + return nil, fmt.Errorf("An account with this key does not exist: %w", ErrAccountDoesNotExist) +} + +func (b *backend) acmeNewAccountCreateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, termsOfServiceAgreed bool, r *logical.Request, eabData map[string]interface{}) (*logical.Response, error) { + if userCtx.Existing { + return nil, fmt.Errorf("cannot submit to newAccount with 'kid': %w", ErrMalformed) + } + + // If the account already exists, return the existing one. + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) + } + + accountByKey, err := b.GetAcmeState().LoadAccountByKey(acmeCtx, thumbprint) + if err != nil { + return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) + } + + if accountByKey != nil { + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(accountByKey); err != nil { + return nil, err + } + return formatAccountResponse(acmeCtx, accountByKey), nil + } + + var eab *eabType + if len(eabData) != 0 { + eab, err = verifyEabPayload(b.GetAcmeState(), acmeCtx, userCtx, r.Path, eabData) + if err != nil { + return nil, err + } + } + + // Verify against our EAB policy + if err = acmeCtx.eabPolicy.EnforceForNewAccount(eab); err != nil { + return nil, err + } + + // TODO: Limit this only when ToS are required or set by the operator, since we don't have a + // ToS URL in the directory at the moment, we can not enforce this. 
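+	// Editorial note: per RFC 8555, agreement only makes sense when the directory's
+	// "meta" object advertises a "termsOfService" URL (Section 7.1.1); this
+	// directory (see path_acme_directory.go) publishes no such URL, so the
+	// commented-out check below stays disabled.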
+	//if !termsOfServiceAgreed {
+	//	return nil, fmt.Errorf("terms of service not agreed to: %w", ErrUserActionRequired)
+	//}
+
+	if eab != nil {
+		// We delete the EAB to prevent future re-use once it is associated with an account. Worst
+		// case, if account creation fails we have simply consumed the EAB; the caller can create
+		// another and retry.
+		wasDeleted, err := b.GetAcmeState().DeleteEab(acmeCtx.sc, eab.KeyID)
+		if err != nil {
+			return nil, fmt.Errorf("failed to delete eab reference: %w", err)
+		}
+
+		if !wasDeleted {
+			// Something consumed our EAB before we did; bail.
+			return nil, fmt.Errorf("eab was already used: %w", ErrUnauthorized)
+		}
+	}
+
+	b.acmeAccountLock.RLock() // Prevents account creation and tidy from interfering with each other
+	defer b.acmeAccountLock.RUnlock()
+
+	accountByKid, err := b.GetAcmeState().CreateAccount(acmeCtx, userCtx, contact, termsOfServiceAgreed, eab)
+	if err != nil {
+		if eab != nil {
+			return nil, fmt.Errorf("failed to create account: %w; the EAB key used for this request has been deleted as a result of this operation; fetch a new EAB key before retrying", err)
+		}
+		return nil, fmt.Errorf("failed to create account: %w", err)
+	}
+
+	resp := formatNewAccountResponse(acmeCtx, accountByKid, eabData)
+
+	// Per RFC 8555 Section 7.3. Account Management:
+	//
+	// > The server returns this account object in a 201 (Created) response,
+	// > with the account URL in a Location header field.
+	resp.Data[logical.HTTPStatusCode] = http.StatusCreated
+	return resp, nil
+}
+
+func (b *backend) acmeNewAccountUpdateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, status string, eabData map[string]interface{}) (*logical.Response, error) {
+	if !userCtx.Existing {
+		return nil, fmt.Errorf("cannot submit to account updates without a 'kid': %w", ErrMalformed)
+	}
+
+	if len(eabData) != 0 {
+		return nil, fmt.Errorf("%w: not allowed to update EAB data in accounts", ErrMalformed)
+	}
+
+	account, err := b.GetAcmeState().LoadAccount(acmeCtx, userCtx.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+
+	if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil {
+		return nil, err
+	}
+
+	// Per RFC 8555 Section 7.3.6 (account deactivation), if the account was previously deactivated,
+	// we return unauthorized; the ACME RFC provides no way to reactivate an account.
+	if account.Status != AccountStatusValid {
+		// Treating "revoked" and "deactivated" as the same here.
+		return nil, ErrUnauthorized
+	}
+
+	shouldUpdate := false
+	// Check whether we should update; we don't care about ordering.
+	if !strutil.EquivalentSlices(account.Contact, contact) {
+		shouldUpdate = true
+		account.Contact = contact
+	}
+
+	// Check whether account deactivation was requested.
+	// RFC 8555 Section 7.3.6. Account Deactivation
+	if string(AccountStatusDeactivated) == status {
+		shouldUpdate = true
+		// TODO: This should cancel any ongoing operations (do not revoke certs);
+		// perhaps we should delete this account here?
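+		// For reference (editorial note): per RFC 8555 Section 7.3.6, a client asks
+		// for deactivation by POSTing exactly
+		//
+		//	{"status": "deactivated"}
+		//
+		// to its account URL; the comparison above matches that decoded field.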
+ account.Status = AccountStatusDeactivated + account.AccountRevokedDate = time.Now() + } + + if shouldUpdate { + err = b.GetAcmeState().UpdateAccount(acmeCtx.sc, account) + if err != nil { + return nil, fmt.Errorf("failed to update account: %w", err) + } + } + + resp := formatAccountResponse(acmeCtx, account) + return resp, nil +} + +func (b *backend) tidyAcmeAccountByThumbprint(as *acmeState, sc *storageContext, keyThumbprint string, certTidyBuffer, accountTidyBuffer time.Duration) error { + thumbprintEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return fmt.Errorf("error retrieving thumbprint entry %v, unable to find corresponding account entry: %w", keyThumbprint, err) + } + if thumbprintEntry == nil { + return fmt.Errorf("empty thumbprint entry %v, unable to find corresponding account entry", keyThumbprint) + } + + var thumbprint acmeThumbprint + err = thumbprintEntry.DecodeJSON(&thumbprint) + if err != nil { + return fmt.Errorf("unable to decode thumbprint entry %v to find account entry: %w", keyThumbprint, err) + } + + if len(thumbprint.Kid) == 0 { + return fmt.Errorf("unable to find account entry: empty kid within thumbprint entry: %s", keyThumbprint) + } + + // Now Get the Account: + accountEntry, err := sc.Storage.Get(sc.Context, acmeAccountPrefix+thumbprint.Kid) + if err != nil { + return err + } + if accountEntry == nil { + // We delete the Thumbprint Associated with the Account, and we are done + err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return err + } + b.tidyStatusIncDeletedAcmeAccountCount() + return nil + } + + var account acmeAccount + err = accountEntry.DecodeJSON(&account) + if err != nil { + return err + } + account.KeyId = thumbprint.Kid + + // Tidy Orders On the Account + orderIds, err := as.ListOrderIds(sc, thumbprint.Kid) + if err != nil { + return err + } + allOrdersTidied := true + maxCertExpiryUpdated := false + for _, orderId := range orderIds { + wasTidied, orderExpiry, err := b.acmeTidyOrder(sc, thumbprint.Kid, getOrderPath(thumbprint.Kid, orderId), certTidyBuffer) + if err != nil { + return err + } + if !wasTidied { + allOrdersTidied = false + } + + if !orderExpiry.IsZero() && account.MaxCertExpiry.Before(orderExpiry) { + account.MaxCertExpiry = orderExpiry + maxCertExpiryUpdated = true + } + } + + now := time.Now() + if allOrdersTidied && + now.After(account.AccountCreatedDate.Add(accountTidyBuffer)) && + now.After(account.MaxCertExpiry.Add(accountTidyBuffer)) { + // Tidy this account + // If it is Revoked or Deactivated: + if (account.Status == AccountStatusRevoked || account.Status == AccountStatusDeactivated) && now.After(account.AccountRevokedDate.Add(accountTidyBuffer)) { + // We Delete the Account Associated with this Thumbprint: + err = sc.Storage.Delete(sc.Context, path.Join(acmeAccountPrefix, thumbprint.Kid)) + if err != nil { + return err + } + + // Now we delete the Thumbprint Associated with the Account: + err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return err + } + b.tidyStatusIncDeletedAcmeAccountCount() + } else if account.Status == AccountStatusValid { + // Revoke This Account + account.AccountRevokedDate = now + account.Status = AccountStatusRevoked + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + b.tidyStatusIncRevAcmeAccountCount() + } + } + + // Only update the account if we modified the max cert expiry values and the account is 
still valid, + // to prevent us from adding back a deleted account or not re-writing the revoked account that was + // already written above. + if maxCertExpiryUpdated && account.Status == AccountStatusValid { + // Update our expiry time we previously setup. + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + } + + return nil +} diff --git a/builtin/logical/pki/path_acme_authorizations.go b/builtin/logical/pki/path_acme_authorizations.go new file mode 100644 index 000000000000..983c55fee4e5 --- /dev/null +++ b/builtin/logical/pki/path_acme_authorizations.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeAuthorization(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeAuthorization(b, baseUrl+"/authorization/"+framework.MatchAllRegex("auth_id"), opts) +} + +func addFieldsForACMEAuthorization(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + return fields +} + +func patternAcmeAuthorization(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEAuthorization(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeAuthorizationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeAuthorizationHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + authz, err := b.GetAcmeState().LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + var status string + rawStatus, haveStatus := data["status"] + if haveStatus { + var ok bool + status, ok = rawStatus.(string) + if !ok { + return nil, fmt.Errorf("bad type (%T) for value 'status': %w", rawStatus, ErrMalformed) + } + } + + if len(data) == 0 { + return b.acmeAuthorizationFetchHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + if haveStatus && status == "deactivated" { + return b.acmeAuthorizationDeactivateHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + return nil, ErrMalformed +} + +func (b *backend) acmeAuthorizationFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} + +func (b *backend) acmeAuthorizationDeactivateHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + if authz.Status != ACMEAuthorizationPending && authz.Status != ACMEAuthorizationValid { + return 
nil, fmt.Errorf("unable to deactivate authorization in '%v' status: %w", authz.Status, ErrMalformed) + } + + authz.Status = ACMEAuthorizationDeactivated + for _, challenge := range authz.Challenges { + challenge.Status = ACMEChallengeInvalid + } + + if err := b.GetAcmeState().SaveAuthorization(acmeCtx, authz); err != nil { + return nil, fmt.Errorf("error saving deactivated authorization: %w", err) + } + + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} diff --git a/builtin/logical/pki/path_acme_challenges.go b/builtin/logical/pki/path_acme_challenges.go new file mode 100644 index 000000000000..eed8d1ea41f7 --- /dev/null +++ b/builtin/logical/pki/path_acme_challenges.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeChallenge(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeChallenge(b, baseUrl+ + "/challenge/"+framework.MatchAllRegex("auth_id")+"/"+framework.MatchAllRegex("challenge_type"), opts) +} + +func addFieldsForACMEChallenge(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + fields["challenge_type"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME challenge type", + Required: true, + } + + return fields +} + +func patternAcmeChallenge(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEChallenge(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeChallengeHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeChallengeHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + challengeType := fields.Get("challenge_type").(string) + + authz, err := b.GetAcmeState().LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + return b.acmeChallengeFetchHandler(acmeCtx, r, fields, userCtx, data, authz, challengeType) +} + +func (b *backend) acmeChallengeFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization, challengeType string) (*logical.Response, error) { + var challenge *ACMEChallenge + for _, c := range authz.Challenges { + if string(c.Type) == challengeType { + challenge = c + break + } + } + + if challenge == nil { + return nil, fmt.Errorf("unknown challenge of type '%v' in authorization: %w", challengeType, ErrMalformed) + } + + // Per RFC 8555 Section 7.5.1. 
Responding to Challenges:
+	//
+	// > The client indicates to the server that it is ready for the challenge
+	// > validation by sending an empty JSON body ("{}") carried in a POST
+	// > request to the challenge URL (not the authorization URL).
+	if len(data) > 0 {
+		return nil, fmt.Errorf("unexpected request parameters: %w", ErrMalformed)
+	}
+
+	// If data is nil, this was a POST-as-GET request, so just return the current challenge without
+	// accepting it; otherwise we most likely got a "{}" payload, and we should accept the challenge.
+	if data != nil {
+		thumbprint, err := userCtx.GetKeyThumbprint()
+		if err != nil {
+			return nil, fmt.Errorf("failed to get thumbprint for key: %w", err)
+		}
+
+		if err := b.GetAcmeState().validator.AcceptChallenge(acmeCtx.sc, userCtx.Kid, authz, challenge, thumbprint); err != nil {
+			return nil, fmt.Errorf("error submitting challenge for validation: %w", err)
+		}
+	}
+
+	return &logical.Response{
+		Data: challenge.NetworkMarshal(acmeCtx, authz.Id),
+
+		// Per RFC 8555 Section 7.1. Resources:
+		//
+		// > The "up" link relation is used with challenge resources to indicate
+		// > the authorization resource to which a challenge belongs.
+		Headers: map[string][]string{
+			"Link": {fmt.Sprintf("<%s>;rel=\"up\"", buildAuthorizationUrl(acmeCtx, authz.Id))},
+		},
+	}, nil
+}
diff --git a/builtin/logical/pki/path_acme_directory.go b/builtin/logical/pki/path_acme_directory.go
new file mode 100644
index 000000000000..ea49a44ee649
--- /dev/null
+++ b/builtin/logical/pki/path_acme_directory.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	pathAcmeHelpSync = `An endpoint implementing the standard ACME protocol`
+	pathAcmeHelpDesc = `This API endpoint implements a subset of the ACME protocol
+ defined in RFC 8555, with its own authentication and argument syntax that
+ does not follow conventional Vault operations. 
An ACME client tool or library
+ should be used to interact with these endpoints.`
+)
+
+func pathAcmeDirectory(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path {
+	return patternAcmeDirectory(b, baseUrl+"/directory", opts)
+}
+
+func patternAcmeDirectory(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path {
+	fields := map[string]*framework.FieldSchema{}
+	addFieldsForACMEPath(fields, pattern)
+
+	return &framework.Path{
+		Pattern: pattern,
+		Fields:  fields,
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback:                    b.acmeWrapper(opts, b.acmeDirectoryHandler),
+				ForwardPerformanceSecondary: false,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathAcmeHelpSync,
+		HelpDescription: pathAcmeHelpDesc,
+	}
+}
+
+func (b *backend) acmeDirectoryHandler(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	rawBody, err := json.Marshal(map[string]interface{}{
+		"newNonce":   acmeCtx.baseUrl.JoinPath("new-nonce").String(),
+		"newAccount": acmeCtx.baseUrl.JoinPath("new-account").String(),
+		"newOrder":   acmeCtx.baseUrl.JoinPath("new-order").String(),
+		"revokeCert": acmeCtx.baseUrl.JoinPath("revoke-cert").String(),
+		"keyChange":  acmeCtx.baseUrl.JoinPath("key-change").String(),
+		// This is purposefully missing newAuthz, as we don't support pre-authorization.
+		"meta": map[string]interface{}{
+			"externalAccountRequired": acmeCtx.eabPolicy.IsExternalAccountRequired(),
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed encoding response: %w", err)
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			logical.HTTPContentType: "application/json",
+			logical.HTTPStatusCode:  http.StatusOK,
+			logical.HTTPRawBody:     rawBody,
+		},
+	}, nil
+}
diff --git a/builtin/logical/pki/path_acme_eab.go b/builtin/logical/pki/path_acme_eab.go
new file mode 100644
index 000000000000..fa026a1c1892
--- /dev/null
+++ b/builtin/logical/pki/path_acme_eab.go
@@ -0,0 +1,295 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package pki
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var decodedTokenPrefix = mustBase64Decode("vault-eab-0-")
+
+func mustBase64Decode(s string) []byte {
+	bytes, err := base64.RawURLEncoding.DecodeString(s)
+	if err != nil {
+		panic(fmt.Sprintf("Token prefix value: %s failed decoding: %v", s, err))
+	}
+
+	// Must be divisible by 3, otherwise our prefix will not be honored properly.
+	if len(bytes)%3 != 0 {
+		panic(fmt.Sprintf("Token prefix value: %s is not divisible by 3, will not prefix properly", s))
+	}
+	return bytes
+}
+
+/*
+ * This file, unlike the other path_acme_*.go files, defines Vault APIs to manage
+ * ACME External Account Bindings; it does not provide any APIs that an ACME
+ * client would use. 
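+ *
+ * Illustrative flow (editorial note; exact mount and directory paths depend on
+ * deployment): an operator generates a binding with something like
+ * `vault write -f pki/acme/new-eab`, then hands the returned `id` and `key`
+ * values to the ACME client (for certbot, via the --eab-kid and --eab-hmac-key
+ * flags) before that client registers its account.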
+ */ +func pathAcmeEabList(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/?$", + Fields: map[string]*framework.FieldSchema{}, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathAcmeListEab, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "list-eab-keys", + Description: "List all eab key identifiers yet to be used.", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of unused eab keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `EAB details keyed by the eab key id`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "list external account bindings to be used for ACME", + HelpDescription: `list identifiers that have been generated but yet to be used.`, + } +} + +func pathAcmeNewEab(b *backend, baseUrl string) *framework.Path { + return patternAcmeNewEab(b, baseUrl+"/new-eab") +} + +func patternAcmeNewEab(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + opSuffix := getAcmeOperationSuffix(pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathAcmeCreateEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate-eab-key", + OperationSuffix: opSuffix, + Description: "Generate an ACME EAB token for a directory", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: `The EAB key identifier`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The EAB key type`, + Required: true, + }, + "key": { + Type: framework.TypeString, + Description: `The EAB hmac key`, + Required: true, + }, + "acme_directory": { + Type: framework.TypeString, + Description: `The ACME directory to which the key belongs`, + Required: true, + }, + "created_on": { + Type: framework.TypeTime, + Description: `An RFC3339 formatted date time when the EAB token was created`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "Generate external account bindings to be used for ACME", + HelpDescription: `Generate single use id/key pairs to be used for ACME EAB.`, + } +} + +func pathAcmeEabDelete(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/" + uuidNameRegex("key_id"), + + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: "EAB key identifier", + Required: true, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathAcmeDeleteEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "delete-eab-key", + Description: "Delete an unused EAB token", + }, + }, + }, + + HelpSynopsis: "Delete an external account binding id prior to its use within 
an ACME account", + HelpDescription: `Allows an operator to delete an external account binding, +before its bound to a new ACME account. If the identifier provided does not exist or +was already consumed by an ACME account a successful response is returned along with +a warning that it did not exist.`, + } +} + +type eabType struct { + KeyID string `json:"-"` + KeyType string `json:"key-type"` + PrivateBytes []byte `json:"private-bytes"` + AcmeDirectory string `json:"acme-directory"` + CreatedOn time.Time `json:"created-on"` +} + +func (b *backend) pathAcmeListEab(ctx context.Context, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, r.Storage) + + acmeState := b.GetAcmeState() + eabIds, err := acmeState.ListEabIds(sc) + if err != nil { + return nil, err + } + + var warnings []string + var keyIds []string + keyInfos := map[string]interface{}{} + + for _, eabKey := range eabIds { + eab, err := acmeState.LoadEab(sc, eabKey) + if err != nil { + warnings = append(warnings, fmt.Sprintf("failed loading eab entry %s: %v", eabKey, err)) + continue + } + + keyIds = append(keyIds, eab.KeyID) + keyInfos[eab.KeyID] = map[string]interface{}{ + "key_type": eab.KeyType, + "acme_directory": path.Join(eab.AcmeDirectory, "directory"), + "created_on": eab.CreatedOn.Format(time.RFC3339), + } + } + + resp := logical.ListResponseWithInfo(keyIds, keyInfos) + for _, warning := range warnings { + resp.AddWarning(warning) + } + return resp, nil +} + +func (b *backend) pathAcmeCreateEab(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) { + kid := genUuid() + size := 32 + bytes, err := uuid.GenerateRandomBytesWithReader(size, rand.Reader) + if err != nil { + return nil, fmt.Errorf("failed generating eab key: %w", err) + } + + acmeDirectory, err := getAcmeDirectory(r) + if err != nil { + return nil, err + } + + eab := &eabType{ + KeyID: kid, + KeyType: "hs", + PrivateBytes: append(decodedTokenPrefix, bytes...), // we do this to avoid generating tokens that start with - + AcmeDirectory: acmeDirectory, + CreatedOn: time.Now(), + } + + sc := b.makeStorageContext(ctx, r.Storage) + err = b.GetAcmeState().SaveEab(sc, eab) + if err != nil { + return nil, fmt.Errorf("failed saving generated eab: %w", err) + } + + encodedKey := base64.RawURLEncoding.EncodeToString(eab.PrivateBytes) + + return &logical.Response{ + Data: map[string]interface{}{ + "id": eab.KeyID, + "key_type": eab.KeyType, + "key": encodedKey, + "acme_directory": path.Join(eab.AcmeDirectory, "directory"), + "created_on": eab.CreatedOn.Format(time.RFC3339), + }, + }, nil +} + +func (b *backend) pathAcmeDeleteEab(ctx context.Context, r *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, r.Storage) + keyId := d.Get("key_id").(string) + + _, err := uuid.ParseUUID(keyId) + if err != nil { + return nil, fmt.Errorf("badly formatted key_id field") + } + + deleted, err := b.GetAcmeState().DeleteEab(sc, keyId) + if err != nil { + return nil, fmt.Errorf("failed deleting key id: %w", err) + } + + resp := &logical.Response{} + if !deleted { + resp.AddWarning("No key id found with id: " + keyId) + } + return resp, nil +} + +// getAcmeOperationSuffix used mainly to compute the OpenAPI spec suffix value to distinguish +// different versions of ACME Vault APIs based on directory paths +func getAcmeOperationSuffix(pattern string) string { + hasRole := strings.Contains(pattern, framework.GenericNameRegex("role")) + hasIssuer := 
strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) + + switch { + case hasRole && hasIssuer: + return "for-issuer-and-role" + case hasRole: + return "for-role" + case hasIssuer: + return "for-issuer" + default: + return "" + } +} diff --git a/builtin/logical/pki/path_acme_nonce.go b/builtin/logical/pki/path_acme_nonce.go new file mode 100644 index 000000000000..7c8d5d407bb9 --- /dev/null +++ b/builtin/logical/pki/path_acme_nonce.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeNonce(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNonce(b, baseUrl+"/new-nonce", opts) +} + +func patternAcmeNonce(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.HeaderOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(opts, b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(opts, b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeNonceHandler(ctx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + nonce, _, err := b.GetAcmeState().GetNonce() + if err != nil { + return nil, err + } + + // Header operations return 200, GET return 204. + httpStatus := http.StatusOK + if r.Operation == logical.ReadOperation { + httpStatus = http.StatusNoContent + } + + return &logical.Response{ + Headers: map[string][]string{ + "Cache-Control": {"no-store"}, + "Replay-Nonce": {nonce}, + "Link": genAcmeLinkHeader(ctx), + }, + Data: map[string]interface{}{ + logical.HTTPStatusCode: httpStatus, + // Get around Vault limitation of requiring a body set if the status is not http.StatusNoContent + // for our HEAD request responses. + logical.HTTPContentType: "", + }, + }, nil +} + +func genAcmeLinkHeader(ctx *acmeContext) []string { + path := fmt.Sprintf("<%s>;rel=\"index\"", ctx.baseUrl.JoinPath("directory").String()) + return []string{path} +} diff --git a/builtin/logical/pki/path_acme_order.go b/builtin/logical/pki/path_acme_order.go new file mode 100644 index 000000000000..7df867a5ee88 --- /dev/null +++ b/builtin/logical/pki/path_acme_order.go @@ -0,0 +1,1090 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "net" + "net/http" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/helper/strutil" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/net/idna" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" +) + +var maxAcmeCertTTL = 90 * (24 * time.Hour) + +func pathAcmeListOrders(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeListOrders(b, baseUrl+"/orders", opts) +} + +func pathAcmeGetOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeGetOrder(b, baseUrl+"/order/"+uuidNameRegex("order_id"), opts) +} + +func pathAcmeNewOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeNewOrder(b, baseUrl+"/new-order", opts) +} + +func pathAcmeFinalizeOrder(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeFinalizeOrder(b, baseUrl+"/order/"+uuidNameRegex("order_id")+"/finalize", opts) +} + +func pathAcmeFetchOrderCert(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeFetchOrderCert(b, baseUrl+"/order/"+uuidNameRegex("order_id")+"/cert", opts) +} + +func patternAcmeNewOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeNewOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeListOrders(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeListOrdersHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeGetOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeGetOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFinalizeOrder(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + 
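+	// Editorial note: the helpers here assemble the shared ACME schema; the request
+	// helper below contributes the JWS envelope fields (protected, payload,
+	// signature) and the order helper adds the order_id path field.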
addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeFinalizeOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFetchOrderCert(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(opts, b.acmeFetchCertOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func addFieldsForACMEOrder(fields map[string]*framework.FieldSchema) { + fields["order_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The ACME order identifier to fetch`, + Required: true, + } +} + +func (b *backend) acmeFetchCertOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderValid { + return nil, fmt.Errorf("%w: order is status %s, needs to be in valid state", ErrOrderNotReady, order.Status) + } + + if len(order.IssuerId) == 0 || len(order.CertificateSerialNumber) == 0 { + return nil, fmt.Errorf("order is missing required fields to load certificate") + } + + certEntry, err := fetchCertBySerial(ac.sc, "certs/", order.CertificateSerialNumber) + if err != nil { + return nil, fmt.Errorf("failed reading certificate %s from storage: %w", order.CertificateSerialNumber, err) + } + if certEntry == nil || len(certEntry.Value) == 0 { + return nil, fmt.Errorf("missing certificate %s from storage", order.CertificateSerialNumber) + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("failed parsing certificate %s: %w", order.CertificateSerialNumber, err) + } + + issuer, err := ac.sc.fetchIssuerById(order.IssuerId) + if err != nil { + return nil, fmt.Errorf("failed loading certificate issuer %s from storage: %w", order.IssuerId, err) + } + + allPems, err := func() ([]byte, error) { + leafPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }) + + chains := []byte(issuer.Certificate) + for _, chainVal := range issuer.CAChain { + if chainVal == issuer.Certificate { + continue + } + chains = append(chains, []byte(chainVal)...) 
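+			// Editorial note: the issuer's own PEM was skipped just above, so only
+			// the remaining chain entries accumulate here; they are appended after
+			// the leaf to form the application/pem-certificate-chain body.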
+ } + + return append(leafPEM, chains...), nil + }() + if err != nil { + return nil, fmt.Errorf("failed encoding certificate ca chain: %w", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "application/pem-certificate-chain", + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: allPems, + }, + }, nil +} + +func (b *backend) acmeFinalizeOrderHandler(ac *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + csr, err := parseCsrFromFinalize(data) + if err != nil { + return nil, err + } + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderReady { + return nil, fmt.Errorf("%w: order is status %s, needs to be in ready state", ErrOrderNotReady, order.Status) + } + + now := time.Now() + if !order.Expires.IsZero() && now.After(order.Expires) { + return nil, fmt.Errorf("%w: order %s is expired", ErrMalformed, orderId) + } + + if err = validateCsrMatchesOrder(csr, order); err != nil { + return nil, err + } + + if err = validateCsrNotUsingAccountKey(csr, uc); err != nil { + return nil, err + } + + var signedCertBundle *certutil.ParsedCertBundle + var issuerId issuing.IssuerID + if ac.runtimeOpts.isCiepsEnabled { + // Note that issueAcmeCertUsingCieps enforces storage requirements and + // does the certificate storage for us + signedCertBundle, issuerId, err = issueAcmeCertUsingCieps(b, ac, r, fields, uc, account, order, csr) + if err != nil { + return nil, err + } + } else { + signedCertBundle, issuerId, err = issueCertFromCsr(ac, csr) + if err != nil { + return nil, err + } + + err = issuing.StoreCertificate(ac.sc.Context, ac.sc.Storage, ac.sc.Backend.GetCertificateCounter(), signedCertBundle) + if err != nil { + return nil, err + } + } + hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) + + if err := b.GetAcmeState().TrackIssuedCert(ac, order.AccountId, hyphenSerialNumber, order.OrderId); err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving account->cert->order reference", "serial_number", hyphenSerialNumber, "error", err) + return nil, err + } + + order.Status = ACMEOrderValid + order.CertificateSerialNumber = hyphenSerialNumber + order.CertificateExpiry = signedCertBundle.Certificate.NotAfter + order.IssuerId = issuerId + + err = b.GetAcmeState().SaveOrder(ac, order) + if err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving order", "serial_number", hyphenSerialNumber, "error", err) + return nil, fmt.Errorf("failed saving updated order: %w", err) + } + + if err := b.doTrackBilling(ac.sc.Context, order.Identifiers); err != nil { + b.Logger().Error("failed to track billing for order", "order", orderId, "error", err) + err = nil + } + + return formatOrderResponse(ac, order), nil +} + +func computeOrderStatus(ac *acmeContext, uc *jwsCtx, order *acmeOrder) (ACMEOrderStatusType, error) { + // If we reached a final stage, no use computing anything else + if order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid { + return order.Status, nil + } + + // We aren't in a final state yet, check for expiry + if time.Now().After(order.Expires) { + return ACMEOrderInvalid, nil + } + + // Intermediary steps passed 
authorizations should short circuit us as well + if order.Status == ACMEOrderReady || order.Status == ACMEOrderProcessing { + return order.Status, nil + } + + // If we have no authorizations attached to the order, nothing to compute either + if len(order.AuthorizationIds) == 0 { + return ACMEOrderPending, nil + } + + anyFailed := false + allPassed := true + for _, authId := range order.AuthorizationIds { + authorization, err := ac.getAcmeState().LoadAuthorization(ac, uc, authId) + if err != nil { + return order.Status, fmt.Errorf("failed loading authorization: %s: %w", authId, err) + } + + if authorization.Status == ACMEAuthorizationPending { + allPassed = false + continue + } + + if authorization.Status != ACMEAuthorizationValid { + // Per RFC 8555 - 7.1.6. Status Changes + // The order also moves to the "invalid" state if it expires or + // one of its authorizations enters a final state other than + // "valid" ("expired", "revoked", or "deactivated"). + allPassed = false + anyFailed = true + break + } + } + + if anyFailed { + return ACMEOrderInvalid, nil + } + + if allPassed { + return ACMEOrderReady, nil + } + + // The order has not expired, no authorizations have yet to be marked as failed + // nor have we passed them all. + return ACMEOrderPending, nil +} + +func validateCsrNotUsingAccountKey(csr *x509.CertificateRequest, uc *jwsCtx) error { + csrKey := csr.PublicKey + userKey := uc.Key.Public().Key + + sameKey, err := certutil.ComparePublicKeysAndType(csrKey, userKey) + if err != nil { + return err + } + + if sameKey { + return fmt.Errorf("%w: certificate public key must not match account key", ErrBadCSR) + } + + return nil +} + +func validateCsrMatchesOrder(csr *x509.CertificateRequest, order *acmeOrder) error { + csrDNSIdentifiers, csrIPIdentifiers := getIdentifiersFromCSR(csr) + orderDNSIdentifiers := strutil.RemoveDuplicates(order.getIdentifierDNSValues(), true) + orderIPIdentifiers := removeDuplicatesAndSortIps(order.getIdentifierIPValues()) + + if len(orderDNSIdentifiers) == 0 && len(orderIPIdentifiers) == 0 { + return fmt.Errorf("%w: order did not include any identifiers", ErrServerInternal) + } + + if len(orderDNSIdentifiers) != len(csrDNSIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of DNS identifiers", ErrBadCSR, len(orderDNSIdentifiers), len(csrDNSIdentifiers)) + } + + if len(orderIPIdentifiers) != len(csrIPIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of IP identifiers", ErrBadCSR, len(orderIPIdentifiers), len(csrIPIdentifiers)) + } + + for i, identifier := range orderDNSIdentifiers { + if identifier != csrDNSIdentifiers[i] { + return fmt.Errorf("%w: CSR is missing order DNS identifier %s", ErrBadCSR, identifier) + } + } + + for i, identifier := range orderIPIdentifiers { + if !identifier.Equal(csrIPIdentifiers[i]) { + return fmt.Errorf("%w: CSR is missing order IP identifier %s", ErrBadCSR, identifier.String()) + } + } + + // Since we do not support NotBefore/NotAfter dates at this time no need to validate CSR/Order match. 
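+	// Worked example (editorial): an order for {a.example.com, b.example.com}
+	// matches only a CSR whose SANs reduce to the same two names, since both
+	// sides pass through the same dedupe/normalization helpers above before the
+	// element-wise comparison; one missing or extra SAN fails the count checks.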
+ + return nil +} + +func (b *backend) validateIdentifiersAgainstRole(role *issuing.RoleEntry, identifiers []*ACMEIdentifier) error { + for _, identifier := range identifiers { + switch identifier.Type { + case ACMEDNSIdentifier: + data := &inputBundle{ + role: role, + req: &logical.Request{}, + apiData: &framework.FieldData{}, + } + + if validateNames(b, data, []string{identifier.OriginalValue}) != "" { + return fmt.Errorf("%w: role (%s) will not issue certificate for name %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + case ACMEIPIdentifier: + if !role.AllowIPSANs { + return fmt.Errorf("%w: role (%s) does not allow IP sans, so cannot issue certificate for %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + default: + return fmt.Errorf("unknown type of identifier: %v for %v", identifier.Type, identifier.OriginalValue) + } + } + + return nil +} + +func getIdentifiersFromCSR(csr *x509.CertificateRequest) ([]string, []net.IP) { + dnsIdentifiers := append([]string(nil), csr.DNSNames...) + ipIdentifiers := append([]net.IP(nil), csr.IPAddresses...) + + if csr.Subject.CommonName != "" { + ip := net.ParseIP(csr.Subject.CommonName) + if ip != nil { + ipIdentifiers = append(ipIdentifiers, ip) + } else { + dnsIdentifiers = append(dnsIdentifiers, csr.Subject.CommonName) + } + } + + return strutil.RemoveDuplicates(dnsIdentifiers, true), removeDuplicatesAndSortIps(ipIdentifiers) +} + +func removeDuplicatesAndSortIps(ipIdentifiers []net.IP) []net.IP { + var uniqueIpIdentifiers []net.IP + for _, ip := range ipIdentifiers { + found := false + for _, curIp := range uniqueIpIdentifiers { + if curIp.Equal(ip) { + found = true + } + } + + if !found { + uniqueIpIdentifiers = append(uniqueIpIdentifiers, ip) + } + } + + sort.Slice(uniqueIpIdentifiers, func(i, j int) bool { + return uniqueIpIdentifiers[i].String() < uniqueIpIdentifiers[j].String() + }) + return uniqueIpIdentifiers +} + +func maybeAugmentReqDataWithSuitableCN(ac *acmeContext, csr *x509.CertificateRequest, data *framework.FieldData) { + // Role doesn't require a CN, so we don't care. + if !ac.role.RequireCN { + return + } + + // CSR contains a CN, so use that one. + if csr.Subject.CommonName != "" { + return + } + + // Choose a CN in the order wildcard -> DNS -> IP -> fail. + for _, name := range csr.DNSNames { + if strings.Contains(name, "*") { + data.Raw["common_name"] = name + return + } + } + if len(csr.DNSNames) > 0 { + data.Raw["common_name"] = csr.DNSNames[0] + return + } + if len(csr.IPAddresses) > 0 { + data.Raw["common_name"] = csr.IPAddresses[0].String() + return + } +} + +func issueCertFromCsr(ac *acmeContext, csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuing.IssuerID, error) { + pemBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Headers: nil, + Bytes: csr.Raw, + } + pemCsr := string(pem.EncodeToMemory(pemBlock)) + + data := &framework.FieldData{ + Raw: map[string]interface{}{ + "csr": pemCsr, + }, + Schema: getCsrSignVerbatimSchemaFields(), + } + + // XXX: Usability hack: by default, minimalist roles have require_cn=true, + // but some ACME clients do not provision one in the certificate as modern + // (TLS) clients are mostly verifying against server's DNS SANs. 
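+	// For example (editor's sketch): a CSR with no CN and
+	// DNSNames=["*.example.com", "www.example.com"] would otherwise fail a
+	// require_cn=true role; the helper below picks "*.example.com" (wildcard
+	// first), falling back to the first DNS SAN and then the first IP SAN.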
+	maybeAugmentReqDataWithSuitableCN(ac, csr, data)
+
+	signingBundle, issuerId, err := ac.sc.fetchCAInfoWithIssuer(ac.issuer.ID.String(), issuing.IssuanceUsage)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed loading CA %s: %w", ac.issuer.ID.String(), err)
+	}
+
+	// An ACME-issued cert always truncates its TTL to the issuer's expiration
+	// if it would otherwise outlive the issuer, regardless of the configured
+	// not-after behavior.
+	if signingBundle.LeafNotAfterBehavior == certutil.ErrNotAfterBehavior {
+		signingBundle.LeafNotAfterBehavior = certutil.TruncateNotAfterBehavior
+	}
+
+	input := &inputBundle{
+		req:     &logical.Request{},
+		apiData: data,
+		role:    ac.role,
+	}
+
+	normalNotAfter, _, err := getCertificateNotAfter(ac.sc.Backend, input, signingBundle)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed computing certificate TTL from role/mount: %v: %w", err, ErrMalformed)
+	}
+
+	// Force a maximum 90-day TTL or lower for ACME
+	if time.Now().Add(maxAcmeCertTTL).Before(normalNotAfter) {
+		input.apiData.Raw["ttl"] = maxAcmeCertTTL
+	}
+
+	if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil {
+		return nil, "", fmt.Errorf("%w: refusing to sign CSR with empty PublicKey", ErrBadCSR)
+	}
+
+	// UseCSRValues as defined in certutil/helpers.go accepts the following
+	// fields off of the CSR:
+	//
+	// 1. Subject fields,
+	// 2. SANs,
+	// 3. Extensions (except for a BasicConstraints extension)
+	//
+	// Because we have stricter validation of subject parameters, and no way
+	// to validate or allow extensions, we do not wish to use the CSR's
+	// parameters for these values. If a CSR sets, e.g., an organizational
+	// unit, we have no way of validating this (via ACME here, without perhaps
+	// an external policy engine), and thus should not be setting it on our
+	// final issued certificate.
+	parsedBundle, _, err := signCert(ac.sc.Backend, input, signingBundle, false /* is_ca */, false /* use_csr_values */)
+	if err != nil {
+		return nil, "", fmt.Errorf("%w: refusing to sign CSR: %s", ErrBadCSR, err.Error())
+	}
+
+	if err = parsedBundle.Verify(); err != nil {
+		return nil, "", fmt.Errorf("verification of parsed bundle failed: %w", err)
+	}
+
+	// We only allow the ServerAuth key usage on ACME-issued certs
+	// when configuration does not allow usage of the role's ExtKeyUsage field.
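+	// Editor's note (illustrative): with AllowRoleExtKeyUsage left false, a
+	// leaf carrying ExtKeyUsage=[ServerAuth] passes the loop below, while one
+	// also carrying ClientAuth is rejected with ErrBadCSR.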
+ config, err := ac.sc.Backend.GetAcmeState().getConfigWithUpdate(ac.sc) + if err != nil { + return nil, "", fmt.Errorf("failed to fetch ACME configuration: %w", err) + } + + if !config.AllowRoleExtKeyUsage { + for _, usage := range parsedBundle.Certificate.ExtKeyUsage { + if usage != x509.ExtKeyUsageServerAuth { + return nil, "", fmt.Errorf("%w: ACME certs only allow ServerAuth key usage", ErrBadCSR) + } + } + } + + return parsedBundle, issuerId, err +} + +func parseCsrFromFinalize(data map[string]interface{}) (*x509.CertificateRequest, error) { + csrInterface, present := data["csr"] + if !present { + return nil, fmt.Errorf("%w: missing csr in payload", ErrMalformed) + } + + base64Csr, ok := csrInterface.(string) + if !ok { + return nil, fmt.Errorf("%w: csr in payload not the expected type: %T", ErrMalformed, csrInterface) + } + + derCsr, err := base64.RawURLEncoding.DecodeString(base64Csr) + if err != nil { + return nil, fmt.Errorf("%w: failed base64 decoding csr: %s", ErrMalformed, err.Error()) + } + + csr, err := x509.ParseCertificateRequest(derCsr) + if err != nil { + return nil, fmt.Errorf("%w: failed to parse csr: %s", ErrMalformed, err.Error()) + } + + if csr.PublicKey == nil || csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm { + return nil, fmt.Errorf("%w: failed to parse csr no public key info or unknown key algorithm used", ErrBadCSR) + } + + for _, ext := range csr.Extensions { + if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { + isCa, _, err := certutil.ParseBasicConstraintExtension(ext) + if err != nil { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension: %v", ErrBadCSR, err.Error()) + } + + if isCa { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension with CA set to true", ErrBadCSR) + } + } + } + + return csr, nil +} + +func (b *backend) acmeGetOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For final orders (in the "valid" or "invalid" state), the authorizations that were completed. + // + // Otherwise, for "pending" orders we will return our list as it was originally saved. 
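+	// Editor's illustration: an "invalid" order holding one valid and one
+	// pending authorization lists only the valid authorization's URL in the
+	// response built below.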
+ requiresFiltering := order.Status == ACMEOrderValid || order.Status == ACMEOrderInvalid + if requiresFiltering { + filteredAuthorizationIds := []string{} + + for _, authId := range order.AuthorizationIds { + authorization, err := b.GetAcmeState().LoadAuthorization(ac, uc, authId) + if err != nil { + return nil, err + } + + if (order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid) && + authorization.Status == ACMEAuthorizationValid { + filteredAuthorizationIds = append(filteredAuthorizationIds, authId) + } + } + + order.AuthorizationIds = filteredAuthorizationIds + } + + return formatOrderResponse(ac, order), nil +} + +func (b *backend) acmeListOrdersHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, acct *acmeAccount) (*logical.Response, error) { + orderIds, err := b.GetAcmeState().ListOrderIds(ac.sc, acct.KeyId) + if err != nil { + return nil, err + } + + orderUrls := []string{} + for _, orderId := range orderIds { + order, err := b.GetAcmeState().LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status == ACMEOrderInvalid { + // Per RFC8555 -> 7.1.2.1 - Orders List + // The server SHOULD include pending orders and SHOULD NOT + // include orders that are invalid in the array of URLs. + continue + } + + orderUrls = append(orderUrls, buildOrderUrl(ac, orderId)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "orders": orderUrls, + }, + } + + return resp, nil +} + +func (b *backend) acmeNewOrderHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + identifiers, err := parseOrderIdentifiers(data) + if err != nil { + return nil, err + } + + notBefore, err := parseOptRFC3339Field(data, "notBefore") + if err != nil { + return nil, err + } + + notAfter, err := parseOptRFC3339Field(data, "notAfter") + if err != nil { + return nil, err + } + + if !notBefore.IsZero() || !notAfter.IsZero() { + return nil, fmt.Errorf("%w: NotBefore and NotAfter are not supported", ErrMalformed) + } + + err = validateAcmeProvidedOrderDates(notBefore, notAfter) + if err != nil { + return nil, err + } + + err = b.validateIdentifiersAgainstRole(ac.role, identifiers) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For pending orders, the authorizations that the client needs to complete before the + // requested certificate can be issued (see Section 7.5), including + // unexpired authorizations that the client has completed in the past + // for identifiers specified in the order. + // + // Since we are generating all authorizations here, there is no need to filter them out + // IF/WHEN we support pre-authz workflows and associate existing authorizations to this + // order they will need filtering. 
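+	// Editor's note: each identifier below yields exactly one freshly
+	// generated pending authorization, so an order over n identifiers always
+	// starts with n authorization URLs.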
+ var authorizations []*ACMEAuthorization + var authorizationIds []string + for _, identifier := range identifiers { + authz, err := generateAuthorization(account, identifier) + if err != nil { + return nil, fmt.Errorf("error generating authorizations: %w", err) + } + authorizations = append(authorizations, authz) + + err = b.GetAcmeState().SaveAuthorization(ac, authz) + if err != nil { + return nil, fmt.Errorf("failed storing authorization: %w", err) + } + + authorizationIds = append(authorizationIds, authz.Id) + } + + order := &acmeOrder{ + OrderId: genUuid(), + AccountId: account.KeyId, + Status: ACMEOrderPending, + Expires: time.Now().Add(24 * time.Hour), // TODO: Readjust this based on authz and/or config + Identifiers: identifiers, + AuthorizationIds: authorizationIds, + } + + err = b.GetAcmeState().SaveOrder(ac, order) + if err != nil { + return nil, fmt.Errorf("failed storing order: %w", err) + } + + resp := formatOrderResponse(ac, order) + + // Per RFC 8555 Section 7.4. Applying for Certificate Issuance: + // + // > If the server is willing to issue the requested certificate, it + // > responds with a 201 (Created) response. + resp.Data[logical.HTTPStatusCode] = http.StatusCreated + return resp, nil +} + +func validateAcmeProvidedOrderDates(notBefore time.Time, notAfter time.Time) error { + if !notBefore.IsZero() && !notAfter.IsZero() { + if notBefore.Equal(notAfter) { + return fmt.Errorf("%w: provided notBefore and notAfter dates can not be equal", ErrMalformed) + } + + if notBefore.After(notAfter) { + return fmt.Errorf("%w: provided notBefore can not be greater than notAfter", ErrMalformed) + } + } + + if !notAfter.IsZero() { + if time.Now().After(notAfter) { + return fmt.Errorf("%w: provided notAfter can not be in the past", ErrMalformed) + } + } + + return nil +} + +func formatOrderResponse(acmeCtx *acmeContext, order *acmeOrder) *logical.Response { + baseOrderUrl := buildOrderUrl(acmeCtx, order.OrderId) + + var authorizationUrls []string + for _, authId := range order.AuthorizationIds { + authorizationUrls = append(authorizationUrls, buildAuthorizationUrl(acmeCtx, authId)) + } + + var identifiers []map[string]interface{} + for _, identifier := range order.Identifiers { + identifiers = append(identifiers, identifier.NetworkMarshal( /* use original value */ true)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": order.Status, + "expires": order.Expires.Format(time.RFC3339), + "identifiers": identifiers, + "authorizations": authorizationUrls, + "finalize": baseOrderUrl + "/finalize", + }, + Headers: map[string][]string{ + "Location": {baseOrderUrl}, + }, + } + + // Only reply with the certificate URL if we are in a valid order state. + if order.Status == ACMEOrderValid { + resp.Data["certificate"] = baseOrderUrl + "/cert" + } + + return resp +} + +func buildAuthorizationUrl(acmeCtx *acmeContext, authId string) string { + return acmeCtx.baseUrl.JoinPath("authorization", authId).String() +} + +func buildOrderUrl(acmeCtx *acmeContext, orderId string) string { + return acmeCtx.baseUrl.JoinPath("order", orderId).String() +} + +func generateAuthorization(acct *acmeAccount, identifier *ACMEIdentifier) (*ACMEAuthorization, error) { + authId := genUuid() + + // Certain challenges have certain restrictions: DNS challenges cannot + // be used to validate IP addresses, and only DNS challenges can be used + // to validate wildcards. 
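+	// Editor's summary of the mapping implemented below: dns identifiers may
+	// use http-01, dns-01, or tls-alpn-01; ip identifiers are restricted to
+	// http-01 here; wildcard names are provable only via dns-01.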
+ allowedChallenges := []ACMEChallengeType{ACMEHTTPChallenge, ACMEDNSChallenge, ACMEALPNChallenge} + if identifier.Type == ACMEIPIdentifier { + allowedChallenges = []ACMEChallengeType{ACMEHTTPChallenge} + } else if identifier.IsWildcard { + allowedChallenges = []ACMEChallengeType{ACMEDNSChallenge} + } + + var challenges []*ACMEChallenge + for _, challengeType := range allowedChallenges { + token, err := getACMEToken() + if err != nil { + return nil, err + } + + challenge := &ACMEChallenge{ + Type: challengeType, + Status: ACMEChallengePending, + ChallengeFields: map[string]interface{}{ + "token": token, + }, + } + + challenges = append(challenges, challenge) + } + + return &ACMEAuthorization{ + Id: authId, + AccountId: acct.KeyId, + Identifier: identifier, + Status: ACMEAuthorizationPending, + Expires: "", // only populated when it switches to valid. + Challenges: challenges, + Wildcard: identifier.IsWildcard, + }, nil +} + +func parseOptRFC3339Field(data map[string]interface{}, keyName string) (time.Time, error) { + var timeVal time.Time + var err error + + rawBefore, present := data[keyName] + if present { + beforeStr, ok := rawBefore.(string) + if !ok { + return timeVal, fmt.Errorf("invalid type (%T) for field '%s': %w", rawBefore, keyName, ErrMalformed) + } + timeVal, err = time.Parse(time.RFC3339, beforeStr) + if err != nil { + return timeVal, fmt.Errorf("failed parsing field '%s' (%s): %s: %w", keyName, rawBefore, err.Error(), ErrMalformed) + } + + if timeVal.IsZero() { + return timeVal, fmt.Errorf("provided time value is invalid '%s' (%s): %w", keyName, rawBefore, ErrMalformed) + } + } + + return timeVal, nil +} + +func parseOrderIdentifiers(data map[string]interface{}) ([]*ACMEIdentifier, error) { + rawIdentifiers, present := data["identifiers"] + if !present { + return nil, fmt.Errorf("missing required identifiers argument: %w", ErrMalformed) + } + + listIdentifiers, ok := rawIdentifiers.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for field 'identifiers': %w", rawIdentifiers, ErrMalformed) + } + + var identifiers []*ACMEIdentifier + for _, rawIdentifier := range listIdentifiers { + mapIdentifier, ok := rawIdentifier.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for value in 'identifiers': %w", rawIdentifier, ErrMalformed) + } + + typeVal, present := mapIdentifier["type"] + if !present { + return nil, fmt.Errorf("missing type argument for value in 'identifiers': %w", ErrMalformed) + } + typeStr, ok := typeVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for type argument (%T) for value in 'identifiers': %w", typeStr, ErrMalformed) + } + + valueVal, present := mapIdentifier["value"] + if !present { + return nil, fmt.Errorf("missing value argument for value in 'identifiers': %w", ErrMalformed) + } + valueStr, ok := valueVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for value argument (%T) for value in 'identifiers': %w", valueStr, ErrMalformed) + } + + if len(valueStr) == 0 { + return nil, fmt.Errorf("value argument for value in 'identifiers' can not be blank: %w", ErrMalformed) + } + + identifier := &ACMEIdentifier{ + Value: valueStr, + OriginalValue: valueStr, + } + + switch typeStr { + case string(ACMEIPIdentifier): + identifier.Type = ACMEIPIdentifier + ip := net.ParseIP(valueStr) + if ip == nil { + return nil, fmt.Errorf("value argument (%s) failed validation: failed parsing as IP: %w", valueStr, ErrMalformed) + } + case string(ACMEDNSIdentifier): + identifier.Type = ACMEDNSIdentifier + + 
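+			// Editor's illustration: a new-order payload such as
+			//   {"identifiers": [{"type": "dns", "value": "*.example.com"}]}
+			// reaches this case with Value="*.example.com"; the wildcard
+			// handling below reduces it to Value="example.com" with
+			// IsWildcard=true.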
// This check modifies the identifier if it is a wildcard, + // removing the non-wildcard portion. We do this before the + // IP address checks, in case of an attempt to bypass the IP/DNS + // check via including a leading wildcard (e.g., *.127.0.0.1). + // + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > Wildcard domain names (with "*" as the first label) MUST NOT + // > be included in authorization objects. + if _, _, err := identifier.MaybeParseWildcard(); err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: invalid wildcard: %v: %w", valueStr, err, ErrMalformed) + } + + if isIP := net.ParseIP(identifier.Value); isIP != nil { + return nil, fmt.Errorf("refusing to accept argument (%s) as DNS type identifier: parsed OK as IP address: %w", valueStr, ErrMalformed) + } + + // Use the reduced (identifier.Value) in case this was a wildcard + // domain. + p := idna.New(idna.ValidateForRegistration()) + converted, err := p.ToASCII(identifier.Value) + if err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: %s: %w", valueStr, err.Error(), ErrMalformed) + } + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > The domain name MUST be encoded in the form in which it + // > would appear in a certificate. That is, it MUST be encoded + // > according to the rules in Section 7 of [RFC5280]. Servers + // > MUST verify any identifier values that begin with the + // > ASCII-Compatible Encoding prefix "xn--" as defined in + // > [RFC5890] are properly encoded. + if identifier.Value != converted { + return nil, fmt.Errorf("value argument (%s) failed IDNA round-tripping to ASCII: %w", valueStr, ErrMalformed) + } + default: + return nil, fmt.Errorf("unsupported identifier type %s: %w", typeStr, ErrUnsupportedIdentifier) + } + + identifiers = append(identifiers, identifier) + } + + return identifiers, nil +} + +func (b *backend) acmeTidyOrder(sc *storageContext, accountId string, orderPath string, certTidyBuffer time.Duration) (bool, time.Time, error) { + // First we get the order; note that the orderPath includes the account + // It's only accessed at acme/orders/ with the account context + // It's saved at acme//orders/ + entry, err := sc.Storage.Get(sc.Context, orderPath) + if err != nil { + return false, time.Time{}, fmt.Errorf("error loading order: %w", err) + } + if entry == nil { + return false, time.Time{}, fmt.Errorf("order does not exist: %w", ErrMalformed) + } + var order acmeOrder + err = entry.DecodeJSON(&order) + if err != nil { + return false, time.Time{}, fmt.Errorf("error decoding order: %w", err) + } + + // Determine whether we should tidy this order + shouldTidy := false + + // Track either the order expiry or certificate expiry to return to the caller, this + // can be used to influence the account's expiry + orderExpiry := order.CertificateExpiry + + // It is faster to check certificate information on the order entry rather than fetch the cert entry to parse: + if !order.CertificateExpiry.IsZero() { + // This implies that a certificate exists + // When a certificate exists, we want to expire and tidy the order when we tidy the certificate: + if time.Now().After(order.CertificateExpiry.Add(certTidyBuffer)) { // It's time to clean + shouldTidy = true + } + } else { + // This implies that no certificate exists + // In this case, we want to expire the order after it has expired (+ some safety buffer) + if time.Now().After(order.Expires) { + shouldTidy = true + } + orderExpiry = order.Expires + } + if shouldTidy 
== false { + return shouldTidy, orderExpiry, nil + } + + // Tidy this Order + // That includes any certificate acme//orders/orderPath/cert + // That also includes any related authorizations: acme//authorizations/ + + // First Authorizations + for _, authorizationId := range order.AuthorizationIds { + err = sc.Storage.Delete(sc.Context, getAuthorizationPath(accountId, authorizationId)) + if err != nil { + return false, orderExpiry, err + } + } + + // Normal Tidy will Take Care of the Certificate, we need to clean up the certificate to account tracker though + err = sc.Storage.Delete(sc.Context, getAcmeSerialToAccountTrackerPath(accountId, order.CertificateSerialNumber)) + if err != nil { + return false, orderExpiry, err + } + + // And Finally, the order: + err = sc.Storage.Delete(sc.Context, orderPath) + if err != nil { + return false, orderExpiry, err + } + b.tidyStatusIncDelAcmeOrderCount() + + return true, orderExpiry, nil +} diff --git a/builtin/logical/pki/path_acme_order_test.go b/builtin/logical/pki/path_acme_order_test.go new file mode 100644 index 000000000000..b41bae157b39 --- /dev/null +++ b/builtin/logical/pki/path_acme_order_test.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "net" + "testing" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" +) + +// TestACME_ValidateIdentifiersAgainstRole Verify the ACME order creation +// function verifies somewhat the identifiers that were provided have a +// decent chance of being allowed by the selected role. +func TestACME_ValidateIdentifiersAgainstRole(t *testing.T) { + b, _ := CreateBackendWithStorage(t) + + tests := []struct { + name string + role *issuing.RoleEntry + identifiers []*ACMEIdentifier + expectErr bool + }{ + { + name: "verbatim-role-allows-dns-ip", + role: issuing.SignVerbatimRole(), + identifiers: _buildACMEIdentifiers("test.com", "127.0.0.1"), + expectErr: false, + }, + { + name: "default-role-does-not-allow-dns", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("www.test.com"), + expectErr: true, + }, + { + name: "default-role-allows-ip", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: false, + }, + { + name: "disable-ip-sans-forbids-ip", + role: buildTestRole(t, map[string]interface{}{"allow_ip_sans": false}), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: true, + }, + { + name: "role-no-wildcards-allowed-without", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allow_bare_domains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("www.test.com", "test.com"), + expectErr: false, + }, + { + name: "role-no-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: true, + }, + { + name: "role-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": true, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: false, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + err := b.validateIdentifiersAgainstRole(tt.role, tt.identifiers) + + if tt.expectErr { + require.Error(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + // If we did return an error if should be classified as a ErrRejectedIdentifier + require.ErrorIs(t, err, ErrRejectedIdentifier) + } else { + require.NoError(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + } + }) + } +} + +func _buildACMEIdentifiers(values ...string) []*ACMEIdentifier { + var identifiers []*ACMEIdentifier + + for _, value := range values { + identifiers = append(identifiers, _buildACMEIdentifier(value)) + } + + return identifiers +} + +func _buildACMEIdentifier(val string) *ACMEIdentifier { + ip := net.ParseIP(val) + if ip == nil { + identifier := &ACMEIdentifier{Type: "dns", Value: val, OriginalValue: val, IsWildcard: false} + _, _, _ = identifier.MaybeParseWildcard() + return identifier + } + + return &ACMEIdentifier{Type: "ip", Value: val, OriginalValue: val, IsWildcard: false} +} + +// Easily allow tests to create valid roles with proper defaults, since we don't have an easy +// way to generate roles with proper defaults, go through the createRole handler with the handlers +// field data so we pickup all the defaults specified there. +func buildTestRole(t *testing.T, config map[string]interface{}) *issuing.RoleEntry { + b, s := CreateBackendWithStorage(t) + + path := pathRoles(b) + fields := path.Fields + if config == nil { + config = map[string]interface{}{} + } + + if _, exists := config["name"]; !exists { + config["name"] = genUuid() + } + + _, err := b.pathRoleCreate(ctx, &logical.Request{Storage: s}, &framework.FieldData{Raw: config, Schema: fields}) + require.NoError(t, err, "failed generating role with config %v", config) + + role, err := b.GetRole(ctx, s, config["name"].(string)) + require.NoError(t, err, "failed loading stored role") + + return role +} diff --git a/builtin/logical/pki/path_acme_revoke.go b/builtin/logical/pki/path_acme_revoke.go new file mode 100644 index 000000000000..8cab32a8e1d8 --- /dev/null +++ b/builtin/logical/pki/path_acme_revoke.go @@ -0,0 +1,182 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeRevoke(b *backend, baseUrl string, opts acmeWrapperOpts) *framework.Path { + return patternAcmeRevoke(b, baseUrl+"/revoke-cert", opts) +} + +func patternAcmeRevoke(b *backend, pattern string, opts acmeWrapperOpts) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeParsedWrapper(opts, b.acmeRevocationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeRevocationHandler(acmeCtx *acmeContext, _ *logical.Request, _ *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) { + var cert *x509.Certificate + + rawCertificate, present := data["certificate"] + if present { + certBase64, ok := rawCertificate.(string) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected string) for field 'certificate': %w", rawCertificate, ErrMalformed) + } + + certBytes, err := base64.RawURLEncoding.DecodeString(certBase64) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode certificate: %v: %w", err, ErrMalformed) + } + + cert, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %v: %w", err, ErrMalformed) + } + } else { + return nil, fmt.Errorf("bad request was lacking required field 'certificate': %w", ErrMalformed) + } + + rawReason, present := data["reason"] + if present { + reason, ok := rawReason.(float64) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected float64) for field 'reason': %w", rawReason, ErrMalformed) + } + + if int(reason) != 0 { + return nil, fmt.Errorf("Vault does not support revocation reasons (got %v; expected omitted or 0/unspecified): %w", int(reason), ErrBadRevocationReason) + } + } + + // If the certificate expired, there's no point in revoking it. + if cert.NotAfter.Before(time.Now()) { + return nil, fmt.Errorf("refusing to revoke expired certificate: %w", ErrMalformed) + } + + // Fetch the CRL config as we need it to ultimately do the + // revocation. This should be cached and thus relatively fast. + config, err := b.CrlBuilder().getConfigWithUpdate(acmeCtx.sc) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: failed reading revocation config: %v: %w", err, ErrServerInternal) + } + + // Load our certificate from storage to ensure it exists and matches + // what was given to us. + serial := serialFromCert(cert) + certEntry, err := fetchCertBySerial(acmeCtx.sc, "certs/", serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading global cert entry: %v: %w", err, ErrServerInternal) + } + if certEntry == nil { + return nil, fmt.Errorf("unable to revoke certificate: no global cert entry found: %w", ErrServerInternal) + } + + // Validate that the provided certificate matches the stored + // certificate. This completes the chain of: + // + // provided_auth -> provided_cert == stored cert. + // + // Allowing revocation to be safe. 
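+	// Editor's note (illustrative): under proof-of-possession the JWS is
+	// signed by the certificate's own private key, while under the account
+	// path it is signed by an account key that must have issued this serial;
+	// the two cases are dispatched at the bottom of this handler.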
+ // + // We use the non-subtle unsafe bytes equality check here as we have + // already fetched this certificate from storage, thus already leaking + // timing information that this cert exists. The user could thus simply + // fetch the cert from Vault matching this serial number via the unauthed + // pki/certs/:serial API endpoint. + if !bytes.Equal(certEntry.Value, cert.Raw) { + return nil, fmt.Errorf("unable to revoke certificate: supplied certificate does not match CA's stored value: %w", ErrMalformed) + } + + // Check if it was already revoked; in this case, we do not need to + // revoke it again and want to respond with an appropriate error message. + revEntry, err := fetchCertBySerial(acmeCtx.sc, "revoked/", serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading revocation entry: %v: %w", err, ErrServerInternal) + } + if revEntry != nil { + return nil, fmt.Errorf("unable to revoke certificate: %w", ErrAlreadyRevoked) + } + + // Finally, do the relevant permissions/authorization check as + // appropriate based on the type of revocation happening. + if !userCtx.Existing { + return b.acmeRevocationByPoP(acmeCtx, userCtx, cert, config) + } + + return b.acmeRevocationByAccount(acmeCtx, userCtx, cert, config) +} + +func (b *backend) acmeRevocationByPoP(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { + // Since this account does not exist, ensure we've gotten a private key + // matching the certificate's public key. This private key isn't + // explicitly provided, but instead provided by proxy (public key, + // signature over message). That signature is validated by an earlier + // wrapper (VerifyJWS called by ParseRequestParams). What still remains + // is validating that this implicit private key (with given public key + // and valid JWS signature) matches the certificate's public key. + givenPublic, ok := userCtx.Key.Key.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("unable to revoke certificate: unable to parse message header's JWS key of type (%T): %w", userCtx.Key.Key, ErrMalformed) + } + + // Ensure that our PoP's implicit private key matches this certificate's + // public key. + if err := validatePublicKeyMatchesCert(givenPublic, cert); err != nil { + return nil, fmt.Errorf("unable to revoke certificate: unable to verify proof of possession of private key provided by proxy: %v: %w", err, ErrMalformed) + } + + // Now it is safe to revoke. + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() + + return revokeCert(acmeCtx.sc, config, cert) +} + +func (b *backend) acmeRevocationByAccount(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { + // Fetch the account; disallow revocations from non-valid-status accounts. + _, err := requireValidAcmeAccount(acmeCtx, userCtx) + if err != nil { + return nil, fmt.Errorf("failed to lookup account: %w", err) + } + + // We only support certificates issued by this user, we don't support + // cross-account revocations. + serial := serialFromCert(cert) + acmeEntry, err := b.GetAcmeState().GetIssuedCert(acmeCtx, userCtx.Kid, serial) + if err != nil || acmeEntry == nil { + return nil, fmt.Errorf("unable to revoke certificate: %v: %w", err, ErrMalformed) + } + + // Now it is safe to revoke. 
+ b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() + + return revokeCert(acmeCtx.sc, config, cert) +} diff --git a/builtin/logical/pki/path_acme_test.go b/builtin/logical/pki/path_acme_test.go new file mode 100644 index 000000000000..b378eda66cbd --- /dev/null +++ b/builtin/logical/pki/path_acme_test.go @@ -0,0 +1,1835 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/go-test/deep" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" + "golang.org/x/net/http2" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +// TestAcmeBasicWorkflow a test that will validate a basic ACME workflow using the Golang ACME client. +func TestAcmeBasicWorkflow(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + cases := []struct { + name string + prefixUrl string + }{ + {"root", "acme/"}, + {"role", "roles/test-role/acme/"}, + {"issuer", "issuer/int-ca/acme/"}, + {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, + } + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + t.Logf("Testing discover on %s", baseAcmeURL) + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + + discoveryBaseUrl := client.Address() + baseAcmeURL + require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL) + require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL) + require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL) + require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL) + require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL) + require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") + + // Attempt to update prior to creating an account + t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL) + _, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:shouldfail@example.com"}}) + require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration") + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{ + Contact: []string{"mailto:test@example.com", "mailto:test2@test.com"}, + }, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + require.Equal(t, acme.StatusValid, acct.Status) + require.Contains(t, 
acct.Contact, "mailto:test@example.com")
+			require.Contains(t, acct.Contact, "mailto:test2@test.com")
+			require.Len(t, acct.Contact, 2)
+
+			// Call register again; we should get the existing account back
+			t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL)
+			_, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
+			require.ErrorIs(t, err, acme.ErrAccountAlreadyExists,
+				"We should have returned a 200 status code which would have triggered an error in the golang acme"+
+					" library")
+
+			// Update contact
+			t.Logf("Testing update account contacts on %s", baseAcmeURL)
+			acct.Contact = []string{"mailto:test3@example.com"}
+			acct2, err := acmeClient.UpdateReg(testCtx, acct)
+			require.NoError(t, err, "failed updating account")
+			require.Equal(t, acme.StatusValid, acct2.Status)
+			// We should get this back, not the original values.
+			require.Contains(t, acct2.Contact, "mailto:test3@example.com")
+			require.Len(t, acct2.Contact, 1)
+
+			// Make sure orders do not accept dates
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
+				acme.WithOrderNotBefore(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with NotBefore set")
+
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with NotAfter set")
+
+			// Make sure DNS identifiers cannot include IP addresses
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
+			_, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "*.127.0.0.1"}},
+				acme.WithOrderNotAfter(time.Now().Add(10*time.Minute)))
+			require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier")
+
+			// Create an order
+			t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+			identifiers := []string{"localhost.localdomain", "*.localdomain"}
+			createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+				{Type: "dns", Value: identifiers[0]},
+				{Type: "dns", Value: identifiers[1]},
+			})
+			require.NoError(t, err, "failed creating order")
+			require.Equal(t, acme.StatusPending, createOrder.Status)
+			require.Empty(t, createOrder.CertURL)
+			require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL)
+			require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls")
+
+			// Get order
+			t.Logf("Testing GetOrder on %s", baseAcmeURL)
+			getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI)
+			require.NoError(t, err, "failed fetching order")
+			require.Equal(t, acme.StatusPending, getOrder.Status)
+			if diffs := deep.Equal(createOrder, getOrder); diffs != nil {
+				t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n"))
+			}
+
+			// Make sure the identifiers returned in the order contain the original values
+			var ids []string
+			for _, id := range getOrder.Identifiers {
+				require.Equal(t, "dns", id.Type)
+				ids = append(ids, id.Value)
+			}
+			require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers")
+
+			// Load authorizations
+			var authorizations []*acme.Authorization
+			for _, authUrl := range getOrder.AuthzURLs {
+				auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
+
require.NoError(t, err, "failed fetching authorization: %s", authUrl)
+
+				authorizations = append(authorizations, auth)
+			}
+
+			// We should have 2 separate auth challenges as we have two separate identifiers
+			require.Len(t, authorizations, 2, "expected 2 authorizations in order")
+
+			var wildcardAuth *acme.Authorization
+			var domainAuth *acme.Authorization
+			for _, auth := range authorizations {
+				if auth.Wildcard {
+					wildcardAuth = auth
+				} else {
+					domainAuth = auth
+				}
+			}
+
+			// Test the values for the domain authorization
+			require.Equal(t, acme.StatusPending, domainAuth.Status)
+			require.Equal(t, "dns", domainAuth.Identifier.Type)
+			require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value)
+			require.False(t, domainAuth.Wildcard, "should not be a wildcard")
+			require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
+
+			require.Len(t, domainAuth.Challenges, 3, "expected three challenges")
+			require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status)
+			require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "http-01", domainAuth.Challenges[0].Type)
+			require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token")
+			require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status)
+			require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "dns-01", domainAuth.Challenges[1].Type)
+			require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token")
+			require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status)
+			require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type)
+			require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token")
+
+			// Test the values for the wildcard authorization
+			require.Equal(t, acme.StatusPending, wildcardAuth.Status)
+			require.Equal(t, "dns", wildcardAuth.Identifier.Type)
+			require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. in auth responses
+			require.True(t, wildcardAuth.Wildcard, "should be a wildcard")
+			require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status")
+
+			require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge")
+			require.Equal(t, acme.StatusPending, wildcardAuth.Challenges[0].Status)
+			require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type)
+			require.NotEmpty(t, wildcardAuth.Challenges[0].Token, "missing challenge token")
+
+			// Make sure that getting a challenge does not start it.
+			challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI)
+			require.NoError(t, err, "failed to load challenge")
+			require.Equal(t, acme.StatusPending, challenge.Status)
+			require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "http-01", challenge.Type)
+
+			// Accept a challenge; this triggers validation to start.
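+			// Editor's note: the golang acme client's Accept POSTs to the
+			// challenge URL, which is why the status returned below is
+			// "processing" rather than "valid"; validation then completes
+			// asynchronously on the server side.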
+			challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0])
+			require.NoError(t, err, "failed to accept challenge")
+			require.Equal(t, acme.StatusProcessing, challenge.Status)
+			require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
+			require.Equal(t, "http-01", challenge.Type)
+
+			require.NotEmpty(t, challenge.Token, "missing challenge token")
+
+			// HACK: Update the authorization/challenge to completed, as we can't
+			// really complete the challenge properly in this workflow test.
+			markAuthorizationSuccess(t, client, acmeClient, acct, getOrder)
+
+			// Make sure sending a CSR with the account key gets rejected.
+			goodCr := &x509.CertificateRequest{
+				Subject:  pkix.Name{CommonName: identifiers[1]},
+				DNSNames: []string{identifiers[0], identifiers[1]},
+			}
+			t.Logf("csr: %v", goodCr)
+
+			// We want to make sure people are not using the same key for their CSR/certs and their ACME account.
+			csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey)
+			require.NoError(t, err, "failed generating csr")
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true)
+			require.Error(t, err, "should not be allowed to use the account key for a CSR")
+
+			csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+			require.NoError(t, err, "failed generating key for CSR")
+
+			// Validate we reject CSRs with a CN that isn't in the original order
+			badCr := &x509.CertificateRequest{
+				Subject:  pkix.Name{CommonName: "not-in-original-order.com"},
+				DNSNames: []string{identifiers[0], identifiers[1]},
+			}
+			t.Logf("csr: %v", badCr)
+
+			csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad common name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with different common names than the order")
+
+			// Validate we reject CSRs that contain DNS names that aren't in the original order
+			badCr = &x509.CertificateRequest{
+				Subject:  pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+				DNSNames: []string{"www.notinorder.com"},
+			}
+
+			csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with different names than the order")
+
+			// Validate we reject CSRs that contain IP addresses that weren't in the original order
+			badCr = &x509.CertificateRequest{
+				Subject:     pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+				IPAddresses: []net.IP{{127, 0, 0, 1}},
+			}
+
+			csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad IP")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true)
+			require.Error(t, err, "should not be allowed to use a CSR with different IP addresses than the order")
+
+			// Validate we reject CSRs that contain fewer names than in the original order.
+			badCr = &x509.CertificateRequest{
+				Subject: pkix.Name{CommonName: identifiers[0]},
+			}
+
+			csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+			require.NoError(t, err, "failed generating csr with bad name")
+
+			_, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
+			require.Error(t, err, "should not be allowed to use a CSR with fewer names than the order")
+
+			// Finally, test that a proper CSR, with the correct names and signed with a different key, works.
+			csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+			require.NoError(t, err, "failed generating csr")
+
+			certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true)
+			require.NoError(t, err, "failed finalizing order")
+			require.Len(t, certs, 3, "expected three items within the returned certs")
+
+			testAcmeCertSignedByCa(t, client, certs, "int-ca")
+
+			// Make sure the certificate has a NotAfter date of at most 90 days out
+			acmeCert, err := x509.ParseCertificate(certs[0])
+			require.NoError(t, err, "failed parsing acme cert bytes")
+			maxAcmeNotAfter := time.Now().Add(maxAcmeCertTTL)
+			if maxAcmeNotAfter.Before(acmeCert.NotAfter) {
+				require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter))
+			}
+
+			// Verify we can revoke it using account key based revocation
+			err = acmeClient.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified)
+			require.NoError(t, err, "failed to revoke certificate through account key")
+
+			// Make sure it was actually revoked
+			certResp, err := client.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert))
+			require.NoError(t, err, "failed to read certificate status")
+			require.NotNil(t, certResp, "certificate status response was nil")
+			revocationTime := certResp.Data["revocation_time"].(json.Number)
+			revocationTimeInt, err := revocationTime.Int64()
+			require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime)
+			require.Greater(t, revocationTimeInt, int64(0),
+				"revocation time was not greater than 0; revocation did not work, value was: %v", revocationTimeInt)
+
+			// Make sure we can revoke an authorization as a client
+			err = acmeClient.RevokeAuthorization(ctx, authorizations[0].URI)
+			require.NoError(t, err, "failed revoking authorization")
+
+			revokedAuth, err := acmeClient.GetAuthorization(ctx, authorizations[0].URI)
+			require.NoError(t, err, "failed fetching authorization")
+			require.Equal(t, acme.StatusDeactivated, revokedAuth.Status)
+
+			// Deactivate account
+			t.Logf("Testing deactivate account on %s", baseAcmeURL)
+			err = acmeClient.DeactivateReg(testCtx)
+			require.NoError(t, err, "failed deactivating account")
+
+			// Make sure we get an unauthorized error trying to update the account again.
+			t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL)
+			_, err = acmeClient.UpdateReg(testCtx, acct)
+			require.Error(t, err, "expected account to be deactivated")
+			require.IsType(t, &acme.Error{}, err, "expected acme error type")
+			acmeErr := err.(*acme.Error)
+			require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType)
+		})
+	}
+}
+
+// TestAcmeBasicWorkflowWithEab verifies that new accounts require EABs when enforced by configuration.
+func TestAcmeBasicWorkflowWithEab(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Enable EAB + _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "always-required", + }) + require.NoError(t, err) + + cases := []struct { + name string + prefixUrl string + }{ + {"root", "acme/"}, + {"role", "roles/test-role/acme/"}, + {"issuer", "issuer/int-ca/acme/"}, + {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed creating ec key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + t.Logf("Testing discover on %s", baseAcmeURL) + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") + + // Create new account without EAB, should fail + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired", + "expected failure creating an account without eab") + + // Test fetch, list, delete workflow + kid, _ := getEABKey(t, client, tc.prefixUrl) + resp, err := client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + _, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid) + require.NoError(t, err, "failed to delete eab") + + // List eabs should return zero results + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.Nil(t, resp, "list response for eab tokens should have been nil") + + // fetch a new EAB + kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl) + acct := &acme.Account{ + ExternalAccountBinding: &acme.ExternalAccountBinding{ + KID: kid, + Key: eabKeyBytes, + }, + } + + // Make sure we can list our key + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + keyInfo := resp.Data["key_info"].(map[string]interface{}) + require.Contains(t, keyInfo, kid) + + infoForKid := keyInfo[kid].(map[string]interface{}) + require.Equal(t, "hs", infoForKid["key_type"]) + require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"]) + + // Create new account with EAB + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering new account with eab") + + // Make sure our EAB is 
no longer available
+			resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab")
+			require.NoError(t, err, "failed to list eab tokens")
+			require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list")
+
+			// Attempt to create another account with the same EAB as before -- should fail
+			accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+			require.NoError(t, err, "failed creating ec key")
+
+			acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
+			acct2 := &acme.Account{
+				ExternalAccountBinding: &acme.ExternalAccountBinding{
+					KID: kid,
+					Key: eabKeyBytes,
+				},
+			}
+
+			_, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true })
+			require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use")
+
+			// We can look up an existing account without an EAB if we have the account key
+			_, err = acmeClient.GetReg(testCtx /* unused url */, "")
+			require.NoError(t, err, "expected to lookup existing account without eab")
+		})
+	}
+}
+
+// TestAcmeNonce is a basic test validating that we get back a nonce with the proper status codes
+// based on the HTTP method (GET vs HEAD) used to request it.
+func TestAcmeNonce(t *testing.T) {
+	t.Parallel()
+	cluster, client, pathConfig := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	cases := []struct {
+		name         string
+		prefixUrl    string
+		directoryUrl string
+	}{
+		{"root", "", "pki/acme/new-nonce"},
+		{"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"},
+		{"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"},
+		{"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"},
+	}
+
+	for _, tc := range cases {
+		for _, httpOp := range []string{"get", "header"} {
+			t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) {
+				var req *api.Request
+				switch httpOp {
+				case "get":
+					req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl)
+				case "header":
+					req = client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl)
+				}
+				res, err := client.RawRequestWithContext(ctx, req)
+				require.NoError(t, err, "failed sending raw request")
+				_ = res.Body.Close()
+
+				// Proper status code
+				switch httpOp {
+				case "get":
+					require.Equal(t, http.StatusNoContent, res.StatusCode)
+				case "header":
+					require.Equal(t, http.StatusOK, res.StatusCode)
+				}
+
+				// Make sure we don't have a Content-Type header.
+				require.Equal(t, "", res.Header.Get("Content-Type"))
+
+				// Make sure we return the Cache-Control header
+				require.Contains(t, res.Header.Get("Cache-Control"), "no-store",
+					"missing Cache-Control header with no-store header value")
+
+				// Test for our nonce header value
+				require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value")
+
+				// Test Link header value
+				expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory")
+				require.Contains(t, res.Header.Get("Link"), expectedLinkHeader,
+					"different value for link header than expected")
+			})
+		}
+	}
+}
+
+// TestAcmeClusterPathNotConfigured performs basic testing of the ACME error handler.
+func TestAcmeClusterPathNotConfigured(t *testing.T) { + t.Parallel() + cluster, client := setupTestPkiCluster(t) + defer cluster.Cleanup() + + // Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks + pkiMount := findStorageMountUuid(t, client, "pki") + rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig) + _, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{ + "value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}", + }) + require.NoError(t, err, "failed updating acme config through sys/raw") + + // Force reload the plugin so we read the new config we slipped in. + _, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}}) + require.NoError(t, err, "failed reloading plugin") + + // Do not fill in the path option within the local cluster configuration + cases := []struct { + name string + directoryUrl string + }{ + {"root", "pki/acme/directory"}, + {"role", "pki/roles/test-role/acme/directory"}, + {"issuer", "pki/issuer/default/acme/directory"}, + {"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"}, + } + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl) + require.Error(t, err, "expected failure reading ACME directory configuration, got none") + + require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type")) + require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode) + + rawBodyBytes, err := io.ReadAll(dirResp.Body) + require.NoError(t, err, "failed reading from directory response body") + _ = dirResp.Body.Close() + + respType := map[string]interface{}{} + err = json.Unmarshal(rawBodyBytes, &respType) + require.NoError(t, err, "failed unmarshalling ACME directory response body") + + require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"]) + require.NotEmpty(t, respType["detail"]) + }) + } +} + +// TestAcmeAccountsCrossingDirectoryPath makes sure that if an account attempts to use a different ACME +// directory path than it registered through, we get an error. 
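+// Accounts are bound to the directory they registered through, so a client that registered via +// /v1/pki/acme/ should not be able to operate on that account via /v1/pki/roles/test-role/acme/.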
+func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) { + t.Parallel() + cluster, _, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Try to update the account under another ACME directory + baseAcmeURL2 := "/v1/pki/roles/test-role/acme/" + acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey) + acct.Contact = []string{"mailto:test3@example.com"} + _, err = acmeClient2.UpdateReg(testCtx, acct) + require.Error(t, err, "successfully updated account when we should have failed due to different directory") + // We don't test for the specific error about using the wrong directory, as the golang library + // swallows the error we send back, surfacing it as a generic no-account error +} + +// TestAcmeEabCrossingDirectoryPath makes sure that if an account attempts to use an EAB under a different ACME +// directory path than the one the EAB was created within, we get an error. +func TestAcmeEabCrossingDirectoryPath(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Enable EAB + _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "always-required", + }) + require.NoError(t, err) + + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // fetch a new EAB + kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/") + acct := &acme.Account{ + ExternalAccountBinding: &acme.ExternalAccountBinding{ + KID: kid, + Key: eabKeyBytes, + }, + } + + // Create new account + _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) + require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory") +} + +// TestAcmeDisabledWithEnvVar verifies that if VAULT_DISABLE_PUBLIC_ACME is set, we completely +// disable the ACME service +func TestAcmeDisabledWithEnvVar(t *testing.T) { + // Set up a cluster with the EAB policy initially set to not-required, as the + // configuration write would fail validation if the environment variable were already set + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Seal the cluster, set the environment variable, and unseal; we now have a cluster + // whose stored ACME configuration says it is enabled with an EAB policy the env var forbids. + cluster.EnsureCoresSealed(t) + t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true") + cluster.UnsealCores(t) + + // Make sure that ACME is disabled now. 
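+ // Both HEAD and GET of the new-nonce endpoint should now report a 404, since the env var takes + // precedence over the stored configuration.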
+ for _, method := range []string{http.MethodHead, http.MethodGet} { + t.Run(method, func(t *testing.T) { + req := client.NewRequest(method, "/v1/pki/acme/new-nonce") + _, err := client.RawRequestWithContext(ctx, req) + require.Error(t, err, "should have received an error as ACME should have been disabled") + + if apiError, ok := err.(*api.ResponseError); ok { + require.Equal(t, 404, apiError.StatusCode) + } + }) + } +} + +// TestAcmeConfigChecksPublicAcmeEnv verifies certain EAB policy values cannot be set if the env var is set +func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) { + t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true") + cluster, client := setupTestPkiCluster(t) + defer cluster.Cleanup() + + _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{ + "path": "https://dadgarcorp.com/v1/pki", + }) + require.NoError(t, err) + + _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": string(eabPolicyAlwaysRequired), + }) + require.NoError(t, err) + + for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} { + _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": string(policyName), + }) + require.Error(t, err, "eab policy %s should not have been allowed to be set", policyName) + } + + // Make sure we can disable ACME and the eab policy is not checked + _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": false, + "eab_policy": string(eabPolicyNotRequired), + }) + require.NoError(t, err) +} + +// TestAcmeTruncatesToIssuerExpiry makes sure that if the selected issuer's expiry is shorter than the +// CSR's selected TTL value in ACME and the issuer's leaf_not_after_behavior setting is set to Err, +// we will override the configured behavior and truncate to the issuer's NotAfter +func TestAcmeTruncatesToIssuerExpiry(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + mount := "pki" + resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal", + map[string]interface{}{ + "key_name": "short-key", + "key_type": "ec", + "common_name": "test.com", + }) + require.NoError(t, err, "failed creating intermediate CSR") + intermediateCSR := resp.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "10m", + "max_ttl": "1h", + }) + require.NoError(t, err, "failed signing intermediate CSR") + intermediateCertPEM := resp.Data["certificate"].(string) + + shortCa := parseCert(t, intermediateCertPEM) + + // Import the signed intermediate cert back into the mount + resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{ + "pem_bundle": intermediateCertPEM, + }) + require.NoError(t, err, "failed importing intermediate cert") + importedIssuersRaw := resp.Data["imported_issuers"].([]interface{}) + require.Len(t, importedIssuersRaw, 1) + shortCaUuid := importedIssuersRaw[0].(string) + + _, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{ + 
"leaf_not_after_behavior": "err", + "issuer_name": "short-ca", + }) + require.NoError(t, err, "failed updating issuer name") + + baseAcmeURL := "/v1/pki/issuer/short-ca/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + // Build a proper CSR, with the correct name and signed with a different key works. + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "failed finalizing order") + require.Len(t, certs, 3, "expected full acme chain") + + testAcmeCertSignedByCa(t, client, certs, "short-ca") + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same") +} + +// TestAcmeRoleExtKeyUsage verify that ACME by default ignores the role's various ExtKeyUsage flags, +// but if the ACME configuration override of allow_role_ext_key_usage is set that we then honor +// the role's flag. 
+func TestAcmeRoleExtKeyUsage(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + roleName := "test-role" + + roleOpt := map[string]interface{}{ + "ttl": "365h", + "max_ttl": "720h", + "key_type": "any", + "allowed_domains": "localdomain", + "allow_subdomains": "true", + "allow_wildcard_certificates": "true", + "require_cn": "true", /* explicit default */ + "server_flag": "true", + "client_flag": "true", + "code_signing_flag": "true", + "email_protection_flag": "true", + } + + _, err := client.Logical().Write("pki/roles/"+roleName, roleOpt) + require.NoError(t, err, "failed creating role test-role") + + baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + // Build a proper CSR, with the correct name, signed with a different key than the account's. + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mismatch on expected ExtKeyUsages") + require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage, + "mismatch of ExtKeyUsage flags") + + // Now turn on the ACME configuration option allow_role_ext_key_usage and retest to make sure we get a certificate + // with all of the role's ExtKeyUsage flags + _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "not-required", + "allow_role_ext_key_usage": true, + }) + require.NoError(t, err, "failed updating ACME configuration") + + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + order, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. 
+ markAuthorizationSuccess(t, client, acmeClient, acct, order) + + certs, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + acmeCert, err = x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, 4, len(acmeCert.ExtKeyUsage), "mismatch on expected ExtKeyUsages") + require.ElementsMatch(t, []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageCodeSigning, x509.ExtKeyUsageEmailProtection, + }, + acmeCert.ExtKeyUsage, "mismatch of ExtKeyUsage flags") +} + +func TestIssuerRoleDirectoryAssociations(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Set up DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // 1. Using a forbidden role should fail. + resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "enabled": true, + "allowed_roles": []string{"acme"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under root-ca issuer") + + // 2. Using a forbidden issuer should fail. 
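+ // Restricting allowed_issuers to int-ca means directories qualified with the root-ca issuer + // should now be rejected even for the otherwise-allowed acme role.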
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"acme"}, + "allowed_issuers": []string{"int-ca"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.Error(t, err, "failed to forbid usage of acme under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + + // 3. Setting the default directory to be a sign-verbatim policy and + // using two different CAs should result in certs signed by each CA. + resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "sign-verbatim", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + // default == int-ca + acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil) + defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca") + + acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil) + intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca") + + acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil) + rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca") + + // 4. Using a role-based default directory should allow us to control leaf + // issuance on the base and issuer-specific directories. 
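+ // Here the default directory policy delegates to the acme role, so leaves minted via any of the + // directories below should pick up that role's OU/Organization overrides.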
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "role:acme", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{ + "ou": "IT Security", + "organization": []string{"Dadgar Corporation, Limited"}, + "allow_any_name": true, + }) + require.NoError(t, err, "failed to write role differentiator") + require.NotNil(t, resp) + + for _, issuer := range []string{"", "default", "int-ca", "root-ca"} { + // Path should override role. + directory := "/v1/pki/issuer/" + issuer + "/acme/" + issuerPath := "/pki/issuer/" + issuer + if issuer == "" { + directory = "/v1/pki/acme/" + issuerPath = "/pki/issuer/int-ca" + } else if issuer == "default" { + issuerPath = "/pki/issuer/int-ca" + } + + t.Logf("using directory: %v / issuer: %v", directory, issuerPath) + + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"}) + require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory) + require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory) + requireSignedByAtPath(t, client, leafCert, issuerPath) + } +} + +func TestACMESubjectFieldsAndExtensionsIgnored(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Setup DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // Use the default sign-verbatim policy and ensure OU does not get set. + directory := "/v1/pki/acme/" + domains := []string{"no-ou.dadgarcorp.com"} + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0], OrganizationalUnit: []string{"DadgarCorp IT"}}, + DNSNames: domains, + } + cert := doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) + t.Logf("Got certificate: %v", cert) + require.Empty(t, cert.Subject.OrganizationalUnit) + + // Use the default sign-verbatim policy and ensure extension does not get set. 
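+ // (The CSR below carries a delta CRL indicator extension via ExtraExtensions; sign-verbatim + // issuance is still expected to strip it from the issued certificate.)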
+ domains = []string{"no-ext.dadgarcorp.com"} + extension, err := certutil.CreateDeltaCRLIndicatorExt(12345) + require.NoError(t, err) + cr = &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0]}, + DNSNames: domains, + ExtraExtensions: []pkix.Extension{extension}, + } + cert = doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) + t.Logf("Got certificate: %v", cert) + for _, ext := range cert.Extensions { + require.False(t, ext.Id.Equal(certutil.DeltaCRLIndicatorOID)) + } + require.NotEmpty(t, cert.Extensions) +} + +// TestAcmeWithCsrIncludingBasicConstraintExtension verify that we error out for a CSR that is requesting a +// certificate with the IsCA set to true, false is okay, within the basic constraints extension and that no matter what +// the extension is not present on the returned certificate. +func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. 
+ markAuthorizationSuccess(t, client, acmeClient, acct, order) + + // Build a CSR with IsCA set to true, making sure we reject it + extension, err := certutil.CreateBasicConstraintExtension(true, -1) + require.NoError(t, err, "failed generating basic constraint extension") + + isCATrueCSR := &x509.CertificateRequest{ + DNSNames: []string{identifiers[0]}, + ExtraExtensions: []pkix.Extension{extension}, + } + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey) + require.NoError(t, err, "failed generating csr") + + _, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.Error(t, err, "order finalization should have failed with IsCA set to true") + + extension, err = certutil.CreateBasicConstraintExtension(false, -1) + require.NoError(t, err, "failed generating basic constraint extension") + isCAFalseCSR := &x509.CertificateRequest{ + DNSNames: []string{identifiers[0]}, + ExtraExtensions: []pkix.Extension{extension}, + } + + csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization should have succeeded with IsCA set to false") + + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + // Make sure we don't have any basic constraint extension within the returned cert + for _, ext := range acmeCert.Extensions { + if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { + // We shouldn't have this extension in our cert + t.Fatalf("acme cert contained a basic constraints extension") + } + } +} + +func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient *acme.Client, acct *acme.Account, order *acme.Order) { + testCtx := context.Background() + + pkiMount := findStorageMountUuid(t, client, "pki") + + // Delete any and all challenge validation entries to stop the engine from overwriting our hack here + i := 0 + for { + deleteCvEntries(t, client, pkiMount) + + accountId := acct.URI[strings.LastIndex(acct.URI, "/"):] + for _, authURI := range order.AuthzURLs { + authId := authURI[strings.LastIndex(authURI, "/"):] + + // sys/raw does not work with namespaces + baseClient := client.WithNamespace("") + + values, err := baseClient.Logical().ListWithContext(testCtx, "sys/raw/logical/") + require.NoError(t, err) + t.Logf("sys/raw values: %v", values) + + rawPath := path.Join("sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId)) + resp, err := baseClient.Logical().ReadWithContext(testCtx, rawPath) + require.NoError(t, err, "failed looking up authorization storage") + require.NotNil(t, resp, "sys raw response was nil") + require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response") + + var authz ACMEAuthorization + err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz) + require.NoError(t, err, "error decoding authorization: %v", err) + authz.Status = ACMEAuthorizationValid + for _, challenge := range authz.Challenges { + challenge.Status = ACMEChallengeValid + } + + encodeJSON, err := jsonutil.EncodeJSON(authz) + require.NoError(t, err, "failed encoding authz json") + _, err = baseClient.Logical().WriteWithContext(testCtx, rawPath, 
map[string]interface{}{ + "value": base64.StdEncoding.EncodeToString(encodeJSON), + "encoding": "base64", + }) + require.NoError(t, err, "failed writing authorization storage") + } + + // Give some time + time.Sleep(200 * time.Millisecond) + + // Check to see if we have fixed up the status and no new entries have appeared. + if !deleteCvEntries(t, client, pkiMount) { + // No entries found + // Look to see if we raced against the engine + orderLookup, err := acmeClient.GetOrder(testCtx, order.URI) + require.NoError(t, err, "failed loading order status after manually updating the authorization") + + if orderLookup.Status == string(ACMEOrderReady) { + // Our order seems to be in the proper status, should be safe-ish to go ahead now + break + } else { + t.Logf("order status was not ready, retrying") + } + } else { + t.Logf("new challenge entries appeared after deletion, retrying") + } + + if i > 5 { + t.Fatalf("We keep deleting cv entries or the order status is not changing; something is wrong") + } + + i++ + } +} + +func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool { + testCtx := context.Background() + + baseClient := client.WithNamespace("") + + cvPath := path.Join("sys/raw/logical/", pkiMount, acmeValidationPrefix) + resp, err := baseClient.Logical().ListWithContext(testCtx, cvPath) + require.NoError(t, err, "failed listing cv path items") + + deletedEntries := false + if resp != nil { + cvEntries := resp.Data["keys"].([]interface{}) + for _, cvEntry := range cvEntries { + cvEntryPath := path.Join(cvPath, cvEntry.(string)) + _, err = baseClient.Logical().DeleteWithContext(testCtx, cvEntryPath) + require.NoError(t, err, "failed to delete cv entry") + deletedEntries = true + } + } + + return deletedEntries +} + +func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) { + cluster, client := setupTestPkiCluster(t) + + return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki") +} + +func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) { + mount = strings.Trim(mount, "/") + + // Setting templated AIAs should succeed. 
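+ // pathConfig doubles as this mount's cluster path and feeds the directory/Link URLs asserted + // elsewhere in these tests.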
+ pathConfig := client.Address() + "/v1/" + mount + + namespace := "" + mountName := mount + if mount != "pki" { + if strings.Contains(mount, "/") && constants.IsEnterprise { + ns_pieces := strings.Split(mount, "/") + c := len(ns_pieces) + // the mount name is the final piece (index c-1); the namespace name is the piece before it + ns_name := ns_pieces[c-2] + if len(ns_pieces) > 2 { + // Create the parent namespaces first + parent := strings.Join(ns_pieces[0:c-2], "/") + _, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil) + require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name) + } else { + _, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil) + require.NoError(t, err, "failed to create nested namespace "+ns_name) + } + namespace = strings.Join(ns_pieces[0:c-1], "/") + mountName = ns_pieces[c-1] + } + + err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "3000h", + MaxLeaseTTL: "600000h", + }, + }) + require.NoError(t, err, "failed to mount new PKI instance at "+mount) + } + + err := client.Sys().TuneMountWithContext(ctx, mount, api.MountConfigInput{ + DefaultLeaseTTL: "3000h", + MaxLeaseTTL: "600000h", + }) + require.NoError(t, err, "failed updating mount lease times "+mount) + + _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{ + "path": pathConfig, + "aia_path": "http://localhost:8200/cdn/" + mount, + }) + require.NoError(t, err) + + _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "not-required", + }) + require.NoError(t, err) + + // Allow certain headers to pass through for ACME support + _, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{ + "allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", "Location"}, + "max_lease_ttl": "920000h", + }) + require.NoError(t, err, "failed tuning mount response headers") + + resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal", + map[string]interface{}{ + "issuer_name": "root-ca", + "key_name": "root-key", + "key_type": "ec", + "common_name": "Test Root R1 " + mount, + "ttl": "7200h", + "max_ttl": "920000h", + }) + require.NoError(t, err, "failed creating root CA") + + resp, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal", + map[string]interface{}{ + "key_name": "int-key", + "key_type": "ec", + "common_name": "Test Int X1 " + mount, + }) + require.NoError(t, err, "failed creating intermediate CSR") + intermediateCSR := resp.Data["csr"].(string) + + // Sign the intermediate CSR using the root-ca issuer + resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "7100h", + "max_ttl": "910000h", + }) + require.NoError(t, err, "failed signing intermediate CSR") + intermediateCertPEM := resp.Data["certificate"].(string) + + // Import the signed intermediate cert back into the mount + resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{ + "pem_bundle": intermediateCertPEM, + }) + require.NoError(t, err, "failed importing intermediate cert") + importedIssuersRaw := resp.Data["imported_issuers"].([]interface{}) + require.Len(t, importedIssuersRaw, 1) + intCaUuid := 
importedIssuersRaw[0].(string) + + _, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{ + "issuer_name": "int-ca", + }) + require.NoError(t, err, "failed updating issuer name") + + _, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{ + "default": "int-ca", + }) + require.NoError(t, err, "failed updating default issuer") + + _, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{ + "ttl": "168h", + "max_ttl": "168h", + "key_type": "any", + "allowed_domains": "localdomain", + "allow_subdomains": "true", + "allow_wildcard_certificates": "true", + }) + require.NoError(t, err, "failed creating role test-role") + + _, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{ + "ttl": "3650h", + "max_ttl": "7200h", + "key_type": "any", + }) + require.NoError(t, err, "failed creating role acme") + + return cluster, client, pathConfig +} + +func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) { + t.Helper() + require.NotEmpty(t, derCerts) + acmeCert, err := x509.ParseCertificate(derCerts[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef) + require.NoError(t, err, "failed reading issuer with name %s", issuerRef) + issuerCert := parseCert(t, resp.Data["certificate"].(string)) + issuerChainRaw := resp.Data["ca_chain"].([]interface{}) + + err = acmeCert.CheckSignatureFrom(issuerCert) + require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef) + + expectedCerts := [][]byte{derCerts[0]} + + for _, entry := range issuerChainRaw { + chainCert := parseCert(t, entry.(string)) + expectedCerts = append(expectedCerts, chainCert.Raw) + } + + if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil { + t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs) + } +} + +// TestAcmeValidationError makes sure that we properly return errors on validation failures. 
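+// The accepted-but-unverifiable challenge should surface urn:ietf:params:acme:error:incorrectResponse, +// and the challenge, authorization, and order should all eventually transition to invalid.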
+func TestAcmeValidationError(t *testing.T) { + t.Parallel() + cluster, _, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"www.dadgarcorp.com"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // Load authorizations + var authorizations []*acme.Authorization + for _, authUrl := range order.AuthzURLs { + auth, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed fetching authorization: %s", authUrl) + + authorizations = append(authorizations, auth) + } + require.Len(t, authorizations, 1, "expected exactly one authorization") + require.Len(t, authorizations[0].Challenges, 3, "expected three challenges associated with the authorization") + + acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0]) + require.NoError(t, err, "Should have been allowed to accept challenge 1") + require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status) + + _, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1]) + require.Error(t, err, "Should have been prevented from accepting challenge 2") + + // Make sure our challenge returns errors + testhelpers.RetryUntil(t, 30*time.Second, func() error { + challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI) + if err != nil { + return err + } + + if challenge.Error == nil { + return fmt.Errorf("no error set in challenge yet") + } + + acmeError, ok := challenge.Error.(*acme.Error) + if !ok { + return fmt.Errorf("unexpected error back: %v", err) + } + + if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" { + return fmt.Errorf("unexpected ACME error back: %v", acmeError) + } + + return nil + }) + + // Make sure our challenge, auth, and order statuses change. + // This takes a little too long to run properly in CI; we need the ability to influence + // how long the validations take before we can enable it there. 
+ if os.Getenv("CI") == "" { + testhelpers.RetryUntil(t, 10*time.Minute, func() error { + challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI) + if err != nil { + return fmt.Errorf("failed to load challenge: %w", err) + } + + if challenge.Status != string(ACMEChallengeInvalid) { + return fmt.Errorf("challenge state was not changed to invalid: %v", challenge) + } + + authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI) + if err != nil { + return fmt.Errorf("failed to load authorization: %w", err) + } + + if authz.Status != string(ACMEAuthorizationInvalid) { + return fmt.Errorf("authz state was not changed to invalid: %v", authz) + } + + myOrder, err := acmeClient.GetOrder(testCtx, order.URI) + if err != nil { + return fmt.Errorf("failed to load order: %w", err) + } + + if myOrder.Status != string(ACMEOrderInvalid) { + return fmt.Errorf("order state was not changed to invalid: %v", order) + } + + return nil + }) + } +} + +// TestAcmeRevocationAcrossAccounts makes sure that we can revoke certificates using different accounts if +// we have another ACME account or not but access to the certificate key. Also verifies we can't revoke +// certificates across account keys. +func TestAcmeRevocationAcrossAccounts(t *testing.T) { + t.Parallel() + + cluster, vaultClient, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + baseAcmeURL := "/v1/pki/acme/" + accountKey1, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient1 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey1) + + leafKey, certs := doACMEWorkflow(t, vaultClient, acmeClient1) + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + // Make sure our cert is not revoked + certResp, err := vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime := certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err := revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Equal(t, revocationTimeInt, int64(0), + "revocation time was not 0, cert was already revoked: %v", revocationTimeInt) + + // Test that we can't revoke the certificate with another account's key + accountKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err, "failed creating rsa key") + + acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2) + _, err = acmeClient2.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering second account") + + err = acmeClient2.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified) + require.Error(t, err, "should have failed revoking the certificate with a different account") + + // Make sure our cert is not revoked + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting 
revocation_time value: %v", revocationTime) + require.Equal(t, revocationTimeInt, int64(0), + "revocation time was not 0, cert was already revoked: %v", revocationTimeInt) + + // But we can revoke if we sign the request with the certificate's key and a different account + err = acmeClient2.RevokeCert(ctx, leafKey, certs[0], acme.CRLReasonUnspecified) + require.NoError(t, err, "should have been allowed to revoke certificate with csr key across accounts") + + // Make sure our cert is now revoked + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Greater(t, revocationTimeInt, int64(0), + "revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt) + + // Make sure we can revoke a certificate without a registered ACME account + leafKey2, certs2 := doACMEWorkflow(t, vaultClient, acmeClient1) + + acmeClient3 := getAcmeClientForCluster(t, cluster, baseAcmeURL, nil) + err = acmeClient3.RevokeCert(ctx, leafKey2, certs2[0], acme.CRLReasonUnspecified) + require.NoError(t, err, "should be allowed to revoke a cert with no ACME account but with cert key") + + // Make sure our cert is now revoked + acmeCert2, err := x509.ParseCertificate(certs2[0]) + require.NoError(t, err, "failed parsing acme cert 2 bytes") + + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert2)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Greater(t, revocationTimeInt, int64(0), + "revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt) +} + +func doACMEWorkflow(t *testing.T, vaultClient *api.Client, acmeClient *acme.Client) (*ecdsa.PrivateKey, [][]byte) { + testCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // Create new account + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + if err != nil { + if strings.Contains(err.Error(), "acme: account already exists") { + acct, err = acmeClient.GetReg(testCtx, "") + require.NoError(t, err, "failed looking up account after account exists error?") + } else { + require.NoError(t, err, "failed registering account") + } + } + + // Create an order + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, vaultClient, acmeClient, acct, order) + + // Build a proper CSR, with the correct name and signed with a different key works. 
+ goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "failed finalizing order") + require.Len(t, certs, 3, "expected full acme chain") + + return csrKey, certs +} + +func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + EnableRaw: true, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + client := cluster.Cores[0].Client + mountPKIEndpoint(t, client, "pki") + return cluster, client +} + +func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client { + coreAddr := cluster.Cores[0].Listeners[0].Address + tlsConfig := cluster.Cores[0].TLSConfig() + + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + httpClient := &http.Client{Transport: transport} + if baseUrl[0] == '/' { + baseUrl = baseUrl[1:] + } + if !strings.HasPrefix(baseUrl, "v1/") { + baseUrl = "v1/" + baseUrl + } + if !strings.HasSuffix(baseUrl, "/") { + baseUrl = baseUrl + "/" + } + baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl) + return &acme.Client{ + Key: key, + HTTPClient: httpClient, + DirectoryURL: baseAcmeURL + "directory", + } +} + +func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) { + t.Helper() + + resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{}) + require.NoError(t, err, "failed getting eab key") + require.NotNil(t, resp, "eab key returned nil response") + require.NotEmpty(t, resp.Data["id"], "eab key response missing id field") + kid := resp.Data["id"].(string) + + require.NotEmpty(t, resp.Data["key"], "eab key response missing private_key field") + base64Key := resp.Data["key"].(string) + require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key) + privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key) + require.NoError(t, err, "failed base 64 decoding eab key response") + + require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mis-match") + require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mis-match") + require.NotEmpty(t, resp.Data["created_on"], "empty created_on field") + _, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string)) + require.NoError(t, err, "failed parsing eab created_on field") + + return kid, privateKeyBytes +} + +func TestACMEClientRequestLimits(t *testing.T) { + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + cases := []struct { + name string + authorizations []acme.AuthzID + requestCSR x509.CertificateRequest + valid bool + }{ + { + "validate-only-cn", + []acme.AuthzID{ + {"dns", "localhost"}, + }, + x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "localhost"}, + }, + true, + }, + { + "validate-only-san", + []acme.AuthzID{ + {"dns", 
"localhost"}, + }, + x509.CertificateRequest{ + DNSNames: []string{"localhost"}, + }, + true, + }, + { + "validate-only-ip-address", + []acme.AuthzID{ + {"ip", "127.0.0.1"}, + }, + x509.CertificateRequest{ + IPAddresses: []net.IP{{127, 0, 0, 1}}, + }, + true, + }, + } + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + acmeConfig := map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "sign-verbatim", + "dns_resolver": "", + "eab_policy_name": "", + } + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig) + require.NoError(t, err, "error configuring acme") + + for _, tc := range cases { + + // First Create Our Client + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey) + + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + t.Logf("%v", discovery) + + acct, err := acmeClient.Register(testCtx, &acme.Account{ + Contact: []string{"mailto:test@example.com"}, + }, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + require.Equal(t, acme.StatusValid, acct.Status) + require.Contains(t, acct.Contact, "mailto:test@example.com") + require.Len(t, acct.Contact, 1) + + // Create an order + t.Logf("Testing Authorize Order on %s", "pki/acme") + identifiers := make([]string, len(tc.authorizations)) + for index, auth := range tc.authorizations { + identifiers[index] = auth.Value + } + + createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations) + require.NoError(t, err, "failed creating order") + require.Equal(t, acme.StatusPending, createOrder.Status) + require.Empty(t, createOrder.CertURL) + require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) + require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, createOrder) + + // Submit the CSR + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) + + if tc.valid { + require.NoError(t, err, "failed finalizing order") + + // Validate we get a signed cert back + testAcmeCertSignedByCa(t, client, certs, "int-ca") + } else { + require.Error(t, err, "Not a valid CSR, should err") + } + } +} diff --git a/builtin/logical/pki/path_config_acme.go b/builtin/logical/pki/path_config_acme.go new file mode 100644 index 000000000000..5125f5c0039b --- /dev/null +++ b/builtin/logical/pki/path_config_acme.go @@ -0,0 +1,405 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "fmt" + "net" + "os" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + storageAcmeConfig = "config/acme" + pathConfigAcmeHelpSyn = "Configuration of ACME Endpoints" + pathConfigAcmeHelpDesc = "Here we configure:\n\nenabled=false, whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support,\nallowed_issuers=\"default\", which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer,\nallowed_roles=\"*\", which roles are allowed for use with ACME; by default these will be all roles matching our selection criteria,\ndefault_directory_policy=\"\", either \"forbid\", preventing the default directory from being used at all, \"role:<role_name>\" which is the role to be used for non-role-qualified ACME requests; or \"sign-verbatim\", the default, meaning ACME issuance will be equivalent to sign-verbatim,\ndns_resolver=\"\", which specifies a custom DNS resolver to use for all ACME-related DNS lookups" + disableAcmeEnvVar = "VAULT_DISABLE_PUBLIC_ACME" +) + +type acmeConfigEntry struct { + Enabled bool `json:"enabled"` + AllowedIssuers []string `json:"allowed_issuers"` + AllowedRoles []string `json:"allowed_roles"` + AllowRoleExtKeyUsage bool `json:"allow_role_ext_key_usage"` + DefaultDirectoryPolicy string `json:"default_directory_policy"` + DNSResolver string `json:"dns_resolver"` + EabPolicyName EabPolicyName `json:"eab_policy_name"` +} + +var defaultAcmeConfig = acmeConfigEntry{ + Enabled: false, + AllowedIssuers: []string{"*"}, + AllowedRoles: []string{"*"}, + AllowRoleExtKeyUsage: false, + DefaultDirectoryPolicy: "sign-verbatim", + DNSResolver: "", + EabPolicyName: eabPolicyNotRequired, +} + +var ( + extPolicyPrefix = "external-policy" + extPolicyPrefixLength = len(extPolicyPrefix) + extPolicyRegex = regexp.MustCompile(framework.GenericNameRegex("policy")) + rolePrefix = "role:" + rolePrefixLength = len(rolePrefix) +) + +func (sc *storageContext) getAcmeConfig() (*acmeConfigEntry, error) { + entry, err := sc.Storage.Get(sc.Context, storageAcmeConfig) + if err != nil { + return nil, err + } + + var mapping acmeConfigEntry + if entry == nil { + mapping = defaultAcmeConfig + return &mapping, nil + } + + if err := entry.DecodeJSON(&mapping); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode ACME configuration: %v", err)} + } + + return &mapping, nil +} + +func (sc *storageContext) setAcmeConfig(entry *acmeConfigEntry) error { + json, err := logical.StorageEntryJSON(storageAcmeConfig, entry) + if err != nil { + return fmt.Errorf("failed creating storage entry: %w", err) + } + + if err := sc.Storage.Put(sc.Context, json); err != nil { + return fmt.Errorf("failed writing storage entry: %w", err) + } + + return nil +} + +func pathAcmeConfig(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/acme", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + + Fields: map[string]*framework.FieldSchema{ + "enabled": { + Type: framework.TypeBool, + Description: `whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support`, + Default: false, + }, + "allowed_issuers": { + Type: framework.TypeCommaStringSlice, + Description: `which issuers 
are allowed for use with ACME; by default, this will only be the primary (default) issuer`, + Default: []string{"*"}, + }, + "allowed_roles": { + Type: framework.TypeCommaStringSlice, + Description: `which roles are allowed for use with ACME; by default via '*', these will be all roles including sign-verbatim; when concrete role names are specified, any default_directory_policy role must be included to allow usage of the default acme directories under /pki/acme/directory and /pki/issuer/:issuer_id/acme/directory.`, + Default: []string{"*"}, + }, + "allow_role_ext_key_usage": { + Type: framework.TypeBool, + Description: `whether the ExtKeyUsage field from a role is used, defaults to false meaning that certificates will be signed with ServerAuth.`, + Default: false, + }, + "default_directory_policy": { + Type: framework.TypeString, + Description: `the policy to be used for non-role-qualified ACME requests; by default ACME issuance will be otherwise unrestricted, equivalent to the sign-verbatim endpoint; one may also specify a role to use as this policy, as "role:<role_name>"; the specified role must be allowed by allowed_roles`, + Default: "sign-verbatim", + }, + "dns_resolver": { + Type: framework.TypeString, + Description: `DNS resolver to use for domain resolution on this mount. Defaults to using the default system resolver. Must be in the format <ip>:<port>, with both parts mandatory.`, + Default: "", + }, + "eab_policy": { + Type: framework.TypeString, + Description: `Specify the policy to use for external account binding behaviour, 'not-required', 'new-account-required' or 'always-required'`, + Default: "always-required", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "acme-configuration", + }, + Callback: b.pathAcmeRead, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathAcmeWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "acme", + }, + // Read more about why these flags are set in backend.go. 
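+ // (Configuration writes must execute on the active node of the primary cluster, hence + // both forwarding flags below.)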
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigAcmeHelpSyn, + HelpDescription: pathConfigAcmeHelpDesc, + } +} + +func (b *backend) pathAcmeRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := b.GetAcmeState().getConfigWithForcedUpdate(sc) + if err != nil { + return nil, err + } + + var warnings []string + if config.Enabled { + _, err := getBasePathFromClusterConfig(sc) + if err != nil { + warnings = append(warnings, err.Error()) + } + } + + return genResponseFromAcmeConfig(config, warnings), nil +} + +func genResponseFromAcmeConfig(config *acmeConfigEntry, warnings []string) *logical.Response { + response := &logical.Response{ + Data: map[string]interface{}{ + "allowed_roles": config.AllowedRoles, + "allow_role_ext_key_usage": config.AllowRoleExtKeyUsage, + "allowed_issuers": config.AllowedIssuers, + "default_directory_policy": config.DefaultDirectoryPolicy, + "enabled": config.Enabled, + "dns_resolver": config.DNSResolver, + "eab_policy": config.EabPolicyName, + }, + Warnings: warnings, + } + + // TODO: Add some nice warning if we are on a replication cluster and path isn't set + + return response +} + +func (b *backend) pathAcmeWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + + config, err := b.GetAcmeState().getConfigWithForcedUpdate(sc) + if err != nil { + return nil, err + } + + if enabledRaw, ok := d.GetOk("enabled"); ok { + config.Enabled = enabledRaw.(bool) + } + + if allowedRolesRaw, ok := d.GetOk("allowed_roles"); ok { + config.AllowedRoles = allowedRolesRaw.([]string) + if len(config.AllowedRoles) == 0 { + return nil, fmt.Errorf("allowed_roles must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if allowRoleExtKeyUsageRaw, ok := d.GetOk("allow_role_ext_key_usage"); ok { + config.AllowRoleExtKeyUsage = allowRoleExtKeyUsageRaw.(bool) + } + + if defaultDirectoryPolicyRaw, ok := d.GetOk("default_directory_policy"); ok { + config.DefaultDirectoryPolicy = defaultDirectoryPolicyRaw.(string) + } + + if allowedIssuersRaw, ok := d.GetOk("allowed_issuers"); ok { + config.AllowedIssuers = allowedIssuersRaw.([]string) + if len(config.AllowedIssuers) == 0 { + return nil, fmt.Errorf("allowed_issuers must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if dnsResolverRaw, ok := d.GetOk("dns_resolver"); ok { + config.DNSResolver = dnsResolverRaw.(string) + if config.DNSResolver != "" { + addr, _, err := net.SplitHostPort(config.DNSResolver) + if err != nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: %w", err) + } + if addr == "" { + return nil, fmt.Errorf("failed to parse DNS resolver address: got empty address") + } + if net.ParseIP(addr) == nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname") + } + } + } + + if eabPolicyRaw, ok := d.GetOk("eab_policy"); ok { + eabPolicy, err := getEabPolicyByString(eabPolicyRaw.(string)) + if err != nil { + return nil, fmt.Errorf("invalid eab policy name provided, valid values are '%s', '%s', '%s'", + eabPolicyNotRequired, eabPolicyNewAccountRequired, eabPolicyAlwaysRequired) + } + config.EabPolicyName = eabPolicy.Name + } + + 
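+ // Before persisting, the validation below proceeds in order: (1) parse default_directory_policy and, for a role-based policy, confirm the referenced role is usable with ACME; (2) check that every concrete allowed_roles entry exists and that a role-based default policy is among them; (3) resolve every concrete allowed_issuers entry; (4) require the cluster path to be configured whenever ACME is enabled; and (5) consult the VAULT_DISABLE_PUBLIC_ACME environment variable, which vetoes the configuration unless the EAB policy is strict enough to override it.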
// Validate Default Directory Behavior: + defaultDirectoryPolicyType, extraInfo, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy) + if err != nil { + return nil, fmt.Errorf("invalid default_directory_policy: %w", err) + } + defaultDirectoryRoleName := "" + switch defaultDirectoryPolicyType { + case Forbid: + case SignVerbatim: + case ExternalPolicy: + if !constants.IsEnterprise { + return nil, fmt.Errorf("external-policy is only available in enterprise versions of Vault") + } + case Role: + defaultDirectoryRoleName = extraInfo + + _, err := getAndValidateAcmeRole(sc, defaultDirectoryRoleName) + if err != nil { + return nil, fmt.Errorf("default directory policy role %v is not a valid ACME role: %w", defaultDirectoryRoleName, err) + } + default: + return nil, fmt.Errorf("validation for the type of policy defined by %v is undefined", config.DefaultDirectoryPolicy) + } + + // Validate Allowed Roles + allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*" + foundDefault := false + if !allowAnyRole { + for index, name := range config.AllowedRoles { + if name == "*" { + return nil, fmt.Errorf("cannot use '*' as role name at index %d", index) + } + + _, err := getAndValidateAcmeRole(sc, name) + if err != nil { + return nil, fmt.Errorf("allowed_role %v is not a valid ACME role: %w", name, err) + } + + if defaultDirectoryPolicyType == Role && name == defaultDirectoryRoleName { + foundDefault = true + } + } + + if !foundDefault && defaultDirectoryPolicyType == Role { + return nil, fmt.Errorf("default directory policy %v was not specified in allowed_roles: %v", config.DefaultDirectoryPolicy, config.AllowedRoles) + } + } + + allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*" + if !allowAnyIssuer { + for index, name := range config.AllowedIssuers { + if name == "*" { + return nil, fmt.Errorf("cannot use '*' as issuer name at index %d", index) + } + + _, err := sc.resolveIssuerReference(name) + if err != nil { + return nil, fmt.Errorf("failed validating allowed_issuers: unable to fetch issuer: %v: %w", name, err) + } + } + } + + // Check to make sure that we have a proper value for the cluster path which ACME requires + if config.Enabled { + _, err = getBasePathFromClusterConfig(sc) + if err != nil { + return nil, err + } + } + + var warnings []string + // Lastly, let's verify that the configuration is honored/invalidated by the public ACME env var. + isPublicAcmeDisabledByEnv, err := isPublicACMEDisabledByEnv() + if err != nil { + warnings = append(warnings, err.Error()) + } + if isPublicAcmeDisabledByEnv && config.Enabled { + eabPolicy := getEabPolicyByName(config.EabPolicyName) + if !eabPolicy.OverrideEnvDisablingPublicAcme() { + resp := logical.ErrorResponse("%s env var is enabled, ACME EAB policy needs to be '%s' with ACME enabled", + disableAcmeEnvVar, eabPolicyAlwaysRequired) + resp.Warnings = warnings + return resp, nil + } + } + + if _, err := b.GetAcmeState().writeConfig(sc, config); err != nil { + return nil, fmt.Errorf("failed persisting: %w", err) + } + + return genResponseFromAcmeConfig(config, warnings), nil +} + +func isPublicACMEDisabledByEnv() (bool, error) { + disableAcmeRaw, ok := os.LookupEnv(disableAcmeEnvVar) + if !ok { + return false, nil + } + + disableAcme, err := strconv.ParseBool(disableAcmeRaw) + if err != nil { + // So the environment variable was set but we couldn't parse the value as a boolean, assume + // the operator wanted public ACME disabled.
+ return true, fmt.Errorf("failed parsing environment variable %s: %w", disableAcmeEnvVar, err) + } + + return disableAcme, nil +} + +func getDefaultDirectoryPolicyType(defaultDirectoryPolicy string) (DefaultDirectoryPolicyType, string, error) { + switch { + case defaultDirectoryPolicy == "forbid": + return Forbid, "", nil + case defaultDirectoryPolicy == "sign-verbatim": + return SignVerbatim, "", nil + case strings.HasPrefix(defaultDirectoryPolicy, rolePrefix): + if len(defaultDirectoryPolicy) == rolePrefixLength { + return Forbid, "", fmt.Errorf("no role specified by policy %v", defaultDirectoryPolicy) + } + roleName := defaultDirectoryPolicy[rolePrefixLength:] + return Role, roleName, nil + case strings.HasPrefix(defaultDirectoryPolicy, extPolicyPrefix): + if len(defaultDirectoryPolicy) == extPolicyPrefixLength { + // default external-policy case without a specified policy + return ExternalPolicy, "", nil + } + + if strings.HasPrefix(defaultDirectoryPolicy, extPolicyPrefix+":") && + len(defaultDirectoryPolicy) == extPolicyPrefixLength+1 { + // end user set 'external-policy:', so no policy, which is acceptable + return ExternalPolicy, "", nil + } + + policyName := defaultDirectoryPolicy[extPolicyPrefixLength+1:] + if ok := extPolicyRegex.MatchString(policyName); !ok { + return Forbid, "", fmt.Errorf("invalid characters within external-policy name: %s", defaultDirectoryPolicy) + } + return ExternalPolicy, policyName, nil + default: + return Forbid, "", fmt.Errorf("string %v is not a valid default directory policy", defaultDirectoryPolicy) + } +} + +//go:generate enumer -type=DefaultDirectoryPolicyType +type DefaultDirectoryPolicyType int + +const ( + Forbid DefaultDirectoryPolicyType = iota + SignVerbatim + Role + ExternalPolicy +) diff --git a/builtin/logical/pki/path_config_acme_test.go b/builtin/logical/pki/path_config_acme_test.go new file mode 100644 index 000000000000..47ba1f817dec --- /dev/null +++ b/builtin/logical/pki/path_config_acme_test.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "testing" + "time" + + "github.com/hashicorp/vault/helper/constants" + "github.com/stretchr/testify/require" +) + +func TestAcmeConfig(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + cases := []struct { + name string + AcmeConfig map[string]interface{} + prefixUrl string + validConfig bool + works bool + }{ + {"unspecified-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"bad-policy-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "bad", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, false}, + {"forbid-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "forbid", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, false}, + {"sign-verbatim-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "sign-verbatim", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "role:exists", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"bad-role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "role:notgood", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, true}, + {"disallowed-role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "good", + "default_directory_policy": "role:exists", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, false}, + } + + roleConfig := map[string]interface{}{ + "issuer_ref": "default", + "allowed_domains": "example.com", + "allow_subdomains": true, + "max_ttl": "720h", + } + + testCtx := context.Background() + + for _, tc := range cases { + deadline := time.Now().Add(1 * time.Minute) + subTestCtx, cancel := context.WithDeadline(testCtx, deadline) + // Release the deadline's timer when the test function exits; go vet's + // lostcancel check flags a discarded CancelFunc here. + defer cancel() + + _, err := client.Logical().WriteWithContext(subTestCtx, "pki/roles/exists", roleConfig) + require.NoError(t, err) + _, err = client.Logical().WriteWithContext(subTestCtx, "pki/roles/good", roleConfig) + require.NoError(t, err) + + t.Run(tc.name, func(t *testing.T) { + _, err := client.Logical().WriteWithContext(subTestCtx, "pki/config/acme", tc.AcmeConfig) + + if tc.validConfig { + require.NoError(t, err) + } else { + require.Error(t, err) + return + } + + _, err = client.Logical().ReadWithContext(subTestCtx, "pki/acme/directory") + if tc.works { + require.NoError(t, err) + + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + _, err = acmeClient.Discover(subTestCtx) + require.NoError(t, err, "failed acme discovery call") + } else { + require.Error(t, err, "Acme Configuration should prevent usage") + } + }) + } +} + +// TestAcmeExternalPolicyOss makes sure setting external-policy on OSS within the ACME configuration fails +func
TestAcmeExternalPolicyOss(t *testing.T) { + if constants.IsEnterprise { + t.Skip("this test is only valid on OSS") + } + + t.Parallel() + b, s := CreateBackendWithStorage(t) + + values := []string{"external-policy", "external-policy:", "external-policy:test"} + for _, value := range values { + t.Run(value, func(st *testing.T) { + _, err := CBWrite(b, s, "config/acme", map[string]interface{}{ + "enabled": true, + "default_directory_policy": value, + }) + + require.Error(st, err, "should have failed setting acme config") + }) + } +} diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go index 2399db4e5ee0..a898b811befb 100644 --- a/builtin/logical/pki/path_config_ca.go +++ b/builtin/logical/pki/path_config_ca.go @@ -1,8 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" + "net/http" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -10,6 +15,13 @@ import ( func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "configure", + OperationSuffix: "ca", + }, + Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -21,6 +33,38 @@ secret key and certificate.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -47,6 +91,11 @@ For security reasons, the secret key cannot be retrieved later. 
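+// pathConfigIssuers builds the config/issuers endpoint, which reads and updates the mount's default issuer reference. A hedged usage sketch, assuming a mount named "pki" and an issuer named "root-2023" (both names are hypothetical): + // + // vault write pki/config/issuers default=root-2023 + // vault read pki/config/issuers + //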
func pathConfigIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/issuers", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -58,13 +107,51 @@ func pathConfigIssuers(b *backend) *framework.Path { Default: false, }, }, - Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCAIssuersRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "issuers-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Required: true, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + Required: true, + }, + }, + }}, + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAIssuersWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "issuers", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -79,6 +166,13 @@ func pathConfigIssuers(b *backend) *framework.Path { func pathReplaceRoot(b *backend) *framework.Path { return &framework.Path{ Pattern: "root/replace", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "replace", + OperationSuffix: "root", + }, + Fields: map[string]*framework.FieldSchema{ "default": { Type: framework.TypeString, @@ -90,6 +184,23 @@ func pathReplaceRoot(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAIssuersWrite, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Required: true, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + Required: true, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go. 
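+ // root/replace shares its handler, pathCAIssuersWrite, with config/issuers; per the help text, the "default" field itself defaults to promoting an issuer literally named "next" when one exists. An illustrative call (hypothetical mount name): + // + // vault write pki/root/replace default=next + //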
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -102,7 +213,7 @@ func pathReplaceRoot(b *backend) *framework.Path { } func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot read defaults until migration has completed"), nil } @@ -115,7 +226,7 @@ func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ return b.formatCAIssuerConfigRead(config), nil } -func (b *backend) formatCAIssuerConfigRead(config *issuerConfigEntry) *logical.Response { +func (b *backend) formatCAIssuerConfigRead(config *issuing.IssuerConfigEntry) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ defaultRef: config.DefaultIssuerId, @@ -130,7 +241,7 @@ func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot update defaults until migration has completed"), nil } @@ -199,6 +310,11 @@ value of the issuer with the name "next", if it exists. func pathConfigKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ defaultRef: { Type: framework.TypeString, @@ -208,12 +324,42 @@ func pathConfigKeys(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathKeyDefaultWrite, + Callback: b.pathKeyDefaultWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default key.`, + Required: true, + }, + }, + }}, + }, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathKeyDefaultRead, + Callback: b.pathKeyDefaultRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default key.`, + }, + }, + }}, + }, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, @@ -225,7 +371,7 @@ } func (b *backend) pathKeyDefaultRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot read key defaults until migration has completed"), nil } @@ -248,7 +394,7 @@ func (b *backend) pathKeyDefaultWrite(ctx context.Context, req *logical.Request, b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot update key defaults until migration has completed"), nil } diff --git a/builtin/logical/pki/path_config_cluster.go
b/builtin/logical/pki/path_config_cluster.go index 440dcc874f47..a97769831b1f 100644 --- a/builtin/logical/pki/path_config_cluster.go +++ b/builtin/logical/pki/path_config_cluster.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" "fmt" + "net/http" "github.com/asaskevich/govalidator" "github.com/hashicorp/vault/sdk/framework" @@ -12,6 +16,11 @@ import ( func pathConfigCluster(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/cluster", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "path": { Type: framework.TypeString, @@ -26,14 +35,92 @@ including standby nodes, and need not always point to the active node. For example: https://pr1.vault.example.com:8200/v1/pki`, }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). + +For example: http://cdn.example.com/pr1/pki`, + }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "cluster", + }, Callback: b.pathWriteCluster, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: `Canonical URI to this mount on this performance +replication cluster's external address. This is for resolving AIA URLs and +providing the {{cluster_path}} template parameter but might be used for other +purposes in the future. + +This should only point back to this particular PR replica and should not ever +point to another PR cluster. It may point to any node in the PR replica, +including standby nodes, and need not always point to the active node. + +For example: https://pr1.vault.example.com:8200/v1/pki`, + }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). + +For example: http://cdn.example.com/pr1/pki`, + }, + }, + }}, + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathReadCluster, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "cluster-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: `Canonical URI to this mount on this performance +replication cluster's external address. This is for resolving AIA URLs and +providing the {{cluster_path}} template parameter but might be used for other +purposes in the future. + +This should only point back to this particular PR replica and should not ever +point to another PR cluster. 
It may point to any node in the PR replica, +including standby nodes, and need not always point to the active node. + +For example: https://pr1.vault.example.com:8200/v1/pki`, + Required: true, + }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). + +For example: http://cdn.example.com/pr1/pki`, + }, + }, + }}, + }, }, }, @@ -51,7 +138,8 @@ func (b *backend) pathReadCluster(ctx context.Context, req *logical.Request, _ * resp := &logical.Response{ Data: map[string]interface{}{ - "path": cfg.Path, + "path": cfg.Path, + "aia_path": cfg.AIAPath, }, } @@ -65,9 +153,21 @@ func (b *backend) pathWriteCluster(ctx context.Context, req *logical.Request, da return nil, err } - cfg.Path = data.Get("path").(string) - if !govalidator.IsURL(cfg.Path) { - return nil, fmt.Errorf("invalid, non-URL path given to cluster: %v", cfg.Path) + if value, ok := data.GetOk("path"); ok { + cfg.Path = value.(string) + + // This field is required by ACME, if ever we allow un-setting in the + // future, this code will need to verify that ACME is not enabled. + if !govalidator.IsURL(cfg.Path) { + return nil, fmt.Errorf("invalid, non-URL path given to cluster: %v", cfg.Path) + } + } + + if value, ok := data.GetOk("aia_path"); ok { + cfg.AIAPath = value.(string) + if !govalidator.IsURL(cfg.AIAPath) { + return nil, fmt.Errorf("invalid, non-URL aia_path given to cluster: %v", cfg.AIAPath) + } } if err := sc.writeClusterConfig(cfg); err != nil { @@ -76,7 +176,8 @@ func (b *backend) pathWriteCluster(ctx context.Context, req *logical.Request, da resp := &logical.Response{ Data: map[string]interface{}{ - "path": cfg.Path, + "path": cfg.Path, + "aia_path": cfg.AIAPath, }, } diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go index ed73ce5c4925..cd28643f4a7e 100644 --- a/builtin/logical/pki/path_config_crl.go +++ b/builtin/logical/pki/path_config_crl.go @@ -1,10 +1,15 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki import ( "context" "fmt" - "time" + "net/http" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" @@ -14,33 +19,44 @@ const latestCrlConfigVersion = 1 // CRLConfig holds basic CRL configuration information type crlConfig struct { - Version int `json:"version"` - Expiry string `json:"expiry"` - Disable bool `json:"disable"` - OcspDisable bool `json:"ocsp_disable"` - AutoRebuild bool `json:"auto_rebuild"` - AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` - OcspExpiry string `json:"ocsp_expiry"` - EnableDelta bool `json:"enable_delta"` - DeltaRebuildInterval string `json:"delta_rebuild_interval"` + Version int `json:"version"` + Expiry string `json:"expiry"` + Disable bool `json:"disable"` + OcspDisable bool `json:"ocsp_disable"` + AutoRebuild bool `json:"auto_rebuild"` + AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` + OcspExpiry string `json:"ocsp_expiry"` + EnableDelta bool `json:"enable_delta"` + DeltaRebuildInterval string `json:"delta_rebuild_interval"` + UseGlobalQueue bool `json:"cross_cluster_revocation"` + UnifiedCRL bool `json:"unified_crl"` + UnifiedCRLOnExistingPaths bool `json:"unified_crl_on_existing_paths"` } // Implicit default values for the config if it does not exist. var defaultCrlConfig = crlConfig{ - Version: latestCrlConfigVersion, - Expiry: "72h", - Disable: false, - OcspDisable: false, - OcspExpiry: "12h", - AutoRebuild: false, - AutoRebuildGracePeriod: "12h", - EnableDelta: false, - DeltaRebuildInterval: "15m", + Version: latestCrlConfigVersion, + Expiry: "72h", + Disable: false, + OcspDisable: false, + OcspExpiry: "12h", + AutoRebuild: false, + AutoRebuildGracePeriod: "12h", + EnableDelta: false, + DeltaRebuildInterval: "15m", + UseGlobalQueue: false, + UnifiedCRL: false, + UnifiedCRLOnExistingPaths: false, } func pathConfigCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/crl", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "expiry": { Type: framework.TypeString, @@ -80,14 +96,171 @@ the NextUpdate field); defaults to 12 hours`, Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`, Default: "15m", }, + "cross_cluster_revocation": { + Type: framework.TypeBool, + Description: `Whether to enable a global, cross-cluster revocation queue. +Must be used with auto_rebuild=true.`, + }, + "unified_crl": { + Type: framework.TypeBool, + Description: `If set to true enables global replication of revocation entries, +also enabling unified versions of OCSP and CRLs if their respective features are enabled
+(see disable for CRLs and ocsp_disable for OCSP).`, + Default: false, + }, + "unified_crl_on_existing_paths": { + Type: framework.TypeBool, + Description: `If set to true, +existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, + Default: false, + }, + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "crl-configuration", + }, Callback: b.pathCRLRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiry": { + Type: framework.TypeString, + Description: `The amount of time the generated CRL should be +valid; defaults to 72 hours`, + Required: true, + }, + "disable": { + Type: framework.TypeBool, + Description: `If set to true, disables generating the CRL entirely.`, + Required: true, + }, + "ocsp_disable": { + Type: framework.TypeBool, + Description: `If set to true, ocsp unauthorized responses will be returned.`, + Required: true, + }, + "ocsp_expiry": { + Type: framework.TypeString, + Description: `The amount of time an OCSP response will be valid (controls +the NextUpdate field); defaults to 12 hours`, + Required: true, + }, + "auto_rebuild": { + Type: framework.TypeBool, + Description: `If set to true, enables automatic rebuilding of the CRL`, + Required: true, + }, + "auto_rebuild_grace_period": { + Type: framework.TypeString, + Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`, + Required: true, + }, + "enable_delta": { + Type: framework.TypeBool, + Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`, + Required: true, + }, + "delta_rebuild_interval": { + Type: framework.TypeString, + Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`, + Required: true, + }, + "cross_cluster_revocation": { + Type: framework.TypeBool, + Description: `Whether to enable a global, cross-cluster revocation queue. +Must be used with auto_rebuild=true.`, + Required: true, + }, + "unified_crl": { + Type: framework.TypeBool, + Description: `If set to true enables global replication of revocation entries, +also enabling unified versions of OCSP and CRLs if their respective features are enabled
+(see disable for CRLs and ocsp_disable for OCSP).`, + Required: true, + }, + "unified_crl_on_existing_paths": { + Type: framework.TypeBool, + Description: `If set to true, +existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, + Required: true, + }, + }, + }}, + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCRLWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "crl", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiry": { + Type: framework.TypeString, + Description: `The amount of time the generated CRL should be +valid; defaults to 72 hours`, + Default: "72h", + }, + "disable": { + Type: framework.TypeBool, + Description: `If set to true, disables generating the CRL entirely.`, + }, + "ocsp_disable": { + Type: framework.TypeBool, + Description: `If set to true, ocsp unauthorized responses will be returned.`, + }, + "ocsp_expiry": { + Type: framework.TypeString, + Description: `The amount of time an OCSP response will be valid (controls +the NextUpdate field); defaults to 12 hours`, + Default: "12h", + }, + "auto_rebuild": { + Type: framework.TypeBool, + Description: `If set to true, enables automatic rebuilding of the CRL`, + }, + "auto_rebuild_grace_period": { + Type: framework.TypeString, + Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`, + Default: "12h", + }, + "enable_delta": { + Type: framework.TypeBool, + Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`, + }, + "delta_rebuild_interval": { + Type: framework.TypeString, + Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`, + Default: "15m", + }, + "cross_cluster_revocation": { + Type: framework.TypeBool, + Description: `Whether to enable a global, cross-cluster revocation queue. +Must be used with auto_rebuild=true.`, + Required: false, + }, + "unified_crl": { + Type: framework.TypeBool, + Description: `If set to true enables global replication of revocation entries, +also enabling unified versions of OCSP and CRLs if their respective features are enabled
+(see disable for CRLs and ocsp_disable for OCSP).`, + Required: false, + }, + "unified_crl_on_existing_paths": { + Type: framework.TypeBool, + Description: `If set to true, +existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`, + Required: false, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go.
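+ // As enforced in pathCRLWrite below: cross_cluster_revocation and unified_crl are Vault Enterprise-only, are rejected on local mounts, and both require auto_rebuild=true. A hedged example enabling them together (the mount name is hypothetical): + // + // vault write pki/config/crl auto_rebuild=true cross_cluster_revocation=true unified_crl=true + //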
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -101,35 +274,25 @@ the NextUpdate field); defaults to 12 hours`, func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + + config, err := b.CrlBuilder().getConfigWithForcedUpdate(sc) if err != nil { - return nil, err + return nil, fmt.Errorf("failed fetching CRL config: %w", err) } - return &logical.Response{ - Data: map[string]interface{}{ - "expiry": config.Expiry, - "disable": config.Disable, - "ocsp_disable": config.OcspDisable, - "ocsp_expiry": config.OcspExpiry, - "auto_rebuild": config.AutoRebuild, - "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, - "enable_delta": config.EnableDelta, - "delta_rebuild_interval": config.DeltaRebuildInterval, - }, - }, nil + return genResponseFromCrlConfig(config), nil } func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - config, err := sc.getRevocationConfig() + config, err := b.CrlBuilder().getConfigWithForcedUpdate(sc) if err != nil { return nil, err } if expiryRaw, ok := d.GetOk("expiry"); ok { expiry := expiryRaw.(string) - _, err := time.ParseDuration(expiry) + _, err := parseutil.ParseDurationSecond(expiry) if err != nil { return logical.ErrorResponse(fmt.Sprintf("given expiry could not be decoded: %s", err)), nil } @@ -147,7 +310,7 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if expiryRaw, ok := d.GetOk("ocsp_expiry"); ok { expiry := expiryRaw.(string) - duration, err := time.ParseDuration(expiry) + duration, err := parseutil.ParseDurationSecond(expiry) if err != nil { return logical.ErrorResponse(fmt.Sprintf("given ocsp_expiry could not be decoded: %s", err)), nil } @@ -164,59 +327,104 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra if autoRebuildGracePeriodRaw, ok := d.GetOk("auto_rebuild_grace_period"); ok { autoRebuildGracePeriod := autoRebuildGracePeriodRaw.(string) - if _, err := time.ParseDuration(autoRebuildGracePeriod); err != nil { + if _, err := parseutil.ParseDurationSecond(autoRebuildGracePeriod); err != nil { return logical.ErrorResponse(fmt.Sprintf("given auto_rebuild_grace_period could not be decoded: %s", err)), nil } config.AutoRebuildGracePeriod = autoRebuildGracePeriod } + oldEnableDelta := config.EnableDelta if enableDeltaRaw, ok := d.GetOk("enable_delta"); ok { config.EnableDelta = enableDeltaRaw.(bool) } if deltaRebuildIntervalRaw, ok := d.GetOk("delta_rebuild_interval"); ok { deltaRebuildInterval := deltaRebuildIntervalRaw.(string) - if _, err := time.ParseDuration(deltaRebuildInterval); err != nil { + if _, err := parseutil.ParseDurationSecond(deltaRebuildInterval); err != nil { return logical.ErrorResponse(fmt.Sprintf("given delta_rebuild_interval could not be decoded: %s", err)), nil } config.DeltaRebuildInterval = deltaRebuildInterval } - expiry, _ := time.ParseDuration(config.Expiry) + if useGlobalQueue, ok := d.GetOk("cross_cluster_revocation"); ok { + config.UseGlobalQueue = useGlobalQueue.(bool) + } + + oldUnifiedCRL := config.UnifiedCRL + if unifiedCrlRaw, ok := d.GetOk("unified_crl"); ok { + config.UnifiedCRL = unifiedCrlRaw.(bool) + } + + if unifiedCrlOnExistingPathsRaw, ok := d.GetOk("unified_crl_on_existing_paths"); ok { + config.UnifiedCRLOnExistingPaths = 
unifiedCrlOnExistingPathsRaw.(bool) + } + + if config.UnifiedCRLOnExistingPaths && !config.UnifiedCRL { + return logical.ErrorResponse("unified_crl_on_existing_paths cannot be enabled if unified_crl is disabled"), nil + } + + expiry, _ := parseutil.ParseDurationSecond(config.Expiry) if config.AutoRebuild { - gracePeriod, _ := time.ParseDuration(config.AutoRebuildGracePeriod) + gracePeriod, _ := parseutil.ParseDurationSecond(config.AutoRebuildGracePeriod) if gracePeriod >= expiry { return logical.ErrorResponse(fmt.Sprintf("CRL auto-rebuilding grace period (%v) must be strictly shorter than CRL expiry (%v) value when auto-rebuilding of CRLs is enabled", config.AutoRebuildGracePeriod, config.Expiry)), nil } } if config.EnableDelta { - deltaRebuildInterval, _ := time.ParseDuration(config.DeltaRebuildInterval) + deltaRebuildInterval, _ := parseutil.ParseDurationSecond(config.DeltaRebuildInterval) if deltaRebuildInterval >= expiry { return logical.ErrorResponse(fmt.Sprintf("CRL delta rebuild window (%v) must be strictly shorter than CRL expiry (%v) value when delta CRLs are enabled", config.DeltaRebuildInterval, config.Expiry)), nil } } - if config.EnableDelta && !config.AutoRebuild { - return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil + if !config.AutoRebuild { + if config.EnableDelta { + return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil + } + + if config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue cannot be enabled when auto rebuilding is disabled as the local cluster may not have the certificate entry!"), nil + } } - entry, err := logical.StorageEntryJSON("config/crl", config) - if err != nil { - return nil, err + if !constants.IsEnterprise && config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) can only be enabled on Vault Enterprise."), nil } - err = req.Storage.Put(ctx, entry) - if err != nil { - return nil, err + + if !constants.IsEnterprise && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl can only be enabled on Vault Enterprise"), nil + } + + isLocalMount := b.System().LocalMount() + if isLocalMount && config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) cannot be enabled on local mounts."), + nil + } + + if isLocalMount && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl cannot be enabled on local mounts."), nil } - b.crlBuilder.markConfigDirty() - b.crlBuilder.reloadConfigIfRequired(sc) + if !config.AutoRebuild && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl=true requires auto_rebuild=true, as unified CRLs cannot be rebuilt on every revocation."), nil + } + + if _, err := b.CrlBuilder().writeConfig(sc, config); err != nil { + return nil, fmt.Errorf("failed persisting CRL config: %w", err) + } - if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) { + resp := genResponseFromCrlConfig(config) + + // Note this only affects/happens on the main cluster node, if you need to + // notify something based on a configuration change on all server types + // have a look at CrlBuilder::reloadConfigIfRequired + if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) || (oldEnableDelta != config.EnableDelta) || (oldUnifiedCRL != config.UnifiedCRL) { 
// It wasn't disabled but now it is (or equivalently, we were set to - // auto-rebuild and we aren't now), so rotate the CRL. - crlErr := b.crlBuilder.rebuild(sc, true) + // auto-rebuild and we aren't now or equivalently, we changed our + // mind about delta CRLs and need a new complete one or equivalently, + // we changed our mind about unified CRLs), rotate the CRLs. + warnings, crlErr := b.CrlBuilder().rebuild(sc, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -225,20 +433,30 @@ func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *fra return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) } } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } } + return resp, nil +} + +func genResponseFromCrlConfig(config *crlConfig) *logical.Response { return &logical.Response{ Data: map[string]interface{}{ - "expiry": config.Expiry, - "disable": config.Disable, - "ocsp_disable": config.OcspDisable, - "ocsp_expiry": config.OcspExpiry, - "auto_rebuild": config.AutoRebuild, - "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, - "enable_delta": config.EnableDelta, - "delta_rebuild_interval": config.DeltaRebuildInterval, + "expiry": config.Expiry, + "disable": config.Disable, + "ocsp_disable": config.OcspDisable, + "ocsp_expiry": config.OcspExpiry, + "auto_rebuild": config.AutoRebuild, + "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, + "enable_delta": config.EnableDelta, + "delta_rebuild_interval": config.DeltaRebuildInterval, + "cross_cluster_revocation": config.UseGlobalQueue, + "unified_crl": config.UnifiedCRL, + "unified_crl_on_existing_paths": config.UnifiedCRLOnExistingPaths, }, - }, nil + } } const pathConfigCRLHelpSyn = ` diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go index 5b67ad080892..c79102b0350a 100644 --- a/builtin/logical/pki/path_config_urls.go +++ b/builtin/logical/pki/path_config_urls.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" "fmt" - "strings" + "net/http" - "github.com/asaskevich/govalidator" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -13,6 +16,11 @@ import ( func pathConfigURLs(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/urls", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + Fields: map[string]*framework.FieldSchema{ "issuing_certificates": { Type: framework.TypeCommaStringSlice, @@ -35,20 +43,94 @@ for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, "enable_templating": { Type: framework.TypeBool, Description: `Whether or not to enabling templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}' -and '{{cluster_path}}' are available, but the addresses are not checked for -URI validity until issuance time. This requires /config/cluster's path to be -set on all PR Secondary clusters.`, +above AIA fields. When templating is enabled the special values '{{issuer_id}}', +'{{cluster_path}}', and '{{cluster_aia_path}}' are available, but the addresses +are not checked for URI validity until issuance time. 
Using '{{cluster_path}}' +requires /config/cluster's 'path' member to be set on all PR Secondary clusters +and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member +to be set on all PR secondary clusters.`, Default: false, }, }, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "urls", + }, Callback: b.pathWriteURL, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + "crl_distribution_points": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, + }, + "ocsp_servers": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + "enable_templating": { + Type: framework.TypeBool, + Description: `Whether or not to enabling templating of the +above AIA fields. When templating is enabled the special values '{{issuer_id}}' +and '{{cluster_path}}' are available, but the addresses are not checked for +URI validity until issuance time. This requires /config/cluster's path to be +set on all PR Secondary clusters.`, + Default: false, + }, + }, + }}, + }, }, logical.ReadOperation: &framework.PathOperation{ Callback: b.pathReadURL, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "urls-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, + Required: true, + }, + "crl_distribution_points": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, + Required: true, + }, + "ocsp_servers": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, + Required: true, + }, + "enable_templating": { + Type: framework.TypeBool, + Description: `Whether or not to enable templating of the +above AIA fields. When templating is enabled the special values '{{issuer_id}}' +and '{{cluster_path}}' are available, but the addresses are not checked for +URI validity until issuance time. 
This requires /config/cluster's path to be +set on all PR Secondary clusters.`, + Required: true, + }, + }, + }}, + }, }, }, @@ -57,23 +139,13 @@ set on all PR Secondary clusters.`, } } -func validateURLs(urls []string) string { - for _, curr := range urls { - if !govalidator.IsURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") { - return curr - } - } - - return "" -} - -func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigEntry, error) { +func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*issuing.AiaConfigEntry, error) { entry, err := storage.Get(ctx, "urls") if err != nil { return nil, err } - entries := &aiaConfigEntry{ + entries := &issuing.AiaConfigEntry{ IssuingCertificates: []string{}, CRLDistributionPoints: []string{}, OCSPServers: []string{}, @@ -91,7 +163,7 @@ func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigE return entries, nil } -func writeURLs(ctx context.Context, storage logical.Storage, entries *aiaConfigEntry) error { +func writeURLs(ctx context.Context, storage logical.Storage, entries *issuing.AiaConfigEntry) error { entry, err := logical.StorageEntryJSON("urls", entries) if err != nil { return err @@ -154,7 +226,7 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * }, } - if entries.EnableTemplating && !b.useLegacyBundleCaStorage() { + if entries.EnableTemplating && !b.UseLegacyBundleCaStorage() { sc := b.makeStorageContext(ctx, req.Storage) issuers, err := sc.listIssuers() if err != nil { @@ -167,23 +239,23 @@ func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data * return nil, fmt.Errorf("unable to read issuer to validate templated URIs: %w", err) } - _, err = entries.toURLEntries(sc, issuer.ID) + _, err = ToURLEntries(sc, issuer.ID, entries) if err != nil { resp.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", err)) } } } else if !entries.EnableTemplating { - if badURL := validateURLs(entries.IssuingCertificates); badURL != "" { + if badURL := issuing.ValidateURLs(entries.IssuingCertificates); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } - if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" { + if badURL := issuing.ValidateURLs(entries.CRLDistributionPoints); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } - if badURL := validateURLs(entries.OCSPServers); badURL != "" { + if badURL := issuing.ValidateURLs(entries.OCSPServers); badURL != "" { return logical.ErrorResponse(fmt.Sprintf( "invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go index e15e2f69f899..da31c65f68c6 100644 --- a/builtin/logical/pki/path_fetch.go +++ b/builtin/logical/pki/path_fetch.go @@ -1,25 +1,71 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki import ( "context" "encoding/pem" "fmt" + "net/http" "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) +var pathFetchReadSchema = map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: false, + }, + "revocation_time": { + Type: framework.TypeInt64, + Description: `Revocation time`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeString, + Description: `Revocation time RFC 3339 formatted`, + Required: false, + }, + "issuer_id": { + Type: framework.TypeString, + Description: `ID of the issuer`, + Required: false, + }, + "ca_chain": { + Type: framework.TypeString, + Description: `Issuing CA Chain`, + Required: false, + }, + }, + }}, +} + // Returns the CA in raw format func pathFetchCA(b *backend) *framework.Path { return &framework.Path{ Pattern: `ca(/pem)?`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-der|ca-pem", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, }, }, @@ -33,9 +79,15 @@ func pathFetchCAChain(b *backend) *framework.Path { return &framework.Path{ Pattern: `(cert/)?ca_chain`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-chain-pem|cert-ca-chain", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, }, }, @@ -49,6 +101,33 @@ func pathFetchCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl(/pem|/delta(/pem)?)?`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "crl-der|crl-pem|crl-delta|crl-delta-pem", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, + }, + }, + + HelpSynopsis: pathFetchHelpSyn, + HelpDescription: pathFetchHelpDesc, + } +} + +// Returns the CRL in raw format +func pathFetchUnifiedCRL(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `unified-crl(/pem|/delta(/pem)?)?`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "unified-crl-der|unified-crl-pem|unified-crl-delta|unified-crl-delta-pem", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathFetchRead, @@ -64,6 +143,12 @@ func pathFetchValidRaw(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)/raw(/pem)?`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert-raw-der|cert-raw-pem", + }, + Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -74,7 +159,8 hyphen-separated octal`, Operations:
map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, }, }, @@ -88,6 +174,12 @@ hyphen-separated octal`, func pathFetchValid(b *backend) *framework.Path { return &framework.Path{ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert", + }, + Fields: map[string]*framework.FieldSchema{ "serial": { Type: framework.TypeString, @@ -98,7 +190,8 @@ hyphen-separated octal`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, }, }, @@ -109,12 +202,23 @@ hyphen-separated octal`, // This returns the CRL in a non-raw format func pathFetchCRLViaCertPath(b *backend) *framework.Path { + pattern := `cert/(crl|delta-crl)` + if constants.IsEnterprise { + pattern = `cert/(crl|delta-crl|unified-crl|unified-delta-crl)` + } + return &framework.Path{ - Pattern: `cert/(crl|delta-crl)`, + Pattern: pattern, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "cert-crl|cert-delta-crl|cert-unified-crl|cert-unified-delta-crl", + }, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathFetchRead, + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, }, }, @@ -128,6 +232,11 @@ func pathFetchListCerts(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "certs", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathFetchCertList, @@ -197,11 +306,30 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data if req.Path == "ca_chain" { contentType = "application/pkix-cert" } - case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl": - modifiedCtx.reqType = ifModifiedCRL + case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl" || req.Path == "cert/delta-crl/raw" || req.Path == "cert/delta-crl/raw/pem" || req.Path == "unified-crl" || req.Path == "unified-crl/pem" || req.Path == "unified-crl/delta" || req.Path == "unified-crl/delta/pem" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-crl/raw" || req.Path == "cert/unified-crl/raw/pem" || req.Path == "cert/unified-delta-crl" || req.Path == "cert/unified-delta-crl/raw" || req.Path == "cert/unified-delta-crl/raw/pem": + config, err := b.CrlBuilder().getConfigWithUpdate(sc) + if err != nil { + retErr = err + goto reply + } + var isDelta bool + var isUnified bool if strings.Contains(req.Path, "delta") { + isDelta = true + } + if strings.Contains(req.Path, "unified") || shouldLocalPathsUseUnified(config) { + isUnified = true + } + + modifiedCtx.reqType = ifModifiedCRL + if !isUnified && isDelta { modifiedCtx.reqType = ifModifiedDeltaCRL + } else if isUnified && !isDelta { + modifiedCtx.reqType
= ifModifiedUnifiedCRL + } else if isUnified && isDelta { + modifiedCtx.reqType = ifModifiedUnifiedDeltaCRL } + ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) if err != nil || ret { retErr = err @@ -209,14 +337,19 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data } serial = legacyCRLPath - if req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/delta-crl" { + if !isUnified && isDelta { serial = deltaCRLPath + } else if isUnified && !isDelta { + serial = unifiedCRLPath + } else if isUnified && isDelta { + serial = unifiedDeltaCRLPath } + contentType = "application/pkix-crl" - if req.Path == "crl/pem" || req.Path == "crl/delta/pem" { + if strings.Contains(req.Path, "pem") { pemType = "X509 CRL" contentType = "application/x-pem-file" - } else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" { + } else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-delta-crl" { pemType = "X509 CRL" contentType = "" } @@ -238,7 +371,7 @@ func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data // Prefer fetchCAInfo to fetchCertBySerial for CA certificates. if serial == "ca_chain" || serial == "ca" { - caInfo, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + caInfo, err := sc.fetchCAInfo(defaultRef, issuing.ReadOnlyUsage) if err != nil { switch err.(type) { case errutil.UserError: diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go index 58a97305f5bd..bd5d8e63560b 100644 --- a/builtin/logical/pki/path_fetch_issuers.go +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -5,9 +8,11 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "net/http" "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -17,9 +22,31 @@ func pathListIssuers(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuers", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListIssuersHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `Key info with issuer name`, + Required: false, + }, + }, + }}, + }, }, }, @@ -29,7 +56,7 @@ func pathListIssuers(b *backend) *framework.Path { } func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not list issuers until migration has completed"), nil } @@ -58,8 +85,28 @@ func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Reque responseKeys = append(responseKeys, string(identifier)) responseInfo[string(identifier)] = map[string]interface{}{ - "issuer_name": issuer.Name, - "is_default": identifier == config.DefaultIssuerId, + "issuer_name": issuer.Name, + 
"is_default": identifier == config.DefaultIssuerId, + "serial_number": issuer.SerialNumber, + + // While nominally this could be considered sensitive information + // to be returned on an unauthed endpoint, there's two mitigating + // circumstances: + // + // 1. Key IDs are purely random numbers generated by Vault and + // have no relationship to the actual key material. + // 2. They also don't _do_ anything by themselves. There is no + // modification of KeyIDs allowed, you need to be authenticated + // to Vault to understand what they mean, you _essentially_ + // get the same information from looking at/comparing various + // cert's SubjectPublicKeyInfo field, and there's the `default` + // reference that anyone with issuer generation capabilities + // can use even if they can't access any of the other /key/* + // endpoints. + // + // So all in all, exposing this value is not a security risk and + // is otherwise beneficial for the UI, hence its inclusion. + "key_id": issuer.KeyID, } } @@ -75,11 +122,28 @@ their identifier and their name (if set). ) func pathGetIssuer(b *backend) *framework.Path { - pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "(/der|/pem|/json)?" - return buildPathGetIssuer(b, pattern) + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "$" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuer", + } + + return buildPathIssuer(b, pattern, displayAttrs) +} + +func pathGetUnauthedIssuer(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/(json|der|pem)$" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuer-json|issuer-der|issuer-pem", + } + + return buildPathGetIssuer(b, pattern, displayAttrs) } -func buildPathGetIssuer(b *backend, pattern string) *framework.Path { +func buildPathIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) @@ -137,36 +201,134 @@ for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, fields["enable_aia_url_templating"] = &framework.FieldSchema{ Type: framework.TypeBool, Description: `Whether or not to enabling templating of the -above AIA fields. When templating is enabled the special values '{{issuer_id}}' -and '{{cluster_path}}' are available, but the addresses are not checked for -URL validity until issuance time. This requires /config/cluster's path to be -set on all PR Secondary clusters.`, +above AIA fields. When templating is enabled the special values '{{issuer_id}}', +'{{cluster_path}}', '{{cluster_aia_path}}' are available, but the addresses are +not checked for URL validity until issuance time. 
Using '{{cluster_path}}' +requires /config/cluster's 'path' member to be set on all PR Secondary clusters +and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member +to be set on all PR Secondary clusters.`, Default: false, } + updateIssuerSchema := map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `Issuer Id`, + Required: false, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Issuer Name`, + Required: false, + }, + "key_id": { + Type: framework.TypeString, + Description: `Key Id`, + Required: false, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: false, + }, + "manual_chain": { + Type: framework.TypeStringSlice, + Description: `Manual Chain`, + Required: false, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: false, + }, + "leaf_not_after_behavior": { + Type: framework.TypeString, + Description: `Leaf Not After Behavior`, + Required: false, + }, + "usage": { + Type: framework.TypeString, + Description: `Usage`, + Required: false, + }, + "revocation_signature_algorithm": { + Type: framework.TypeString, + Description: `Revocation Signature Algorithm`, + Required: false, + }, + "revoked": { + Type: framework.TypeBool, + Description: `Revoked`, + Required: false, + }, + "revocation_time": { + Type: framework.TypeInt, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeString, + Required: false, + }, + "issuing_certificates": { + Type: framework.TypeStringSlice, + Description: `Issuing Certificates`, + Required: false, + }, + "crl_distribution_points": { + Type: framework.TypeStringSlice, + Description: `CRL Distribution Points`, + Required: false, + }, + "ocsp_servers": { + Type: framework.TypeStringSlice, + Description: `OCSP Servers`, + Required: false, + }, + "enable_aia_url_templating": { + Type: framework.TypeBool, + Description: `Whether or not templating is enabled for AIA fields`, + Required: false, + }, + }, + }}, + } + return &framework.Path{ // Returns a JSON entry. - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathGetIssuer, + Callback: b.pathGetIssuer, + Responses: updateIssuerSchema, }, logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateIssuer, + Callback: b.pathUpdateIssuer, + Responses: updateIssuerSchema, + // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathDeleteIssuer, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.PatchOperation: &framework.PathOperation{ - Callback: b.pathPatchIssuer, + Callback: b.pathPatchIssuer, + Responses: updateIssuerSchema, + // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -178,17 +340,70 @@ set on all PR Secondary clusters.`, } } +func buildPathGetIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + fields := map[string]*framework.FieldSchema{} + fields = addIssuerRefField(fields) + + getIssuerSchema := map[int][]framework.Response{ + http.StatusNotModified: {{ + Description: "Not Modified", + }}, + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `Issuer Id`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Issuer Name`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: true, + }, + }, + }}, + } + + return &framework.Path{ + // Returns a JSON entry. + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathGetIssuer, + Responses: getIssuerSchema, + }, + }, + + HelpSynopsis: pathGetIssuerHelpSyn, + HelpDescription: pathGetIssuerHelpDesc, + } +} + func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { // Handle raw issuers first. if strings.HasSuffix(req.Path, "/der") || strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/json") { return b.pathGetRawIssuer(ctx, req, data) } - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -210,7 +425,7 @@ func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data return respondReadIssuer(issuer) } -func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { +func respondReadIssuer(issuer *issuing.IssuerEntry) (*logical.Response, error) { var respManualChain []string for _, entity := range issuer.ManualChain { respManualChain = append(respManualChain, string(entity)) @@ -249,6 +464,7 @@ func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { data["issuing_certificates"] = issuer.AIAURIs.IssuingCertificates data["crl_distribution_points"] = issuer.AIAURIs.CRLDistributionPoints data["ocsp_servers"] = issuer.AIAURIs.OCSPServers + data["enable_aia_url_templating"] = issuer.AIAURIs.EnableTemplating } response := &logical.Response{ @@ -268,11 +484,11 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not update issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -322,9 +538,9 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } rawUsage := data.Get("usage").([]string) - newUsage, err := NewIssuerUsageFromNames(rawUsage) + newUsage, err := issuing.NewIssuerUsageFromNames(rawUsage) if err != nil { - 
return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil + return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, issuing.AllIssuerUsages.Names())), nil } // Revocation signature algorithm changes @@ -347,15 +563,15 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // AIA access changes enableTemplating := data.Get("enable_aia_url_templating").(bool) issuerCertificates := data.Get("issuing_certificates").([]string) - if badURL := validateURLs(issuerCertificates); !enableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(issuerCertificates); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil } crlDistributionPoints := data.Get("crl_distribution_points").([]string) - if badURL := validateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil } ocspServers := data.Get("ocsp_servers").([]string) - if badURL := validateURLs(ocspServers); !enableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(ocspServers); !enableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil } @@ -367,8 +583,8 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da issuer.Name = newName issuer.LastModified = time.Now().UTC() // See note in updateDefaultIssuerId about why this is necessary. - b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + b.CrlBuilder().invalidateCRLBuildTime() + b.CrlBuilder().flushCRLBuildTimeInvalidation(sc) modified = true } @@ -378,7 +594,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { + if issuer.Revoked && newUsage.HasUsage(issuing.IssuanceUsage) { // Forbid allowing cert signing on its usage. return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. 
Reissue this certificate (preferably with a new key) and modify that entry instead."), nil } @@ -389,7 +605,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da if err != nil { return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { + if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(issuing.CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil } @@ -403,7 +619,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da } if issuer.AIAURIs == nil && (len(issuerCertificates) > 0 || len(crlDistributionPoints) > 0 || len(ocspServers) > 0) { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &issuing.AiaConfigEntry{} } if issuer.AIAURIs != nil { // Associative mapping from data source to destination on the @@ -450,7 +666,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da // it'll write it out to disk for us. We'd hate to then modify the issuer // again and write it a second time. var updateChain bool - var constructedChain []issuerID + var constructedChain []issuing.IssuerID for index, newPathRef := range newPath { // Allow self for the first entry. if index == 0 && newPathRef == "self" { @@ -500,7 +716,7 @@ func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, da addWarningOnDereferencing(sc, oldName, response) } if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) + _, aiaErr := ToURLEntries(sc, issuer.ID, issuer.AIAURIs) if aiaErr != nil { response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) } @@ -515,12 +731,12 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not patch issuer until migration has completed"), nil } // First we fetch the issuer - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -548,7 +764,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat var newName string if ok { newName, err = getIssuerName(sc, data) - if err != nil && err != errIssuerNameInUse { + if err != nil && err != errIssuerNameInUse && err != errIssuerNameIsEmpty { // If the error is name already in use, and the new name is the // old name for this issuer, we're not actually updating the // issuer name (or causing a conflict) -- so don't err out. Other @@ -567,14 +783,14 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat issuer.Name = newName issuer.LastModified = time.Now().UTC() // See note in updateDefaultIssuerId about why this is necessary. 
- b.crlBuilder.invalidateCRLBuildTime() - b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + b.CrlBuilder().invalidateCRLBuildTime() + b.CrlBuilder().flushCRLBuildTimeInvalidation(sc) modified = true } } // Leaf Not After Changes - rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behaivor") + rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behavior") if ok { rawLeafBehavior := rawLeafBehaviorData.(string) var newLeafBehavior certutil.NotAfterBehavior @@ -598,12 +814,12 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat rawUsageData, ok := data.GetOk("usage") if ok { rawUsage := rawUsageData.([]string) - newUsage, err := NewIssuerUsageFromNames(rawUsage) + newUsage, err := issuing.NewIssuerUsageFromNames(rawUsage) if err != nil { - return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil + return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, issuing.AllIssuerUsages.Names())), nil } if newUsage != issuer.Usage { - if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { + if issuer.Revoked && newUsage.HasUsage(issuing.IssuanceUsage) { // Forbid allowing cert signing on its usage. return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil } @@ -612,7 +828,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if err != nil { return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) } - if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { + if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(issuing.CRLSigningUsage) { return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil } @@ -649,7 +865,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat // AIA access changes. if issuer.AIAURIs == nil { - issuer.AIAURIs = &aiaConfigEntry{} + issuer.AIAURIs = &issuing.AiaConfigEntry{} } // Associative mapping from data source to destination on the @@ -688,7 +904,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat rawURLsValue, ok := data.GetOk(pair.Source) if ok { urlsValue := rawURLsValue.([]string) - if badURL := validateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { + if badURL := issuing.ValidateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter %v: %s", pair.Source, badURL)), nil } @@ -710,7 +926,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat if ok { newPath := newPathData.([]string) var updateChain bool - var constructedChain []issuerID + var constructedChain []issuing.IssuerID for index, newPathRef := range newPath { // Allow self for the first entry. 
if index == 0 && newPathRef == "self" { @@ -761,7 +977,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat addWarningOnDereferencing(sc, oldName, response) } if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { - _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) + _, aiaErr := ToURLEntries(sc, issuer.ID, issuer.AIAURIs) if aiaErr != nil { response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) } @@ -771,11 +987,11 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat } func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -854,11 +1070,11 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not delete issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -867,7 +1083,7 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da ref, err := sc.resolveIssuerReference(issuerName) if err != nil { // Return as if we deleted it if we fail to lookup the issuer. - if ref == IssuerRefNotFound { + if ref == issuing.IssuerRefNotFound { return &logical.Response{}, nil } return nil, err @@ -902,6 +1118,18 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da response.AddWarning(msg) } + // Finally, we need to rebuild both the local and the unified CRLs. This + // will free up any now unnecessary space used in both the CRL config + // and for the underlying CRL. + warnings, err := b.CrlBuilder().rebuild(sc, true) + if err != nil { + return nil, err + } + + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + return response, nil } @@ -940,21 +1168,50 @@ the certificate. func pathGetIssuerCRL(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" - return buildPathGetIssuerCRL(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationSuffix: "crl|crl-pem|crl-der|crl-delta|crl-delta-pem|crl-delta-der", + } + + return buildPathGetIssuerCRL(b, pattern, displayAttrs) } -func buildPathGetIssuerCRL(b *backend, pattern string) *framework.Path { +func pathGetIssuerUnifiedCRL(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/unified-crl(/pem|/der|/delta(/pem|/der)?)?" 
+ + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationSuffix: "unified-crl|unified-crl-pem|unified-crl-der|unified-crl-delta|unified-crl-delta-pem|unified-crl-delta-der", + } + + return buildPathGetIssuerCRL(b, pattern, displayAttrs) +} + +func buildPathGetIssuerCRL(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) return &framework.Path{ // Returns raw values. - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathGetIssuerCRL, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "crl": { + Type: framework.TypeString, + Required: false, + }, + }, + }}, + }, }, }, @@ -964,28 +1221,48 @@ func buildPathGetIssuerCRL(b *backend, pattern string) *framework.Path { } func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get issuer's CRL until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } sc := b.makeStorageContext(ctx, req.Storage) - if err := b.crlBuilder.rebuildIfForced(sc); err != nil { + warnings, err := b.CrlBuilder().rebuildIfForced(sc) + if err != nil { return nil, err } + if len(warnings) > 0 { + // Since this is a fetch of a specific CRL, this most likely comes + // from an automated system of some sort; these warnings would be + // ignored and likely meaningless. Log them instead. + msg := "During rebuild of CRL on issuer CRL fetch, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } var certificate []byte var contentType string + isUnified := strings.Contains(req.Path, "unified") + isDelta := strings.Contains(req.Path, "delta") + response := &logical.Response{} var crlType ifModifiedReqType = ifModifiedCRL - if strings.Contains(req.Path, "delta") { + + if !isUnified && isDelta { crlType = ifModifiedDeltaCRL + } else if isUnified && !isDelta { + crlType = ifModifiedUnifiedCRL + } else if isUnified && isDelta { + crlType = ifModifiedUnifiedDeltaCRL } + ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: crlType}, sc, response) if err != nil { return nil, err @@ -993,7 +1270,8 @@ func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, da if ret { return response, nil } - crlPath, err := sc.resolveIssuerCRLPath(issuerName) + + crlPath, err := sc.resolveIssuerCRLPath(issuerName, isUnified) if err != nil { return nil, err } diff --git a/builtin/logical/pki/path_fetch_keys.go b/builtin/logical/pki/path_fetch_keys.go index 2e718240dc8d..4e112a22a71f 100644 --- a/builtin/logical/pki/path_fetch_keys.go +++ b/builtin/logical/pki/path_fetch_keys.go @@ -1,22 +1,53 @@ +// Copyright (c) HashiCorp, Inc. 
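The unified/delta CRL dispatch introduced in this patch (first in pathFetchRead, again in pathGetIssuerCRL above) is a two-flag decision table over the request path. Below is a minimal standalone sketch of just that table, with stand-in names for the package's unexported ifModified* constants; the patch additionally forces the unified variant when shouldLocalPathsUseUnified(config) says so, which this sketch omits:

```
package main

import (
	"fmt"
	"strings"
)

// reqType stands in for the backend's unexported if-modified request type.
type reqType int

const (
	ifModifiedCRL reqType = iota
	ifModifiedDeltaCRL
	ifModifiedUnifiedCRL
	ifModifiedUnifiedDeltaCRL
)

// crlTypeForPath mirrors the dispatch in the hunks above: "unified" and
// "delta" in the request path independently select one of four CRL variants.
func crlTypeForPath(path string) reqType {
	isUnified := strings.Contains(path, "unified")
	isDelta := strings.Contains(path, "delta")
	switch {
	case isUnified && isDelta:
		return ifModifiedUnifiedDeltaCRL
	case isUnified:
		return ifModifiedUnifiedCRL
	case isDelta:
		return ifModifiedDeltaCRL
	default:
		return ifModifiedCRL
	}
}

func main() {
	for _, p := range []string{"crl/pem", "cert/delta-crl", "unified-crl/pem", "cert/unified-delta-crl"} {
		fmt.Printf("%-22s -> variant %d\n", p, crlTypeForPath(p))
	}
}
```

The same two flags also select the storage path (legacy, delta, unified, or unified-delta), which is why the patch computes them once and reuses them for both decisions.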
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" + "crypto" "fmt" + "net/http" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" ) func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "keys", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ - Callback: b.pathListKeysHandler, + Callback: b.pathListKeysHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `Key info with issuer name`, + Required: false, + }, + }, + }}, + }, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, @@ -34,7 +65,7 @@ their identifier and their name (if set).` ) func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not list keys until migration has completed"), nil } @@ -70,12 +101,19 @@ func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, func pathKey(b *backend) *framework.Path { pattern := "key/" + framework.GenericNameRegex(keyRefParam) - return buildPathKey(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "key", + } + + return buildPathKey(b, pattern, displayAttrs) } -func buildPathKey(b *backend, pattern string) *framework.Path { +func buildPathKey(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { return &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Fields: map[string]*framework.FieldSchema{ keyRefParam: { @@ -91,17 +129,81 @@ func buildPathKey(b *backend, pattern string) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathGetKeyHandler, + Callback: b.pathGetKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `Key Id`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Key Name`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `Key Type`, + Required: true, + }, + "subject_key_id": { + Type: framework.TypeString, + Description: `RFC 5280 Subject Key Identifier of the public counterpart`, + Required: false, + }, + "managed_key_id": { + Type: framework.TypeString, + Description: `Managed Key Id`, + Required: false, + }, + "managed_key_name": { + Type: framework.TypeString, + Description: `Managed Key Name`, + Required: false, + }, + }, + }}, + }, ForwardPerformanceStandby: false, ForwardPerformanceSecondary: false, }, logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateKeyHandler, + 
Callback: b.pathUpdateKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `Key Id`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Key Name`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `Key Type`, + Required: true, + }, + }, + }}, + }, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ - Callback: b.pathDeleteKeyHandler, + Callback: b.pathDeleteKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + }, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, @@ -126,7 +228,7 @@ the certificate. ) func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not get keys until migration has completed"), nil } @@ -155,23 +257,40 @@ func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, d keyTypeParam: string(key.PrivateKeyType), } - if key.isManagedPrivateKey() { - managedKeyUUID, err := key.getManagedKeyUUID() + var pkForSkid crypto.PublicKey + if key.IsManagedPrivateKey() { + managedKeyUUID, err := issuing.GetManagedKeyUUID(key) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key id %s (%s): %v", key.ID, key.Name, err)} } - keyInfo, err := getManagedKeyInfo(ctx, b, managedKeyUUID) + keyInfo, err := managed_key.GetManagedKeyInfo(ctx, b, managedKeyUUID) if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("failed fetching managed key info from key id %s (%s): %v", key.ID, key.Name, err)} } + pkForSkid, err = managed_key.GetManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID) + if err != nil { + return nil, err + } + // To remain consistent across the api responses (mainly generate root/intermediate calls), return the actual // type of key, not that it is a managed key. 
- respData[keyTypeParam] = string(keyInfo.keyType) - respData[managedKeyIdArg] = string(keyInfo.uuid) - respData[managedKeyNameArg] = string(keyInfo.name) + respData[keyTypeParam] = string(keyInfo.KeyType) + respData[managedKeyIdArg] = string(keyInfo.Uuid) + respData[managedKeyNameArg] = string(keyInfo.Name) + } else { + pkForSkid, err = getPublicKeyFromBytes([]byte(key.PrivateKey)) + if err != nil { + return nil, err + } + } + + skid, err := certutil.GetSubjectKeyID(pkForSkid) + if err != nil { + return nil, err } + respData[skidParam] = certutil.GetHexFormatted([]byte(skid), ":") return &logical.Response{Data: respData}, nil } @@ -182,7 +301,7 @@ func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not update keys until migration has completed"), nil } @@ -240,7 +359,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not delete keys until migration has completed"), nil } @@ -252,7 +371,7 @@ func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request sc := b.makeStorageContext(ctx, req.Storage) keyId, err := sc.resolveKeyReference(keyRef) if err != nil { - if keyId == KeyRefNotFound { + if keyId == issuing.KeyRefNotFound { // We failed to lookup the key, we should ignore any error here and reply as if it was deleted. return nil, nil } diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go index cfcff87b04f3..60742a3d8c3b 100644 --- a/builtin/logical/pki/path_intermediate.go +++ b/builtin/logical/pki/path_intermediate.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
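The subject_key_id field added to the key read response above is produced by certutil.GetSubjectKeyID and rendered with certutil.GetHexFormatted. Here is a stdlib-only sketch of the conventional RFC 5280 derivation (SHA-1 over the subjectPublicKey BIT STRING), assuming that is the method the helper implements:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha1"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"encoding/hex"
	"fmt"
	"strings"
)

// subjectPublicKeyInfo matches the ASN.1 layout of an RFC 5280 SPKI so we
// can hash only the subjectPublicKey BIT STRING (method (1) of RFC 5280
// section 4.2.1.2).
type subjectPublicKeyInfo struct {
	Algorithm        pkix.AlgorithmIdentifier
	SubjectPublicKey asn1.BitString
}

// subjectKeyID returns the SKID as colon-separated hex, the presentation
// the endpoint uses.
func subjectKeyID(pub any) (string, error) {
	der, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return "", err
	}
	var spki subjectPublicKeyInfo
	if _, err := asn1.Unmarshal(der, &spki); err != nil {
		return "", err
	}
	sum := sha1.Sum(spki.SubjectPublicKey.Bytes)
	parts := make([]string, len(sum))
	for i, b := range sum {
		parts[i] = hex.EncodeToString([]byte{b})
	}
	return strings.Join(parts, ":"), nil
}

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	skid, err := subjectKeyID(key.Public())
	if err != nil {
		panic(err)
	}
	fmt.Println(skid)
}
```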
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" "encoding/base64" "fmt" + "net/http" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -11,13 +15,27 @@ import ( ) func pathGenerateIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, "intermediate/generate/"+framework.GenericNameRegex("exported")) + pattern := "intermediate/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } func pathSetSignedIntermediate(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "intermediate/set-signed", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "set-signed", + OperationSuffix: "intermediate", + }, + Fields: map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -31,6 +49,38 @@ appended to the bundle.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -52,7 +102,7 @@ func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Req var err error - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not create intermediate until migration has completed"), nil } diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go index 7203d56c73cf..dd3018199068 100644 --- a/builtin/logical/pki/path_issue_sign.go +++ b/builtin/logical/pki/path_issue_sign.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
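The DisplayAttributes blocks threaded through this patch feed OpenAPI operation-ID generation: prefix, verb, and suffix combine into identifiers such as pki-generate-intermediate, and "|"-separated suffix alternatives are split so each concrete path pattern gets its own ID. A rough sketch of that composition under those assumptions; the exact rules, including how a default verb is inferred from the logical operation when OperationVerb is unset, live in the SDK's OpenAPI generator:

```
package main

import (
	"fmt"
	"strings"
)

// operationID approximates how the display attributes compose into an
// OpenAPI operation ID. The real generator also picks the one
// "|"-separated suffix alternative matching the concrete path; here we
// simply take the first.
func operationID(prefix, verb, suffix string) string {
	suffix = strings.Split(suffix, "|")[0]
	var parts []string
	for _, p := range []string{prefix, verb, suffix} {
		if p != "" {
			parts = append(parts, p)
		}
	}
	return strings.Join(parts, "-")
}

func main() {
	fmt.Println(operationID("pki", "generate", "intermediate")) // pki-generate-intermediate
	fmt.Println(operationID("pki", "set-signed", "intermediate"))
	fmt.Println(operationID("pki-issuer", "sign", "verbatim|verbatim-with-role"))
}
```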
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -7,6 +10,7 @@ import ( "encoding/base64" "encoding/pem" "fmt" + "net/http" "strings" "time" @@ -15,25 +19,84 @@ import ( "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func pathIssue(b *backend) *framework.Path { pattern := "issue/" + framework.GenericNameRegex("role") - return buildPathIssue(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) } func pathIssuerIssue(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/issue/" + framework.GenericNameRegex("role") - return buildPathIssue(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) } -func buildPathIssue(b *backend, pattern string) *framework.Path { +func buildPathIssue(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("issue", roleRequired, b.pathIssue), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Private key`, + Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Private key type`, + Required: false, + }, + }, + }}, + }, }, }, @@ -47,21 +110,68 @@ func buildPathIssue(b *backend, pattern string) *framework.Path { func pathSign(b *backend) *framework.Path { pattern := "sign/" + framework.GenericNameRegex("role") - return buildPathSign(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) } func pathIssuerSign(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign/" + framework.GenericNameRegex("role") - return buildPathSign(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) } -func buildPathSign(b *backend, pattern string) *framework.Path { +func buildPathSign(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, 
+ Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("sign", roleRequired, b.pathSign), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + }, + }}, + }, }, }, @@ -82,22 +192,69 @@ func buildPathSign(b *backend, pattern string) *framework.Path { func pathIssuerSignVerbatim(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-verbatim" + framework.OptionalParamRegex("role") - return buildPathIssuerSignVerbatim(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) } func pathSignVerbatim(b *backend) *framework.Path { pattern := "sign-verbatim" + framework.OptionalParamRegex("role") - return buildPathIssuerSignVerbatim(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) } -func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignVerbatim(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, - Fields: map[string]*framework.FieldSchema{}, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: getCsrSignVerbatimSchemaFields(), Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.metricsWrap("sign-verbatim", roleOptional, b.pathSignVerbatim), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + }, + }}, + }, }, }, @@ -105,61 +262,6 @@ func buildPathIssuerSignVerbatim(b *backend, pattern string) *framework.Path { HelpDescription: pathIssuerSignVerbatimHelpDesc, } - ret.Fields = addNonCACommonFields(ret.Fields) - - ret.Fields["csr"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "", - Description: `PEM-format CSR to be signed. 
Values will be -taken verbatim from the CSR, except for -basic constraints.`, - } - - ret.Fields["key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, - Description: `A comma-separated string or list of key usages (not extended -key usages). Valid values can be found at -https://golang.org/pkg/crypto/x509/#KeyUsage --- simply drop the "KeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list.`, - } - - ret.Fields["ext_key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{}, - Description: `A comma-separated string or list of extended key usages. Valid values can be found at -https://golang.org/pkg/crypto/x509/#ExtKeyUsage --- simply drop the "ExtKeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list.`, - } - - ret.Fields["ext_key_usage_oids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated string or list of extended key usage oids.`, - } - - ret.Fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - ret.Fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - } - return ret } @@ -185,7 +287,7 @@ See the API documentation for more information about required parameters. 
// pathIssue issues a certificate and private key from given parameters, // subject to role restrictions -func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { +func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { if role.KeyType == "any" { return logical.ErrorResponse("role key type \"any\" not allowed for issuing certificates, only signing"), nil } @@ -195,62 +297,49 @@ func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *fra // pathSign issues a certificate from a submitted CSR, subject to role // restrictions -func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { +func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { return b.pathIssueSignCert(ctx, req, data, role, true, false) } // pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to // role restrictions -func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { - entry := &roleEntry{ - AllowLocalhost: true, - AllowAnyName: true, - AllowIPSANs: true, - AllowWildcardCertificates: new(bool), - EnforceHostnames: false, - KeyType: "any", - UseCSRCommonName: true, - UseCSRSANs: true, - AllowedOtherSANs: []string{"*"}, - AllowedSerialNumbers: []string{"*"}, - AllowedURISANs: []string{"*"}, - CNValidations: []string{"disabled"}, - GenerateLease: new(bool), - KeyUsage: data.Get("key_usage").([]string), - ExtKeyUsage: data.Get("ext_key_usage").([]string), - ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), - SignatureBits: data.Get("signature_bits").(int), - UsePSS: data.Get("use_pss").(bool), - } - *entry.AllowWildcardCertificates = true - - *entry.GenerateLease = false +func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry) (*logical.Response, error) { + opts := []issuing.RoleModifier{ + issuing.WithKeyUsage(data.Get("key_usage").([]string)), + issuing.WithExtKeyUsage(data.Get("ext_key_usage").([]string)), + issuing.WithExtKeyUsageOIDs(data.Get("ext_key_usage_oids").([]string)), + issuing.WithSignatureBits(data.Get("signature_bits").(int)), + issuing.WithUsePSS(data.Get("use_pss").(bool)), + } + // if we did receive a role parameter value with a valid role, use some of its values + // to populate and influence the sign-verbatim behavior. if role != nil { + opts = append(opts, issuing.WithNoStore(role.NoStore)) + opts = append(opts, issuing.WithIssuer(role.Issuer)) + if role.TTL > 0 { - entry.TTL = role.TTL + opts = append(opts, issuing.WithTTL(role.TTL)) } + if role.MaxTTL > 0 { - entry.MaxTTL = role.MaxTTL + opts = append(opts, issuing.WithMaxTTL(role.MaxTTL)) } + if role.GenerateLease != nil { - *entry.GenerateLease = *role.GenerateLease + opts = append(opts, issuing.WithGenerateLease(*role.GenerateLease)) } + if role.NotBeforeDuration > 0 { - entry.NotBeforeDuration = role.NotBeforeDuration + opts = append(opts, issuing.WithNotBeforeDuration(role.NotBeforeDuration)) } - entry.NoStore = role.NoStore - entry.Issuer = role.Issuer - } - - if len(entry.Issuer) == 0 { - entry.Issuer = defaultRef } + entry := issuing.SignVerbatimRoleWithOpts(opts...) 
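The rewrite of pathSignVerbatim above swaps a hand-assembled roleEntry literal for issuing.RoleModifier functional options. A generic sketch of that pattern under assumed names (the real issuing package supplies its own baseline defaults and With* helpers):

```
package main

import (
	"fmt"
	"time"
)

// Role stands in for the issuing.RoleEntry the patch constructs.
type Role struct {
	KeyUsage []string
	TTL      time.Duration
	NoStore  bool
}

// RoleModifier mirrors the shape of issuing.RoleModifier: each option
// mutates the role under construction.
type RoleModifier func(*Role)

func WithKeyUsage(usages []string) RoleModifier {
	return func(r *Role) { r.KeyUsage = usages }
}

func WithTTL(ttl time.Duration) RoleModifier {
	return func(r *Role) { r.TTL = ttl }
}

func WithNoStore(noStore bool) RoleModifier {
	return func(r *Role) { r.NoStore = noStore }
}

// NewSignVerbatimRole applies options in order over permissive baseline
// defaults, the same shape as issuing.SignVerbatimRoleWithOpts.
func NewSignVerbatimRole(opts ...RoleModifier) *Role {
	r := &Role{KeyUsage: []string{"DigitalSignature"}}
	for _, opt := range opts {
		opt(r)
	}
	return r
}

func main() {
	role := NewSignVerbatimRole(WithTTL(time.Hour), WithNoStore(true))
	fmt.Printf("%+v\n", role)
}
```

Conditional settings (only apply a TTL when role.TTL > 0, for example) then become plain appends to the opts slice, which is exactly how the handler above builds its option list.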
return b.pathIssueSignCert(ctx, req, data, entry, true, true) } -func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) { +func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, data *framework.FieldData, role *issuing.RoleEntry, useCSR, useCSRValues bool) (*logical.Response, error) { // If storing the certificate and on a performance standby, forward this request on to the primary // Allow performance secondaries to generate and store certificates locally to them. if !role.NoStore && b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { @@ -276,7 +365,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } else { // Otherwise, we must have a newer API which requires an issuer // reference. Fetch it in this case - issuerName = getIssuerRef(data) + issuerName = GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -290,7 +379,7 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -326,99 +415,21 @@ func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, d } } - signingCB, err := signingBundle.ToCertBundle() - if err != nil { - return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + generateLease := false + if role.GenerateLease != nil && *role.GenerateLease { + generateLease = true } - cb, err := parsedBundle.ToCertBundle() + resp, err := signIssueApiResponse(b, data, parsedBundle, signingBundle, generateLease, warnings) if err != nil { - return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) - } - - caChainGen := newCaChainOutput(parsedBundle, data) - - respData := map[string]interface{}{ - "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), - "serial_number": cb.SerialNumber, - } - - switch format { - case "pem": - respData["issuing_ca"] = signingCB.Certificate - respData["certificate"] = cb.Certificate - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() - } - if !useCSR { - respData["private_key"] = cb.PrivateKey - respData["private_key_type"] = cb.PrivateKeyType - } - - case "pem_bundle": - respData["issuing_ca"] = signingCB.Certificate - respData["certificate"] = cb.ToPEMBundle() - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.pemEncodedChain() - } - if !useCSR { - respData["private_key"] = cb.PrivateKey - respData["private_key_type"] = cb.PrivateKeyType - } - - case "der": - respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) - respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) - - if caChainGen.containsChain() { - respData["ca_chain"] = caChainGen.derEncodedChain() - } - - if !useCSR { - respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) - respData["private_key_type"] = cb.PrivateKeyType - } - default: - return nil, fmt.Errorf("unsupported format: %s", format) - } - - var resp *logical.Response - switch { - case role.GenerateLease == nil: - return nil, 
fmt.Errorf("generate lease in role is nil") - case !*role.GenerateLease: - // If lease generation is disabled do not populate `Secret` field in - // the response - resp = &logical.Response{ - Data: respData, - } - default: - resp = b.Secret(SecretCertsType).Response( - respData, - map[string]interface{}{ - "serial_number": cb.SerialNumber, - }) - resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) - } - - if data.Get("private_key_format").(string) == "pkcs8" { - err = convertRespToPKCS8(resp) - if err != nil { - return nil, err - } + return nil, err } if !role.NoStore { - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) + return nil, err } - b.incrementTotalCertificatesCount(certsCounted, key) } if useCSR { @@ -479,6 +490,93 @@ func (cac *caChainOutput) derEncodedChain() []string { return derCaChain } +func signIssueApiResponse(b *backend, data *framework.FieldData, parsedBundle *certutil.ParsedCertBundle, signingBundle *certutil.CAInfoBundle, generateLease bool, warnings []string) (*logical.Response, error) { + cb, err := parsedBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + } + + caChainGen := newCaChainOutput(parsedBundle, data) + includeKey := parsedBundle.PrivateKey != nil + + respData := map[string]interface{}{ + "expiration": parsedBundle.Certificate.NotAfter.Unix(), + "serial_number": cb.SerialNumber, + } + + format := getFormat(data) + switch format { + case "pem": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.Certificate + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if includeKey { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "pem_bundle": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.ToPEMBundle() + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if includeKey { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "der": + respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) + + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.derEncodedChain() + } + + if includeKey { + respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) + respData["private_key_type"] = cb.PrivateKeyType + } + default: + return nil, fmt.Errorf("unsupported format: %s", format) + } + + var resp *logical.Response + if generateLease { + resp = b.Secret(SecretCertsType).Response( + respData, + map[string]interface{}{ + "serial_number": cb.SerialNumber, + }) + resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) + } else { + resp = &logical.Response{ + Data: respData, + } + } + + if includeKey { + if keyFormat := data.Get("private_key_format"); keyFormat == "pkcs8" { + err 
:= convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + } + + resp = addWarnings(resp, warnings) + + return resp, nil +} + const pathIssueHelpSyn = ` Request a certificate using a certain role with the provided details. ` diff --git a/builtin/logical/pki/path_manage_issuers.go b/builtin/logical/pki/path_manage_issuers.go index 689b3a716619..2611a807a1b3 100644 --- a/builtin/logical/pki/path_manage_issuers.go +++ b/builtin/logical/pki/path_manage_issuers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -6,29 +9,100 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "net/http" "strings" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" ) func pathIssuerGenerateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "issuers/generate/root/"+framework.GenericNameRegex("exported")) + pattern := "issuers/generate/root/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } func pathRotateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "root/rotate/"+framework.GenericNameRegex("exported")) + pattern := "root/rotate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } -func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { +func buildPathGenerateRoot(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCAGenerateRoot, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiration": { + Type: framework.TypeInt64, + Description: `The expiration of the given issuer.`, + Required: true, + }, + "serial_number": { + Type: framework.TypeString, + Description: `The requested Subject's named serial number.`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `The generated self-signed CA certificate.`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `The issuing certificate authority.`, + Required: true, + }, + "issuer_id": { + Type: framework.TypeString, + Description: `The ID of the issuer`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `The name of the issuer.`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `The ID of the key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `The key name if given.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `The private key if exported was specified.`, + Required: false, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -46,20 +120,63 @@ 
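A note on the `der` branch of `signIssueApiResponse` above: the certificate and issuing CA come back base64-encoded DER rather than PEM. A minimal client-side sketch of consuming that shape follows; the package name and the `respData` map (standing in for a decoded JSON response body) are illustrative assumptions, not code from this change.

```go
package ocspclientdemo

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"
)

// decodeDerCert sketches the client side of the "der" format: the
// certificate field carries base64-encoded DER bytes, which must be
// decoded before x509 parsing. respData stands in for the decoded JSON
// "data" object of an issue/sign response requested with format=der.
func decodeDerCert(respData map[string]interface{}) (*x509.Certificate, error) {
	b64, ok := respData["certificate"].(string)
	if !ok {
		return nil, fmt.Errorf("certificate field missing or not a string")
	}
	der, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return nil, fmt.Errorf("invalid base64 certificate: %w", err)
	}
	return x509.ParseCertificate(der)
}
```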
func buildPathGenerateRoot(b *backend, pattern string) *framework.Path { } func pathIssuerGenerateIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, - "issuers/generate/intermediate/"+framework.GenericNameRegex("exported")) + pattern := "issuers/generate/intermediate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } func pathCrossSignIntermediate(b *backend) *framework.Path { - return buildPathGenerateIntermediate(b, "intermediate/cross-sign") + pattern := "intermediate/cross-sign" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "cross-sign", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) } -func buildPathGenerateIntermediate(b *backend, pattern string) *framework.Path { +func buildPathGenerateIntermediate(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { ret := &framework.Path{ - Pattern: pattern, + Pattern: pattern, + DisplayAttrs: displayAttrs, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathGenerateIntermediate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "csr": { + Type: framework.TypeString, + Description: `Certificate signing request.`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `Id of the key.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Generated private key.`, + Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Specifies the format used for marshaling the private key.`, + Required: false, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -90,6 +207,13 @@ with Active Directory Certificate Services.`, func pathImportIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuers/import/(cert|bundle)", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "import", + OperationSuffix: "cert|bundle", + }, + Fields: map[string]*framework.FieldSchema{ "pem_bundle": { Type: framework.TypeString, @@ -101,6 +225,38 @@ secret-key (optional) and certificates.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", 
+ Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -120,7 +276,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d keysAllowed := strings.HasSuffix(req.Path, "bundle") || req.Path == "config/ca" - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not import issuers until migration has completed"), nil } @@ -163,6 +319,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d var createdKeys []string var createdIssuers []string + var existingKeys []string + var existingIssuers []string issuerKeyMap := make(map[string]string) // Rather than using certutil.ParsePEMBundle (which restricts the @@ -219,6 +377,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d if !existing { createdKeys = append(createdKeys, key.ID.String()) + } else { + existingKeys = append(existingKeys, key.ID.String()) } } @@ -231,6 +391,8 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d issuerKeyMap[cert.ID.String()] = cert.KeyID.String() if !existing { createdIssuers = append(createdIssuers, cert.ID.String()) + } else { + existingIssuers = append(existingIssuers, cert.ID.String()) } } @@ -239,11 +401,13 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d "mapping": issuerKeyMap, "imported_keys": createdKeys, "imported_issuers": createdIssuers, + "existing_keys": existingKeys, + "existing_issuers": existingIssuers, }, } if len(createdIssuers) > 0 { - err := b.crlBuilder.rebuild(sc, true) + warnings, err := b.CrlBuilder().rebuild(sc, true) if err != nil { // Before returning, check if the error message includes the // string "PSS". 
If so, it indicates we might've wanted to modify @@ -258,6 +422,9 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d return nil, err } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } var issuersWithKeys []string for _, issuer := range createdIssuers { @@ -272,7 +439,7 @@ func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, d response.AddWarning("Unable to fetch default issuers configuration to update default issuer if necessary: " + err.Error()) } else if config.DefaultFollowsLatestIssuer { if len(issuersWithKeys) == 1 { - if err := sc.updateDefaultIssuerId(issuerID(issuersWithKeys[0])); err != nil { + if err := sc.updateDefaultIssuerId(issuing.IssuerID(issuersWithKeys[0])); err != nil { response.AddWarning("Unable to update this new root as the default issuer: " + err.Error()) } } else if len(issuersWithKeys) > 1 { @@ -349,11 +516,100 @@ func pathRevokeIssuer(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/revoke", - Fields: fields, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + OperationSuffix: "issuer", + }, + + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRevokeIssuer, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `ID of the issuer`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Name of the issuer`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `ID of the Key`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "manual_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Manual Chain`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Authority Chain`, + Required: true, + }, + "leaf_not_after_behavior": { + Type: framework.TypeString, + Description: ``, + Required: true, + }, + "usage": { + Type: framework.TypeString, + Description: `Allowed usage`, + Required: true, + }, + "revocation_signature_algorithm": { + Type: framework.TypeString, + Description: `Which signature algorithm to use when building CRLs`, + Required: true, + }, + "revoked": { + Type: framework.TypeBool, + Description: `Whether the issuer was revoked`, + Required: true, + }, + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Specifies the URL values for the Issuing Certificate field`, + Required: true, + }, + "crl_distribution_points": { + Type: framework.TypeStringSlice, + Description: `Specifies the URL values for the CRL Distribution Points field`, + Required: true, + }, + "ocsp_servers": { + Type: framework.TypeStringSlice, + Description: `Specifies the URL values for the OCSP Servers field`, + Required: true, + }, + "revocation_time": { + Type: framework.TypeInt64, + Description: `Time of revocation`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeTime, + Description: `RFC formatted time of revocation`, + Required: false, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go 
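Since `pathImportIssuers` above now reports existing material separately from net-new material and attaches any CRL-rebuild warnings to the response, an API consumer can act on both. A hedged sketch using the `github.com/hashicorp/vault/api` client; the mount path `pki` and the `bundle.pem` file name are assumptions for illustration.

```go
package main

import (
	"fmt"
	"log"
	"os"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	pem, err := os.ReadFile("bundle.pem") // hypothetical bundle file
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("pki/issuers/import/bundle", map[string]interface{}{
		"pem_bundle": string(pem),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Net-new vs. already-present material is now reported separately.
	fmt.Println("imported issuers:", secret.Data["imported_issuers"])
	fmt.Println("existing issuers:", secret.Data["existing_issuers"])
	fmt.Println("imported keys:", secret.Data["imported_keys"])
	fmt.Println("existing keys:", secret.Data["existing_keys"])

	// Warnings from the CRL rebuild are surfaced on the response envelope.
	for _, w := range secret.Warnings {
		fmt.Println("warning:", w)
	}
}
```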
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -372,11 +628,11 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da defer b.issuersLock.Unlock() // Issuer revocation can't work on the legacy cert bundle. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("cannot revoke issuer until migration has completed"), nil } - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } @@ -406,8 +662,8 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // new revocations of leaves issued by this issuer to trigger a CRL // rebuild still. issuer.Revoked = true - if issuer.Usage.HasUsage(IssuanceUsage) { - issuer.Usage.ToggleUsage(IssuanceUsage) + if issuer.Usage.HasUsage(issuing.IssuanceUsage) { + issuer.Usage.ToggleUsage(issuing.IssuanceUsage) } currTime := time.Now() @@ -475,7 +731,7 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da } // Rebuild the CRL to include the newly revoked issuer. - crlErr := b.crlBuilder.rebuild(sc, false) + warnings, crlErr := b.CrlBuilder().rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -491,6 +747,9 @@ func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, da // Impossible. return nil, err } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } // For sanity, we'll add a warning message here if there's no other // issuer which verifies this issuer. diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go index 90119ce4e8a1..2b9297f118a8 100644 --- a/builtin/logical/pki/path_manage_keys.go +++ b/builtin/logical/pki/path_manage_keys.go @@ -1,20 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "bytes" "context" "encoding/pem" + "net/http" "strings" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" ) func pathGenerateKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/generate/(internal|exported|kms)", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "internal-key|exported-key|kms-key", + }, + Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -34,9 +46,8 @@ func pathGenerateKey(b *backend) *framework.Path { Type: framework.TypeInt, Default: 0, Description: `The number of bits to use. Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, +0 (universal default); with rsa key_type: 2048 (default), 3072, 4096 or 8192; +with ec key_type: 224, 256 (default), 384, or 521; ignored with ed25519.`, }, "managed_key_name": { Type: framework.TypeString, @@ -54,7 +65,36 @@ is required. 
Ignored for other types.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathGenerateKeyHandler, + Callback: b.pathGenerateKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `ID assigned to this key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Name assigned to this key.`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The type of key to use; defaults to RSA. "rsa" + "ec" and "ed25519" are the only valid values.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `The private key string`, + Required: false, + }, + }, + }}, + }, + ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, @@ -76,7 +116,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not generate keys until migration has completed"), nil } @@ -115,7 +155,7 @@ func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Reque return nil, err } - keyBundle, actualPrivateKeyType, err = createKmsKeyBundle(ctx, b, keyId) + keyBundle, actualPrivateKeyType, err = managed_key.CreateKmsKeyBundle(ctx, b, keyId) if err != nil { return nil, err } @@ -149,6 +189,12 @@ func pathImportKey(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/import", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "import", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ keyNameParam: { Type: framework.TypeString, @@ -162,7 +208,30 @@ func pathImportKey(b *backend) *framework.Path { Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathImportKeyHandler, + Callback: b.pathImportKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `ID assigned to this key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Name assigned to this key.`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The type of key to use; defaults to RSA. "rsa" + "ec" and "ed25519" are the only valid values.`, + Required: true, + }, + }, + }}, + }, ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, @@ -185,7 +254,7 @@ func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request b.issuersLock.Lock() defer b.issuersLock.Unlock() - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Cannot import keys until migration has completed"), nil } diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go index 7b53ae836ee6..1fc671caa773 100644 --- a/builtin/logical/pki/path_manage_keys_test.go +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -9,6 +12,9 @@ import ( "fmt" "testing" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -26,7 +32,7 @@ func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { wantLogicalErr bool }{ {"all-defaults", "", []int{0}, false}, - {"rsa", "rsa", []int{0, 2048, 3072, 4096}, false}, + {"rsa", "rsa", []int{0, 2048, 3072, 4096, 8192}, false}, {"ec", "ec", []int{0, 224, 256, 384, 521}, false}, {"ed25519", "ed25519", []int{0}, false}, {"error-rsa", "rsa", []int{-1, 343444}, true}, @@ -95,6 +101,8 @@ func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { }, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/generate/exported"), logical.UpdateOperation), resp, true) + require.NoError(t, err, "Failed generating exported key") require.NotNil(t, resp, "Got nil response generating exported key") require.Equal(t, "ec", resp.Data["key_type"], "key_type field contained an invalid type") @@ -136,13 +144,16 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { }, MountPoint: "pki/", }) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/import"), logical.UpdateOperation), resp, true) + require.NoError(t, err, "Failed importing ec key") require.NotNil(t, resp, "Got nil response importing ec key") require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty") require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"]) - keyId1 := resp.Data["key_id"].(keyID) + keyId1 := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -160,7 +171,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2 := resp.Data["key_id"].(keyID) + keyId2 := resp.Data["key_id"].(issuing.KeyID) require.NotEqual(t, keyId1, keyId2) @@ -241,7 +252,7 @@ func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2Reimport := resp.Data["key_id"].(keyID) + keyId2Reimport := resp.Data["key_id"].(issuing.KeyID) require.NotEqual(t, keyId2, keyId2Reimport, "re-importing key 2 did not generate a new key id") } @@ -260,7 +271,7 @@ func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { require.NoError(t, err, "Failed generating key") require.NotNil(t, resp, "Got nil response generating key") require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.DeleteOperation, @@ -288,7 +299,7 @@ func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { 
require.NoError(t, err, "Failed generating issuer") require.NotNil(t, resp, "Got nil response generating issuer") require.False(t, resp.IsError(), "resp contained errors generating issuer: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.DeleteOperation, @@ -315,7 +326,7 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { require.NoError(t, err, "Failed generating key") require.NotNil(t, resp, "Got nil response generating key") require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) + keyId := resp.Data["key_id"].(issuing.KeyID) resp, err = b.HandleRequest(context.Background(), &logical.Request{ Operation: logical.UpdateOperation, @@ -324,6 +335,8 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { Data: map[string]interface{}{"key_name": "new-name"}, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.UpdateOperation), resp, true) + require.NoError(t, err, "failed updating key with new name") require.NotNil(t, resp, "Got nil response updating key with new name") require.False(t, resp.IsError(), "unexpected error updating key with new name: %#v", resp.Error()) @@ -334,6 +347,8 @@ func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { Storage: s, MountPoint: "pki/", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.ReadOperation), resp, true) + require.NoError(t, err, "failed reading key after name update") require.NotNil(t, resp, "Got nil response reading key after name update") require.False(t, resp.IsError(), "unexpected error reading key: %#v", resp.Error()) diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go new file mode 100644 index 000000000000..2a5a1b8dc7bf --- /dev/null +++ b/builtin/logical/pki/path_ocsp.go @@ -0,0 +1,536 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ocsp" +) + +const ( + ocspReqParam = "req" + ocspResponseContentType = "application/ocsp-response" + maximumRequestSize = 2048 // A normal simple request is 87 bytes, so give us some buffer +) + +type ocspRespInfo struct { + serialNumber *big.Int + ocspStatus int + revocationTimeUTC *time.Time + issuerID issuing.IssuerID +} + +// These response variables should not be mutated, instead treat them as constants +var ( + OcspUnauthorizedResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusUnauthorized, + logical.HTTPRawBody: ocsp.UnauthorizedErrorResponse, + }, + } + OcspMalformedResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + logical.HTTPRawBody: ocsp.MalformedRequestErrorResponse, + }, + } + OcspInternalErrorResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusInternalServerError, + logical.HTTPRawBody: ocsp.InternalErrorErrorResponse, + }, + } + + ErrMissingOcspUsage = errors.New("issuer entry did not have the OCSPSigning usage") + ErrIssuerHasNoKey = errors.New("issuer has no key") + ErrUnknownIssuer = errors.New("unknown issuer") +) + +func buildPathOcspGet(b *backend) *framework.Path { + pattern := "ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) +} + +func buildPathUnifiedOcspGet(b *backend) *framework.Path { + pattern := "unified-ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) +} + +func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + return &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: map[string]*framework.FieldSchema{ + ocspReqParam: { + Type: framework.TypeString, + Description: "base-64 encoded ocsp request", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.ocspHandler, + }, + }, + + HelpSynopsis: pathOcspHelpSyn, + HelpDescription: pathOcspHelpDesc, + } +} + +func buildPathOcspPost(b *backend) *framework.Path { + pattern := "ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) +} + +func buildPathUnifiedOcspPost(b *backend) *framework.Path { + 
pattern := "unified-ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) +} + +func buildOcspPostWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + return &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.ocspHandler, + }, + }, + + HelpSynopsis: pathOcspHelpSyn, + HelpDescription: pathOcspHelpDesc, + } +} + +func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, request.Storage) + cfg, err := b.CrlBuilder().getConfigWithUpdate(sc) + if err != nil || cfg.OcspDisable || (isUnifiedOcspPath(request) && !cfg.UnifiedCRL) { + return OcspUnauthorizedResponse, nil + } + + derReq, err := fetchDerEncodedRequest(request, data) + if err != nil { + return OcspMalformedResponse, nil + } + + ocspReq, err := ocsp.ParseRequest(derReq) + if err != nil { + return OcspMalformedResponse, nil + } + + useUnifiedStorage := canUseUnifiedStorage(request, cfg) + + ocspStatus, err := getOcspStatus(sc, ocspReq, useUnifiedStorage) + if err != nil { + return logAndReturnInternalError(b, err), nil + } + + caBundle, issuer, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) + if err != nil { + if errors.Is(err, ErrUnknownIssuer) { + // Since we were not able to find a matching issuer for the incoming request + // generate an Unknown OCSP response. This might turn into an Unauthorized if + // we find out that we don't have a default issuer or it's missing the proper Usage flags + return generateUnknownResponse(cfg, sc, ocspReq), nil + } + if errors.Is(err, ErrMissingOcspUsage) { + // If we did find a matching issuer but aren't allowed to sign, the spec says + // we should be responding with an Unauthorized response as we don't have the + // ability to sign the response. + // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 + return OcspUnauthorizedResponse, nil + } + return logAndReturnInternalError(b, err), nil + } + + byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) + if err != nil { + return logAndReturnInternalError(b, err), nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: byteResp, + }, + }, nil +} + +func canUseUnifiedStorage(req *logical.Request, cfg *crlConfig) bool { + if isUnifiedOcspPath(req) { + return true + } + + // We are operating on the existing /pki/ocsp path, both of these fields need to be enabled + // for us to use the unified path. + return shouldLocalPathsUseUnified(cfg) +} + +func isUnifiedOcspPath(req *logical.Request) bool { + return strings.HasPrefix(req.Path, "unified-ocsp") +} + +func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { + // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did + // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there + // isn't much else we can do at this point. 
+ config, err := sc.getIssuersConfig() + if err != nil { + return logAndReturnInternalError(sc.Backend, err) + } + + if config.DefaultIssuerId == "" { + // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. + return OcspUnauthorizedResponse + } + + caBundle, issuer, err := getOcspIssuerParsedBundle(sc, config.DefaultIssuerId) + if err != nil { + if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { + // We must have raced on a delete/update of the default issuer; either way + // there is no way to sign a response, so Unauthorized it is. + return OcspUnauthorizedResponse + } + return logAndReturnInternalError(sc.Backend, err) + } + + if !issuer.Usage.HasUsage(issuing.OCSPSigningUsage) { + // If the default issuer is missing the OCSP signing usage, we can't sign a response so Unauthorized it is. + return OcspUnauthorizedResponse + } + + info := &ocspRespInfo{ + serialNumber: ocspReq.SerialNumber, + ocspStatus: ocsp.Unknown, + } + + byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) + if err != nil { + return logAndReturnInternalError(sc.Backend, err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: byteResp, + }, + } +} + +func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) ([]byte, error) { + switch request.Operation { + case logical.ReadOperation: + // The param within the GET request should have a base64 encoded version of a DER request. + base64Req := data.Get(ocspReqParam).(string) + if base64Req == "" { + return nil, errors.New("no base64 encoded ocsp request was found") + } + + if len(base64Req) >= maximumRequestSize { + return nil, errors.New("request is too large") + } + + return base64.StdEncoding.DecodeString(base64Req) + case logical.UpdateOperation: + // POST bodies should contain the binary form of the DER request. + // NOTE: Writing an empty update request to Vault causes a nil request.HTTPRequest, and that object + // says that it is possible for its Body element to be nil as well, so check both just in case. + if request.HTTPRequest == nil { + return nil, errors.New("no data in request") + } + rawBody := request.HTTPRequest.Body + if rawBody == nil { + return nil, errors.New("no data in request body") + } + defer rawBody.Close() + + requestBytes, err := io.ReadAll(io.LimitReader(rawBody, maximumRequestSize)) + if err != nil { + return nil, err + } + + if len(requestBytes) >= maximumRequestSize { + return nil, errors.New("request is too large") + } + return requestBytes, nil + default: + return nil, fmt.Errorf("unsupported request method: %s", request.Operation) + } +} + +func logAndReturnInternalError(b *backend, err error) *logical.Response { + // Since OCSP might be a high traffic endpoint, we will log at debug level only + // any internal errors we do get. There is no way for us to return errors + // to the end-user, so we rely on the log statement to help in debugging possible + // issues in the field.
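`fetchDerEncodedRequest` above accepts the two standard OCSP transports, and caps both at `maximumRequestSize` (2048 bytes). Here is a client-side sketch of building matching GET and POST requests; the Vault address and mount name are placeholder assumptions.

```go
package ocspclient

import (
	"bytes"
	"crypto"
	"crypto/x509"
	"encoding/base64"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

// buildOcspRequests sketches both transports the handler accepts: GET carries
// the DER request base64-encoded in the URL path, POST carries the raw DER
// bytes in the body.
func buildOcspRequests(leaf, issuer *x509.Certificate) (*http.Request, *http.Request, error) {
	der, err := ocsp.CreateRequest(leaf, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})
	if err != nil {
		return nil, nil, err
	}

	const vaultAddr = "https://127.0.0.1:8200" // assumption
	const mount = "pki"                        // assumption

	getReq, err := http.NewRequest(http.MethodGet,
		vaultAddr+"/v1/"+mount+"/ocsp/"+base64.StdEncoding.EncodeToString(der), nil)
	if err != nil {
		return nil, nil, err
	}

	postReq, err := http.NewRequest(http.MethodPost,
		vaultAddr+"/v1/"+mount+"/ocsp", bytes.NewReader(der))
	if err != nil {
		return nil, nil, err
	}
	postReq.Header.Set("Content-Type", "application/ocsp-request")

	return getReq, postReq, nil
}
```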
+ b.Logger().Debug("OCSP internal error", "error", err) + return OcspInternalErrorResponse +} + +func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage bool) (*ocspRespInfo, error) { + revEntryRaw, err := fetchCertBySerialBigInt(sc, revokedPath, ocspReq.SerialNumber) + if err != nil { + return nil, err + } + + info := ocspRespInfo{ + serialNumber: ocspReq.SerialNumber, + ocspStatus: ocsp.Good, + } + + if revEntryRaw != nil { + var revEntry revocationInfo + if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { + return nil, err + } + + info.ocspStatus = ocsp.Revoked + info.revocationTimeUTC = &revEntry.RevocationTimeUTC + info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt + } else if useUnifiedStorage { + dashSerial := normalizeSerialFromBigInt(ocspReq.SerialNumber) + unifiedEntry, err := getUnifiedRevocationBySerial(sc, dashSerial) + if err != nil { + return nil, err + } + + if unifiedEntry != nil { + info.ocspStatus = ocsp.Revoked + info.revocationTimeUTC = &unifiedEntry.RevocationTimeUTC + info.issuerID = unifiedEntry.CertificateIssuer + } + } + + return &info, nil +} + +func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuing.IssuerID) (*certutil.ParsedCertBundle, *issuing.IssuerEntry, error) { + reqHash := req.HashAlgorithm + if !reqHash.Available() { + return nil, nil, x509.ErrUnsupportedAlgorithm + } + + // This will prime up issuerIds, with either the optRevokedIssuer value if set + // or if we are operating in legacy storage mode, the shim bundle id or finally + // a list of all our issuers in this mount. + issuerIds, err := lookupIssuerIds(sc, optRevokedIssuer) + if err != nil { + return nil, nil, err + } + + matchedButNoUsage := false + for _, issuerId := range issuerIds { + parsedBundle, issuer, err := getOcspIssuerParsedBundle(sc, issuerId) + if err != nil { + // A bit touchy here as if we get an ErrUnknownIssuer for an issuer id that we picked up + // from a revocation entry, we still return an ErrUnknownOcspIssuer as we can't validate + // the end-user actually meant this specific issuer's cert with serial X. + if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { + // This skips either bad issuer ids, or root certs with no keys that we can't use. + continue + } + return nil, nil, err + } + + // Make sure the client and Vault are talking about the same issuer, otherwise + // we might have a case of a matching serial number for a different issuer which + // we should not respond back in the affirmative about. + matches, err := doesRequestMatchIssuer(parsedBundle, req) + if err != nil { + return nil, nil, err + } + + if matches { + if !issuer.Usage.HasUsage(issuing.OCSPSigningUsage) { + matchedButNoUsage = true + // We found a matching issuer, but it's not allowed to sign the + // response, there might be another issuer that we rotated + // that will match though, so keep iterating. + continue + } + + return parsedBundle, issuer, nil + } + } + + if matchedButNoUsage { + // We matched an issuer but it did not have an OCSP signing usage set so bail. 
+ return nil, nil, ErrMissingOcspUsage + } + + return nil, nil, ErrUnknownIssuer +} + +func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuing.IssuerID) (*certutil.ParsedCertBundle, *issuing.IssuerEntry, error) { + issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) + if err != nil { + switch err.(type) { + case errutil.UserError: + // Most likely the issuer id no longer exists skip it + return nil, nil, ErrUnknownIssuer + default: + return nil, nil, err + } + } + + if issuer.KeyID == "" { + // No point if the key does not exist from the issuer to use as a signer. + return nil, nil, ErrIssuerHasNoKey + } + + caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + if err != nil { + return nil, nil, err + } + + return caBundle, issuer, nil +} + +func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuing.IssuerID) ([]issuing.IssuerID, error) { + if optRevokedIssuer != "" { + return []issuing.IssuerID{optRevokedIssuer}, nil + } + + if sc.Backend.UseLegacyBundleCaStorage() { + return []issuing.IssuerID{legacyBundleShimID}, nil + } + + return sc.listIssuers() +} + +func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.Request) (bool, error) { + // issuer name hashing taken from golang.org/x/crypto/ocsp. + var pkInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(parsedBundle.Certificate.RawSubjectPublicKeyInfo, &pkInfo); err != nil { + return false, err + } + + h := req.HashAlgorithm.New() + h.Write(pkInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(parsedBundle.Certificate.RawSubject) + issuerNameHash := h.Sum(nil) + + return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil +} + +func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) { + curTime := time.Now() + duration, err := parseutil.ParseDurationSecond(cfg.OcspExpiry) + if err != nil { + return nil, err + } + + // x/crypto/ocsp lives outside of the standard library's crypto/x509 and includes + // ripped-off variants of many internal structures and functions. These + // lack support for PSS signatures altogether, so if we have revSigAlg + // that uses PSS, downgrade it to PKCS#1v1.5. This fixes the lack of + // support in x/ocsp, at the risk of OCSP requests failing due to lack + // of PKCS#1v1.5 (in say, PKCS#11 HSMs or GCP). + // + // Other restrictions, such as hash function selection, will still work + // however. + switch revSigAlg { + case x509.SHA256WithRSAPSS: + revSigAlg = x509.SHA256WithRSA + case x509.SHA384WithRSAPSS: + revSigAlg = x509.SHA384WithRSA + case x509.SHA512WithRSAPSS: + revSigAlg = x509.SHA512WithRSA + } + + // Due to a bug in Go's ocsp.ParseResponse(...), we do not provision + // Certificate any more on the response to help Go based OCSP clients. + // This was technically unnecessary, as the Certificate given here + // both signed the OCSP response and issued the leaf cert, and so + // should already be trusted by the client. 
+ // + // See also: https://github.com/golang/go/issues/59641 + template := ocsp.Response{ + IssuerHash: reqHash, + Status: info.ocspStatus, + SerialNumber: info.serialNumber, + ThisUpdate: curTime, + ExtraExtensions: []pkix.Extension{}, + SignatureAlgorithm: revSigAlg, + } + + if duration > 0 { + template.NextUpdate = curTime.Add(duration) + } + + if info.ocspStatus == ocsp.Revoked { + template.RevokedAt = *info.revocationTimeUTC + template.RevocationReason = ocsp.Unspecified + } + + return ocsp.CreateResponse(caBundle.Certificate, caBundle.Certificate, template, caBundle.PrivateKey) +} + +const pathOcspHelpSyn = ` +Query a certificate's revocation status through OCSP +` + +const pathOcspHelpDesc = ` +This endpoint expects DER encoded OCSP requests and returns DER encoded OCSP responses +` diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go new file mode 100644 index 000000000000..baeb49cb1dc9 --- /dev/null +++ b/builtin/logical/pki/path_ocsp_test.go @@ -0,0 +1,743 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "encoding/base64" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" +) + +// If the ocsp_disabled flag is set to true in the crl configuration, make sure we always +// return an Unauthorized error back as we assume an end-user disabling the feature does +// not want us to act as the OCSP authority and the RFC specifies this is the appropriate response. +func TestOcsp_Disabled(t *testing.T) { + t.Parallel() + type testArgs struct { + reqType string + } + var tests []testArgs + for _, reqType := range []string{"get", "post"} { + tests = append(tests, testArgs{ + reqType: reqType, + }) + } + for _, tt := range tests { + localTT := tt + t.Run(localTT.reqType, func(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "rsa") + resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "ocsp_disable": "true", + }) + requireSuccessNonNilResponse(t, resp, err) + resp, err = SendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) + }) + } +} + +// If we can't find the issuer within the request and have no default issuer to sign an Unknown response +// with, return an UnauthorizedErrorResponse according to the RFC, similar to if we are disabled (lack of authority). +// This behavior differs from CRLs when an issuer is removed from a mount.
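One detail of `genResponse` above that is easy to miss: `NextUpdate` is only set when the configured `ocsp_expiry` parses to a positive duration, while a zero value leaves it unset. A small standalone sketch of that rule; the helper name `nextUpdate` is illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

// nextUpdate condenses the NextUpdate handling in genResponse: the expiry
// string is parsed with parseutil.ParseDurationSecond, and a non-positive
// duration leaves NextUpdate unset (no caching window is advertised).
func nextUpdate(ocspExpiry string, thisUpdate time.Time) (time.Time, bool, error) {
	duration, err := parseutil.ParseDurationSecond(ocspExpiry)
	if err != nil {
		return time.Time{}, false, err
	}
	if duration <= 0 {
		return time.Time{}, false, nil
	}
	return thisUpdate.Add(duration), true, nil
}

func main() {
	nu, set, err := nextUpdate("12h", time.Now())
	fmt.Println(nu, set, err)
}
```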
+func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) { + t.Parallel() + + _, _, testEnv := setupOcspEnv(t, "ec") + // Create another completely empty mount so the created issuer/certificate above is unknown + b, s := CreateBackendWithStorage(t) + + resp, err := SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// If the issuer in the request does exist, but the request coming in associates the serial with the +// wrong issuer return an Unknown response back to the caller. +func TestOcsp_WrongIssuerInRequest(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serial, + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Unknown, ocspResp.Status) +} + +// Verify that requests we can't properly decode result in the correct response of MalformedRequestError +func TestOcsp_MalformedRequests(t *testing.T) { + t.Parallel() + type testArgs struct { + reqType string + } + var tests []testArgs + for _, reqType := range []string{"get", "post"} { + tests = append(tests, testArgs{ + reqType: reqType, + }) + } + for _, tt := range tests { + localTT := tt + t.Run(localTT.reqType, func(t *testing.T) { + b, s, _ := setupOcspEnv(t, "rsa") + badReq := []byte("this is a bad request") + var resp *logical.Response + var err error + switch localTT.reqType { + case "get": + resp, err = sendOcspGetRequest(b, s, badReq) + case "post": + resp, err = sendOcspPostRequest(b, s, badReq) + default: + t.Fatalf("bad request type") + } + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 400, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.MalformedRequestErrorResponse, respDer) + }) + } +} + +// Validate that we properly handle a revocation entry that contains an issuer ID that no longer exists, +// the best we can do in this use case is to respond back with the default issuer that we don't know +// the issuer that they are requesting (we can't guarantee that the client is actually requesting a serial +// from that issuer) +func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + ctx := context.Background() + + // Revoke the entry + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + 
"serial_number": serial, + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Twiddle the entry so that the issuer id is no longer valid. + storagePath := revokedPath + normalizeSerial(serial) + var revInfo revocationInfo + revEntry, err := s.Get(ctx, storagePath) + require.NoError(t, err, "failed looking up storage path: %s", storagePath) + err = revEntry.DecodeJSON(&revInfo) + require.NoError(t, err, "failed decoding storage entry: %v", revEntry) + revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" + revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) + require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) + err = s.Put(ctx, revEntry) + require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) + + // Send the request + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Unknown, ocspResp.Status) +} + +// Validate that we properly handle an unknown issuer use-case but that the default issuer +// does not have the OCSP usage flag set, we can't do much else other than reply with an +// Unauthorized response. +func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + ctx := context.Background() + + // Revoke the entry + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serial, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke"), logical.UpdateOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Twiddle the entry so that the issuer id is no longer valid. 
+ storagePath := revokedPath + normalizeSerial(serial) + var revInfo revocationInfo + revEntry, err := s.Get(ctx, storagePath) + require.NoError(t, err, "failed looking up storage path: %s", storagePath) + err = revEntry.DecodeJSON(&revInfo) + require.NoError(t, err, "failed decoding storage entry: %v", revEntry) + revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" + revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) + require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) + err = s.Put(ctx, revEntry) + require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) + + // Update our issuers to no longer have the OcspSigning usage + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer1") + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId2.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") + + // Send the request + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify that if we do have a revoked certificate entry for the request, that matches an +// issuer but that issuer does not have the OcspUsage flag set that we return an Unauthorized +// response back to the caller +func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Revoke our certificate + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serialFromCert(testEnv.leafCertIssuer1), + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Update our issuer to no longer have the OcspSigning usage + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") + requireFieldsSetInResp(t, resp, "usage") + + // Do not assume a specific ordering for usage... 
+ usages, err := issuing.NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + require.NoError(t, err, "failed parsing usage return value") + require.True(t, usages.HasUsage(issuing.IssuanceUsage)) + require.True(t, usages.HasUsage(issuing.CRLSigningUsage)) + require.False(t, usages.HasUsage(issuing.OCSPSigningUsage)) + + // Request an OCSP request from it, we should get an Unauthorized response back + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify if our matching issuer for a revocation entry has no key associated with it that +// we bail with an Unauthorized response. +func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Revoke our certificate + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serialFromCert(testEnv.leafCertIssuer1), + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Delete the key associated with our issuer + resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) + requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") + requireFieldsSetInResp(t, resp, "key_id") + keyId := resp.Data["key_id"].(issuing.KeyID) + + // This is a bit naughty but allow me to delete the key... + sc := b.makeStorageContext(context.Background(), s) + issuer, err := sc.fetchIssuerById(testEnv.issuerId1) + require.NoError(t, err, "failed to get issuer from storage") + issuer.KeyID = "" + err = sc.writeIssuer(issuer) + require.NoError(t, err, "failed to write issuer update") + + resp, err = CBDelete(b, s, "key/"+keyId.String()) + requireSuccessNonNilResponse(t, resp, err, "failed deleting key") + + // Request an OCSP request from it, we should get an Unauthorized response back + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify if for some reason an end-user has rotated an existing certificate using the same +// key so our algo matches multiple issuers and one has OCSP usage disabled. We expect that +// even if a prior issuer issued the certificate, the new matching issuer can respond and sign +// the response to the caller on its behalf. +// +// NOTE: This test is a bit at the mercy of iteration order of the issuer ids. +// +// If it becomes flaky, most likely something is wrong in the code +// and not the test. 
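The rotation this test exercises, seen from an API client: issue a second root over the existing key via `key_ref`, so both issuers can validate and, where usage allows, sign OCSP responses for the same leaves. A sketch with illustrative field values; `my-key-id` is a placeholder for a real key reference.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Rotate the root while reusing the original root's backing key.
	secret, err := client.Logical().Write("pki/root/rotate/existing", map[string]interface{}{
		"key_ref":     "my-key-id", // placeholder key reference
		"common_name": "example-ocsp.com",
		"ttl":         "40h",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new issuer:", secret.Data["issuer_id"])
}
```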
+func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Create a matching issuer as issuer1 with the same backing key + resp, err := CBWrite(b, s, "root/rotate/existing", map[string]interface{}{ + "key_ref": testEnv.keyId1, + "ttl": "40h", + "common_name": "example-ocsp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "rotate issuer failed") + requireFieldsSetInResp(t, resp, "issuer_id") + rotatedCert := parseCert(t, resp.Data["certificate"].(string)) + + // Remove ocsp signing from our issuer + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") + requireFieldsSetInResp(t, resp, "usage") + // Do not assume a specific ordering for usage... + usages, err := issuing.NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + require.NoError(t, err, "failed parsing usage return value") + require.True(t, usages.HasUsage(issuing.IssuanceUsage)) + require.True(t, usages.HasUsage(issuing.CRLSigningUsage)) + require.False(t, usages.HasUsage(issuing.OCSPSigningUsage)) + + // Request an OCSP request from it, we should get a Good response back, from the rotated cert + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Good, ocspResp.Status) + require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) + + requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, rotatedCert) +} + +// Make sure OCSP GET/POST requests work through the entire stack, and not just +// through the quicker backend layer the other tests are doing. 
+func TestOcsp_HigherLevel(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + mountPKIEndpoint(t, client, "pki") + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "root-ca.com", + "ttl": "600h", + }) + + require.NoError(t, err, "error generating root ca: %v", err) + require.NotNil(t, resp, "expected ca info from root") + + issuerCert := parseCert(t, resp.Data["certificate"].(string)) + + resp, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "no_store": "false", // make sure we store this cert + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err = client.Logical().Write("pki/issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "15m", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + certToRevoke := parseCert(t, resp.Data["certificate"].(string)) + serialNum := resp.Data["serial_number"].(string) + + // Revoke the certificate + resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNum, + }) + require.NoError(t, err, "error revoking certificate: %v", err) + require.NotNil(t, resp, "got nil response from revoke") + + // Make sure that OCSP handler responds properly + ocspReq := generateRequest(t, crypto.SHA256, certToRevoke, issuerCert) + ocspPostReq := client.NewRequest(http.MethodPost, "/v1/pki/ocsp") + ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") + ocspPostReq.BodyBytes = ocspReq + rawResp, err := client.RawRequest(ocspPostReq) + require.NoError(t, err, "failed sending ocsp post request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader := rawResp.Body + respDer, err := io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Revoked, ocspResp.Status) + require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) + + // Test OCSP Get request for ocsp + urlEncoded := base64.StdEncoding.EncodeToString(ocspReq) + if strings.Contains(urlEncoded, "//") { + // workaround known redirect bug that is difficult to fix + t.Skipf("VAULT-13630 - Skipping GET OCSP test with encoded issuer cert containing // triggering redirection bug") + } + + ocspGetReq := client.NewRequest(http.MethodGet, "/v1/pki/ocsp/"+urlEncoded) + ocspGetReq.Headers.Set("Content-Type", "application/ocsp-request") + rawResp, err = client.RawRequest(ocspGetReq) + require.NoError(t, err, "failed sending ocsp get request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader = rawResp.Body + respDer, err = io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err = ocsp.ParseResponse(respDer, issuerCert) + 
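Two details of the GET flow above matter for the `VAULT-13630` skip: RFC 6960's GET form uses standard (not URL-safe) base64, so the encoded request can legitimately contain `/` characters, and two adjacent slashes can then collide with HTTP path normalization. A hedged sketch of building such a path with `golang.org/x/crypto/ocsp` (the helper name and return shape are illustrative only):

```go
package ocspexample

import (
	"crypto"
	"crypto/x509"
	"encoding/base64"
	"strings"

	"golang.org/x/crypto/ocsp"
)

// buildOcspGetPath returns the mount-relative path for the OCSP GET form,
// plus whether the encoding is safe to send as-is: encoded requests that
// contain "//" can be mangled by path normalization or redirects, which
// is exactly the condition the test above skips on.
func buildOcspGetPath(leaf, issuer *x509.Certificate) (string, bool, error) {
	der, err := ocsp.CreateRequest(leaf, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256})
	if err != nil {
		return "", false, err
	}
	encoded := base64.StdEncoding.EncodeToString(der)
	return "ocsp/" + encoded, !strings.Contains(encoded, "//"), nil
}
```

Callers that get `false` back can simply fall back to the POST form, which carries the DER request in the body and avoids the encoding problem entirely.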
require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Revoked, ocspResp.Status) + require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) +} + +// TestOcsp_NextUpdate make sure that we are setting the appropriate values +// for the NextUpdate field within our responses. +func TestOcsp_NextUpdate(t *testing.T) { + // Within the runOcspRequestTest, with a ocspExpiry of 0, + // we will validate that NextUpdate was not set in the response + runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 0) + + // Within the runOcspRequestTest, with a ocspExpiry of 24 hours, we will validate + // that NextUpdate is set and has a time 24 hours larger than ThisUpdate + runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 24*time.Hour) +} + +func TestOcsp_ValidRequests(t *testing.T) { + type caKeyConf struct { + keyType string + keyBits int + sigBits int + } + t.Parallel() + type testArgs struct { + reqType string + keyConf caKeyConf + reqHash crypto.Hash + } + var tests []testArgs + for _, reqType := range []string{"get", "post"} { + for _, keyConf := range []caKeyConf{ + {"rsa", 0, 0}, + {"rsa", 0, 384}, + {"rsa", 0, 512}, + {"ec", 0, 0}, + {"ec", 521, 0}, + } { + // "ed25519" is not supported at the moment in x/crypto/ocsp + for _, requestHash := range []crypto.Hash{crypto.SHA1, crypto.SHA256, crypto.SHA384, crypto.SHA512} { + tests = append(tests, testArgs{ + reqType: reqType, + keyConf: keyConf, + reqHash: requestHash, + }) + } + } + } + for _, tt := range tests { + localTT := tt + testName := fmt.Sprintf("%s-%s-keybits-%d-sigbits-%d-reqHash-%s", localTT.reqType, localTT.keyConf.keyType, + localTT.keyConf.keyBits, + localTT.keyConf.sigBits, + localTT.reqHash) + t.Run(testName, func(t *testing.T) { + runOcspRequestTest(t, localTT.reqType, localTT.keyConf.keyType, localTT.keyConf.keyBits, + localTT.keyConf.sigBits, localTT.reqHash, 12*time.Hour) + }) + } +} + +func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, + caKeyBits int, caKeySigBits int, requestHash crypto.Hash, ocspExpiry time.Duration, +) { + b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits, ocspExpiry) + + // Non-revoked cert + resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Good, ocspResp.Status) + require.Equal(t, requestHash, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) + + requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) + + // Now revoke it + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serialFromCert(testEnv.leafCertIssuer1), + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request with 
revoked") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer = resp.Data["http_raw_body"].([]byte) + + ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response with revoked") + + require.Equal(t, ocsp.Revoked, ocspResp.Status) + require.Equal(t, requestHash, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) + + requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) + + // Request status for our second issuer + resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer = resp.Data["http_raw_body"].([]byte) + + ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer2) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Good, ocspResp.Status) + require.Equal(t, requestHash, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) + + // Verify that our thisUpdate and nextUpdate fields are updated as expected + resp, err = CBRead(b, s, "config/crl") + requireSuccessNonNilResponse(t, resp, err, "failed reading from config/crl") + requireFieldsSetInResp(t, resp, "ocsp_expiry") + ocspExpiryRaw := resp.Data["ocsp_expiry"].(string) + expectedDiff, err := parseutil.ParseDurationSecond(ocspExpiryRaw) + require.NoError(t, err, "failed to parse default ocsp expiry value") + + thisUpdate := ocspResp.ThisUpdate + require.Less(t, time.Since(thisUpdate), 10*time.Second, "expected ThisUpdate field to be within the last 10 seconds") + if expectedDiff != 0 { + nextUpdate := ocspResp.NextUpdate + require.False(t, nextUpdate.IsZero(), "nextUpdate field value should have been a non-zero time") + require.True(t, thisUpdate.Before(nextUpdate), + fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) + nextUpdateDiff := nextUpdate.Sub(thisUpdate) + require.Equal(t, expectedDiff, nextUpdateDiff, + fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", + thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) + } else { + // With the config value set to 0, we shouldn't have a NextUpdate field set + require.True(t, ocspResp.NextUpdate.IsZero(), "nextUpdate value was not zero as expected was: %v", ocspResp.NextUpdate) + } + requireOcspSignatureAlgoForKey(t, testEnv.issuer2.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2) +} + +func requireOcspSignatureAlgoForKey(t *testing.T, expected x509.SignatureAlgorithm, actual x509.SignatureAlgorithm) { + t.Helper() + + require.Equal(t, expected.String(), actual.String()) +} + +type ocspTestEnv struct { + issuer1 *x509.Certificate + issuer2 *x509.Certificate + + issuerId1 issuing.IssuerID + issuerId2 issuing.IssuerID + + 
leafCertIssuer1 *x509.Certificate + leafCertIssuer2 *x509.Certificate + + keyId1 issuing.KeyID + keyId2 issuing.KeyID +} + +func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { + return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 0, 12*time.Hour) +} + +func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int, ocspExpiry time.Duration) (*backend, logical.Storage, *ocspTestEnv) { + b, s := CreateBackendWithStorage(t) + var issuerCerts []*x509.Certificate + var leafCerts []*x509.Certificate + var issuerIds []issuing.IssuerID + var keyIds []issuing.KeyID + + resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "ocsp_enable": true, + "ocsp_expiry": fmt.Sprintf("%ds", int(ocspExpiry.Seconds())), + }) + requireSuccessNonNilResponse(t, resp, err, "config/crl failed") + + for i := 0; i < 2; i++ { + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "key_type": keyType, + "key_bits": caKeyBits, + "signature_bits": caKeySigBits, + "ttl": "40h", + "common_name": "example-ocsp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") + requireFieldsSetInResp(t, resp, "issuer_id", "key_id") + issuerId := resp.Data["issuer_id"].(issuing.IssuerID) + keyId := resp.Data["key_id"].(issuing.KeyID) + + resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + "allowed_domains": "foobar.com", + "no_store": false, + "generate_lease": false, + "issuer_ref": issuerId, + "key_type": keyType, + }) + requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) + + resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ + "common_name": "test.foobar.com", + }) + requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) + requireFieldsSetInResp(t, resp, "certificate", "issuing_ca", "serial_number") + leafCert := parseCert(t, resp.Data["certificate"].(string)) + issuingCa := parseCert(t, resp.Data["issuing_ca"].(string)) + + issuerCerts = append(issuerCerts, issuingCa) + leafCerts = append(leafCerts, leafCert) + issuerIds = append(issuerIds, issuerId) + keyIds = append(keyIds, keyId) + } + + testEnv := &ocspTestEnv{ + issuerId1: issuerIds[0], + issuer1: issuerCerts[0], + leafCertIssuer1: leafCerts[0], + keyId1: keyIds[0], + + issuerId2: issuerIds[1], + issuer2: issuerCerts[1], + leafCertIssuer2: leafCerts[1], + keyId2: keyIds[1], + } + + return b, s, testEnv +} + +func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { + t.Helper() + + ocspRequest := generateRequest(t, requestHash, cert, issuer) + + switch strings.ToLower(getOrPost) { + case "get": + return sendOcspGetRequest(b, s, ocspRequest) + case "post": + return sendOcspPostRequest(b, s, ocspRequest) + default: + t.Fatalf("unsupported value for SendOcspRequest getOrPost arg: %s", getOrPost) + } + return nil, nil +} + +func sendOcspGetRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { + urlEncoded := base64.StdEncoding.EncodeToString(ocspRequest) + return CBRead(b, s, "ocsp/"+urlEncoded) +} + +func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { + reader := io.NopCloser(bytes.NewReader(ocspRequest)) + resp, err := 
b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "ocsp", + Storage: s, + MountPoint: "pki/", + HTTPRequest: &http.Request{ + Body: reader, + }, + }) + + return resp, err +} diff --git a/builtin/logical/pki/path_resign_crls.go b/builtin/logical/pki/path_resign_crls.go index 7f8746aa9452..95187c5734af 100644 --- a/builtin/logical/pki/path_resign_crls.go +++ b/builtin/logical/pki/path_resign_crls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -12,11 +15,13 @@ import ( "errors" "fmt" "math/big" + "net/http" "strconv" "strings" "time" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -39,6 +44,13 @@ var ( func pathResignCrls(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/resign-crls", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "resign", + OperationSuffix: "crls", + }, + Fields: map[string]*framework.FieldSchema{ issuerRefParam: { Type: framework.TypeString, @@ -77,6 +89,18 @@ base64 encoded. Defaults to "pem".`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathUpdateResignCrlsHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "crl": { + Type: framework.TypeString, + Description: `CRL`, + Required: true, + }, + }, + }}, + }, }, }, @@ -89,6 +113,13 @@ base64 encoded. 
Defaults to "pem".`, func pathSignRevocationList(b *backend) *framework.Path { return &framework.Path{ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-revocation-list", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "revocation-list", + }, + Fields: map[string]*framework.FieldSchema{ issuerRefParam: { Type: framework.TypeString, @@ -133,6 +164,18 @@ value (string)`, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathUpdateSignRevocationListHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "crl": { + Type: framework.TypeString, + Description: `CRL`, + Required: true, + }, + }, + }}, + }, }, }, @@ -143,11 +186,11 @@ return a signed CRL based on the parameter values.`, } func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil } - issuerRef := getIssuerRef(data) + issuerRef := GetIssuerRef(data) crlNumber := data.Get(crlNumberParam).(int) deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) nextUpdateStr := data.Get(nextUpdateParam).(string) @@ -210,7 +253,7 @@ func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logi if deltaCrlBaseNumber > -1 { ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) } template.ExtraExtensions = []pkix.Extension{ext} } @@ -231,11 +274,11 @@ func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logi } func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil } - issuerRef := getIssuerRef(data) + issuerRef := GetIssuerRef(data) crlNumber := data.Get(crlNumberParam).(int) deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) nextUpdateStr := data.Get(nextUpdateParam).(string) @@ -283,7 +326,7 @@ func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, reque if deltaCrlBaseNumber > -1 { ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) } crlExtensions = append(crlExtensions, ext) } @@ -607,7 +650,7 @@ func getCaBundle(sc *storageContext, issuerRef string) (*certutil.CAInfoBundle, return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerRefParam, err) } - return sc.fetchCAInfoByIssuerId(issuerId, CRLSigningUsage) + return sc.fetchCAInfoByIssuerId(issuerId, issuing.CRLSigningUsage) } func decodePemCrls(rawCrls []string) ([]*x509.RevocationList, error) { diff --git a/builtin/logical/pki/path_resign_crls_test.go b/builtin/logical/pki/path_resign_crls_test.go index 
65ffcf95ed7b..d586a23539e9 100644 --- a/builtin/logical/pki/path_resign_crls_test.go +++ b/builtin/logical/pki/path_resign_crls_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -10,6 +13,8 @@ import ( "testing" "time" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/vault" @@ -55,6 +60,7 @@ func TestResignCrls_NormalCrl(t *testing.T) { "format": "pem", "crls": []string{crl1, crl2}, }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b1.Route("issuer/default/resign-crls"), logical.UpdateOperation), resp, true) requireSuccessNonNilResponse(t, resp, err) requireFieldsSetInResp(t, resp, "crl") pemCrl := resp.Data["crl"].(string) @@ -351,6 +357,7 @@ func TestSignRevocationList_NoRevokedCerts(t *testing.T) { "next_update": "12h", "format": "pem", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default/sign-revocation-list"), logical.UpdateOperation), resp, true) requireSuccessNonNilResponse(t, resp, err) requireFieldsSetInResp(t, resp, "crl") pemCrl := resp.Data["crl"].(string) @@ -490,6 +497,8 @@ func requireExtensionOid(t *testing.T, identifier asn1.ObjectIdentifier, extensi } func extractSerialsFromCrl(t *testing.T, crl *x509.RevocationList) map[string]time.Time { + t.Helper() + serials := map[string]time.Time{} for _, revokedCert := range crl.RevokedCertificates { diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index fce63dbf04b4..c36034b613f5 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" + "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -9,19 +13,29 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "net/http" "strings" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func pathListCertsRevoked(b *backend) *framework.Path { return &framework.Path{ Pattern: "certs/revoked/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "revoked-certs", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathListRevokedCertsHandler, @@ -33,9 +47,35 @@ func pathListCertsRevoked(b *backend) *framework.Path { } } +func pathListCertsRevocationQueue(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/revocation-queue/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "certs-revocation-queue", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListRevocationQueueHandler, + }, + }, + + HelpSynopsis: pathListRevocationQueueHelpSyn, + HelpDescription: pathListRevocationQueueHelpDesc, + } +} + func pathRevoke(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: 
"revoke", + }, + Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -56,6 +96,28 @@ signed by an issuer in this mount.`, // If this needs to write, the entire request will be forwarded to the // active node of the current performance cluster, but we don't want to // forward invalid revoke requests there. + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "revocation_time": { + Type: framework.TypeInt64, + Description: `Revocation Time`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeTime, + Description: `Revocation Time`, + Required: false, + }, + "state": { + Type: framework.TypeString, + Description: `Revocation State`, + Required: false, + }, + }, + }}, + }, }, }, @@ -67,6 +129,13 @@ signed by an issuer in this mount.`, func pathRevokeWithKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `revoke-with-key`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + OperationSuffix: "with-key", + }, + Fields: map[string]*framework.FieldSchema{ "serial_number": { Type: framework.TypeString, @@ -92,6 +161,28 @@ be in PEM format.`, // If this needs to write, the entire request will be forwarded to the // active node of the current performance cluster, but we don't want to // forward invalid revoke requests there. + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "revocation_time": { + Type: framework.TypeInt64, + Description: `Revocation Time`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeTime, + Description: `Revocation Time`, + Required: false, + }, + "state": { + Type: framework.TypeString, + Description: `Revocation State`, + Required: false, + }, + }, + }}, + }, }, }, @@ -104,6 +195,12 @@ func pathRotateCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "crl", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateCRLRead, @@ -111,6 +208,18 @@ func pathRotateCRL(b *backend) *framework.Path { // so this request should be forwarded when it is first seen, not // when it is ready to write. ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "success": { + Type: framework.TypeBool, + Description: `Whether rotation was successful`, + Required: true, + }, + }, + }}, + }, }, }, @@ -123,6 +232,12 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { return &framework.Path{ Pattern: `crl/rotate-delta`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "delta-crl", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRotateDeltaCRLRead, @@ -130,6 +245,18 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { // so this request should be forwarded when it is first seen, not // when it is ready to write. 
ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "success": { + Type: framework.TypeBool, + Description: `Whether rotation was successful`, + Required: true, + }, + }, + }}, + }, }, }, @@ -138,13 +265,50 @@ func pathRotateDeltaCRL(b *backend) *framework.Path { } } -func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, []byte, error) { +func pathListUnifiedRevoked(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/unified-revoked/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "unified-revoked-certs", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListUnifiedRevokedCertsHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `List of Keys`, + Required: false, + }, + "key_info": { + Type: framework.TypeString, + Description: `Key information`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathListUnifiedRevokedHelpSyn, + HelpDescription: pathListUnifiedRevokedHelpDesc, + } +} + +func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, *x509.Certificate, error) { // This function handles just the verification of the certificate against // the global issuer set, checking whether or not it is importable. // // We return the parsed serial number, an optionally-nil byte array to // write out to disk, and an error if one occurred. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { // We require listing all issuers from the 1.11 method. If we're // still using the legacy CA bundle but with the newer certificate // attribute, we err and require the operator to upgrade and migrate @@ -208,7 +372,7 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log // imported this certificate, likely when we issued it. We don't // need to re-verify the signature as we assume it was already // verified when it was imported. - return serial, false, certEntry.Value, nil + return serial, false, certReferenceStored, nil } } @@ -242,13 +406,13 @@ func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *log } if foundMatchingIssuer { - return serial, true, certReference.Raw, nil + return serial, true, certReference, nil } return serial, false, nil, errutil.UserError{Err: "unable to verify signature on presented cert from any present issuer in this mount; certificates from previous CAs will need to have their issuing CA and key re-imported if revocation is necessary"} } -func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Request, cert []byte, keyPem string) error { +func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference *x509.Certificate, keyPem string) error { if keyPem == "" { // The only way to get here should be via the /revoke endpoint; // validate the path one more time and return an error if necessary. @@ -261,12 +425,6 @@ func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Req return nil } - // Parse the certificate for reference. 
- certReference, err := x509.ParseCertificate(cert) - if err != nil { - return errutil.UserError{Err: fmt.Sprintf("certificate could not be parsed: %v", err)} - } - // Now parse the key's PEM block. pemBlock, _ := pem.Decode([]byte(keyPem)) if pemBlock == nil { @@ -279,6 +437,28 @@ func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Req return fmt.Errorf("failed to parse provided private key: %w", err) } + return validatePrivateKeyMatchesCert(signer, certReference) +} + +func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Certificate) error { + public := signer.Public() + + switch certReference.PublicKey.(type) { + case *rsa.PublicKey: + rsaPriv, ok := signer.(*rsa.PrivateKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + + if err := rsaPriv.Validate(); err != nil { + return errutil.UserError{Err: fmt.Sprintf("error validating integrity of private key: %v", err)} + } + } + + return validatePublicKeyMatchesCert(public, certReference) +} + +func validatePublicKeyMatchesCert(verifier crypto.PublicKey, certReference *x509.Certificate) error { // Finally, verify if the cert and key match. This code has been // cribbed from the Go TLS config code, with minor modifications. // @@ -286,22 +466,18 @@ func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Req // components and ensure we validate exponent and curve information // as well. // - // // See: https://github.com/golang/go/blob/c6a2dada0df8c2d75cf3ae599d7caed77d416fa2/src/crypto/tls/tls.go#L304-L331 switch certPub := certReference.PublicKey.(type) { case *rsa.PublicKey: - privPub, ok := signer.Public().(*rsa.PublicKey) + privPub, ok := verifier.(*rsa.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } - if err := signer.(*rsa.PrivateKey).Validate(); err != nil { - return err - } if certPub.N.Cmp(privPub.N) != 0 || certPub.E != privPub.E { return errutil.UserError{Err: "provided private key does not match certificate's public key"} } case *ecdsa.PublicKey: - privPub, ok := signer.Public().(*ecdsa.PublicKey) + privPub, ok := verifier.(*ecdsa.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } @@ -309,7 +485,7 @@ func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Req return errutil.UserError{Err: "provided private key does not match certificate's public key"} } case ed25519.PublicKey: - privPub, ok := signer.Public().(ed25519.PublicKey) + privPub, ok := verifier.(ed25519.PublicKey) if !ok { return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} } @@ -323,9 +499,47 @@ func (b *backend) pathRevokeWriteHandleKey(ctx context.Context, req *logical.Req return nil } -func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *roleEntry) (*logical.Response, error) { +func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, serial string, havePrivateKey bool) (*logical.Response, error) { + if !config.UseGlobalQueue { + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found.", serial)), nil + } + + if havePrivateKey { + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found, "+ + "and cross-cluster revocation not supported with key revocation.", serial)), nil + } + + // 
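The key-matching refactor above splits proof-of-possession into validatePrivateKeyMatchesCert (which additionally runs `rsa.PrivateKey.Validate` for RSA keys) and a per-type public-key comparison. As an aside: since Go 1.15 the standard public key types all implement `Equal`, so the comparison half could in principle be condensed as below. This is a sketch, not the backend's code:

```go
package pkiexample

import (
	"crypto"
	"crypto/x509"
)

// publicKeyMatchesCert compares a candidate public key against the
// certificate's embedded key. *rsa.PublicKey, *ecdsa.PublicKey, and
// ed25519.PublicKey all provide Equal(crypto.PublicKey) bool, which also
// covers exponent and curve mismatches.
func publicKeyMatchesCert(pub crypto.PublicKey, cert *x509.Certificate) bool {
	certPub, ok := cert.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
	return ok && certPub.Equal(pub)
}
```

The hand-rolled switch retained in the diff has one advantage the interface assertion lacks: it can report *which* kind of mismatch occurred in its user-facing error strings.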
Here, we have to use the global revocation queue as the cert + // was not found on this current cluster. + currTime := time.Now() + nSerial := normalizeSerial(serial) + queueReq := revocationRequest{ + RequestedAt: currTime, + } + path := crossRevocationPath + nSerial + + reqEntry, err := logical.StorageEntryJSON(path, queueReq) + if err != nil { + return nil, fmt.Errorf("failed to create storage entry for cross-cluster revocation request: %w", err) + } + + if err := sc.Storage.Put(sc.Context, reqEntry); err != nil { + return nil, fmt.Errorf("error persisting cross-cluster revocation request: %w", err) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "state": "pending", + }, + } + resp.AddWarning("Revocation request was not found on this present node. This request will be in a pending state until the PR cluster which issued this certificate sees the request and revokes the certificate. If no online cluster has this certificate, the request will eventually be removed without revoking any certificates.") + return resp, nil +} + +func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *issuing.RoleEntry) (*logical.Response, error) { rawSerial, haveSerial := data.GetOk("serial_number") rawCertificate, haveCert := data.GetOk("certificate") + sc := b.makeStorageContext(ctx, req.Storage) if !haveSerial && !haveCert { return logical.ErrorResponse("The serial number or certificate to revoke must be provided."), nil @@ -347,16 +561,27 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat } } + writeCert := false + var cert *x509.Certificate var serial string - if haveSerial { + + config, err := sc.Backend.CrlBuilder().getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) + } + + if haveCert { + serial, writeCert, cert, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) + if err != nil { + return nil, err + } + } else { // Easy case: this cert should be in storage already. serial = rawSerial.(string) if len(serial) == 0 { return logical.ErrorResponse("The serial number must be provided"), nil } - // Here, fetch the certificate from disk to validate we can revoke it. - sc := b.makeStorageContext(ctx, req.Storage) certEntry, err := fetchCertBySerial(sc, "certs/", serial) if err != nil { switch err.(type) { @@ -366,49 +591,55 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return nil, err } } - if certEntry == nil { - return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found or was already revoked", serial)), nil - } - - // Now, if the user provided a key, we'll have to make sure the key - // and stored certificate match. - if err := b.pathRevokeWriteHandleKey(ctx, req, certEntry.Value, keyPem); err != nil { - return nil, err - } - } else { - // Otherwise, we've gotta parse the certificate from the request and - // then import it into cluster-local storage. Before writing the - // certificate (and forwarding), we want to verify this certificate - // was actually signed by one of our present issuers. 
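maybeRevokeCrossCluster above persists a JSON request entry under the normalized serial for another cluster to act on later. A sketch of the corresponding read side, together with the serial normalization it assumes; both helpers exist elsewhere in the package, and the path constant and JSON tag shown here are assumptions inferred from the handler's own writes:

```go
package pkiexample

import (
	"context"
	"strings"
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

const crossRevocationPath = "cross-revocation-queue/" // placeholder value

type revocationRequest struct {
	RequestedAt time.Time `json:"requested_at"` // assumed field tag
}

// Assumed storage form for serials: lowercase, dash-separated hex.
func normalizeSerial(serial string) string {
	return strings.ReplaceAll(strings.ToLower(serial), ":", "-")
}

// readPendingRevocation loads a queued cross-cluster revocation request,
// returning nil when nothing is pending for the serial.
func readPendingRevocation(ctx context.Context, s logical.Storage, serial string) (*revocationRequest, error) {
	entry, err := s.Get(ctx, crossRevocationPath+normalizeSerial(serial))
	if err != nil || entry == nil {
		return nil, err
	}
	var req revocationRequest
	if err := entry.DecodeJSON(&req); err != nil {
		return nil, err
	}
	return &req, nil
}
```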
- var err error - var writeCert bool - var certBytes []byte - serial, writeCert, certBytes, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) - if err != nil { - return nil, err - } - // Before we write the certificate, we've gotta verify the request in - // the event of a PoP-based revocation scheme; we don't want to litter - // storage with issued-but-not-revoked certificates. - if err := b.pathRevokeWriteHandleKey(ctx, req, certBytes, keyPem); err != nil { - return nil, err + if certEntry != nil { + cert, err = x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } } + } - // At this point, a forward operation will occur if we're on a standby - // node as we're now attempting to write the bytes of the cert out to - // disk. - if writeCert { - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: "certs/" + serial, - Value: certBytes, - }) + if cert == nil { + if config.UnifiedCRL { + // Saving grace if we aren't able to load the certificate locally/or were given it, + // if we have a unified revocation entry already return its revocation times, + // otherwise we fail with a certificate not found message. + unifiedRev, err := getUnifiedRevocationBySerial(sc, normalizeSerial(serial)) if err != nil { return nil, err } + if unifiedRev != nil { + return &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": unifiedRev.RevocationTimeUTC.Unix(), + "revocation_time_rfc3339": unifiedRev.RevocationTimeUTC.Format(time.RFC3339Nano), + }, + }, nil + } } - // Finally, we have a valid serial number to use for BYOC revocation! + return b.maybeRevokeCrossCluster(sc, config, serial, keyPem != "") + } + + // Before we write the certificate, we've gotta verify the request in + // the event of a PoP-based revocation scheme; we don't want to litter + // storage with issued-but-not-revoked certificates. + if err := b.pathRevokeWriteHandleKey(req, cert, keyPem); err != nil { + return nil, err + } + + // At this point, a forward operation will occur if we're on a standby + // node as we're now attempting to write the bytes of the cert out to + // disk. + if writeCert { + err := req.Storage.Put(ctx, &logical.StorageEntry{ + Key: "certs/" + normalizeSerial(serial), + Value: cert.Raw, + }) + if err != nil { + return nil, err + } } // Assumption: this check is cheap. 
Call this twice, in the cert-import @@ -418,23 +649,18 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat return nil, logical.ErrReadOnly } - // We store and identify by lowercase colon-separated hex, but other - // utilities use dashes and/or uppercase, so normalize - serial = strings.ReplaceAll(strings.ToLower(serial), "-", ":") - - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() - sc := b.makeStorageContext(ctx, req.Storage) - return revokeCert(sc, serial, false) + return revokeCert(sc, config, cert) } func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { - b.revokeStorageLock.RLock() - defer b.revokeStorageLock.RUnlock() + b.GetRevokeStorageLock().RLock() + defer b.GetRevokeStorageLock().RUnlock() sc := b.makeStorageContext(ctx, req.Storage) - crlErr := b.crlBuilder.rebuild(sc, false) + warnings, crlErr := b.CrlBuilder().rebuild(sc, false) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -444,24 +670,30 @@ func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ } } - return &logical.Response{ + resp := &logical.Response{ Data: map[string]interface{}{ "success": true, }, - }, nil + } + + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + return resp, nil } func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { sc := b.makeStorageContext(ctx, req.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) + cfg, err := b.CrlBuilder().getConfigWithUpdate(sc) if err != nil { return nil, fmt.Errorf("error fetching CRL configuration: %w", err) } isEnabled := cfg.EnableDelta - crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) + warnings, crlErr := b.CrlBuilder().rebuildDeltaCRLsIfForced(sc, true) if crlErr != nil { switch crlErr.(type) { case errutil.UserError: @@ -480,6 +712,9 @@ func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Reque if !isEnabled { resp.AddWarning("requested rebuild of delta CRL when delta CRL is not enabled; this is a no-op") } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } return resp, nil } @@ -500,6 +735,96 @@ func (b *backend) pathListRevokedCertsHandler(ctx context.Context, request *logi return logical.ListResponse(revokedCerts), nil } +func (b *backend) pathListRevocationQueueHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + var responseKeys []string + responseInfo := make(map[string]interface{}) + + sc := b.makeStorageContext(ctx, request.Storage) + + clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) + if err != nil { + return nil, fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) + } + + for cIndex, cluster := range clusters { + cluster = cluster[0 : len(cluster)-1] + cPath := crossRevocationPrefix + cluster + "/" + serials, err := sc.Storage.List(sc.Context, cPath) + if err != nil { + return nil, fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) + } + + for _, serial := range serials { + // Always strip the slash out; it indicates the presence of + // a confirmed 
revocation, which we add to the main serial's + // entry. + hasSlash := serial[len(serial)-1] == '/' + if hasSlash { + serial = serial[0 : len(serial)-1] + } + serial = denormalizeSerial(serial) + + var data map[string]interface{} + rawData, isPresent := responseInfo[serial] + if !isPresent { + data = map[string]interface{}{} + responseKeys = append(responseKeys, serial) + } else { + data = rawData.(map[string]interface{}) + } + + if hasSlash { + data["confirmed"] = true + data["confirmation_cluster"] = cluster + } else { + data["requesting_cluster"] = cluster + } + + responseInfo[serial] = data + } + } + + return logical.ListResponseWithInfo(responseKeys, responseInfo), nil +} + +func (b *backend) pathListUnifiedRevokedCertsHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, request.Storage) + responseKeys := []string{} + responseInfo := make(map[string]interface{}) + + clusterPathsById, err := lookupUnifiedClusterPaths(sc) + if err != nil { + return nil, err + } + + for clusterId := range clusterPathsById { + clusterSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) + if err != nil { + return nil, err + } + for _, serial := range clusterSerials { + if strings.HasSuffix(serial, "/") { + // Skip folders as they wouldn't be a proper revocation + continue + } + colonSerial := denormalizeSerial(serial) + var data map[string][]string + rawData, isPresent := responseInfo[colonSerial] + if !isPresent { + responseKeys = append(responseKeys, colonSerial) + data = map[string][]string{} + } else { + data = rawData.(map[string][]string) + } + + data["revoking_clusters"] = append(data["revoking_clusters"], clusterId) + responseInfo[colonSerial] = data + } + } + + return logical.ListResponseWithInfo(responseKeys, responseInfo), nil +} + const pathRevokeHelpSyn = ` Revoke a certificate by serial number or with explicit certificate. @@ -535,3 +860,20 @@ List all revoked serial numbers within the local cluster const pathListRevokedHelpDesc = ` Returns a list of serial numbers for revoked certificates in the local cluster. ` + +const pathListUnifiedRevokedHelpSyn = ` +List all revoked serial numbers within this cluster's unified storage area. +` + +const pathListUnifiedRevokedHelpDesc = ` +Returns a list of serial numbers for revoked certificates within this cluster's unified storage. +` + +const pathListRevocationQueueHelpSyn = ` +List all pending, cross-cluster revocations known to the local cluster. +` + +const pathListRevocationQueueHelpDesc = ` +Returns a detailed list containing serial number, requesting cluster, and +optionally a confirming cluster. +` diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go index 93d0a53baa8f..69f77fdce3d9 100644 --- a/builtin/logical/pki/path_roles.go +++ b/builtin/logical/pki/path_roles.go @@ -1,25 +1,35 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" - "crypto/x509" "encoding/json" + "errors" "fmt" + "net/http" "strings" "time" - "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "roles", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ListOperation: &framework.PathOperation{ Callback: b.pathRoleList, @@ -32,8 +42,360 @@ func pathListRoles(b *backend) *framework.Path { } func pathRoles(b *backend) *framework.Path { + pathRolesResponseFields := map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeInt64, + Required: true, + Description: `The lease duration (validity period of the +certificate) if no specific lease duration is requested. +The lease duration controls the expiration of certificates +issued by this backend. Defaults to the system default +value or the value of max_ttl, whichever is shorter.`, + }, + + "max_ttl": { + Type: framework.TypeInt64, + Required: true, + Description: `The maximum allowed lease duration. If not +set, defaults to the system maximum lease TTL.`, + }, + "allow_token_displayname": { + Type: framework.TypeBool, + Required: true, + Description: `Whether to allow "localhost" and "localdomain" +as a valid common name in a request, independent of allowed_domains value.`, + }, + + "allow_localhost": { + Type: framework.TypeBool, + Required: true, + Description: `Whether to allow "localhost" and "localdomain" +as a valid common name in a request, independent of allowed_domains value.`, + }, + + "allowed_domains": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `Specifies the domains this role is allowed +to issue certificates for. This is used with the allow_bare_domains, +allow_subdomains, and allow_glob_domains to determine matches for the +common name, DNS-typed SAN entries, and Email-typed SAN entries of +certificates. See the documentation for more information. This parameter +accepts a comma-separated string or list of domains.`, + }, + "allowed_domains_template": { + Type: framework.TypeBool, + Required: true, + Description: `If set, Allowed domains can be specified using identity template policies. + Non-templated domains are also permitted.`, + }, + "allow_bare_domains": { + Type: framework.TypeBool, + Required: true, + Description: `If set, clients can request certificates +for the base domains themselves, e.g. "example.com" of domains listed +in allowed_domains. This is a separate option as in some cases this can +be considered a security threat. See the documentation for more +information.`, + }, + + "allow_subdomains": { + Type: framework.TypeBool, + Required: true, + Description: `If set, clients can request certificates for +subdomains of domains listed in allowed_domains, including wildcard +subdomains. See the documentation for more information.`, + }, + + "allow_glob_domains": { + Type: framework.TypeBool, + Required: true, + Description: `If set, domains specified in allowed_domains +can include shell-style glob patterns, e.g. "ftp*.example.com". 
+See the documentation for more information.`, + }, + + "allow_wildcard_certificates": { + Type: framework.TypeBool, + Required: true, + Description: `If set, allows certificates with wildcards in +the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g., +"*.example.net" or "b*z.example.net". See the documentation for more +information.`, + }, + + "allow_any_name": { + Type: framework.TypeBool, + Required: true, + Description: `If set, clients can request certificates for +any domain, regardless of allowed_domains restrictions. +See the documentation for more information.`, + }, + + "enforce_hostnames": { + Type: framework.TypeBool, + Required: true, + Description: `If set, only valid host names are allowed for +CN and DNS SANs, and the host part of email addresses. Defaults to true.`, + }, + + "allow_ip_sans": { + Type: framework.TypeBool, + Required: true, + Description: `If set, IP Subject Alternative Names are allowed. +Any valid IP is accepted and no authorization checking is performed.`, + }, + + "allowed_uri_sans": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `If set, an array of allowed URIs for URI Subject Alternative Names. +Any valid URI is accepted; these values support globbing.`, + }, + + "allowed_uri_sans_template": { + Type: framework.TypeBool, + Required: true, + Description: `If set, Allowed URI SANs can be specified using identity template policies. + Non-templated URI SANs are also permitted.`, + }, + + "allowed_other_sans": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only "utf8" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`, + }, + + "allowed_serial_numbers": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `If set, an array of allowed serial numbers to put in Subject. These values support globbing.`, + }, + "allowed_user_ids": { + Type: framework.TypeCommaStringSlice, + Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`, + }, + "server_flag": { + Type: framework.TypeBool, + Default: true, + Description: `If set, certificates are flagged for server auth use. +Defaults to true. See also RFC 5280 Section 4.2.1.12.`, + }, + + "client_flag": { + Type: framework.TypeBool, + Required: true, + Description: `If set, certificates are flagged for client auth use. +Defaults to true. See also RFC 5280 Section 4.2.1.12.`, + }, + + "code_signing_flag": { + Type: framework.TypeBool, + Required: true, + Description: `If set, certificates are flagged for code signing +use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, + }, + + "email_protection_flag": { + Type: framework.TypeBool, + Required: true, + Description: `If set, certificates are flagged for email +protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, + }, + + "key_type": { + Type: framework.TypeString, + Required: true, + Description: `The type of key to use; defaults to RSA. "rsa", +"ec", "ed25519" and "any" are the only valid values.`, + }, + + "key_bits": { + Type: framework.TypeInt, + Required: true, + Description: `The number of bits to use. 
Allowed values are +0 (universal default); with rsa key_type: 2048 (default), 3072, or +4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with +ed25519.`, + }, + "signature_bits": { + Type: framework.TypeInt, + Required: true, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + }, + "use_pss": { + Type: framework.TypeBool, + Required: false, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + }, + "key_usage": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `A comma-separated string or list of key usages (not extended +key usages). Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage +-- simply drop the "KeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list. See also RFC 5280 +Section 4.2.1.3.`, + }, + + "ext_key_usage": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `A comma-separated string or list of extended key usages. Valid values can be found at +https://golang.org/pkg/crypto/x509/#ExtKeyUsage +-- simply drop the "ExtKeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list. See also RFC 5280 +Section 4.2.1.12.`, + }, + + "ext_key_usage_oids": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `A comma-separated string or list of extended key usage oids.`, + }, + + "use_csr_common_name": { + Type: framework.TypeBool, + Required: true, + Description: `If set, when used with a signing profile, +the common name in the CSR will be used. This +does *not* include any requested Subject Alternative +Names; use use_csr_sans for that. Defaults to true.`, + }, + + "use_csr_sans": { + Type: framework.TypeBool, + Required: true, + Description: `If set, when used with a signing profile, +the SANs in the CSR will be used. This does *not* +include the Common Name (cn); use use_csr_common_name +for that. 
Defaults to true.`, + }, + + "ou": { + Type: framework.TypeCommaStringSlice, + Description: `If set, OU (OrganizationalUnit) will be set to +this value in certificates issued by this role.`, + }, + + "organization": { + Type: framework.TypeCommaStringSlice, + Description: `If set, O (Organization) will be set to +this value in certificates issued by this role.`, + }, + + "country": { + Type: framework.TypeCommaStringSlice, + Description: `If set, Country will be set to +this value in certificates issued by this role.`, + }, + + "locality": { + Type: framework.TypeCommaStringSlice, + Description: `If set, Locality will be set to +this value in certificates issued by this role.`, + }, + + "province": { + Type: framework.TypeCommaStringSlice, + Description: `If set, Province will be set to +this value in certificates issued by this role.`, + }, + + "street_address": { + Type: framework.TypeCommaStringSlice, + Description: `If set, Street Address will be set to +this value in certificates issued by this role.`, + }, + + "postal_code": { + Type: framework.TypeCommaStringSlice, + Description: `If set, Postal Code will be set to +this value in certificates issued by this role.`, + }, + + "generate_lease": { + Type: framework.TypeBool, + Description: ` +If set, certificates issued/signed against this role will have Vault leases +attached to them. Defaults to "false". Certificates can be added to the CRL by +"vault revoke <lease_id>" when certificates are associated with leases. It can +also be done using the "pki/revoke" endpoint. However, when lease generation is +disabled, invoking "pki/revoke" would be the only way to add the certificates +to the CRL. When a large number of certificates are generated with long +lifetimes, it is recommended that lease generation be disabled, as a large +number of leases can adversely affect the startup time of Vault.`, + }, + + "no_store": { + Type: framework.TypeBool, + Description: ` +If set, certificates issued/signed against this role will not be stored in the +storage backend. This can improve performance when issuing large numbers of +certificates. However, certificates issued in this way cannot be enumerated +or revoked, so this option is recommended only for certificates that are +non-sensitive, or extremely short-lived. This option implies a value of "false" +for "generate_lease".`, + }, + + "require_cn": { + Type: framework.TypeBool, + Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`, + }, + + "cn_validations": { + Type: framework.TypeCommaStringSlice, + Description: `List of allowed validations to run against the +Common Name field. Values can include 'email' to validate the CN is an email +address, 'hostname' to validate the CN is a valid hostname (potentially +including wildcards). When multiple validations are specified, these take +OR semantics (either email OR hostname is allowed). 
The special value +'disabled' allows disabling all CN name validations, allowing for arbitrary +non-Hostname, non-Email address CNs.`, + }, + + "policy_identifiers": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated string or list of policy OIDs, or a JSON list of qualified policy +information, which must include an oid, and may include a notice and/or cps url, using the form +[{"oid"="1.3.6.1.4.1.7.8","notice"="I am a user Notice"}, {"oid"="1.3.6.1.4.1.44947.1.2.4 ","cps"="https://example.com"}].`, + }, + + "basic_constraints_valid_for_non_ca": { + Type: framework.TypeBool, + Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, + }, + "not_before_duration": { + Type: framework.TypeInt64, + Description: `The duration in seconds before now which the certificate needs to be backdated by.`, + }, + "not_after": { + Type: framework.TypeString, + Description: `Set the not after field of the certificate with specified date value. +The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.`, + }, + "issuer_ref": { + Type: framework.TypeString, + Description: `Reference to the issuer used to sign requests +serviced by this role.`, + }, + } + return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "backend": { Type: framework.TypeString, @@ -180,6 +542,11 @@ Any valid URI is accepted, these values support globbing.`, Description: `If set, an array of allowed serial numbers to put in Subject. These values support globbing.`, }, + "allowed_user_ids": { + Type: framework.TypeCommaStringSlice, + Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`, + }, + "server_flag": { Type: framework.TypeBool, Default: true, @@ -445,21 +812,44 @@ serviced by this role.`, Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathRoleRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathRoleCreate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathRoleDelete, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + }, // Read more about why these flags are set in backend.go. ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, }, logical.PatchOperation: &framework.PathOperation{ Callback: b.pathRolePatch, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -471,137 +861,26 @@ serviced by this role.`, } } -func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) +// GetRole loads a role from storage, will validate it and error out if, +// updates it and stores it back if possible. If the role does not exist +// a nil, nil response is returned. +func (b *backend) GetRole(ctx context.Context, s logical.Storage, n string) (*issuing.RoleEntry, error) { + result, err := issuing.GetRole(ctx, s, n) if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - // Migrate existing saved entries and save back if changed - modified := false - if len(result.DeprecatedTTL) == 0 && len(result.Lease) != 0 { - result.DeprecatedTTL = result.Lease - result.Lease = "" - modified = true - } - if result.TTL == 0 && len(result.DeprecatedTTL) != 0 { - parsed, err := parseutil.ParseDurationSecond(result.DeprecatedTTL) - if err != nil { - return nil, err - } - result.TTL = parsed - result.DeprecatedTTL = "" - modified = true - } - if len(result.DeprecatedMaxTTL) == 0 && len(result.LeaseMax) != 0 { - result.DeprecatedMaxTTL = result.LeaseMax - result.LeaseMax = "" - modified = true - } - if result.MaxTTL == 0 && len(result.DeprecatedMaxTTL) != 0 { - parsed, err := parseutil.ParseDurationSecond(result.DeprecatedMaxTTL) - if err != nil { - return nil, err - } - result.MaxTTL = parsed - result.DeprecatedMaxTTL = "" - modified = true - } - if result.AllowBaseDomain { - result.AllowBaseDomain = false - result.AllowBareDomains = true - modified = true - } - if result.AllowedDomainsOld != "" { - result.AllowedDomains = strings.Split(result.AllowedDomainsOld, ",") - result.AllowedDomainsOld = "" - modified = true - } - if result.AllowedBaseDomain != "" { - found := false - for _, v := range result.AllowedDomains { - if v == result.AllowedBaseDomain { - found = true - break - } + if errors.Is(err, issuing.ErrRoleNotFound) { + return nil, nil } - if !found { - result.AllowedDomains = append(result.AllowedDomains, result.AllowedBaseDomain) - } - result.AllowedBaseDomain = "" - modified = true - } - if result.AllowWildcardCertificates == nil { - // While not the most secure default, when AllowWildcardCertificates isn't - // explicitly specified in the stored Role, we automatically upgrade it to - // true to preserve compatibility with previous versions of Vault. Once this - // field is set, this logic will not be triggered any more. - result.AllowWildcardCertificates = new(bool) - *result.AllowWildcardCertificates = true - modified = true - } - - // Upgrade generate_lease in role - if result.GenerateLease == nil { - // All the new roles will have GenerateLease always set to a value. A - // nil value indicates that this role needs an upgrade. Set it to - // `true` to not alter its current behavior. 
- result.GenerateLease = new(bool) - *result.GenerateLease = true - modified = true - } - - // Upgrade key usages - if result.KeyUsageOld != "" { - result.KeyUsage = strings.Split(result.KeyUsageOld, ",") - result.KeyUsageOld = "" - modified = true - } - - // Upgrade OU - if result.OUOld != "" { - result.OU = strings.Split(result.OUOld, ",") - result.OUOld = "" - modified = true - } - - // Upgrade Organization - if result.OrganizationOld != "" { - result.Organization = strings.Split(result.OrganizationOld, ",") - result.OrganizationOld = "" - modified = true - } - - // Set the issuer field to default if not set. We want to do this - // unconditionally as we should probably never have an empty issuer - // on a stored roles. - if len(result.Issuer) == 0 { - result.Issuer = defaultRef - modified = true - } - - // Update CN Validations to be the present default, "email,hostname" - if len(result.CNValidations) == 0 { - result.CNValidations = []string{"email", "hostname"} - modified = true + return nil, err } // Ensure the role is valid after updating. - _, err = validateRole(b, &result, ctx, s) + _, err = validateRole(b, result, ctx, s) if err != nil { return nil, err } - if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { - jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result) + if result.WasModified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + jsonEntry, err := logical.StorageEntryJSON("role/"+n, result) if err != nil { return nil, err } @@ -611,9 +890,10 @@ func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*ro return nil, err } } + result.WasModified = false } - return &result, nil + return result, nil } func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { @@ -631,7 +911,7 @@ func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data * return logical.ErrorResponse("missing role name"), nil } - role, err := b.getRole(ctx, req.Storage, roleName) + role, err := b.GetRole(ctx, req.Storage, roleName) if err != nil { return nil, err } @@ -658,7 +938,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data var err error name := data.Get("name").(string) - entry := &roleEntry{ + entry := &issuing.RoleEntry{ MaxTTL: time.Duration(data.Get("max_ttl").(int)) * time.Second, TTL: time.Duration(data.Get("ttl").(int)) * time.Second, AllowLocalhost: data.Get("allow_localhost").(bool), @@ -698,11 +978,13 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data RequireCN: data.Get("require_cn").(bool), CNValidations: data.Get("cn_validations").([]string), AllowedSerialNumbers: data.Get("allowed_serial_numbers").([]string), + AllowedUserIDs: data.Get("allowed_user_ids").([]string), PolicyIdentifiers: getPolicyIdentifier(data, nil), BasicConstraintsValidForNonCA: data.Get("basic_constraints_valid_for_non_ca").(bool), NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, NotAfter: data.Get("not_after").(string), Issuer: data.Get("issuer_ref").(string), + Name: name, } allowedOtherSANs := data.Get("allowed_other_sans").([]string) @@ -763,7 +1045,7 @@ func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data return resp, nil } -func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.Storage) 
(*logical.Response, error) { +func validateRole(b *backend, entry *issuing.RoleEntry, ctx context.Context, s logical.Storage) (*logical.Response, error) { resp := &logical.Response{} var err error @@ -801,11 +1083,11 @@ func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.S entry.Issuer = defaultRef } // Check that the issuers reference set resolves to something - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { sc := b.makeStorageContext(ctx, s) issuerId, err := sc.resolveIssuerReference(entry.Issuer) if err != nil { - if issuerId == IssuerRefNotFound { + if issuerId == issuing.IssuerRefNotFound { resp = &logical.Response{} if entry.Issuer == defaultRef { resp.AddWarning("Issuing Certificate was set to default, but no default issuing certificate (configurable at /config/issuers) is currently set") @@ -848,7 +1130,7 @@ func getTimeWithExplicitDefault(data *framework.FieldData, field string, default func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { name := data.Get("name").(string) - oldEntry, err := b.getRole(ctx, req.Storage, name) + oldEntry, err := b.GetRole(ctx, req.Storage, name) if err != nil { return nil, err } @@ -856,7 +1138,7 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data return logical.ErrorResponse("Unable to fetch role entry to patch"), nil } - entry := &roleEntry{ + entry := &issuing.RoleEntry{ MaxTTL: getTimeWithExplicitDefault(data, "max_ttl", oldEntry.MaxTTL), TTL: getTimeWithExplicitDefault(data, "ttl", oldEntry.TTL), AllowLocalhost: getWithExplicitDefault(data, "allow_localhost", oldEntry.AllowLocalhost).(bool), @@ -896,6 +1178,7 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data RequireCN: getWithExplicitDefault(data, "require_cn", oldEntry.RequireCN).(bool), CNValidations: getWithExplicitDefault(data, "cn_validations", oldEntry.CNValidations).([]string), AllowedSerialNumbers: getWithExplicitDefault(data, "allowed_serial_numbers", oldEntry.AllowedSerialNumbers).([]string), + AllowedUserIDs: getWithExplicitDefault(data, "allowed_user_ids", oldEntry.AllowedUserIDs).([]string), PolicyIdentifiers: getPolicyIdentifier(data, &oldEntry.PolicyIdentifiers), BasicConstraintsValidForNonCA: getWithExplicitDefault(data, "basic_constraints_valid_for_non_ca", oldEntry.BasicConstraintsValidForNonCA).(bool), NotBeforeDuration: getTimeWithExplicitDefault(data, "not_before_duration", oldEntry.NotBeforeDuration), @@ -969,202 +1252,6 @@ func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data return resp, nil } -func parseKeyUsages(input []string) int { - var parsedKeyUsages x509.KeyUsage - for _, k := range input { - switch strings.ToLower(strings.TrimSpace(k)) { - case "digitalsignature": - parsedKeyUsages |= x509.KeyUsageDigitalSignature - case "contentcommitment": - parsedKeyUsages |= x509.KeyUsageContentCommitment - case "keyencipherment": - parsedKeyUsages |= x509.KeyUsageKeyEncipherment - case "dataencipherment": - parsedKeyUsages |= x509.KeyUsageDataEncipherment - case "keyagreement": - parsedKeyUsages |= x509.KeyUsageKeyAgreement - case "certsign": - parsedKeyUsages |= x509.KeyUsageCertSign - case "crlsign": - parsedKeyUsages |= x509.KeyUsageCRLSign - case "encipheronly": - parsedKeyUsages |= x509.KeyUsageEncipherOnly - case "decipheronly": - parsedKeyUsages |= x509.KeyUsageDecipherOnly - } - } - - return int(parsedKeyUsages) -} - -func parseExtKeyUsages(role 
*roleEntry) certutil.CertExtKeyUsage { - var parsedKeyUsages certutil.CertExtKeyUsage - - if role.ServerFlag { - parsedKeyUsages |= certutil.ServerAuthExtKeyUsage - } - - if role.ClientFlag { - parsedKeyUsages |= certutil.ClientAuthExtKeyUsage - } - - if role.CodeSigningFlag { - parsedKeyUsages |= certutil.CodeSigningExtKeyUsage - } - - if role.EmailProtectionFlag { - parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage - } - - for _, k := range role.ExtKeyUsage { - switch strings.ToLower(strings.TrimSpace(k)) { - case "any": - parsedKeyUsages |= certutil.AnyExtKeyUsage - case "serverauth": - parsedKeyUsages |= certutil.ServerAuthExtKeyUsage - case "clientauth": - parsedKeyUsages |= certutil.ClientAuthExtKeyUsage - case "codesigning": - parsedKeyUsages |= certutil.CodeSigningExtKeyUsage - case "emailprotection": - parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage - case "ipsecendsystem": - parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage - case "ipsectunnel": - parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage - case "ipsecuser": - parsedKeyUsages |= certutil.IpsecUserExtKeyUsage - case "timestamping": - parsedKeyUsages |= certutil.TimeStampingExtKeyUsage - case "ocspsigning": - parsedKeyUsages |= certutil.OcspSigningExtKeyUsage - case "microsoftservergatedcrypto": - parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage - case "netscapeservergatedcrypto": - parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage - } - } - - return parsedKeyUsages -} - -type roleEntry struct { - LeaseMax string `json:"lease_max"` - Lease string `json:"lease"` - DeprecatedMaxTTL string `json:"max_ttl"` - DeprecatedTTL string `json:"ttl"` - TTL time.Duration `json:"ttl_duration"` - MaxTTL time.Duration `json:"max_ttl_duration"` - AllowLocalhost bool `json:"allow_localhost"` - AllowedBaseDomain string `json:"allowed_base_domain"` - AllowedDomainsOld string `json:"allowed_domains,omitempty"` - AllowedDomains []string `json:"allowed_domains_list"` - AllowedDomainsTemplate bool `json:"allowed_domains_template"` - AllowBaseDomain bool `json:"allow_base_domain"` - AllowBareDomains bool `json:"allow_bare_domains"` - AllowTokenDisplayName bool `json:"allow_token_displayname"` - AllowSubdomains bool `json:"allow_subdomains"` - AllowGlobDomains bool `json:"allow_glob_domains"` - AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` - AllowAnyName bool `json:"allow_any_name"` - EnforceHostnames bool `json:"enforce_hostnames"` - AllowIPSANs bool `json:"allow_ip_sans"` - ServerFlag bool `json:"server_flag"` - ClientFlag bool `json:"client_flag"` - CodeSigningFlag bool `json:"code_signing_flag"` - EmailProtectionFlag bool `json:"email_protection_flag"` - UseCSRCommonName bool `json:"use_csr_common_name"` - UseCSRSANs bool `json:"use_csr_sans"` - KeyType string `json:"key_type"` - KeyBits int `json:"key_bits"` - UsePSS bool `json:"use_pss"` - SignatureBits int `json:"signature_bits"` - MaxPathLength *int `json:",omitempty"` - KeyUsageOld string `json:"key_usage,omitempty"` - KeyUsage []string `json:"key_usage_list"` - ExtKeyUsage []string `json:"extended_key_usage_list"` - OUOld string `json:"ou,omitempty"` - OU []string `json:"ou_list"` - OrganizationOld string `json:"organization,omitempty"` - Organization []string `json:"organization_list"` - Country []string `json:"country"` - Locality []string `json:"locality"` - Province []string `json:"province"` - StreetAddress []string `json:"street_address"` - PostalCode []string `json:"postal_code"` - GenerateLease *bool 
`json:"generate_lease,omitempty"` - NoStore bool `json:"no_store"` - RequireCN bool `json:"require_cn"` - CNValidations []string `json:"cn_validations"` - AllowedOtherSANs []string `json:"allowed_other_sans"` - AllowedSerialNumbers []string `json:"allowed_serial_numbers"` - AllowedURISANs []string `json:"allowed_uri_sans"` - AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` - PolicyIdentifiers []string `json:"policy_identifiers"` - ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` - BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` - NotBeforeDuration time.Duration `json:"not_before_duration"` - NotAfter string `json:"not_after"` - Issuer string `json:"issuer"` -} - -func (r *roleEntry) ToResponseData() map[string]interface{} { - responseData := map[string]interface{}{ - "ttl": int64(r.TTL.Seconds()), - "max_ttl": int64(r.MaxTTL.Seconds()), - "allow_localhost": r.AllowLocalhost, - "allowed_domains": r.AllowedDomains, - "allowed_domains_template": r.AllowedDomainsTemplate, - "allow_bare_domains": r.AllowBareDomains, - "allow_token_displayname": r.AllowTokenDisplayName, - "allow_subdomains": r.AllowSubdomains, - "allow_glob_domains": r.AllowGlobDomains, - "allow_wildcard_certificates": r.AllowWildcardCertificates, - "allow_any_name": r.AllowAnyName, - "allowed_uri_sans_template": r.AllowedURISANsTemplate, - "enforce_hostnames": r.EnforceHostnames, - "allow_ip_sans": r.AllowIPSANs, - "server_flag": r.ServerFlag, - "client_flag": r.ClientFlag, - "code_signing_flag": r.CodeSigningFlag, - "email_protection_flag": r.EmailProtectionFlag, - "use_csr_common_name": r.UseCSRCommonName, - "use_csr_sans": r.UseCSRSANs, - "key_type": r.KeyType, - "key_bits": r.KeyBits, - "signature_bits": r.SignatureBits, - "use_pss": r.UsePSS, - "key_usage": r.KeyUsage, - "ext_key_usage": r.ExtKeyUsage, - "ext_key_usage_oids": r.ExtKeyUsageOIDs, - "ou": r.OU, - "organization": r.Organization, - "country": r.Country, - "locality": r.Locality, - "province": r.Province, - "street_address": r.StreetAddress, - "postal_code": r.PostalCode, - "no_store": r.NoStore, - "allowed_other_sans": r.AllowedOtherSANs, - "allowed_serial_numbers": r.AllowedSerialNumbers, - "allowed_uri_sans": r.AllowedURISANs, - "require_cn": r.RequireCN, - "cn_validations": r.CNValidations, - "policy_identifiers": r.PolicyIdentifiers, - "basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA, - "not_before_duration": int64(r.NotBeforeDuration.Seconds()), - "not_after": r.NotAfter, - "issuer_ref": r.Issuer, - } - if r.MaxPathLength != nil { - responseData["max_path_length"] = r.MaxPathLength - } - if r.GenerateLease != nil { - responseData["generate_lease"] = r.GenerateLease - } - return responseData -} - func checkCNValidations(validations []string) ([]string, error) { var haveDisabled bool var haveEmail bool diff --git a/builtin/logical/pki/path_roles_test.go b/builtin/logical/pki/path_roles_test.go index 315c6d0bec74..b227832ce965 100644 --- a/builtin/logical/pki/path_roles_test.go +++ b/builtin/logical/pki/path_roles_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -11,9 +14,12 @@ import ( "github.com/go-errors/errors" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" ) func TestPki_RoleGenerateLease(t *testing.T) { @@ -65,7 +71,7 @@ func TestPki_RoleGenerateLease(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -141,12 +147,14 @@ func TestPki_RoleKeyUsage(t *testing.T) { } resp, err = b.HandleRequest(context.Background(), roleReq) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.UpdateOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } roleReq.Operation = logical.ReadOperation resp, err = b.HandleRequest(context.Background(), roleReq) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.ReadOperation), resp, true) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("bad: err: %v resp: %#v", err, resp) } @@ -164,7 +172,7 @@ func TestPki_RoleKeyUsage(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -199,7 +207,7 @@ func TestPki_RoleKeyUsage(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } @@ -259,7 +267,7 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -299,7 +307,7 @@ func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } @@ -359,7 +367,7 @@ func TestPki_RoleAllowedDomains(t *testing.T) { t.Fatal(err) } - var role roleEntry + var role issuing.RoleEntry if err := entry.DecodeJSON(&role); err != nil { t.Fatal(err) } @@ -393,7 +401,7 @@ func TestPki_RoleAllowedDomains(t *testing.T) { if entry == nil { t.Fatalf("role should not be nil") } - var result roleEntry + var result issuing.RoleEntry if err := entry.DecodeJSON(&result); err != nil { t.Fatalf("err: %v", err) } diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go index a8dbf8354029..bf9ace1e561f 100644 --- a/builtin/logical/pki/path_root.go +++ b/builtin/logical/pki/path_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
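The test changes above thread `schema.ValidateResponse` through existing tests so each handler response is checked against the `Responses` schema declared on its path. A sketch of the same pattern as a standalone test, assuming this package's usual backend/storage test helper (named `CreateBackendWithStorage` here; substitute whatever helper the package actually provides):

```go
package pki

import (
	"context"
	"testing"

	"github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/stretchr/testify/require"
)

// TestPki_RoleResponseSchema sketches the validation pattern added above.
func TestPki_RoleResponseSchema(t *testing.T) {
	b, storage := CreateBackendWithStorage(t) // assumed helper

	roleReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/example",
		Storage:   storage,
		Data: map[string]interface{}{
			"allowed_domains": "example.com",
		},
	}

	resp, err := b.HandleRequest(context.Background(), roleReq)
	require.NoError(t, err)

	// Look up the Responses schema declared for this path and operation and
	// assert the handler's response conforms to it, as the diff does.
	schema.ValidateResponse(t,
		schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.UpdateOperation),
		resp, true)
}
```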
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -12,6 +15,7 @@ import ( "encoding/pem" "errors" "fmt" + "net/http" "reflect" "strings" "time" @@ -23,18 +27,40 @@ import ( "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" ) func pathGenerateRoot(b *backend) *framework.Path { - return buildPathGenerateRoot(b, "root/generate/"+framework.GenericNameRegex("exported")) + pattern := "root/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) } func pathDeleteRoot(b *backend) *framework.Path { ret := &framework.Path{ Pattern: "root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "root", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.DeleteOperation: &framework.PathOperation{ Callback: b.pathCADeleteRoot, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + }}, + }, // Read more about why these flags are set in backend.go ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -55,7 +81,7 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ defer b.issuersLock.Unlock() sc := b.makeStorageContext(ctx, req.Storage) - if !b.useLegacyBundleCaStorage() { + if !b.UseLegacyBundleCaStorage() { issuers, err := sc.listIssuers() if err != nil { return nil, err @@ -80,11 +106,15 @@ func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ } } - // Delete legacy CA bundle. + // Delete legacy CA bundle and its backup, if any. if err := req.Storage.Delete(ctx, legacyCertBundlePath); err != nil { return nil, err } + if err := req.Storage.Delete(ctx, legacyCertBundleBackupPath); err != nil { + return nil, err + } + // Delete legacy CRL bundle. 
if err := req.Storage.Delete(ctx, legacyCRLPath); err != nil { return nil, err @@ -105,7 +135,7 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, var err error - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return logical.ErrorResponse("Can not create root CA until migration has completed"), nil } @@ -258,22 +288,19 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, // Also store it as just the certificate identified by serial number, so it // can be revoked - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) + return nil, err } - b.incrementTotalCertificatesCount(certsCounted, key) // Build a fresh CRL - err = b.crlBuilder.rebuild(sc, true) + warnings, err = b.CrlBuilder().rebuild(sc, true) if err != nil { return nil, err } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the generated certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") @@ -297,19 +324,17 @@ func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { var err error - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } format := getFormat(data) if format == "" { - return logical.ErrorResponse( - `The "format" path parameter must be "pem" or "der"`, - ), nil + return logical.ErrorResponse(`The "format" path parameter must be "pem", "der" or "pem_bundle"`), nil } - role := &roleEntry{ + role := &issuing.RoleEntry{ OU: data.Get("ou").([]string), Organization: data.Get("organization").([]string), Country: data.Get("country").([]string), @@ -341,7 +366,7 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: @@ -386,6 +411,20 @@ func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.R return nil, fmt.Errorf("verification of parsed bundle failed: %w", err) } + resp, err := signIntermediateResponse(signingBundle, parsedBundle, format, warnings) + if err != nil { + return nil, err + } + + err = issuing.StoreCertificate(ctx, req.Storage, b.GetCertificateCounter(), parsedBundle) + if err != nil { + return nil, err + } + + return resp, nil +} + +func signIntermediateResponse(signingBundle *certutil.CAInfoBundle, parsedBundle *certutil.ParsedCertBundle, format string, warnings []string) (*logical.Response, error) { signingCB, err := signingBundle.ToCertBundle() if err != nil { return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) @@ -455,40 +494,22 @@ func (b *backend) pathIssuerSignIntermediate(ctx 
context.Context, req *logical.R return nil, fmt.Errorf("unsupported format argument: %s", format) } - key := "certs/" + normalizeSerial(cb.SerialNumber) - certsCounted := b.certsCounted.Load() - err = req.Storage.Put(ctx, &logical.StorageEntry{ - Key: key, - Value: parsedBundle.CertificateBytes, - }) - if err != nil { - return nil, fmt.Errorf("unable to store certificate locally: %w", err) - } - b.incrementTotalCertificatesCount(certsCounted, key) - if parsedBundle.Certificate.MaxPathLen == 0 { resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") } resp = addWarnings(resp, warnings) - return resp, nil } func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - var err error - - issuerName := getIssuerRef(data) + issuerName := GetIssuerRef(data) if len(issuerName) == 0 { return logical.ErrorResponse("missing issuer reference"), nil } certPem := data.Get("certificate").(string) - block, _ := pem.Decode([]byte(certPem)) - if block == nil || len(block.Bytes) == 0 { - return logical.ErrorResponse("certificate could not be PEM-decoded"), nil - } - certs, err := x509.ParseCertificates(block.Bytes) + certs, err := parsing.ParseCertificatesFromString(certPem) if err != nil { return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil } @@ -504,9 +525,8 @@ func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Req return logical.ErrorResponse("given certificate is not self-issued"), nil } - var caErr error sc := b.makeStorageContext(ctx, req.Storage) - signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, issuing.IssuanceUsage) if caErr != nil { switch caErr.(type) { case errutil.UserError: diff --git a/builtin/logical/pki/path_sign_issuers.go b/builtin/logical/pki/path_sign_issuers.go index cadbac5553f3..e9bdc5e8a301 100644 --- a/builtin/logical/pki/path_sign_issuers.go +++ b/builtin/logical/pki/path_sign_issuers.go @@ -1,28 +1,80 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( + "net/http" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) func pathIssuerSignIntermediate(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) } func pathSignIntermediate(b *backend) *framework.Path { pattern := "root/sign-intermediate" - return buildPathIssuerSignIntermediateRaw(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) } -func buildPathIssuerSignIntermediateRaw(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignIntermediateRaw(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := addIssuerRefField(map[string]*framework.FieldSchema{}) path := &framework.Path{ - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignIntermediate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiration": { + Type: framework.TypeInt64, + Description: `Expiration Time`, + Required: true, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing CA`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: true, + }, + }, + }}, + }, }, }, @@ -113,15 +165,29 @@ See the API documentation for more information about required parameters. 
func pathIssuerSignSelfIssued(b *backend) *framework.Path { pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-self-issued" - return buildPathIssuerSignSelfIssued(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) } func pathSignSelfIssued(b *backend) *framework.Path { pattern := "root/sign-self-issued" - return buildPathIssuerSignSelfIssued(b, pattern) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) } -func buildPathIssuerSignSelfIssued(b *backend, pattern string) *framework.Path { +func buildPathIssuerSignSelfIssued(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { fields := map[string]*framework.FieldSchema{ "certificate": { Type: framework.TypeString, @@ -135,11 +201,29 @@ func buildPathIssuerSignSelfIssued(b *backend, pattern string) *framework.Path { } fields = addIssuerRefField(fields) path := &framework.Path{ - Pattern: pattern, - Fields: fields, + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssuerSignSelfIssued, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing CA`, + Required: true, + }, + }, + }}, + }, }, }, diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go index 101fbfc316c4..d4f4c6f39e12 100644 --- a/builtin/logical/pki/path_tidy.go +++ b/builtin/logical/pki/path_tidy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -11,7 +14,8 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -19,37 +23,130 @@ import ( var tidyCancelledError = errors.New("tidy operation cancelled") +//go:generate enumer -type=tidyStatusState -trimprefix=tidyStatus +type tidyStatusState int + +const ( + tidyStatusInactive tidyStatusState = iota + tidyStatusStarted + tidyStatusFinished + tidyStatusError + tidyStatusCancelling + tidyStatusCancelled +) + +type tidyStatus struct { + // Parameters used to initiate the operation + safetyBuffer int + issuerSafetyBuffer int + revQueueSafetyBuffer int + acmeAccountSafetyBuffer int + + tidyCertStore bool + tidyRevokedCerts bool + tidyRevokedAssocs bool + tidyExpiredIssuers bool + tidyBackupBundle bool + tidyRevocationQueue bool + tidyCrossRevokedCerts bool + tidyAcme bool + pauseDuration string + + // Status + state tidyStatusState + err error + timeStarted time.Time + timeFinished time.Time + message string + + // These counts use a custom incrementer that grabs and releases + // a lock prior to reading.
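Per the comment above, the counters on `tidyStatus` are only touched through a lock-guarded incrementer. A minimal, self-contained sketch of that pattern, with illustrative names rather than the backend's actual helpers:

```go
package main

import (
	"fmt"
	"sync"
)

// tidyCounters sketches the "incrementer grabs a lock" pattern; the field
// and method names here are illustrative.
type tidyCounters struct {
	mu                    sync.RWMutex
	certStoreDeletedCount uint
}

// inc is the custom incrementer: writers take the write lock.
func (c *tidyCounters) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.certStoreDeletedCount++
}

// snapshot takes the read lock, so status reads never see a torn update.
func (c *tidyCounters) snapshot() uint {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.certStoreDeletedCount
}

func main() {
	var c tidyCounters
	c.inc()
	fmt.Println(c.snapshot()) // 1
}
```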
+ certStoreDeletedCount uint + revokedCertDeletedCount uint + missingIssuerCertCount uint + revQueueDeletedCount uint + crossRevokedDeletedCount uint + + acmeAccountsCount uint + acmeAccountsRevokedCount uint + acmeAccountsDeletedCount uint + acmeOrdersDeletedCount uint +} + type tidyConfig struct { - Enabled bool `json:"enabled"` - Interval time.Duration `json:"interval_duration"` - CertStore bool `json:"tidy_cert_store"` - RevokedCerts bool `json:"tidy_revoked_certs"` - IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` - ExpiredIssuers bool `json:"tidy_expired_issuers"` - SafetyBuffer time.Duration `json:"safety_buffer"` - IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` - PauseDuration time.Duration `json:"pause_duration"` + // AutoTidy config + Enabled bool `json:"enabled"` + Interval time.Duration `json:"interval_duration"` + + // Tidy Operations + CertStore bool `json:"tidy_cert_store"` + RevokedCerts bool `json:"tidy_revoked_certs"` + IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` + ExpiredIssuers bool `json:"tidy_expired_issuers"` + BackupBundle bool `json:"tidy_move_legacy_ca_bundle"` + RevocationQueue bool `json:"tidy_revocation_queue"` + CrossRevokedCerts bool `json:"tidy_cross_cluster_revoked_certs"` + TidyAcme bool `json:"tidy_acme"` + + // Safety Buffers + SafetyBuffer time.Duration `json:"safety_buffer"` + IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` + QueueSafetyBuffer time.Duration `json:"revocation_queue_safety_buffer"` + AcmeAccountSafetyBuffer time.Duration `json:"acme_account_safety_buffer"` + PauseDuration time.Duration `json:"pause_duration"` + + // Metrics. + MaintainCount bool `json:"maintain_stored_certificate_counts"` + PublishMetrics bool `json:"publish_stored_certificate_count_metrics"` +} + +func (tc *tidyConfig) IsAnyTidyEnabled() bool { + return tc.CertStore || tc.RevokedCerts || tc.IssuerAssocs || tc.ExpiredIssuers || tc.BackupBundle || tc.TidyAcme || tc.CrossRevokedCerts || tc.RevocationQueue +} + +func (tc *tidyConfig) AnyTidyConfig() string { + return "tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations / tidy_expired_issuers / tidy_move_legacy_ca_bundle / tidy_revocation_queue / tidy_cross_cluster_revoked_certs / tidy_acme" } var defaultTidyConfig = tidyConfig{ - Enabled: false, - Interval: 12 * time.Hour, - CertStore: false, - RevokedCerts: false, - IssuerAssocs: false, - ExpiredIssuers: false, - SafetyBuffer: 72 * time.Hour, - IssuerSafetyBuffer: 365 * 24 * time.Hour, - PauseDuration: 0 * time.Second, + Enabled: false, + Interval: 12 * time.Hour, + CertStore: false, + RevokedCerts: false, + IssuerAssocs: false, + ExpiredIssuers: false, + BackupBundle: false, + TidyAcme: false, + SafetyBuffer: 72 * time.Hour, + IssuerSafetyBuffer: 365 * 24 * time.Hour, + AcmeAccountSafetyBuffer: 30 * 24 * time.Hour, + PauseDuration: 0 * time.Second, + MaintainCount: false, + PublishMetrics: false, + RevocationQueue: false, + QueueSafetyBuffer: 48 * time.Hour, + CrossRevokedCerts: false, } func pathTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy$", - Fields: addTidyFields(map[string]*framework.FieldSchema{}), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + }, + + Fields: addTidyFields(map[string]*framework.FieldSchema{}), Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyWrite, + Callback: b.pathTidyWrite, + 
Responses: map[int][]framework.Response{ + http.StatusAccepted: {{ + Description: "Accepted", + Fields: map[string]*framework.FieldSchema{}, + }}, + }, ForwardPerformanceStandby: true, }, }, @@ -61,9 +158,172 @@ func pathTidy(b *backend) *framework.Path { func pathTidyCancel(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-cancel$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "cancel", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathTidyCancelWrite, + Callback: b.pathTidyCancelWrite, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: false, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: false, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Description: `Revocation queue safety buffer`, + Required: true, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Tidy certificate store`, + Required: false, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy revoked certificates`, + Required: false, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Tidy revoked certificate issuer associations`, + Required: false, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy unused ACME accounts and orders`, + Required: false, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after account creation, after which accounts lacking orders are revoked`, + Required: false, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Tidy expired issuers`, + Required: false, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: false, + }, + "state": { + Type: framework.TypeString, + Description: `One of Inactive, Running, Finished, or Error`, + Required: false, + }, + "error": { + Type: framework.TypeString, + Description: `The error message`, + Required: false, + }, + "time_started": { + Type: framework.TypeString, + Description: `Time the operation started`, + Required: false, + }, + "time_finished": { + Type: framework.TypeString, + Description: `Time the operation finished`, + Required: false, + }, + "last_auto_tidy_finished": { + Type: framework.TypeString, + Description: `Time the last auto-tidy operation finished`, + Required: true, + }, + "message": { + Type: framework.TypeString, + Description: `Message of the operation`, + Required: false, + }, + "cert_store_deleted_count": { + Type: framework.TypeInt, + Description: `The number of certificate storage entries deleted`, + Required: false, + }, + "revoked_cert_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked certificate entries deleted`, + Required: false, + }, + "current_cert_store_count": { + Type: framework.TypeInt, + Description: `The current number of certificate storage entries`, + Required: false, + }, + "current_revoked_cert_count": { + Type: framework.TypeInt, + Description: `The current number of revoked certificate entries`, + Required: false, + }, + "missing_issuer_cert_count": { + Type: framework.TypeInt, + Required: false, + }, + "tidy_move_legacy_ca_bundle": { + Type: framework.TypeBool, + Required: false, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: false, + }, + "tidy_revocation_queue": { + Type: framework.TypeBool, + Required: false, + }, + "revocation_queue_deleted_count": { + Type: framework.TypeInt, + Required: false, + }, + "cross_revoked_cert_deleted_count": { + Type: framework.TypeInt, + Required: false, + }, + "internal_backend_uuid": { + Type: framework.TypeString, + Required: false, + }, + "total_acme_account_count": { + Type: framework.TypeInt, + Description: `Total number of ACME accounts iterated over`, + Required: false, + }, + "acme_account_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked ACME accounts removed`, + Required: false, + }, + "acme_account_revoked_count": { + Type: framework.TypeInt, + Description: `The number of unused ACME accounts revoked`, + Required: false, + }, + "acme_orders_deleted_count": { + Type: framework.TypeInt, + Description: `The number of expired, unused ACME orders removed`, + Required: false, + }, + }, + }}, + }, ForwardPerformanceStandby: true, }, }, @@ -75,9 +335,173 @@ func pathTidyCancel(b *backend) *framework.Path { func pathTidyStatus(b *backend) *framework.Path { return &framework.Path{ Pattern: "tidy-status$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "status", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathTidyStatusRead, + Callback: b.pathTidyStatusRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: true, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: true, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Description: `Revocation queue safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after account creation, after which accounts lacking orders are revoked`, + Required: false, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Tidy certificate store`, + Required: true, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy revoked certificates`, + Required: true, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Tidy revoked certificate issuer associations`, + Required: true, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Tidy expired issuers`, + Required: true, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: false, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy unused ACME accounts and orders`, + Required: true, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: true, + }, + "state": { + Type: framework.TypeString, + Description: `One of Inactive, Running, Finished, or Error`, + Required: true, + }, + "error": { + Type: framework.TypeString, + Description: `The error message`, + Required: true, + }, + "time_started": { + Type: framework.TypeString, + Description: `Time the operation started`, + Required: true, + }, + "time_finished": { + Type: framework.TypeString, + Description: `Time the operation finished`, + Required: false, + }, + "last_auto_tidy_finished": { + Type: framework.TypeString, + Description: `Time the last auto-tidy operation finished`, + Required: true, + }, + "message": { + Type: framework.TypeString, + Description: `Message of the operation`, + Required: true, + }, + "cert_store_deleted_count": { + Type: framework.TypeInt, + Description: `The number of certificate storage entries deleted`, + Required: true, + }, + "revoked_cert_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked certificate entries deleted`, + Required: true, + }, + "current_cert_store_count": { + Type: framework.TypeInt, + Description: `The current number of certificate storage entries`, + Required: true, + }, + "cross_revoked_cert_deleted_count": { + Type: framework.TypeInt, + Description: `The number of cross-cluster revoked certificate entries deleted`, + Required: true, + }, + "current_revoked_cert_count": { + Type: framework.TypeInt, + Description: `The current number of revoked certificate entries`, + Required: true, + }, + "revocation_queue_deleted_count": { + Type: framework.TypeInt, + Required: true, + }, + "tidy_move_legacy_ca_bundle": { + Type: framework.TypeBool, + Required: true, + }, + "tidy_revocation_queue": { + Type: framework.TypeBool, + Required: true, + }, + "missing_issuer_cert_count": { + Type: framework.TypeInt, + Required: true, + }, + "internal_backend_uuid": { + Type: framework.TypeString, + Required: true, + }, + "total_acme_account_count": { + Type: framework.TypeInt, + Description: `Total number of ACME accounts iterated over`, + Required: false, + }, + "acme_account_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked ACME accounts removed`, + Required: false, + }, + "acme_account_revoked_count": { + Type: framework.TypeInt, + Description: `The number of unused ACME accounts revoked`, + Required: false, + }, + "acme_orders_deleted_count": { + Type: framework.TypeInt, + Description: `The number of expired, unused ACME orders removed`, + Required: false, + }, + }, + }}, + }, ForwardPerformanceStandby: true, }, }, @@ -89,6 +513,9 @@ func pathTidyStatus(b *backend) *framework.Path { func pathConfigAutoTidy(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/auto-tidy", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, Fields: addTidyFields(map[string]*framework.FieldSchema{ "enabled": { Type: framework.TypeBool, @@ -99,13 +526,208 @@ Description: `Interval at which to run an auto-tidy operation. This is the time between tidy invocations (after one finishes to the start of the next). Running a manual tidy will reset this duration.`, Default: int(defaultTidyConfig.Interval / time.Second), // TypeDurationSecond currently requires the default to be an int.
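The trailing comment above notes that `TypeDurationSecond` currently requires an int default. A small sketch of how such a field round-trips, assuming only the framework behavior visible in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/framework"
)

func main() {
	// Per the comment above, a TypeDurationSecond default must be an int
	// count of seconds, even though request input may arrive as "12h" or 43200.
	f := &framework.FieldSchema{
		Type:    framework.TypeDurationSecond,
		Default: int(12 * time.Hour / time.Second), // 43200
	}

	// Handlers then read the parsed value back as an int of seconds and
	// convert, as pathTidyWrite does with safety_buffer and friends.
	interval := time.Duration(f.Default.(int)) * time.Second
	fmt.Println(interval) // 12h0m0s
}
```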
}, + "maintain_stored_certificate_counts": { + Type: framework.TypeBool, + Description: `This configures whether stored certificates +are counted upon initialization of the backend, and whether during +normal operation, a running count of certificates stored is maintained.`, + Default: false, + }, + "publish_stored_certificate_count_metrics": { + Type: framework.TypeBool, + Description: `This configures whether the stored certificate +count is published to the metrics consumer. It does not affect if the +stored certificate count is maintained, and if maintained, it will be +available on the tidy-status endpoint.`, + Default: false, + }, }), Operations: map[logical.Operation]framework.OperationHandler{ logical.ReadOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "auto-tidy-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "enabled": { + Type: framework.TypeBool, + Description: `Specifies whether automatic tidy is enabled or not`, + Required: true, + }, + "interval_duration": { + Type: framework.TypeInt, + Description: `Specifies the duration between automatic tidy operation`, + Required: true, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Specifies whether to tidy up the certificate store`, + Required: true, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Specifies whether to remove all invalid and expired certificates from storage`, + Required: true, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, + Required: true, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Specifies whether tidy expired issuers`, + Required: true, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy Unused Acme Accounts, and Orders`, + Required: true, + }, + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: true, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after creation after which accounts lacking orders are revoked`, + Required: false, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: true, + }, + "tidy_move_legacy_ca_bundle": { + Type: framework.TypeBool, + Required: true, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Required: true, + }, + "tidy_revocation_queue": { + Type: framework.TypeBool, + Required: true, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Required: true, + }, + "publish_stored_certificate_count_metrics": { + Type: framework.TypeBool, + Required: true, + }, + "maintain_stored_certificate_counts": { + Type: framework.TypeBool, + Required: true, + }, + }, + }}, + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathConfigAutoTidyWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "auto-tidy", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "enabled": { + Type: 
framework.TypeBool, + Description: `Specifies whether automatic tidy is enabled or not`, + Required: true, + }, + "interval_duration": { + Type: framework.TypeInt, + Description: `Specifies the duration between automatic tidy operation`, + Required: true, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Specifies whether to tidy up the certificate store`, + Required: true, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Specifies whether to remove all invalid and expired certificates from storage`, + Required: true, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Specifies whether to associate revoked certificates with their corresponding issuers`, + Required: true, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Specifies whether tidy expired issuers`, + Required: true, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy Unused Acme Accounts, and Orders`, + Required: true, + }, + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: true, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after creation after which accounts lacking orders are revoked`, + Required: true, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: true, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: true, + }, + "tidy_revocation_queue": { + Type: framework.TypeBool, + Required: true, + }, + "tidy_move_legacy_ca_bundle": { + Type: framework.TypeBool, + Required: true, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Required: true, + }, + "publish_stored_certificate_count_metrics": { + Type: framework.TypeBool, + Required: true, + }, + "maintain_stored_certificate_counts": { + Type: framework.TypeBool, + Required: true, + }, + }, + }}, + }, // Read more about why these flags are set in backend.go. 
ForwardPerformanceStandby: true, ForwardPerformanceSecondary: true, @@ -122,9 +744,15 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool) || d.Get("tidy_revocation_list").(bool) tidyRevokedAssocs := d.Get("tidy_revoked_cert_issuer_associations").(bool) tidyExpiredIssuers := d.Get("tidy_expired_issuers").(bool) + tidyBackupBundle := d.Get("tidy_move_legacy_ca_bundle").(bool) issuerSafetyBuffer := d.Get("issuer_safety_buffer").(int) pauseDurationStr := d.Get("pause_duration").(string) pauseDuration := 0 * time.Second + tidyRevocationQueue := d.Get("tidy_revocation_queue").(bool) + queueSafetyBuffer := d.Get("revocation_queue_safety_buffer").(int) + tidyCrossRevokedCerts := d.Get("tidy_cross_cluster_revoked_certs").(bool) + tidyAcme := d.Get("tidy_acme").(bool) + acmeAccountSafetyBuffer := d.Get("acme_account_safety_buffer").(int) if safetyBuffer < 1 { return logical.ErrorResponse("safety_buffer must be greater than zero"), nil @@ -134,9 +762,17 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("issuer_safety_buffer must be greater than zero"), nil } + if queueSafetyBuffer < 1 { + return logical.ErrorResponse("revocation_queue_safety_buffer must be greater than zero"), nil + } + + if acmeAccountSafetyBuffer < 1 { + return logical.ErrorResponse("acme_account_safety_buffer must be greater than zero"), nil + } + if pauseDurationStr != "" { var err error - pauseDuration, err = time.ParseDuration(pauseDurationStr) + pauseDuration, err = parseutil.ParseDurationSecond(pauseDurationStr) if err != nil { return logical.ErrorResponse(fmt.Sprintf("Error parsing pause_duration: %v", err)), nil } @@ -148,18 +784,26 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr bufferDuration := time.Duration(safetyBuffer) * time.Second issuerBufferDuration := time.Duration(issuerSafetyBuffer) * time.Second + queueSafetyBufferDuration := time.Duration(queueSafetyBuffer) * time.Second + acmeAccountSafetyBufferDuration := time.Duration(acmeAccountSafetyBuffer) * time.Second // Manual run with constructed configuration. 
config := &tidyConfig{ - Enabled: true, - Interval: 0 * time.Second, - CertStore: tidyCertStore, - RevokedCerts: tidyRevokedCerts, - IssuerAssocs: tidyRevokedAssocs, - ExpiredIssuers: tidyExpiredIssuers, - SafetyBuffer: bufferDuration, - IssuerSafetyBuffer: issuerBufferDuration, - PauseDuration: pauseDuration, + Enabled: true, + Interval: 0 * time.Second, + CertStore: tidyCertStore, + RevokedCerts: tidyRevokedCerts, + IssuerAssocs: tidyRevokedAssocs, + ExpiredIssuers: tidyExpiredIssuers, + BackupBundle: tidyBackupBundle, + SafetyBuffer: bufferDuration, + IssuerSafetyBuffer: issuerBufferDuration, + PauseDuration: pauseDuration, + RevocationQueue: tidyRevocationQueue, + QueueSafetyBuffer: queueSafetyBufferDuration, + CrossRevokedCerts: tidyCrossRevokedCerts, + TidyAcme: tidyAcme, + AcmeAccountSafetyBuffer: acmeAccountSafetyBufferDuration, } if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { @@ -184,12 +828,20 @@ func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *fr b.startTidyOperation(req, config) resp := &logical.Response{} - if !tidyCertStore && !tidyRevokedCerts && !tidyRevokedAssocs && !tidyExpiredIssuers { - resp.AddWarning("No targets to tidy; specify tidy_cert_store=true or tidy_revoked_certs=true or tidy_revoked_cert_issuer_associations=true or tidy_expired_issuers=true to start a tidy operation.") + if !config.IsAnyTidyEnabled() { + resp.AddWarning("Manual tidy requested but no tidy operations were set. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ").") } else { resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") } + if tidyRevocationQueue || tidyCrossRevokedCerts { + isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) + if isNotPerfPrimary { + resp.AddWarning("tidy_revocation_queue=true and tidy_cross_cluster_revoked_certs=true can only be set on the active node of the primary cluster unless a local mount is used; this option has been ignored.") + } + } + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) } @@ -223,12 +875,61 @@ func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { } } + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + if config.ExpiredIssuers { if err := b.doTidyExpiredIssuers(ctx, req, logger, config); err != nil { return err } } + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.BackupBundle { + if err := b.doTidyMoveCABundle(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.RevocationQueue { + if err := b.doTidyRevocationQueue(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.CrossRevokedCerts { + if err := b.doTidyCrossRevocationStore(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. 
+ if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.TidyAcme { + if err := b.doTidyAcme(ctx, req, logger, config); err != nil { + return err + } + } + return nil } @@ -298,7 +999,7 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log return fmt.Errorf("unable to parse stored certificate with serial %q: %w", serial, err) } - if time.Now().After(cert.NotAfter.Add(config.SafetyBuffer)) { + if time.Since(cert.NotAfter) > config.SafetyBuffer { if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) } @@ -314,8 +1015,8 @@ func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, log } func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() // Fetch and parse our issuers so we can associate them if necessary. sc := b.makeStorageContext(ctx, req.Storage) @@ -348,9 +1049,9 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques // Check for pause duration to reduce resource consumption. if config.PauseDuration > (0 * time.Second) { - b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Unlock() time.Sleep(config.PauseDuration) - b.revokeStorageLock.Lock() + b.GetRevokeStorageLock().Lock() } revokedEntry, err := req.Storage.Get(ctx, "revoked/"+serial) @@ -393,7 +1094,7 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques if config.IssuerAssocs { if !isRevInfoIssuerValid(&revInfo, issuerIDCertMap) { b.tidyStatusIncMissingIssuerCertCount() - revInfo.CertificateIssuer = issuerID("") + revInfo.CertificateIssuer = issuing.IssuerID("") storeCert = true if associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) { fixedIssuers += 1 @@ -406,7 +1107,7 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques // past its NotAfter value. This is because we use the // information on revoked/ to build the CRL and the // information on certs/ for lookup. - if time.Now().After(revokedCert.NotAfter.Add(config.SafetyBuffer)) { + if time.Since(revokedCert.NotAfter) > config.SafetyBuffer { if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err) } @@ -451,9 +1152,17 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques } if !config.AutoRebuild { - if err := b.crlBuilder.rebuild(sc, false); err != nil { + warnings, err := b.CrlBuilder().rebuild(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of CRL for tidy, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } } } @@ -461,6 +1170,9 @@ func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Reques } func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + // We do not support cancelling within the expired issuers operation. + // Any cancellation will occur before or after this operation. 
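Before the expired-issuer logic continues below, a note on the `atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0)` checks threaded between each stage above: this is how a pending `tidy-cancel` request is consumed. The cancel endpoint sets the flag to 1, and the first stage boundary to observe it swaps it back to 0 and aborts the run, so exactly one check fires per request. A stripped-down sketch of that pattern (the names here are illustrative, not the backend's own):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var cancelFlag uint32 // set to 1 by a cancel request

// runStages checks for a pending cancel between stages; the
// compare-and-swap consumes the request so exactly one check aborts.
func runStages(stages ...func() error) error {
	for _, stage := range stages {
		if atomic.CompareAndSwapUint32(&cancelFlag, 1, 0) {
			return errors.New("tidy operation cancelled")
		}
		if err := stage(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	atomic.StoreUint32(&cancelFlag, 1) // simulate a tidy-cancel request
	fmt.Println(runStages(func() error { return nil }))
}
```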
+ if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { b.Logger().Debug("skipping expired issuer tidy as we're not on the primary or secondary with a local mount") @@ -470,7 +1182,7 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request // Short-circuit to avoid having to deal with the legacy mounts. While we // could handle this case and remove these issuers, it's somewhat // unexpected behavior and we'd prefer to finish the migration first. - if b.useLegacyBundleCaStorage() { + if b.UseLegacyBundleCaStorage() { return nil } @@ -492,16 +1204,11 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request } // We want certificates which have expired before this date by a given - // safety buffer. So we subtract the buffer from now, and anything which - // has expired before our after buffer can be tidied, and anything that - // expired after this buffer must be kept. - now := time.Now() - afterBuffer := now.Add(-1 * config.IssuerSafetyBuffer) - + // safety buffer. rebuildChainsAndCRL := false for issuer, cert := range issuerIDCertMap { - if cert.NotAfter.After(afterBuffer) { + if time.Since(cert.NotAfter) <= config.IssuerSafetyBuffer { continue } @@ -554,12 +1261,348 @@ func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request // Removal of issuers is generally a good reason to rebuild the CRL, // even if auto-rebuild is enabled. - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() - if err := b.crlBuilder.rebuild(sc, false); err != nil { + warnings, err := b.CrlBuilder().rebuild(sc, false) + if err != nil { return err } + if len(warnings) > 0 { + msg := "During rebuild of CRL for tidy, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } + } + + return nil +} + +func (b *backend) doTidyMoveCABundle(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + // We do not support cancelling within this operation; any cancel will + // occur before or after this operation. + + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + b.Logger().Debug("skipping moving the legacy CA bundle as we're not on the primary or secondary with a local mount") + return nil + } + + // Short-circuit to avoid moving the legacy bundle from under a legacy + // mount. + if b.UseLegacyBundleCaStorage() { + return nil + } + + // If we've already run, exit.
+ _, bundle, err := getLegacyCertBundle(ctx, req.Storage) + if err != nil { + return fmt.Errorf("failed to fetch the legacy CA bundle: %w", err) + } + + if bundle == nil { + b.Logger().Debug("No legacy CA bundle available; nothing to do.") + return nil + } + + log, err := getLegacyBundleMigrationLog(ctx, req.Storage) + if err != nil { + return fmt.Errorf("failed to fetch the legacy bundle migration log: %w", err) + } + + if log == nil { + return fmt.Errorf("refusing to tidy with an empty legacy migration log but present CA bundle") + } + + if time.Since(log.Created) <= config.IssuerSafetyBuffer { + b.Logger().Debug("Migration was created too recently to remove the legacy bundle; refusing to move legacy CA bundle to backup location.") + return nil + } + + // Do the write before the delete. + entry, err := logical.StorageEntryJSON(legacyCertBundleBackupPath, bundle) + if err != nil { + return fmt.Errorf("failed to create new backup storage entry: %w", err) + } + + err = req.Storage.Put(ctx, entry) + if err != nil { + return fmt.Errorf("failed to write new backup legacy CA bundle: %w", err) + } + + err = req.Storage.Delete(ctx, legacyCertBundlePath) + if err != nil { + return fmt.Errorf("failed to remove old legacy CA bundle path: %w", err) + } + + b.Logger().Info("legacy CA bundle successfully moved to backup location") + return nil +} + +func (b *backend) doTidyRevocationQueue(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + b.Logger().Debug("skipping cross-cluster revocation queue tidy as we're not on the primary or secondary with a local mount") + return nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err) + } + + // Grab locks as we're potentially modifying revocation-related storage. + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() + + for cIndex, cluster := range clusters { + if cluster[len(cluster)-1] == '/' { + cluster = cluster[0 : len(cluster)-1] + } + + cPath := crossRevocationPrefix + cluster + "/" + serials, err := sc.Storage.List(sc.Context, cPath) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err) + } + + for _, serial := range serials { + // Check for cancellation. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + b.GetRevokeStorageLock().Unlock() + time.Sleep(config.PauseDuration) + b.GetRevokeStorageLock().Lock() + } + + // Confirmation entries _should_ be handled by this cluster's + // processRevocationQueue(...) invocation; if not, when the plugin + // reloads, maybeGatherQueueForFirstProcess(...) will remove all + // stale confirmation requests. However, we don't want to force an + // operator to reload their in-use plugin, so allow tidy to also + // clean up confirmation values without reloading. + if serial[len(serial)-1] == '/' { + // Check if we have a confirmed entry.
+ confirmedPath := cPath + serial + "confirmed" + removalEntry, err := sc.Storage.Get(sc.Context, confirmedPath) + if err != nil { + return fmt.Errorf("error reading revocation confirmation (%v) during tidy: %w", confirmedPath, err) + } + if removalEntry == nil { + continue + } + + // Remove potential revocation requests from all clusters. + for _, subCluster := range clusters { + if subCluster[len(subCluster)-1] == '/' { + subCluster = subCluster[0 : len(subCluster)-1] + } + + reqPath := subCluster + "/" + serial[0:len(serial)-1] + if err := sc.Storage.Delete(sc.Context, reqPath); err != nil { + return fmt.Errorf("failed to remove confirmed revocation request on candidate cluster (%v): %w", reqPath, err) + } + } + + // Then delete the confirmation. + if err := sc.Storage.Delete(sc.Context, confirmedPath); err != nil { + return fmt.Errorf("failed to remove confirmed revocation confirmation (%v): %w", confirmedPath, err) + } + + // No need to handle a revocation request at this path: it can't + // still exist on this cluster after we deleted it above. + continue + } + + ePath := cPath + serial + entry, err := sc.Storage.Get(sc.Context, ePath) + if err != nil { + return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err) + } + if entry == nil || entry.Value == nil { + continue + } + + var revRequest revocationRequest + if err := entry.DecodeJSON(&revRequest); err != nil { + return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err) + } + + if time.Since(revRequest.RequestedAt) <= config.QueueSafetyBuffer { + continue + } + + // Safe to remove this entry. + if err := sc.Storage.Delete(sc.Context, ePath); err != nil { + return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err) + } + + // Assumption: there should never be a need to remove this from + // the processing queue on this node. We're on the active primary, + // so our writes don't cause invalidations. This means we'd have + // to have slated it for deletion very quickly after it'd been + // sent (i.e., inside of the 1-minute boundary that periodicFunc + // executes at). While this is possible, because we grab the + // revocationStorageLock above, we can't execute interleaved + // with that periodicFunc, so the periodicFunc would've had to + // have finished before we actually did this deletion (or it wouldn't + // have ignored this serial because our deletion would've + // happened prior to it reading the storage entry). Thus we should + // be safe to ignore the revocation queue removal here. + b.tidyStatusIncRevQueueCount() + } + } + + return nil +} + +func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + b.Logger().Debug("skipping cross-cluster revoked certificate store tidy as we're not on the primary or secondary with a local mount") + return nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + clusters, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revoked certificate store participating clusters: %w", err) + } + + // Grab locks as we're potentially modifying revocation-related storage.
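A note before the lock is taken below: the unlock/sleep/relock sequence used for `pause_duration` here (and earlier in `doTidyRevocationStore` and `doTidyRevocationQueue`) exists so a long pause does not hold the revoke storage lock across the sleep. In isolation the pattern looks like this sketch, with a stand-in mutex rather than the backend's lock; the caveat is that any state read before the pause must be treated as potentially stale afterwards:

```go
package main

import (
	"sync"
	"time"
)

// pauseWithoutBlocking releases mu for the duration of the sleep and
// reacquires it before returning. Callers must hold mu on entry, and
// must re-validate anything read before the pause once it returns.
func pauseWithoutBlocking(mu *sync.Mutex, pause time.Duration) {
	if pause <= 0 {
		return
	}
	mu.Unlock()
	time.Sleep(pause)
	mu.Lock()
}

func main() {
	var mu sync.Mutex
	mu.Lock()
	pauseWithoutBlocking(&mu, 10*time.Millisecond)
	mu.Unlock()
}
```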
+ b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() + + for cIndex, cluster := range clusters { + if cluster[len(cluster)-1] == '/' { + cluster = cluster[0 : len(cluster)-1] + } + + cPath := unifiedRevocationReadPathPrefix + cluster + "/" + serials, err := sc.Storage.List(sc.Context, cPath) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revoked certificate store entries for cluster %v (%v): %w", cluster, cIndex, err) + } + + for _, serial := range serials { + // Check for cancellation. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + b.GetRevokeStorageLock().Unlock() + time.Sleep(config.PauseDuration) + b.GetRevokeStorageLock().Lock() + } + + ePath := cPath + serial + entry, err := sc.Storage.Get(sc.Context, ePath) + if err != nil { + return fmt.Errorf("error reading cross-cluster revocation entry (%v) to tidy: %w", ePath, err) + } + if entry == nil || entry.Value == nil { + continue + } + + var details unifiedRevocationEntry + if err := entry.DecodeJSON(&details); err != nil { + return fmt.Errorf("error decoding cross-cluster revocation entry (%v) to tidy: %w", ePath, err) + } + + if time.Since(details.CertExpiration) <= config.SafetyBuffer { + continue + } + + // Safe to remove this entry. + if err := sc.Storage.Delete(sc.Context, ePath); err != nil { + return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err) + } + + b.tidyStatusIncCrossRevCertCount() + } + } + + return nil +} + +func (b *backend) doTidyAcme(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + b.acmeAccountLock.Lock() + defer b.acmeAccountLock.Unlock() + + sc := b.makeStorageContext(ctx, req.Storage) + thumbprints, err := sc.Storage.List(ctx, acmeThumbprintPrefix) + if err != nil { + return err + } + + b.tidyStatusLock.Lock() + b.tidyStatus.acmeAccountsCount = uint(len(thumbprints)) + b.tidyStatusLock.Unlock() + + for _, thumbprint := range thumbprints { + err := b.tidyAcmeAccountByThumbprint(b.GetAcmeState(), sc, thumbprint, config.SafetyBuffer, config.AcmeAccountSafetyBuffer) + if err != nil { + logger.Warn("error tidying account", "thumbprint", thumbprint, "error", err) + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + b.acmeAccountLock.Unlock() // Release the lock while sleeping + time.Sleep(config.PauseDuration) + b.acmeAccountLock.Lock() + } + + } + + // Clean up any unused EAB + eabIds, err := b.GetAcmeState().ListEabIds(sc) + if err != nil { + return fmt.Errorf("failed listing EAB ids: %w", err) + } + + for _, eabId := range eabIds { + eab, err := b.GetAcmeState().LoadEab(sc, eabId) + if err != nil { + if errors.Is(err, ErrStorageItemNotFound) { + // We don't need to worry about a consumed EAB + continue + } + return err + } + + eabExpiration := eab.CreatedOn.Add(config.AcmeAccountSafetyBuffer) + if time.Now().After(eabExpiration) { + _, err := b.GetAcmeState().DeleteEab(sc, eabId) + if err != nil { + return fmt.Errorf("failed to tidy eab %s: %w", eabId, err) + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption.
+ if config.PauseDuration > (0 * time.Second) { + b.acmeAccountLock.Unlock() // Release the lock while sleeping + time.Sleep(config.PauseDuration) + b.acmeAccountLock.Lock() + } } return nil @@ -600,6 +1643,10 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f "tidy_revoked_certs": nil, "tidy_revoked_cert_issuer_associations": nil, "tidy_expired_issuers": nil, + "tidy_move_legacy_ca_bundle": nil, + "tidy_revocation_queue": nil, + "tidy_cross_cluster_revoked_certs": nil, + "tidy_acme": nil, "pause_duration": nil, "state": "Inactive", "error": nil, @@ -611,9 +1658,33 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f "missing_issuer_cert_count": nil, "current_cert_store_count": nil, "current_revoked_cert_count": nil, + "internal_backend_uuid": nil, + "revocation_queue_deleted_count": nil, + "cross_revoked_cert_deleted_count": nil, + "total_acme_account_count": nil, + "acme_account_deleted_count": nil, + "acme_account_revoked_count": nil, + "acme_orders_deleted_count": nil, + "acme_account_safety_buffer": nil, }, } + resp.Data["internal_backend_uuid"] = b.backendUUID + + certCounter := b.GetCertificateCounter() + if certCounter.IsEnabled() { + resp.Data["current_cert_store_count"] = certCounter.CertificateCount() + resp.Data["current_revoked_cert_count"] = certCounter.RevokedCount() + if !certCounter.IsInitialized() { + resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " + + "inaccurate") + } + certError := certCounter.Error() + if certError != nil { + resp.Data["certificate_counting_error"] = certError.Error() + } + } + if b.tidyStatus.state == tidyStatusInactive { return resp, nil } @@ -624,12 +1695,25 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f resp.Data["tidy_cert_store"] = b.tidyStatus.tidyCertStore resp.Data["tidy_revoked_certs"] = b.tidyStatus.tidyRevokedCerts resp.Data["tidy_revoked_cert_issuer_associations"] = b.tidyStatus.tidyRevokedAssocs resp.Data["tidy_expired_issuers"] = b.tidyStatus.tidyExpiredIssuers + resp.Data["tidy_move_legacy_ca_bundle"] = b.tidyStatus.tidyBackupBundle + resp.Data["tidy_revocation_queue"] = b.tidyStatus.tidyRevocationQueue + resp.Data["tidy_cross_cluster_revoked_certs"] = b.tidyStatus.tidyCrossRevokedCerts + resp.Data["tidy_acme"] = b.tidyStatus.tidyAcme resp.Data["pause_duration"] = b.tidyStatus.pauseDuration resp.Data["time_started"] = b.tidyStatus.timeStarted resp.Data["message"] = b.tidyStatus.message resp.Data["cert_store_deleted_count"] = b.tidyStatus.certStoreDeletedCount resp.Data["revoked_cert_deleted_count"] = b.tidyStatus.revokedCertDeletedCount resp.Data["missing_issuer_cert_count"] = b.tidyStatus.missingIssuerCertCount + resp.Data["revocation_queue_deleted_count"] = b.tidyStatus.revQueueDeletedCount + resp.Data["cross_revoked_cert_deleted_count"] = b.tidyStatus.crossRevokedDeletedCount + resp.Data["revocation_queue_safety_buffer"] = b.tidyStatus.revQueueSafetyBuffer + resp.Data["last_auto_tidy_finished"] = b.lastTidy + resp.Data["total_acme_account_count"] = b.tidyStatus.acmeAccountsCount + resp.Data["acme_account_deleted_count"] = b.tidyStatus.acmeAccountsDeletedCount + resp.Data["acme_account_revoked_count"] = b.tidyStatus.acmeAccountsRevokedCount + resp.Data["acme_orders_deleted_count"] = b.tidyStatus.acmeOrdersDeletedCount + resp.Data["acme_account_safety_buffer"] = b.tidyStatus.acmeAccountSafetyBuffer switch b.tidyStatus.state { case tidyStatusStarted: @@ -651,14 +1735,6 @@ func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *f
resp.Data["time_finished"] = b.tidyStatus.timeFinished } - resp.Data["current_cert_store_count"] = b.certCount - resp.Data["current_revoked_cert_count"] = b.revokedCertCount - - if !b.certsCounted.Load() { - resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " + - "inaccurate") - } - return resp, nil } @@ -670,17 +1746,7 @@ func (b *backend) pathConfigAutoTidyRead(ctx context.Context, req *logical.Reque } return &logical.Response{ - Data: map[string]interface{}{ - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "tidy_expired_issuers": config.ExpiredIssuers, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - }, + Data: getTidyConfigData(*config), }, nil } @@ -722,7 +1788,7 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ } if pauseDurationRaw, ok := d.GetOk("pause_duration"); ok { - config.PauseDuration, err = time.ParseDuration(pauseDurationRaw.(string)) + config.PauseDuration, err = parseutil.ParseDurationSecond(pauseDurationRaw.(string)) if err != nil { return logical.ErrorResponse(fmt.Sprintf("unable to parse given pause_duration: %v", err)), nil } @@ -743,8 +1809,50 @@ func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Requ } } - if config.Enabled && !(config.CertStore || config.RevokedCerts || config.IssuerAssocs || config.ExpiredIssuers) { - return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations)."), nil + if backupBundle, ok := d.GetOk("tidy_move_legacy_ca_bundle"); ok { + config.BackupBundle = backupBundle.(bool) + } + + if revocationQueueRaw, ok := d.GetOk("tidy_revocation_queue"); ok { + config.RevocationQueue = revocationQueueRaw.(bool) + } + + if queueSafetyBufferRaw, ok := d.GetOk("revocation_queue_safety_buffer"); ok { + config.QueueSafetyBuffer = time.Duration(queueSafetyBufferRaw.(int)) * time.Second + if config.QueueSafetyBuffer < 1*time.Second { + return logical.ErrorResponse(fmt.Sprintf("given revocation_queue_safety_buffer must be at least one second; got: %v", queueSafetyBufferRaw)), nil + } + } + + if crossRevokedRaw, ok := d.GetOk("tidy_cross_cluster_revoked_certs"); ok { + config.CrossRevokedCerts = crossRevokedRaw.(bool) + } + + if tidyAcmeRaw, ok := d.GetOk("tidy_acme"); ok { + config.TidyAcme = tidyAcmeRaw.(bool) + } + + if acmeAccountSafetyBufferRaw, ok := d.GetOk("acme_account_safety_buffer"); ok { + config.AcmeAccountSafetyBuffer = time.Duration(acmeAccountSafetyBufferRaw.(int)) * time.Second + if config.AcmeAccountSafetyBuffer < 1*time.Second { + return logical.ErrorResponse(fmt.Sprintf("given acme_account_safety_buffer must be at least one second; got: %v", acmeAccountSafetyBufferRaw)), nil + } + } + + if config.Enabled && !config.IsAnyTidyEnabled() { + return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. 
Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ")."), nil + } + + if maintainCountEnabledRaw, ok := d.GetOk("maintain_stored_certificate_counts"); ok { + config.MaintainCount = maintainCountEnabledRaw.(bool) + } + + if runningStorageMetricsEnabledRaw, ok := d.GetOk("publish_stored_certificate_count_metrics"); ok { + config.PublishMetrics = runningStorageMetricsEnabledRaw.(bool) + } + + if config.PublishMetrics && !config.MaintainCount { + return logical.ErrorResponse("Cannot publish a running storage metrics count to metrics without first maintaining that count. Enable `maintain_stored_certificate_counts` to enable `publish_stored_certificate_count_metrics`."), nil } if err := sc.writeAutoTidyConfig(config); err != nil { @@ -752,17 +1860,7 @@ } return &logical.Response{ - Data: map[string]interface{}{ - "enabled": config.Enabled, - "interval_duration": int(config.Interval / time.Second), - "tidy_cert_store": config.CertStore, - "tidy_revoked_certs": config.RevokedCerts, - "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, - "tidy_expired_issuers": config.ExpiredIssuers, - "safety_buffer": int(config.SafetyBuffer / time.Second), - "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), - "pause_duration": config.PauseDuration.String(), - }, + Data: getTidyConfigData(*config), }, nil } @@ -771,13 +1869,19 @@ func (b *backend) tidyStatusStart(config *tidyConfig) { defer b.tidyStatusLock.Unlock() b.tidyStatus = &tidyStatus{ - safetyBuffer: int(config.SafetyBuffer / time.Second), - issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), - tidyCertStore: config.CertStore, - tidyRevokedCerts: config.RevokedCerts, - tidyRevokedAssocs: config.IssuerAssocs, - tidyExpiredIssuers: config.ExpiredIssuers, - pauseDuration: config.PauseDuration.String(), + safetyBuffer: int(config.SafetyBuffer / time.Second), + issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), + revQueueSafetyBuffer: int(config.QueueSafetyBuffer / time.Second), + acmeAccountSafetyBuffer: int(config.AcmeAccountSafetyBuffer / time.Second), + tidyCertStore: config.CertStore, + tidyRevokedCerts: config.RevokedCerts, + tidyRevokedAssocs: config.IssuerAssocs, + tidyExpiredIssuers: config.ExpiredIssuers, + tidyBackupBundle: config.BackupBundle, + tidyRevocationQueue: config.RevocationQueue, + tidyCrossRevokedCerts: config.CrossRevokedCerts, + tidyAcme: config.TidyAcme, + pauseDuration: config.PauseDuration.String(), state: tidyStatusStarted, timeStarted: time.Now(), @@ -825,7 +1929,7 @@ func (b *backend) tidyStatusIncCertStoreCount() { b.tidyStatus.certStoreDeletedCount++ - b.decrementTotalCertificatesCountReport() + b.GetCertificateCounter().DecrementTotalCertificatesCountReport() } func (b *backend) tidyStatusIncRevokedCertCount() { @@ -834,7 +1938,7 @@ func (b *backend) tidyStatusIncRevokedCertCount() { b.tidyStatus.revokedCertDeletedCount++ - b.decrementTotalRevokedCertificatesCountReport() + b.GetCertificateCounter().DecrementTotalRevokedCertificatesCountReport() } func (b *backend) tidyStatusIncMissingIssuerCertCount() { @@ -844,6 +1948,41 @@ b.tidyStatus.missingIssuerCertCount++ } +func (b *backend) tidyStatusIncRevQueueCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.revQueueDeletedCount++ +} + +func (b *backend) tidyStatusIncCrossRevCertCount() { + b.tidyStatusLock.Lock() + defer
b.tidyStatusLock.Unlock() + + b.tidyStatus.crossRevokedDeletedCount++ +} + +func (b *backend) tidyStatusIncRevAcmeAccountCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeAccountsRevokedCount++ +} + +func (b *backend) tidyStatusIncDeletedAcmeAccountCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeAccountsDeletedCount++ +} + +func (b *backend) tidyStatusIncDelAcmeOrderCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeOrdersDeletedCount++ +} + const pathTidyHelpSyn = ` Tidy up the backend by removing expired certificates, revocation information, or both. @@ -905,6 +2044,20 @@ The result includes the following fields: * 'cert_store_deleted_count': The number of certificate storage entries deleted * 'revoked_cert_deleted_count': The number of revoked certificate entries deleted * 'missing_issuer_cert_count': The number of revoked certificates which were missing a valid issuer reference +* 'tidy_expired_issuers': the value of this parameter when initiating the tidy operation +* 'issuer_safety_buffer': the value of this parameter when initiating the tidy operation +* 'tidy_move_legacy_ca_bundle': the value of this parameter when initiating the tidy operation +* 'tidy_revocation_queue': the value of this parameter when initiating the tidy operation +* 'revocation_queue_deleted_count': the number of revocation queue entries deleted +* 'tidy_cross_cluster_revoked_certs': the value of this parameter when initiating the tidy operation +* 'cross_revoked_cert_deleted_count': the number of cross-cluster revoked certificate entries deleted +* 'revocation_queue_safety_buffer': the value of this parameter when initiating the tidy operation +* 'tidy_acme': the value of this parameter when initiating the tidy operation +* 'acme_account_safety_buffer': the value of this parameter when initiating the tidy operation +* 'total_acme_account_count': the total number of acme accounts in the list to be iterated over +* 'acme_account_deleted_count': the number of revoked acme accounts deleted during the operation +* 'acme_account_revoked_count': the number of acme accounts revoked during the operation +* 'acme_orders_deleted_count': the number of acme orders deleted during the operation ` const pathConfigAutoTidySyn = ` @@ -920,3 +2073,26 @@ controls the frequency of auto-tidy execution). Once enabled, a tidy operation will be kicked off automatically, as if it were executed with the posted configuration. 
` + +func getTidyConfigData(config tidyConfig) map[string]interface{} { + return map[string]interface{}{ + // This map is in the same order as tidyConfig to ensure that all fields are accounted for + "enabled": config.Enabled, + "interval_duration": int(config.Interval / time.Second), + "tidy_cert_store": config.CertStore, + "tidy_revoked_certs": config.RevokedCerts, + "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, + "tidy_expired_issuers": config.ExpiredIssuers, + "tidy_move_legacy_ca_bundle": config.BackupBundle, + "tidy_acme": config.TidyAcme, + "safety_buffer": int(config.SafetyBuffer / time.Second), + "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), + "acme_account_safety_buffer": int(config.AcmeAccountSafetyBuffer / time.Second), + "pause_duration": config.PauseDuration.String(), + "publish_stored_certificate_count_metrics": config.PublishMetrics, + "maintain_stored_certificate_counts": config.MaintainCount, + "tidy_revocation_queue": config.RevocationQueue, + "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), + "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, + } +} diff --git a/builtin/logical/pki/path_tidy_test.go b/builtin/logical/pki/path_tidy_test.go index 4cd137a21c24..1eb00c7f5bdb 100644 --- a/builtin/logical/pki/path_tidy_test.go +++ b/builtin/logical/pki/path_tidy_test.go @@ -1,10 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" "encoding/json" + "errors" + "fmt" + "path" + "strings" "testing" "time" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "golang.org/x/crypto/acme" + + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/armon/go-metrics" + "github.com/hashicorp/vault/api" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/logical" @@ -13,6 +35,142 @@ import ( "github.com/stretchr/testify/require" ) +func TestTidyConfigs(t *testing.T) { + t.Parallel() + + var cfg tidyConfig + operations := strings.Split(cfg.AnyTidyConfig(), " / ") + require.Greater(t, len(operations), 1, "expected more than one operation") + t.Logf("Got tidy operations: %v", operations) + + lastOp := operations[len(operations)-1] + + for _, operation := range operations { + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to enable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.True(t, resp.Data[operation].(bool), "expected operation to be enabled after reading auto-tidy config "+operation) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: false, + lastOp: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to disable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.False(t, resp.Data[operation].(bool), "expected operation to be disabled after reading auto-tidy config "+operation) + + 
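An aside before the manual tidy write below: the enable/read-back round trip this test drives through the internal `CBWrite`/`CBRead` helpers corresponds to the following against a live mount, using the public API client. A sketch only; the `pki` mount path and the environment-based client configuration are assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// VAULT_ADDR and VAULT_TOKEN are read from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Enable auto-tidy with a single operation toggled on.
	if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"enabled":   true,
		"tidy_acme": true,
	}); err != nil {
		log.Fatal(err)
	}

	// Read the config back and confirm the flag round-trips.
	resp, err := client.Logical().Read("pki/config/auto-tidy")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tidy_acme:", resp.Data["tidy_acme"])
}
```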
resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+operation) + if len(resp.Warnings) > 0 { + t.Logf("got warnings while starting manual tidy: %v", resp.Warnings) + for _, warning := range resp.Warnings { + if strings.Contains(warning, "Manual tidy requested but no tidy operations were set.") { + t.Fatalf("expected to be able to enable tidy operation with just %v but got warning: %v / (resp=%v)", operation, warning, resp) + } + } + } + + lastOp = operation + } + + // pause_duration is tested elsewhere in other tests. + type configSafetyBufferValueStr struct { + Config string + FirstValue int + SecondValue int + DefaultValue int + } + configSafetyBufferValues := []configSafetyBufferValueStr{ + { + Config: "safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.SafetyBuffer / time.Second), + }, + { + Config: "issuer_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), + }, + { + Config: "acme_account_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), + }, + { + Config: "revocation_queue_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), + }, + } + + for _, flag := range configSafetyBufferValues { + b, s := CreateBackendWithStorage(t) + + resp, err := CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for flag "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.DefaultValue, "expected initial auto-tidy config to match default value for "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.SecondValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.SecondValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + t.Logf("tidy run results: resp=%v/err=%v", resp, err) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+flag.Config) + if len(resp.Warnings) > 0 { + for _, warning := range resp.Warnings { + if strings.Contains(warning, "unrecognized parameter") && strings.Contains(warning, flag.Config) { + t.Fatalf("warning '%v' claims parameter '%v' is unknown", warning, flag.Config) + } + } 
+ } + + time.Sleep(2 * time.Second) + + resp, err = CBRead(b, s, "tidy-status") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read tidy-status after setting "+flag.Config) + t.Logf("got response: %v for config: %v", resp, flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected flag to be set in tidy-status for config "+flag.Config) + } +} + func TestAutoTidy(t *testing.T) { t.Parallel() @@ -218,17 +376,19 @@ func TestTidyCancellation(t *testing.T) { // Kick off a tidy operation (which runs in the background), but with // a slow-ish pause between certificates. - _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + resp, err := CBWrite(b, s, "tidy", map[string]interface{}{ "tidy_cert_store": true, "safety_buffer": "1s", "pause_duration": "1s", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy"), logical.UpdateOperation), resp, true) // If we wait six seconds, the operation should still be running. That's // how we check that pause_duration works. time.Sleep(3 * time.Second) - resp, err := CBRead(b, s, "tidy-status") + resp, err = CBRead(b, s, "tidy-status") + require.NoError(t, err) require.NotNil(t, resp) require.NotNil(t, resp.Data) @@ -236,6 +396,7 @@ // If we now cancel the operation, the response should say Cancelling. cancelResp, err := CBWrite(b, s, "tidy-cancel", map[string]interface{}{}) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-cancel"), logical.UpdateOperation), cancelResp, true) require.NoError(t, err) require.NotNil(t, cancelResp) require.NotNil(t, cancelResp.Data) @@ -255,6 +416,7 @@ time.Sleep(3 * time.Second) statusResp, err := CBRead(b, s, "tidy-status") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-status"), logical.ReadOperation), statusResp, true) require.NoError(t, err) require.NotNil(t, statusResp) require.NotNil(t, statusResp.Data) @@ -379,6 +541,7 @@ func TestTidyIssuerConfig(t *testing.T) { // Ensure the default auto-tidy config matches expectations resp, err := CBRead(b, s, "config/auto-tidy") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.ReadOperation), resp, true) requireSuccessNonNilResponse(t, resp, err) jsonBlob, err := json.Marshal(&defaultTidyConfig) @@ -392,6 +555,8 @@ func TestTidyIssuerConfig(t *testing.T) { defaultConfigMap["issuer_safety_buffer"] = int(time.Duration(defaultConfigMap["issuer_safety_buffer"].(float64)) / time.Second) defaultConfigMap["safety_buffer"] = int(time.Duration(defaultConfigMap["safety_buffer"].(float64)) / time.Second) defaultConfigMap["pause_duration"] = time.Duration(defaultConfigMap["pause_duration"].(float64)).String() + defaultConfigMap["revocation_queue_safety_buffer"] = int(time.Duration(defaultConfigMap["revocation_queue_safety_buffer"].(float64)) / time.Second) + defaultConfigMap["acme_account_safety_buffer"] = int(time.Duration(defaultConfigMap["acme_account_safety_buffer"].(float64)) / time.Second) require.Equal(t, defaultConfigMap, resp.Data) @@ -400,7 +565,758 @@ "tidy_expired_issuers": true, "issuer_safety_buffer": "5s", }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.UpdateOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err) require.Equal(t, true, resp.Data["tidy_expired_issuers"]) require.Equal(t, 5, resp.Data["issuer_safety_buffer"]) } + +//
TestCertStorageMetrics ensures that when enabled, metrics are able to count the number of certificates in storage and +// number of revoked certificates in storage. Moreover, this test ensures that the gauge is emitted periodically, so +// that the metric does not disappear or go stale. +func TestCertStorageMetrics(t *testing.T) { + // This test uses the same setup as TestAutoTidy + newPeriod := 1 * time.Second + + // We set up a metrics accumulator + inmemSink := metrics.NewInmemSink( + 2*newPeriod, // A short time period is ideal here to test metrics are emitted every periodic func + 10*newPeriod) // Do not keep a huge amount of metrics in the sink forever, clear them out to save memory usage. + + metricsConf := metrics.DefaultConfig("") + metricsConf.EnableHostname = false + metricsConf.EnableHostnameLabel = false + metricsConf.EnableServiceLabel = false + metricsConf.EnableTypePrefix = false + + _, err := metrics.NewGlobal(metricsConf, inmemSink) + if err != nil { + t.Fatal(err) + } + + // This test requires the periodicFunc to trigger, which requires we stand + // up a full test cluster. + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + // See notes below about usage of /sys/raw for reading cluster + // storage without barrier encryption. + EnableRaw: true, + RollbackPeriod: newPeriod, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "10m", + MaxLeaseTTL: "60m", + }, + }) + require.NoError(t, err) + + // Generate root. + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + + // Set up a testing role.
+ _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + // Run tidy so that tidy-status is not empty + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_revoked_certs": true, + }) + require.NoError(t, err) + + // Since certificate counts are off by default, we shouldn't see counts in the tidy status + tidyStatus, err := client.Logical().Read("pki/tidy-status") + if err != nil { + t.Fatal(err) + } + // backendUUID should exist, we need this for metrics + backendUUID := tidyStatus.Data["internal_backend_uuid"].(string) + // "current_cert_store_count", "current_revoked_cert_count" + countData, ok := tidyStatus.Data["current_cert_store_count"] + if ok && countData != nil { + t.Fatalf("Certificate counting should be off by default, but current cert store count %v appeared in tidy status in unconfigured mount", countData) + } + revokedCountData, ok := tidyStatus.Data["current_revoked_cert_count"] + if ok && revokedCountData != nil { + t.Fatalf("Certificate counting should be off by default, but revoked cert count %v appeared in tidy status in unconfigured mount", revokedCountData) + } + + // Since certificate counts are off by default, those metrics should not exist yet + stableMetric := inmemSink.Data() + mostRecentInterval := stableMetric[len(stableMetric)-1] + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if ok { + t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") + } + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] + if ok { + t.Fatalf("Certificate counting should be off by default, but total certificate count was emitted as a metric in an unconfigured mount") + } + + // Write the auto-tidy config. + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "enabled": true, + "interval_duration": "1s", + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": false, + }) + require.NoError(t, err) + + // Reload the Mount - Otherwise Stored Certificate Counts Will Not Be Populated + // Sealing cores as plugin reload triggers the race detector - VAULT-13635 + testhelpers.EnsureCoresSealed(t, cluster) + testhelpers.EnsureCoresUnsealed(t, cluster) + + // Wait until a tidy run has completed. 
+ testhelpers.RetryUntil(t, 5*time.Second, func() error { + resp, err = client.Logical().Read("pki/tidy-status") + if err != nil { + return fmt.Errorf("error reading tidy status: %w", err) + } + if finished, ok := resp.Data["time_finished"]; !ok || finished == "" || finished == nil { + return fmt.Errorf("tidy time_finished not run yet: %v", finished) + } + return nil + }) + + // Since publish_stored_certificate_count_metrics is still false, these metrics should still not exist yet + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if ok { + t.Fatalf("Metrics publishing is still disabled, but revoked cert count was emitted as a metric") + } + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] + if ok { + t.Fatalf("Metrics publishing is still disabled, but total certificate count was emitted as a metric") + } + + // But since certificate counting is on, the metrics should exist on tidyStatus endpoint: + tidyStatus, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err, "failed reading tidy-status endpoint") + + // backendUUID should exist, we need this for metrics + backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) + // "current_cert_store_count", "current_revoked_cert_count" + certStoreCount, ok := tidyStatus.Data["current_cert_store_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") + } + if certStoreCount != json.Number("1") { + t.Fatalf("Only created one certificate, but got a certificate count of %v", certStoreCount) + } + revokedCertCount, ok := tidyStatus.Data["current_revoked_cert_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") + } + if revokedCertCount != json.Number("0") { + t.Fatalf("Have not yet revoked a certificate, but got a revoked cert store count of %v", revokedCertCount) + } + + // Write the auto-tidy config, again, this time turning on metrics + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "enabled": true, + "interval_duration": "1s", + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + require.NoError(t, err, "failed updating auto-tidy configuration") + + // Issue a cert and revoke it.
+ resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ + "common_name": "example.com", + "ttl": "10s", + }) + require.NoError(t, err, "failed to issue leaf certificate") + require.NotNil(t, resp, "nil response without error on issuing leaf certificate") + require.NotNil(t, resp.Data, "empty Data without error on issuing leaf certificate") + require.NotEmpty(t, resp.Data["serial_number"]) + require.NotEmpty(t, resp.Data["certificate"]) + leafSerial := resp.Data["serial_number"].(string) + leafCert := parseCert(t, resp.Data["certificate"].(string)) + + // Read cert before revoking + resp, err = client.Logical().Read("pki/cert/" + leafSerial) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + revocationTime, err := (resp.Data["revocation_time"].(json.Number)).Int64() + require.Equal(t, int64(0), revocationTime, "revocation time was not zero") + require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") + require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") + + revokeResp, err := client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": leafSerial, + }) + require.NoError(t, err, "failed revoking serial number: %s", leafSerial) + + for _, warning := range revokeResp.Warnings { + if strings.Contains(warning, "already expired; refusing to add to CRL") { + t.Skipf("Skipping test as we missed the revocation window of our leaf cert") + } + } + + // We read the auto-tidy endpoint again, to ensure any metrics logic has completed (lock on config) + _, err = client.Logical().Read("/pki/config/auto-tidy") + require.NoError(t, err, "failed to read auto-tidy configuration") + + // Check Metrics After Cert Has Been Created and Revoked + tidyStatus, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err, "failed to read tidy-status") + + backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) + certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") + } + if certStoreCount != json.Number("2") { + t.Fatalf("Created root and leaf certificate, but got a certificate count of %v", certStoreCount) + } + revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") + } + if revokedCertCount != json.Number("1") { + t.Fatalf("Revoked one certificate, but got a revoked cert store count of %v\n:%v", revokedCertCount, tidyStatus) + } + // This should now be initialized + certCountError, ok := tidyStatus.Data["certificate_counting_error"] + if ok && certCountError.(string) != "" { + t.Fatalf("Expected certificate count error to disappear after initialization, but got error %v", certCountError) + } + + testhelpers.RetryUntil(t, newPeriod*5, func() error { + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] + revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if !ok { + return errors.New("turned on metrics, but revoked cert count was not emitted") + } + if revokedCertCountGaugeValue.Value != 1 { + return fmt.Errorf("revoked one certificate, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue) + } +
certStoreCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] + if !ok { + return errors.New("turned on metrics, but total certificate count was not emitted") + } + if certStoreCountGaugeValue.Value != 2 { + return fmt.Errorf("stored two certificates, but total certificate count emitted was %v", certStoreCountGaugeValue.Value) + } + return nil + }) + + // Wait for cert to expire and the safety buffer to elapse. + sleepFor := time.Until(leafCert.NotAfter) + 3*time.Second + t.Logf("%v: Sleeping for %v, leaf certificate expires: %v", time.Now().Format(time.RFC3339), sleepFor, leafCert.NotAfter) + time.Sleep(sleepFor) + + // Wait for auto-tidy to run afterwards. + var foundTidyRunning string + var foundTidyFinished bool + timeoutChan := time.After(120 * time.Second) + for { + if foundTidyRunning != "" && foundTidyFinished { + break + } + + select { + case <-timeoutChan: + t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) + default: + time.Sleep(250 * time.Millisecond) + + resp, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["state"]) + require.NotEmpty(t, resp.Data["time_started"]) + state := resp.Data["state"].(string) + started := resp.Data["time_started"].(string) + + t.Logf("%v: Resp: %v", time.Now().Format(time.RFC3339), resp.Data) + + // We want the _next_ tidy run after the cert expires. This + // means if we're currently finished when we hit this the + // first time, we want to wait for the next run. + if foundTidyRunning == "" { + foundTidyRunning = started + } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { + foundTidyFinished = true + } + } + } + + // After Tidy, Cert Store Count Should Still Be Available, and Be Updated: + // Check Metrics After Cert Has Been Created and Revoked + tidyStatus, err = client.Logical().Read("pki/tidy-status") + if err != nil { + t.Fatal(err) + } + backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) + // "current_cert_store_count", "current_revoked_cert_count" + certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") + } + if certStoreCount != json.Number("1") { + t.Fatalf("Created root and leaf certificate, deleted leaf, but got a certificate count of %v", certStoreCount) + } + revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") + } + if revokedCertCount != json.Number("0") { + t.Fatalf("Revoked certificate has been tidied, but got a revoked cert store count of %v", revokedCertCount) + } + + testhelpers.RetryUntil(t, newPeriod*5, func() error { + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] + revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if !ok { + return errors.New("turned on metrics, but revoked cert count was not emitted") + } + if revokedCertCountGaugeValue.Value != 0 { + return fmt.Errorf("revoked certificate has been tidied, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue) + } + certStoreCountGaugeValue, ok :=
+ +// This test uses the default safety buffer with backdating. +func TestTidyAcmeWithBackdate(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx := context.Background() + + // Grab the mount UUID for sys/raw invocations. + pkiMount := findStorageMountUuid(t, client, "pki") + + // Register an Account, do nothing with it + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account with order/cert + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + t.Logf("got account URI: %v", acct.URI) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + // -> Ensure we see it in storage. Since we don't have direct storage + // access, use sys/raw interface. + acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix) + listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err, "failed listing ACME thumbprints") + require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response") + + // Run Tidy + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + waitForTidyToFinish(t, client, "pki") + + // Check that the Account is Still There, Still Valid. 
+ account, err := acmeClient.GetReg(context.Background(), "" /* legacy unused param*/) + require.NoError(t, err, "failed looking up acme account") + require.Equal(t, acme.StatusValid, account.Status) + + // Find the associated thumbprint + listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err) + require.NotNil(t, listResp) + thumbprintEntries := listResp.Data["keys"].([]interface{}) + require.Equal(t, len(thumbprintEntries), 1) + thumbprint := thumbprintEntries[0].(string) + + // Let "time pass"; this is a HACK: this helper writes through sys/raw to backdate the timestamps on objects in storage + duration := time.Until(acmeCert.NotAfter) + 31*24*time.Hour + accountId := acmeClient.KID[strings.LastIndex(string(acmeClient.KID), "/")+1:] + orderId := order.URI[strings.LastIndex(order.URI, "/")+1:] + backDateAcmeOrderSys(t, testCtx, client, string(accountId), orderId, duration, pkiMount) + + // Run Tidy -> clean up order + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + tidyResp := waitForTidyToFinish(t, client, "pki") + + require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("1"), + "expected to delete a single ACME order: %v", tidyResp) + require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("0"), + "no ACME account should have been revoked: %v", tidyResp) + require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"), + "no ACME account should have been deleted: %v", tidyResp) + + // Make sure our order is indeed deleted. + _, err = acmeClient.GetOrder(context.Background(), order.URI) + require.ErrorContains(t, err, "order does not exist") + + // Check that the Account is Still There, Still Valid. + account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/) + require.NoError(t, err, "failed looking up acme account") + require.Equal(t, acme.StatusValid, account.Status) + + // Now backdate the account to make sure we revoke it + backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount) + + // Run Tidy -> mark account revoked + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + tidyResp = waitForTidyToFinish(t, client, "pki") + require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("0"), + "no ACME orders should have been deleted: %v", tidyResp) + require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("1"), + "expected to revoke a single ACME account: %v", tidyResp) + require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"), + "no ACME account should have been deleted: %v", tidyResp) + + // Lookup our account to make sure we get the appropriate revoked status + account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/) + require.NoError(t, err, "failed looking up acme account") + require.Equal(t, acme.StatusRevoked, account.Status) + + // Let "time pass"; this is a HACK: this helper writes through sys/raw to backdate the timestamps on objects in storage + backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount) + + // Run Tidy -> remove account + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. 
+ waitForTidyToFinish(t, client, "pki") + + // Check Account No Longer Appears + listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err) + if listResp != nil { + thumbprintEntries = listResp.Data["keys"].([]interface{}) + require.Equal(t, 0, len(thumbprintEntries)) + } + + // Nor Under Account + _, acctKID := path.Split(acct.URI) + acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID) + t.Logf("account path: %v", acctPath) + getResp, err := client.Logical().ReadWithContext(testCtx, acctPath) + require.NoError(t, err) + require.Nil(t, getResp) +} + +// This test uses a smaller safety buffer. +func TestTidyAcmeWithSafetyBuffer(t *testing.T) { + t.Parallel() + + // This would still be way easier if I could do both sides + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx := context.Background() + + // Grab the mount UUID for sys/raw invocations. + pkiMount := findStorageMountUuid(t, client, "pki") + + // Register an Account, do nothing with it + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + t.Logf("got account URI: %v", acct.URI) + require.NoError(t, err, "failed registering account") + + // -> Ensure we see it in storage. Since we don't have direct storage + // access, use sys/raw interface. + acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix) + listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err, "failed listing ACME thumbprints") + require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response") + thumbprintEntries := listResp.Data["keys"].([]interface{}) + require.Equal(t, len(thumbprintEntries), 1) + + // Wait for the account to expire. + time.Sleep(2 * time.Second) + + // Run Tidy -> mark account revoked + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + "acme_account_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + statusResp := waitForTidyToFinish(t, client, "pki") + require.Equal(t, statusResp.Data["acme_account_revoked_count"], json.Number("1"), "expected to revoke a single ACME account") + + // Wait for the account to expire. + time.Sleep(2 * time.Second) + + // Run Tidy -> remove account + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + "acme_account_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. 
+ waitForTidyToFinish(t, client, "pki") + + // Check Account No Longer Appears + listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err) + if listResp != nil { + thumbprintEntries = listResp.Data["keys"].([]interface{}) + require.Equal(t, 0, len(thumbprintEntries)) + } + + // Nor Under Account + _, acctKID := path.Split(acct.URI) + acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID) + t.Logf("account path: %v", acctPath) + getResp, err := client.Logical().ReadWithContext(testCtx, acctPath) + require.NoError(t, err) + require.Nil(t, getResp) +} + +// The *Sys helpers below all manipulate storage through the sys/raw/logical interface, working off of an API client rather than direct storage access. +func backDateAcmeAccountSys(t *testing.T, testContext context.Context, client *api.Client, thumbprintString string, backdateAmount time.Duration, mount string) { + rawThumbprintPath := path.Join("sys/raw/logical/", mount, acmeThumbprintPrefix+thumbprintString) + thumbprintResp, err := client.Logical().ReadWithContext(testContext, rawThumbprintPath) + if err != nil { + t.Fatalf("unable to fetch thumbprint response at %v: %v", rawThumbprintPath, err) + } + + var thumbprint acmeThumbprint + err = jsonutil.DecodeJSON([]byte(thumbprintResp.Data["value"].(string)), &thumbprint) + if err != nil { + t.Fatalf("unable to decode thumbprint response %v to find account entry: %v", thumbprintResp.Data, err) + } + + accountPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix+thumbprint.Kid) + accountResp, err := client.Logical().ReadWithContext(testContext, accountPath) + if err != nil { + t.Fatalf("unable to fetch account entry %v: %v", thumbprint.Kid, err) + } + + var account acmeAccount + err = jsonutil.DecodeJSON([]byte(accountResp.Data["value"].(string)), &account) + if err != nil { + t.Fatalf("unable to decode acme account %v: %v", accountResp, err) + } + + t.Logf("got account before update: %v", account) + + account.AccountCreatedDate = backDate(account.AccountCreatedDate, backdateAmount) + account.MaxCertExpiry = backDate(account.MaxCertExpiry, backdateAmount) + account.AccountRevokedDate = backDate(account.AccountRevokedDate, backdateAmount) + + t.Logf("got account after update: %v", account) + + encodeJSON, err := jsonutil.EncodeJSON(account) + require.NoError(t, err, "failed encoding acme account") + _, err = client.Logical().WriteWithContext(context.Background(), accountPath, map[string]interface{}{ + "value": base64.StdEncoding.EncodeToString(encodeJSON), + "encoding": "base64", + }) + if err != nil { + t.Fatalf("error saving backdated account entry at %v: %v", accountPath, err) + } + + ordersPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix, thumbprint.Kid, "/orders/") + ordersRaw, err := client.Logical().ListWithContext(context.Background(), ordersPath) + require.NoError(t, err, "failed listing orders") + + if ordersRaw == nil { + t.Logf("skipping backdating orders as there are none") + return + } + + require.NotNil(t, ordersRaw, "got no response data") + require.NotNil(t, ordersRaw.Data, "got no response data") + + orders := ordersRaw.Data + + for _, orderId := range orders["keys"].([]interface{}) { + backDateAcmeOrderSys(t, testContext, client, thumbprint.Kid, orderId.(string), backdateAmount, mount) + } + + // No need to change certificate entries here - no time is stored on AcmeCertEntry +}
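backDateAcmeAccountSys above follows a read-modify-write cycle against sys/raw: read the stored JSON blob out of `Data["value"]`, mutate it, then write it back base64-encoded with `"encoding": "base64"`. Here is a generic sketch of that cycle, assuming the raw endpoint is enabled on the cluster; `mutateRawEntry` is an illustrative name, not a helper from this change:

```go
package rawhelpers

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"path"

	"github.com/hashicorp/vault/api"
)

// mutateRawEntry reads a raw storage entry, applies a caller-supplied
// mutation, and writes the updated blob back via sys/raw.
func mutateRawEntry(ctx context.Context, client *api.Client, mount, storagePath string, mutate func(map[string]interface{})) error {
	rawPath := path.Join("sys/raw/logical", mount, storagePath)

	resp, err := client.Logical().ReadWithContext(ctx, rawPath)
	if err != nil {
		return fmt.Errorf("reading %s: %w", rawPath, err)
	}
	if resp == nil {
		return fmt.Errorf("no entry at %s", rawPath)
	}

	// sys/raw returns the stored value as a JSON string under "value".
	var entry map[string]interface{}
	if err := json.Unmarshal([]byte(resp.Data["value"].(string)), &entry); err != nil {
		return fmt.Errorf("decoding %s: %w", rawPath, err)
	}

	mutate(entry)

	encoded, err := json.Marshal(entry)
	if err != nil {
		return fmt.Errorf("encoding %s: %w", rawPath, err)
	}

	// Writes accept the mutated blob back as base64.
	_, err = client.Logical().WriteWithContext(ctx, rawPath, map[string]interface{}{
		"value":    base64.StdEncoding.EncodeToString(encoded),
		"encoding": "base64",
	})
	return err
}
```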
+ +func backDateAcmeOrderSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, orderId string, backdateAmount time.Duration, mount string) { + rawOrderPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "orders", orderId) + orderResp, err := client.Logical().ReadWithContext(testContext, rawOrderPath) + if err != nil { + t.Fatalf("unable to fetch order entry %v on account %v at %v: %v", orderId, accountKid, rawOrderPath, err) + } + + var order *acmeOrder + err = jsonutil.DecodeJSON([]byte(orderResp.Data["value"].(string)), &order) + if err != nil { + t.Fatalf("error decoding order entry %v on account %v from %v: %v", orderId, accountKid, orderResp, err) + } + + order.Expires = backDate(order.Expires, backdateAmount) + order.CertificateExpiry = backDate(order.CertificateExpiry, backdateAmount) + + encodeJSON, err := jsonutil.EncodeJSON(order) + require.NoError(t, err, "failed encoding order entry") + _, err = client.Logical().WriteWithContext(context.Background(), rawOrderPath, map[string]interface{}{ + "value": base64.StdEncoding.EncodeToString(encodeJSON), + "encoding": "base64", + }) + if err != nil { + t.Fatalf("error saving backdated order entry %v on account %v: %v", orderId, accountKid, err) + } + + for _, authId := range order.AuthorizationIds { + backDateAcmeAuthorizationSys(t, testContext, client, accountKid, authId, backdateAmount, mount) + } +} + +func backDateAcmeAuthorizationSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, authId string, backdateAmount time.Duration, mount string) { + rawAuthPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "/authorizations/", authId) + + authResp, err := client.Logical().ReadWithContext(testContext, rawAuthPath) + if err != nil { + t.Fatalf("unable to fetch authorization %v: %v", rawAuthPath, err) + } + + var auth *ACMEAuthorization + err = jsonutil.DecodeJSON([]byte(authResp.Data["value"].(string)), &auth) + if err != nil { + t.Fatalf("error decoding auth %v from entry %v: %v", rawAuthPath, authResp, err) + } + + expiry, err := auth.GetExpires() + if err != nil { + t.Fatalf("could not get expiry on %v: %v", rawAuthPath, err) + } + newExpiry := backDate(expiry, backdateAmount) + auth.Expires = newExpiry.Format(time.RFC3339) + + encodeJSON, err := jsonutil.EncodeJSON(auth) + require.NoError(t, err, "failed encoding authorization entry") + _, err = client.Logical().WriteWithContext(context.Background(), rawAuthPath, map[string]interface{}{ + "value": base64.StdEncoding.EncodeToString(encodeJSON), + "encoding": "base64", + }) + if err != nil { + t.Fatalf("error updating authorization date on %v: %v", rawAuthPath, err) + } +} + +func backDate(original time.Time, change time.Duration) time.Time { + if original.IsZero() { + return original + } + + zeroTime := time.Time{} + + if original.Before(zeroTime.Add(change)) { + return zeroTime + } + + return original.Add(-change) +} + +func waitForTidyToFinish(t *testing.T, client *api.Client, mount string) *api.Secret { + var statusResp *api.Secret + testhelpers.RetryUntil(t, 5*time.Second, func() error { + var err error + + tidyStatusPath := mount + "/tidy-status" + statusResp, err = client.Logical().Read(tidyStatusPath) + if err != nil { + return fmt.Errorf("failed reading path: %s: %w", tidyStatusPath, err) + } + if state, ok := statusResp.Data["state"]; !ok || state == "Running" { + return fmt.Errorf("tidy status state is still running") + } + + if errorOccurred, ok := statusResp.Data["error"]; !ok || !(errorOccurred == nil || errorOccurred == "") { + return fmt.Errorf("tidy status returned an error: %s", errorOccurred) + } + + return nil + }) + + t.Logf("got tidy status: %v", statusResp.Data) + return statusResp +}
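The backDate helper above has one subtlety worth calling out: it leaves unset (zero) timestamps untouched and clamps at the zero time rather than underflowing past it. A small standalone demonstration (the helper body is copied from the test file above; only the `main` driver is ours):

```go
package main

import (
	"fmt"
	"time"
)

// backDate shifts a timestamp into the past, clamping at the zero time
// and leaving unset (zero) timestamps alone.
func backDate(original time.Time, change time.Duration) time.Time {
	if original.IsZero() {
		return original
	}
	if original.Before(time.Time{}.Add(change)) {
		return time.Time{}
	}
	return original.Add(-change)
}

func main() {
	now := time.Now()
	fmt.Println(backDate(now, 31*24*time.Hour))   // roughly 31 days ago
	fmt.Println(backDate(time.Time{}, time.Hour)) // zero stays zero
}
```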
diff --git a/builtin/logical/pki/periodic.go b/builtin/logical/pki/periodic.go new file mode 100644 index 000000000000..b5c6209b1459 --- /dev/null +++ b/builtin/logical/pki/periodic.go @@ -0,0 +1,337 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "crypto/x509" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + minUnifiedTransferDelay = 30 * time.Minute +) + +type UnifiedTransferStatus struct { + isRunning atomic.Bool + lastRun time.Time + forceRerun atomic.Bool +} + +func (uts *UnifiedTransferStatus) forceRun() { + uts.forceRerun.Store(true) +} + +func newUnifiedTransferStatus() *UnifiedTransferStatus { + return &UnifiedTransferStatus{} +} + +// runUnifiedTransfer is meant to run as a background job; it sends any +// missing local revocation entries to the unified space if the feature +// is enabled. +func runUnifiedTransfer(sc *storageContext) { + b := sc.Backend + status := b.GetUnifiedTransferStatus() + + isPerfStandby := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) + + if isPerfStandby || b.System().LocalMount() { + // We only do this on active enterprise nodes, when we aren't a local mount + return + } + + config, err := b.CrlBuilder().getConfigWithUpdate(sc) + if err != nil { + b.Logger().Error("failed to retrieve crl config from storage for unified transfer background process", + "error", err) + return + } + + if !config.UnifiedCRL { + // Feature is disabled, no need to run + return + } + + clusterId, err := b.System().ClusterID(sc.Context) + if err != nil { + b.Logger().Error("failed to fetch cluster id for unified transfer background process", + "error", err) + return + } + + if !status.isRunning.CompareAndSwap(false, true) { + b.Logger().Debug("an existing unified transfer process is already running") + return + } + defer status.isRunning.Store(false) + + // Because access to lastRun is not locked, we need to delay this check + // until after we grab the isRunning CAS lock. + if !status.lastRun.IsZero() { + // We have run before, we only run again if we have + // been requested to forceRerun, and we haven't run since our + // minimum delay. + if !(status.forceRerun.Load() && time.Since(status.lastRun) < minUnifiedTransferDelay) { + return + } + } + + // Reset our flag before we begin: once the transfer has started, we + // can't guarantee that we can properly parse/fix an error coming in + // from the revoke API. Clearing it up front forces another run in the + // worst case, and we fix things on the next periodic function call + // that passes our minimum delay. + status.forceRerun.Store(false) + + err = doUnifiedTransferMissingLocalSerials(sc, clusterId) + if err != nil { + b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) + status.forceRerun.Store(true) + } else { + if config.EnableDelta { + err = doUnifiedTransferMissingDeltaWALSerials(sc, clusterId) + if err != nil { + b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) + status.forceRerun.Store(true) + } + } + } + + status.lastRun = time.Now() +}
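runUnifiedTransfer's concurrency guard is worth isolating: `CompareAndSwap(false, true)` on an `atomic.Bool` admits exactly one runner at a time, the deferred `Store(false)` releases it, and `forceRerun` records that a skipped caller wanted another pass. A minimal sketch of just the single-runner part, using only the standard library; `singleFlight` is our name for the pattern, not a type from this change:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// singleFlight admits exactly one concurrent runner via CompareAndSwap;
// the deferred Store releases the "lock" when the work finishes.
type singleFlight struct {
	isRunning atomic.Bool
}

func (s *singleFlight) run(work func()) bool {
	if !s.isRunning.CompareAndSwap(false, true) {
		return false // another run is already in progress
	}
	defer s.isRunning.Store(false)
	work()
	return true
}

func main() {
	var sf singleFlight
	var wg sync.WaitGroup
	var ran atomic.Int32

	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if sf.run(func() {}) {
				ran.Add(1)
			}
		}()
	}
	wg.Wait()
	// At least one goroutine runs; fast workers may take turns, so the
	// count can be anywhere from 1 to 8 -- but never two at once.
	fmt.Printf("%d of 8 goroutines got to run\n", ran.Load())
}
```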
+ +func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string) error { + localRevokedSerialNums, err := sc.listRevokedCerts() + if err != nil { + return err + } + if len(localRevokedSerialNums) == 0 { + // No local certs to transfer, no further work to do. + return nil + } + + unifiedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) + if err != nil { + return err + } + unifiedCertLookup := sliceToMapKey(unifiedSerials) + + errCount := 0 + for i, serialNum := range localRevokedSerialNums { + if i%25 == 0 { + config, _ := sc.Backend.CrlBuilder().getConfigWithUpdate(sc) + if config != nil && !config.UnifiedCRL { + return errors.New("unified crl has been disabled after we started, stopping") + } + } + if _, ok := unifiedCertLookup[serialNum]; !ok { + err := readRevocationEntryAndTransfer(sc, serialNum) + if err != nil { + errCount++ + sc.Backend.Logger().Error("Failed transferring local revocation to unified space", + "serial", serialNum, "error", err) + } + } + } + + if errCount > 0 { + sc.Backend.Logger().Warn(fmt.Sprintf("Failed transferring %d local serials to unified storage", errCount)) + } + + return nil +}
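The loop above re-reads the CRL config every 25 serials so a long-running background sync can stop promptly if an operator disables the feature mid-run (the delta WAL loop below does the same). A distilled sketch of that bail-out pattern; `processWithRecheck` and its callbacks are illustrative, not code from this change:

```go
package main

import (
	"errors"
	"fmt"
)

// processWithRecheck walks a work list, periodically re-checking whether
// the feature driving the work is still enabled, and stops early if not.
func processWithRecheck(items []string, stillEnabled func() bool, handle func(string) error) error {
	for i, item := range items {
		if i%25 == 0 && !stillEnabled() {
			return errors.New("feature disabled after we started, stopping")
		}
		if err := handle(item); err != nil {
			// The real code logs and continues; we simply propagate here.
			return err
		}
	}
	return nil
}

func main() {
	items := []string{"serial-a", "serial-b", "serial-c"}
	err := processWithRecheck(items,
		func() bool { return true },
		func(s string) error { fmt.Println("transferred", s); return nil })
	fmt.Println("done, err =", err)
}
```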
+ +func doUnifiedTransferMissingDeltaWALSerials(sc *storageContext, clusterId string) error { + // We need to do a similar thing for Delta WAL entry certificates. + // When the delta WAL failed to write for one or more entries, + // we'll need to replicate these up to the primary cluster. When it + // has performed a new delta WAL build, it will empty storage and + // update to a last written WAL entry that exceeds what we've seen + // locally. + thisUnifiedWALEntryPath := unifiedDeltaWALPath + deltaWALLastRevokedSerialName + lastUnifiedWALEntry, err := getLastWALSerial(sc, thisUnifiedWALEntryPath) + if err != nil { + return fmt.Errorf("failed to fetch last cross-cluster unified revoked delta WAL serial number: %w", err) + } + + lastLocalWALEntry, err := getLastWALSerial(sc, localDeltaWALLastRevokedSerial) + if err != nil { + return fmt.Errorf("failed to fetch last locally revoked delta WAL serial number: %w", err) + } + + // We now need to transfer all the entries and then write the last WAL + // entry at the end. Start by listing all certificates; any missing + // certificates will be copied over and then the WAL entry will be + // updated once. + // + // We do not delete entries either locally or remotely, as either + // cluster could've rebuilt delta CRLs with out-of-sync information, + // removing some entries (and we cannot differentiate between these + // two cases). On next full CRL rebuild (on either cluster), the state + // should get synchronized, and future delta CRLs after this function + // returns without issue will see the remaining entries. + // + // Lastly, we need to ensure we don't accidentally write any unified + // delta WAL entries that aren't present in the main cross-cluster + // revoked storage location. This would mean the above function failed + // to copy them for some reason, despite them presumably appearing + // locally. + _unifiedWALEntries, err := sc.Storage.List(sc.Context, unifiedDeltaWALPath) + if err != nil { + return fmt.Errorf("failed to list cross-cluster unified delta WAL storage: %w", err) + } + unifiedWALEntries := sliceToMapKey(_unifiedWALEntries) + + _unifiedRevokedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revoked certificates: %w", err) + } + unifiedRevokedSerials := sliceToMapKey(_unifiedRevokedSerials) + + localWALEntries, err := sc.Storage.List(sc.Context, localDeltaWALPath) + if err != nil { + return fmt.Errorf("failed to list local delta WAL storage: %w", err) + } + + if lastUnifiedWALEntry == lastLocalWALEntry && len(_unifiedWALEntries) == len(localWALEntries) { + // Writing the last revoked WAL entry is the last thing that we do. + // Because these entries match (across clusters) and we have the same + // number of entries, assume we don't have anything to sync and exit + // early. + // + // We need both checks as, in the event of PBPWF failing and then + // returning while more revocations are happening, we could have + // been scheduled to run, but then skip running (if only the first + // condition was checked) because a later revocation succeeded + // in writing a unified WAL entry, before we started replicating + // the rest back up. + // + // The downside of this approach is that, if the main cluster + // does a full rebuild in the meantime, we could re-sync more + // entries back up to the primary cluster that are already + // included in the complete CRL. Users can manually rebuild the + // full CRL (clearing these duplicate delta CRL entries) if this + // affects them. + return nil + } + + errCount := 0 + for index, serial := range localWALEntries { + if index%25 == 0 { + config, _ := sc.Backend.CrlBuilder().getConfigWithUpdate(sc) + if config != nil && (!config.UnifiedCRL || !config.EnableDelta) { + return errors.New("unified or delta CRLs have been disabled after we started, stopping") + } + } + + if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { + // Skip our special serial numbers. + continue + } + + _, isAlreadyPresent := unifiedWALEntries[serial] + if isAlreadyPresent { + // Serial exists on both local and unified cluster. We're + // presuming we don't need to read and re-write these entries + // and that only missing entries need to be updated. + continue + } + + _, isRevokedCopied := unifiedRevokedSerials[serial] + if !isRevokedCopied { + // The corresponding full revocation entry hasn't been copied + // up yet; wait for a later run to transfer this WAL entry. + errCount += 1 + sc.Backend.Logger().Debug("Delta WAL exists locally, but corresponding cross-cluster full revocation entry is missing; skipping", "serial", serial) + continue + } + + // All good: read the local entry and write to the remote variant. + localPath := localDeltaWALPath + serial + unifiedPath := unifiedDeltaWALPath + serial + + entry, err := sc.Storage.Get(sc.Context, localPath) + if err != nil || entry == nil { + errCount += 1 + sc.Backend.Logger().Error("Failed reading local delta WAL entry to copy to cross-cluster", "serial", serial, "err", err) + continue + } + + entry.Key = unifiedPath + err = sc.Storage.Put(sc.Context, entry) + if err != nil { + errCount += 1 + sc.Backend.Logger().Error("Failed syncing local delta WAL entry to cross-cluster unified delta WAL location", "serial", serial, "err", err) + continue + } + } + + if errCount > 0 { + // See note above about why we don't fail here. + sc.Backend.Logger().Warn(fmt.Sprintf("Failed transferring %d local delta WAL serials to unified storage", errCount)) + return nil + } + + // Everything worked. Here, we can write over the delta WAL last revoked + // value. By using the earlier value, even if new revocations have + // occurred, we ensure any further missing entries can be handled in the + // next round. + lastRevSerial := lastWALInfo{Serial: lastLocalWALEntry} + lastWALEntry, err := logical.StorageEntryJSON(thisUnifiedWALEntryPath, lastRevSerial) + if err != nil { + return fmt.Errorf("unable to create cross-cluster unified last delta CRL WAL entry: %w", err) + } + if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { + return fmt.Errorf("error saving cross-cluster unified last delta CRL WAL entry: %w", err) + } + + return nil +}
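The delta WAL sync above is a one-way "copy only what's missing" pass: build a set from the remote listing, then transfer the local entries absent from it. Below is a plausible shape for the `sliceToMapKey` helper it relies on, together with the membership-difference step, as a runnable sketch (the helper's exact signature in the source may differ; `missingFrom` is our name):

```go
package main

import "fmt"

// sliceToMapKey converts a list of keys into a set for O(1) membership checks.
func sliceToMapKey(s []string) map[string]struct{} {
	m := make(map[string]struct{}, len(s))
	for _, k := range s {
		m[k] = struct{}{}
	}
	return m
}

// missingFrom returns the local entries not yet present remotely: the
// core step of the one-way sync performed above.
func missingFrom(local []string, remote map[string]struct{}) []string {
	var missing []string
	for _, serial := range local {
		if _, ok := remote[serial]; !ok {
			missing = append(missing, serial)
		}
	}
	return missing
}

func main() {
	remote := sliceToMapKey([]string{"aa:bb", "cc:dd"})
	local := []string{"aa:bb", "cc:dd", "ee:ff"}
	fmt.Println(missingFrom(local, remote)) // [ee:ff]
}
```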
+ +func readRevocationEntryAndTransfer(sc *storageContext, serial string) error { + hyphenSerial := normalizeSerial(serial) + revInfo, err := sc.fetchRevocationInfo(hyphenSerial) + if err != nil { + return fmt.Errorf("failed loading revocation entry for serial: %s: %w", serial, err) + } + if revInfo == nil { + sc.Backend.Logger().Debug("no certificate revocation entry for serial", "serial", serial) + return nil + } + cert, err := x509.ParseCertificate(revInfo.CertificateBytes) + if err != nil { + sc.Backend.Logger().Debug("failed parsing certificate stored in revocation entry for serial", + "serial", serial, "error", err) + return nil + } + if revInfo.CertificateIssuer == "" { + // No certificate issuer assigned to this serial yet, just drop it for now, + // as a CRL rebuild/tidy needs to happen first + return nil + } + + revocationTime := revInfo.RevocationTimeUTC + if revInfo.RevocationTimeUTC.IsZero() { + // Legacy revocation entries only had RevocationTime set, not RevocationTimeUTC... + revocationTime = time.Unix(revInfo.RevocationTime, 0) + } + + if time.Now().After(cert.NotAfter) { + // Skip transferring this entry as it has already expired. + return nil + } + + entry := &unifiedRevocationEntry{ + SerialNumber: hyphenSerial, + CertExpiration: cert.NotAfter, + RevocationTimeUTC: revocationTime, + CertificateIssuer: revInfo.CertificateIssuer, + } + + return writeUnifiedRevocationEntry(sc, entry) +} diff --git a/builtin/logical/pki/pki_backend/common.go b/builtin/logical/pki/pki_backend/common.go new file mode 100644 index 000000000000..b34fda076199 --- /dev/null +++ b/builtin/logical/pki/pki_backend/common.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pki_backend + +import ( + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/logical" +) + +type SystemViewGetter interface { + System() logical.SystemView +} + +type MountInfo interface { + BackendUUID() string +} + +type Logger interface { + Logger() log.Logger +} + +type CertificateCounter interface { + IsInitialized() bool + IncrementTotalCertificatesCount(certsCounted bool, newSerial string) +} diff --git a/builtin/logical/pki/secret_certs.go b/builtin/logical/pki/secret_certs.go index c3afa06fb502..274aa55edf65 100644 --- a/builtin/logical/pki/secret_certs.go +++ b/builtin/logical/pki/secret_certs.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( "context" + "crypto/x509" "fmt" "github.com/hashicorp/vault/sdk/framework" @@ -45,13 +49,38 @@ func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, _ return nil, fmt.Errorf("could not find serial in internal secret data") } - b.revokeStorageLock.Lock() - defer b.revokeStorageLock.Unlock() + b.GetRevokeStorageLock().Lock() + defer b.GetRevokeStorageLock().Unlock() sc := b.makeStorageContext(ctx, req.Storage) - resp, err := revokeCert(sc, serialInt.(string), true) - if resp == nil && err == nil { - b.Logger().Warn("expired certificate revoke failed because not found in storage, treating as success", "serial", serialInt.(string)) + serial := serialInt.(string) + + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + return nil, err + } + if certEntry == nil { + // We can't write to revoked/ or update the CRL anyway because we don't have the cert, + // and there's no reason to expect this will work on a subsequent + // retry. Just give up and let the lease get deleted. + b.Logger().Warn("expired certificate revoke failed because not found in storage, treating as success", "serial", serial) + return nil, nil + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) } - return resp, err + + // Compatibility: Don't revoke CAs if they had leases. New CAs going forward aren't issued leases. + if cert.IsCA { + return nil, nil + } + + config, err := sc.Backend.CrlBuilder().getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) + } + + return revokeCert(sc, config, cert) } diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go index f8861a97cca4..6daeebd81fdc 100644 --- a/builtin/logical/pki/storage.go +++ b/builtin/logical/pki/storage.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -5,255 +8,50 @@ import ( "context" "crypto" "crypto/x509" + "errors" "fmt" - "sort" "strings" "time" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" ) +var ErrStorageItemNotFound = errors.New("storage item not found") + const ( - storageKeyConfig = "config/keys" - storageIssuerConfig = "config/issuers" - keyPrefix = "config/key/" - issuerPrefix = "config/issuer/" - storageLocalCRLConfig = "crls/config" + storageKeyConfig = issuing.StorageKeyConfig + storageIssuerConfig = issuing.StorageIssuerConfig + keyPrefix = issuing.KeyPrefix + issuerPrefix = issuing.IssuerPrefix + storageLocalCRLConfig = issuing.StorageLocalCRLConfig + storageUnifiedCRLConfig = issuing.StorageUnifiedCRLConfig legacyMigrationBundleLogKey = "config/legacyMigrationBundleLog" - legacyCertBundlePath = "config/ca_bundle" + legacyCertBundlePath = issuing.LegacyCertBundlePath + legacyCertBundleBackupPath = "config/ca_bundle.bak" legacyCRLPath = "crl" deltaCRLPath = "delta-crl" deltaCRLPathSuffix = "-delta" + unifiedCRLPath = "unified-crl" + unifiedDeltaCRLPath = "unified-delta-crl" + unifiedCRLPathPrefix = "unified-" autoTidyConfigPath = "config/auto-tidy" clusterConfigPath = "config/cluster" - // Used as a quick sanity check for a reference id lookups... - uuidLength = 36 - maxRolesToScanOnIssuerChange = 100 maxRolesToFindOnIssuerChange = 10 - - latestIssuerVersion = 1 -) - -type keyID string - -func (p keyID) String() string { - return string(p) -} - -type issuerID string - -func (p issuerID) String() string { - return string(p) -} - -type crlID string - -func (p crlID) String() string { - return string(p) -} - -const ( - IssuerRefNotFound = issuerID("not-found") - KeyRefNotFound = keyID("not-found") -) - -type keyEntry struct { - ID keyID `json:"id"` - Name string `json:"name"` - PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"` - PrivateKey string `json:"private_key"` -} - -func (e keyEntry) getManagedKeyUUID() (UUIDKey, error) { - if !e.isManagedPrivateKey() { - return "", errutil.InternalError{Err: "getManagedKeyId called on a key id %s (%s) "} - } - return extractManagedKeyId([]byte(e.PrivateKey)) -} - -func (e keyEntry) isManagedPrivateKey() bool { - return e.PrivateKeyType == certutil.ManagedPrivateKey -} - -type issuerUsage uint - -const ( - ReadOnlyUsage issuerUsage = iota - IssuanceUsage issuerUsage = 1 << iota - CRLSigningUsage issuerUsage = 1 << iota - OCSPSigningUsage issuerUsage = 1 << iota - - // When adding a new usage in the future, we'll need to create a usage - // mask field on the IssuerEntry and handle migrations to a newer mask, - // inferring a value for the new bits. 
- AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage ) -var namedIssuerUsages = map[string]issuerUsage{ - "read-only": ReadOnlyUsage, - "issuing-certificates": IssuanceUsage, - "crl-signing": CRLSigningUsage, - "ocsp-signing": OCSPSigningUsage, -} - -func (i *issuerUsage) ToggleUsage(usages ...issuerUsage) { - for _, usage := range usages { - *i ^= usage - } -} - -func (i issuerUsage) HasUsage(usage issuerUsage) bool { - return (i & usage) == usage -} - -func (i issuerUsage) Names() string { - var names []string - var builtUsage issuerUsage - - // Return the known set of usages in a sorted order to not have Terraform state files flipping - // saying values are different when it's the same list in a different order. - keys := make([]string, 0, len(namedIssuerUsages)) - for k := range namedIssuerUsages { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, name := range keys { - usage := namedIssuerUsages[name] - if i.HasUsage(usage) { - names = append(names, name) - builtUsage.ToggleUsage(usage) - } - } - - if i != builtUsage { - // Found some unknown usage, we should indicate this in the names. - names = append(names, fmt.Sprintf("unknown:%v", i^builtUsage)) - } - - return strings.Join(names, ",") -} - -func NewIssuerUsageFromNames(names []string) (issuerUsage, error) { - var result issuerUsage - for index, name := range names { - usage, ok := namedIssuerUsages[name] - if !ok { - return ReadOnlyUsage, fmt.Errorf("unknown name for usage at index %v: %v", index, name) - } - - result.ToggleUsage(usage) - } - - return result, nil -} - -type issuerEntry struct { - ID issuerID `json:"id"` - Name string `json:"name"` - KeyID keyID `json:"key_id"` - Certificate string `json:"certificate"` - CAChain []string `json:"ca_chain"` - ManualChain []issuerID `json:"manual_chain"` - SerialNumber string `json:"serial_number"` - LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"` - Usage issuerUsage `json:"usage"` - RevocationSigAlg x509.SignatureAlgorithm `json:"revocation_signature_algorithm"` - Revoked bool `json:"revoked"` - RevocationTime int64 `json:"revocation_time"` - RevocationTimeUTC time.Time `json:"revocation_time_utc"` - AIAURIs *aiaConfigEntry `json:"aia_uris,omitempty"` - LastModified time.Time `json:"last_modified"` - Version uint `json:"version"` -} - -type localCRLConfigEntry struct { - IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map"` - CRLNumberMap map[crlID]int64 `json:"crl_number_map"` - LastCompleteNumberMap map[crlID]int64 `json:"last_complete_number_map"` - CRLExpirationMap map[crlID]time.Time `json:"crl_expiration_map"` - LastModified time.Time `json:"last_modified"` - DeltaLastModified time.Time `json:"delta_last_modified"` -} - -type keyConfigEntry struct { - DefaultKeyId keyID `json:"default"` -} - -type issuerConfigEntry struct { - // This new fetchedDefault field allows us to detect if the default - // issuer was modified, in turn dispatching the timestamp updater - // if necessary. 
- fetchedDefault issuerID `json:"-"` - DefaultIssuerId issuerID `json:"default"` - DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` -} - -type clusterConfigEntry struct { - Path string `json:"path"` -} - -type aiaConfigEntry struct { - IssuingCertificates []string `json:"issuing_certificates"` - CRLDistributionPoints []string `json:"crl_distribution_points"` - OCSPServers []string `json:"ocsp_servers"` - EnableTemplating bool `json:"enable_templating"` -} - -func (c *aiaConfigEntry) toURLEntries(sc *storageContext, issuer issuerID) (*certutil.URLEntries, error) { - if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { - return &certutil.URLEntries{}, nil - } - - result := certutil.URLEntries{ - IssuingCertificates: c.IssuingCertificates[:], - CRLDistributionPoints: c.CRLDistributionPoints[:], - OCSPServers: c.OCSPServers[:], - } - - if c.EnableTemplating { - cfg, err := sc.getClusterConfig() - if err != nil { - return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) - } - - for name, source := range map[string]*[]string{ - "issuing_certificates": &result.IssuingCertificates, - "crl_distribution_points": &result.CRLDistributionPoints, - "ocsp_servers": &result.OCSPServers, - } { - templated := make([]string, len(*source)) - for index, uri := range *source { - if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { - return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information") - } - - if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { - // Elide issuer AIA info as we lack an issuer_id. - return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation") - } - - uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path) - uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String()) - templated[index] = uri - } - - if uri := validateURLs(templated); uri != "" { - return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri) - } - - *source = templated - } - } - - return &result, nil +func ToURLEntries(sc *storageContext, issuer issuing.IssuerID, c *issuing.AiaConfigEntry) (*certutil.URLEntries, error) { + return issuing.ToURLEntries(sc.Context, sc.Storage, issuer, c) } type storageContext struct { @@ -270,71 +68,32 @@ func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *st } } -func (sc *storageContext) listKeys() ([]keyID, error) { - strList, err := sc.Storage.List(sc.Context, keyPrefix) - if err != nil { - return nil, err - } - - keyIds := make([]keyID, 0, len(strList)) - for _, entry := range strList { - keyIds = append(keyIds, keyID(entry)) - } - - return keyIds, nil +func (sc *storageContext) WithFreshTimeout(timeout time.Duration) (*storageContext, context.CancelFunc) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + return &storageContext{ + Context: ctx, + Storage: sc.Storage, + Backend: sc.Backend, + }, cancel } -func (sc *storageContext) fetchKeyById(keyId keyID) (*keyEntry, error) { - if len(keyId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} - } - - entry, err := sc.Storage.Get(sc.Context, keyPrefix+keyId.String()) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} - } - if entry == nil { - return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} - } - - var key keyEntry - if err := 
entry.DecodeJSON(&key); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki key with id %s: %v", keyId.String(), err)} - } - - return &key, nil +func (sc *storageContext) listKeys() ([]issuing.KeyID, error) { + return issuing.ListKeys(sc.Context, sc.Storage) } -func (sc *storageContext) writeKey(key keyEntry) error { - keyId := key.ID - - json, err := logical.StorageEntryJSON(keyPrefix+keyId.String(), key) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) fetchKeyById(keyId issuing.KeyID) (*issuing.KeyEntry, error) { + return issuing.FetchKeyById(sc.Context, sc.Storage, keyId) } -func (sc *storageContext) deleteKey(id keyID) (bool, error) { - config, err := sc.getKeysConfig() - if err != nil { - return false, err - } - - wasDefault := false - if config.DefaultKeyId == id { - wasDefault = true - config.DefaultKeyId = keyID("") - if err := sc.setKeysConfig(config); err != nil { - return wasDefault, err - } - } +func (sc *storageContext) writeKey(key issuing.KeyEntry) error { + return issuing.WriteKey(sc.Context, sc.Storage, key) +} - return wasDefault, sc.Storage.Delete(sc.Context, keyPrefix+id.String()) +func (sc *storageContext) deleteKey(id issuing.KeyID) (bool, error) { + return issuing.DeleteKey(sc.Context, sc.Storage, id) } -func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*keyEntry, bool, error) { +func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*issuing.KeyEntry, bool, error) { // importKey imports the specified PEM-format key (from keyValue) into // the new PKI storage format. The first return field is a reference to // the new key; the second is whether or not the key already existed @@ -357,11 +116,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer // Get our public key from the current inbound key, to compare against all the other keys. var pkForImportingKey crypto.PublicKey if keyType == certutil.ManagedPrivateKey { - managedKeyUUID, err := extractManagedKeyId([]byte(keyValue)) - if err != nil { - return nil, false, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key: %v", err)} - } - pkForImportingKey, err = getManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID) + pkForImportingKey, err = managed_key.GetPublicKeyFromKeyBytes(sc.Context, sc.Backend, []byte(keyValue)) if err != nil { return nil, false, err } @@ -402,7 +157,7 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer } // Haven't found a key, so we've gotta create it and write it into storage. - var result keyEntry + var result issuing.KeyEntry result.ID = genKeyId() result.Name = keyName result.PrivateKey = keyValue @@ -489,263 +244,32 @@ func (sc *storageContext) importKey(keyValue string, keyName string, keyType cer return &result, false, nil } -func (i issuerEntry) GetCertificate() (*x509.Certificate, error) { - cert, err := parseCertificateFromBytes([]byte(i.Certificate)) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse certificate from issuer: %s: %v", err.Error(), i.ID)} - } - - return cert, nil +func GetAIAURLs(sc *storageContext, i *issuing.IssuerEntry) (*certutil.URLEntries, error) { + return issuing.GetAIAURLs(sc.Context, sc.Storage, i) } -func (i issuerEntry) EnsureUsage(usage issuerUsage) error { - // We want to spit out a nice error message about missing usages. 
- if i.Usage.HasUsage(usage) { - return nil - } - - issuerRef := fmt.Sprintf("id:%v", i.ID) - if len(i.Name) > 0 { - issuerRef = fmt.Sprintf("%v / name:%v", issuerRef, i.Name) - } - - // These usages differ at some point in time. We've gotta find the first - // usage that differs and return a logical-sounding error message around - // that difference. - for name, candidate := range namedIssuerUsages { - if usage.HasUsage(candidate) && !i.Usage.HasUsage(candidate) { - return fmt.Errorf("requested usage %v for issuer [%v] but only had usage %v", name, issuerRef, i.Usage.Names()) - } - } - - // Maybe we have an unnamed usage that's requested. - return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef) +func (sc *storageContext) listIssuers() ([]issuing.IssuerID, error) { + return issuing.ListIssuers(sc.Context, sc.Storage) } -func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error { - // Hack: Go isn't kind enough expose its lovely signatureAlgorithmDetails - // informational struct for our usage. However, we don't want to actually - // fetch the private key and attempt a signature with this algo (as we'll - // mint new, previously unsigned material in the process that could maybe - // be potentially abused if it leaks). - // - // So... - // - // ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we - // exclude DSA support as the PKI engine has never supported DSA keys. - if algo == x509.UnknownSignatureAlgorithm { - // Special cased to indicate upgrade and letting Go automatically - // chose the correct value. - return nil - } - - cert, err := i.GetCertificate() - if err != nil { - return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err) - } - - switch cert.PublicKeyAlgorithm { - case x509.RSA: - switch algo { - case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, - x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, - x509.SHA512WithRSAPSS: - return nil - } - case x509.ECDSA: - switch algo { - case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: - return nil - } - case x509.Ed25519: - switch algo { - case x509.PureEd25519: - return nil - } - } - - return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) +func (sc *storageContext) resolveKeyReference(reference string) (issuing.KeyID, error) { + return issuing.ResolveKeyReference(sc.Context, sc.Storage, reference) } -func (i issuerEntry) GetAIAURLs(sc *storageContext) (*certutil.URLEntries, error) { - // Default to the per-issuer AIA URLs. - entries := i.AIAURIs - - // If none are set (either due to a nil entry or because no URLs have - // been provided), fall back to the global AIA URL config. - if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { - var err error - - entries, err = getGlobalAIAURLs(sc.Context, sc.Storage) - if err != nil { - return nil, err - } - } - - if entries == nil { - return &certutil.URLEntries{}, nil - } - - return entries.toURLEntries(sc, i.ID) +// fetchIssuerById returns an IssuerEntry based on issuerId, if none found an error is returned. 
+func (sc *storageContext) fetchIssuerById(issuerId issuing.IssuerID) (*issuing.IssuerEntry, error) { + return issuing.FetchIssuerById(sc.Context, sc.Storage, issuerId) } -func (sc *storageContext) listIssuers() ([]issuerID, error) { - strList, err := sc.Storage.List(sc.Context, issuerPrefix) - if err != nil { - return nil, err - } - - issuerIds := make([]issuerID, 0, len(strList)) - for _, entry := range strList { - issuerIds = append(issuerIds, issuerID(entry)) - } - - return issuerIds, nil +func (sc *storageContext) writeIssuer(issuer *issuing.IssuerEntry) error { + return issuing.WriteIssuer(sc.Context, sc.Storage, issuer) } -func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { - if reference == defaultRef { - // Handle fetching the default key. - config, err := sc.getKeysConfig() - if err != nil { - return keyID("config-error"), err - } - if len(config.DefaultKeyId) == 0 { - return KeyRefNotFound, fmt.Errorf("no default key currently configured") - } - - return config.DefaultKeyId, nil - } - - // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. - if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, keyPrefix+reference) - if err != nil { - return keyID("key-read"), err - } - if entry != nil { - return keyID(reference), nil - } - } - - // ... than to pull all keys from storage. - keys, err := sc.listKeys() - if err != nil { - return keyID("list-error"), err - } - for _, keyId := range keys { - key, err := sc.fetchKeyById(keyId) - if err != nil { - return keyID("key-read"), err - } - - if key.Name == reference { - return key.ID, nil - } - } - - // Otherwise, we must not have found the key. - return KeyRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI key for reference: %v", reference)} -} - -// fetchIssuerById returns an issuerEntry based on issuerId, if none found an error is returned. -func (sc *storageContext) fetchIssuerById(issuerId issuerID) (*issuerEntry, error) { - if len(issuerId) == 0 { - return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"} - } - - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+issuerId.String()) - if err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)} - } - if entry == nil { - return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())} - } - - var issuer issuerEntry - if err := entry.DecodeJSON(&issuer); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)} - } - - return sc.upgradeIssuerIfRequired(&issuer), nil -} - -func (sc *storageContext) upgradeIssuerIfRequired(issuer *issuerEntry) *issuerEntry { - // *NOTE*: Don't attempt to write out the issuer here as it may cause ErrReadOnly that will direct the - // request all the way up to the primary cluster which would be horrible for local cluster operations such - // as generating a leaf cert or a revoke. - // Also even though we could tell if we are the primary cluster's active node, we can't tell if we have the - // a full rw issuer lock, so it might not be safe to write. - if issuer.Version == latestIssuerVersion { - return issuer - } - - if issuer.Version == 0 { - // Upgrade at this step requires interrogating the certificate itself; - // if this decode fails, it indicates internal problems and the - // request will subsequently fail elsewhere. 
However, decoding this - // certificate is mildly expensive, so we only do it in the event of - // a Version 0 certificate. - cert, err := issuer.GetCertificate() - if err != nil { - return issuer - } - - hadCRL := issuer.Usage.HasUsage(CRLSigningUsage) - // Remove CRL signing usage if it exists on the issuer but doesn't - // exist in the KU of the x509 certificate. - if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - - // Handle our new OCSPSigning usage flag for earlier versions. If we - // had it (prior to removing it in this upgrade), we'll add the OCSP - // flag since EKUs don't matter. - if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) { - issuer.Usage.ToggleUsage(OCSPSigningUsage) - } - } - - issuer.Version = latestIssuerVersion - return issuer -} - -func (sc *storageContext) writeIssuer(issuer *issuerEntry) error { - issuerId := issuer.ID - if issuer.LastModified.IsZero() { - issuer.LastModified = time.Now().UTC() - } - - json, err := logical.StorageEntryJSON(issuerPrefix+issuerId.String(), issuer) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) -} - -func (sc *storageContext) deleteIssuer(id issuerID) (bool, error) { - config, err := sc.getIssuersConfig() - if err != nil { - return false, err - } - - wasDefault := false - if config.DefaultIssuerId == id { - wasDefault = true - // Overwrite the fetched default issuer as we're going to remove this - // entry. - config.fetchedDefault = issuerID("") - config.DefaultIssuerId = issuerID("") - if err := sc.setIssuersConfig(config); err != nil { - return wasDefault, err - } - } - - return wasDefault, sc.Storage.Delete(sc.Context, issuerPrefix+id.String()) +func (sc *storageContext) deleteIssuer(id issuing.IssuerID) (bool, error) { + return issuing.DeleteIssuer(sc.Context, sc.Storage, id) } -func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuerEntry, bool, error) { +func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuing.IssuerEntry, bool, error) { // importIssuers imports the specified PEM-format certificate (from // certValue) into the new PKI storage format. The first return field is a // reference to the new issuer; the second is whether or not the issuer @@ -823,18 +347,18 @@ func (sc *storageContext) importIssuer(certValue string, issuerName string) (*is // Haven't found an issuer, so we've gotta create it and write it into // storage. - var result issuerEntry + var result issuing.IssuerEntry result.ID = genIssuerId() result.Name = issuerName result.Certificate = certValue result.LeafNotAfterBehavior = certutil.ErrNotAfterBehavior - result.Usage.ToggleUsage(AllIssuerUsages) - result.Version = latestIssuerVersion + result.Usage.ToggleUsage(issuing.AllIssuerUsages) + result.Version = issuing.LatestIssuerVersion // If we lack relevant bits for CRL, prohibit it from being set // on the usage side. 
- if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(CRLSigningUsage) { - result.Usage.ToggleUsage(CRLSigningUsage) + if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(issuing.CRLSigningUsage) { + result.Usage.ToggleUsage(issuing.CRLSigningUsage) } // We shouldn't add CSRs or multiple certificates in this @@ -904,172 +428,48 @@ func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool return bytes.Equal(cert1.Raw, cert2.Raw) } -func (sc *storageContext) setLocalCRLConfig(mapping *localCRLConfigEntry) error { - json, err := logical.StorageEntryJSON(storageLocalCRLConfig, mapping) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) setLocalCRLConfig(mapping *issuing.InternalCRLConfigEntry) error { + return issuing.SetLocalCRLConfig(sc.Context, sc.Storage, mapping) } -func (sc *storageContext) getLocalCRLConfig() (*localCRLConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageLocalCRLConfig) - if err != nil { - return nil, err - } - - mapping := &localCRLConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(mapping); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)} - } - } - - if len(mapping.IssuerIDCRLMap) == 0 { - mapping.IssuerIDCRLMap = make(map[issuerID]crlID) - } - - if len(mapping.CRLNumberMap) == 0 { - mapping.CRLNumberMap = make(map[crlID]int64) - } - - if len(mapping.LastCompleteNumberMap) == 0 { - mapping.LastCompleteNumberMap = make(map[crlID]int64) - - // Since this might not exist on migration, we want to guess as - // to the last full CRL number was. This was likely the last - // value from CRLNumberMap if it existed, since we're just adding - // the mapping here in this block. - // - // After the next full CRL build, we will have set this value - // correctly, so it doesn't really matter in the long term if - // we're off here. - for id, number := range mapping.CRLNumberMap { - // Decrement by one, since CRLNumberMap is the future number, - // not the last built number. 
- mapping.LastCompleteNumberMap[id] = number - 1 - } - } - - if len(mapping.CRLExpirationMap) == 0 { - mapping.CRLExpirationMap = make(map[crlID]time.Time) - } - - return mapping, nil +func (sc *storageContext) setUnifiedCRLConfig(mapping *issuing.InternalCRLConfigEntry) error { + return issuing.SetUnifiedCRLConfig(sc.Context, sc.Storage, mapping) } -func (sc *storageContext) setKeysConfig(config *keyConfigEntry) error { - json, err := logical.StorageEntryJSON(storageKeyConfig, config) - if err != nil { - return err - } - - return sc.Storage.Put(sc.Context, json) +func (sc *storageContext) getLocalCRLConfig() (*issuing.InternalCRLConfigEntry, error) { + return issuing.GetLocalCRLConfig(sc.Context, sc.Storage) } -func (sc *storageContext) getKeysConfig() (*keyConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageKeyConfig) - if err != nil { - return nil, err - } - - keyConfig := &keyConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(keyConfig); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode key configuration: %v", err)} - } - } - - return keyConfig, nil +func (sc *storageContext) getUnifiedCRLConfig() (*issuing.InternalCRLConfigEntry, error) { + return issuing.GetUnifiedCRLConfig(sc.Context, sc.Storage) } -func (sc *storageContext) setIssuersConfig(config *issuerConfigEntry) error { - json, err := logical.StorageEntryJSON(storageIssuerConfig, config) - if err != nil { - return err - } - - if err := sc.Storage.Put(sc.Context, json); err != nil { - return err - } - - if err := sc.changeDefaultIssuerTimestamps(config.fetchedDefault, config.DefaultIssuerId); err != nil { - return err - } - - return nil +func (sc *storageContext) setKeysConfig(config *issuing.KeyConfigEntry) error { + return issuing.SetKeysConfig(sc.Context, sc.Storage, config) } -func (sc *storageContext) getIssuersConfig() (*issuerConfigEntry, error) { - entry, err := sc.Storage.Get(sc.Context, storageIssuerConfig) - if err != nil { - return nil, err - } +func (sc *storageContext) getKeysConfig() (*issuing.KeyConfigEntry, error) { + return issuing.GetKeysConfig(sc.Context, sc.Storage) +} - issuerConfig := &issuerConfigEntry{} - if entry != nil { - if err := entry.DecodeJSON(issuerConfig); err != nil { - return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode issuer configuration: %v", err)} - } - } - issuerConfig.fetchedDefault = issuerConfig.DefaultIssuerId +func (sc *storageContext) setIssuersConfig(config *issuing.IssuerConfigEntry) error { + return issuing.SetIssuersConfig(sc.Context, sc.Storage, config) +} - return issuerConfig, nil +func (sc *storageContext) getIssuersConfig() (*issuing.IssuerConfigEntry, error) { + return issuing.GetIssuersConfig(sc.Context, sc.Storage) } // Lookup within storage the value of reference, assuming the string is a reference to an issuer entry, -// returning the converted issuerID or an error if not found. This method will not properly resolve the +// returning the converted IssuerID or an error if not found. This method will not properly resolve the // special legacyBundleShimID value as we do not want to confuse our special value and a user-provided name of the // same value. -func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, error) { - if reference == defaultRef { - // Handle fetching the default issuer. 
- config, err := sc.getIssuersConfig() - if err != nil { - return issuerID("config-error"), err - } - if len(config.DefaultIssuerId) == 0 { - return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured") - } - - return config.DefaultIssuerId, nil - } - - // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. - if len(reference) == uuidLength { - entry, err := sc.Storage.Get(sc.Context, issuerPrefix+reference) - if err != nil { - return issuerID("issuer-read"), err - } - if entry != nil { - return issuerID(reference), nil - } - } - - // ... than to pull all issuers from storage. - issuers, err := sc.listIssuers() - if err != nil { - return issuerID("list-error"), err - } - - for _, issuerId := range issuers { - issuer, err := sc.fetchIssuerById(issuerId) - if err != nil { - return issuerID("issuer-read"), err - } - - if issuer.Name == reference { - return issuer.ID, nil - } - } - - // Otherwise, we must not have found the issuer. - return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)} +func (sc *storageContext) resolveIssuerReference(reference string) (issuing.IssuerID, error) { + return issuing.ResolveIssuerReference(sc.Context, sc.Storage, reference) } -func (sc *storageContext) resolveIssuerCRLPath(reference string) (string, error) { - if sc.Backend.useLegacyBundleCaStorage() { +func (sc *storageContext) resolveIssuerCRLPath(reference string, unified bool) (string, error) { + if sc.Backend.UseLegacyBundleCaStorage() { return legacyCRLPath, nil } @@ -1078,13 +478,26 @@ func (sc *storageContext) resolveIssuerCRLPath(reference string) (string, error) return legacyCRLPath, err } - crlConfig, err := sc.getLocalCRLConfig() - if err != nil { - return legacyCRLPath, err + var crlConfig *issuing.InternalCRLConfigEntry + if unified { + crlConfig, err = issuing.GetUnifiedCRLConfig(sc.Context, sc.Storage) + if err != nil { + return legacyCRLPath, err + } + } else { + crlConfig, err = issuing.GetLocalCRLConfig(sc.Context, sc.Storage) + if err != nil { + return legacyCRLPath, err + } } if crlId, ok := crlConfig.IssuerIDCRLMap[issuer]; ok && len(crlId) > 0 { - return fmt.Sprintf("crls/%v", crlId), nil + path := fmt.Sprintf("crls/%v", crlId) + if unified { + path = unifiedCRLPathPrefix + path + } + + return path, nil } return legacyCRLPath, fmt.Errorf("unable to find CRL for issuer: id:%v/ref:%v", issuer, reference) @@ -1093,46 +506,11 @@ func (sc *storageContext) resolveIssuerCRLPath(reference string) (string, error) // Builds a certutil.CertBundle from the specified issuer identifier, // optionally loading the key or not. This method supports loading legacy // bundles using the legacyBundleShimID issuerId, and if no entry is found will return an error. 
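The CRL path selection above reduces to a small rule: a local CRL lives under crls/&lt;crlID&gt;, and a unified CRL gets an extra prefix. A sketch of that composition, where the concrete value of unifiedCRLPathPrefix is an assumption for illustration only (the real constant is defined elsewhere in the package):

```go
const unifiedCRLPathPrefix = "unified-crl/" // assumed value, for illustration only

// crlStoragePath derives the storage path for a resolved CRL ID.
func crlStoragePath(crlID string, unified bool) string {
	path := "crls/" + crlID
	if unified {
		path = unifiedCRLPathPrefix + path
	}
	return path
}
```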
-func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) (*issuerEntry, *certutil.CertBundle, error) { - if id == legacyBundleShimID { - // We have not completed the migration, or started a request in legacy mode, so - // attempt to load the bundle from the legacy location - issuer, bundle, err := getLegacyCertBundle(sc.Context, sc.Storage) - if err != nil { - return nil, nil, err - } - if issuer == nil || bundle == nil { - return nil, nil, errutil.UserError{Err: "no legacy cert bundle exists"} - } - - return issuer, bundle, err - } - - issuer, err := sc.fetchIssuerById(id) - if err != nil { - return nil, nil, err - } - - var bundle certutil.CertBundle - bundle.Certificate = issuer.Certificate - bundle.CAChain = issuer.CAChain - bundle.SerialNumber = issuer.SerialNumber - - // Fetch the key if it exists. Sometimes we don't need the key immediately. - if loadKey && issuer.KeyID != keyID("") { - key, err := sc.fetchKeyById(issuer.KeyID) - if err != nil { - return nil, nil, err - } - - bundle.PrivateKeyType = key.PrivateKeyType - bundle.PrivateKey = key.PrivateKey - } - - return issuer, &bundle, nil +func (sc *storageContext) fetchCertBundleByIssuerId(id issuing.IssuerID, loadKey bool) (*issuing.IssuerEntry, *certutil.CertBundle, error) { + return issuing.FetchCertBundleByIssuerId(sc.Context, sc.Storage, id, loadKey) } -func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuerEntry, *keyEntry, error) { +func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuing.IssuerEntry, *issuing.KeyEntry, error) { myKey, _, err := sc.importKey(caBundle.PrivateKey, keyName, caBundle.PrivateKeyType) if err != nil { return nil, nil, err @@ -1141,7 +519,7 @@ func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerNam // We may have existing mounts that only contained a key with no certificate yet as a signed CSR // was never setup within the mount. if caBundle.Certificate == "" { - return &issuerEntry{}, myKey, nil + return &issuing.IssuerEntry{}, myKey, nil } myIssuer, _, err := sc.importIssuer(caBundle.Certificate, issuerName) @@ -1158,16 +536,16 @@ func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerNam return myIssuer, myKey, nil } -func genIssuerId() issuerID { - return issuerID(genUuid()) +func genIssuerId() issuing.IssuerID { + return issuing.IssuerID(genUuid()) } -func genKeyId() keyID { - return keyID(genUuid()) +func genKeyId() issuing.KeyID { + return issuing.KeyID(genUuid()) } -func genCRLId() crlID { - return crlID(genUuid()) +func genCRLId() issuing.CrlID { + return issuing.CrlID(genUuid()) } func genUuid() string { @@ -1215,7 +593,7 @@ func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout boo return false, 0, err } if entry != nil { // If nil, someone deleted an entry since we haven't taken a lock here so just continue - var role roleEntry + var role issuing.RoleEntry err = entry.DecodeJSON(&role) if err != nil { return false, inUseBy, err @@ -1273,9 +651,33 @@ func (sc *storageContext) getRevocationConfig() (*crlConfig, error) { result.Expiry = defaultCrlConfig.Expiry } + isLocalMount := sc.Backend.System().LocalMount() + if (!constants.IsEnterprise || isLocalMount) && (result.UnifiedCRLOnExistingPaths || result.UnifiedCRL || result.UseGlobalQueue) { + // An end user must have had Enterprise, enabled the unified config args and then downgraded to OSS. 
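+		// For reference, these config fields correspond to the user-facing
+		// flags named in the warning below: UnifiedCRL is unified_crl,
+		// UnifiedCRLOnExistingPaths is unified_crl_on_existing_paths, and
+		// UseGlobalQueue is cross_cluster_revocation.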
+ sc.Backend.Logger().Warn("Not running Vault Enterprise or using a local mount, " + + "disabling unified_crl, unified_crl_on_existing_paths and cross_cluster_revocation config flags.") + result.UnifiedCRLOnExistingPaths = false + result.UnifiedCRL = false + result.UseGlobalQueue = false + } + return &result, nil } +func (sc *storageContext) setRevocationConfig(config *crlConfig) error { + entry, err := logical.StorageEntryJSON("config/crl", config) + if err != nil { + return fmt.Errorf("failed building storage entry JSON: %w", err) + } + + err = sc.Storage.Put(sc.Context, entry) + if err != nil { + return fmt.Errorf("failed writing storage entry: %w", err) + } + + return nil +} + func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) { entry, err := sc.Storage.Get(sc.Context, autoTidyConfigPath) if err != nil { @@ -1305,7 +707,15 @@ func (sc *storageContext) writeAutoTidyConfig(config *tidyConfig) error { return err } - return sc.Storage.Put(sc.Context, entry) + err = sc.Storage.Put(sc.Context, entry) + if err != nil { + return err + } + + certCounter := sc.Backend.GetCertificateCounter() + certCounter.ReconfigureWithTidyConfig(config) + + return nil } func (sc *storageContext) listRevokedCerts() ([]string, error) { @@ -1317,13 +727,13 @@ func (sc *storageContext) listRevokedCerts() ([]string, error) { return list, err } -func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) { +func (sc *storageContext) getClusterConfig() (*issuing.ClusterConfigEntry, error) { entry, err := sc.Storage.Get(sc.Context, clusterConfigPath) if err != nil { return nil, err } - var result clusterConfigEntry + var result issuing.ClusterConfigEntry if entry == nil { return &result, nil } @@ -1335,7 +745,7 @@ func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) { return &result, nil } -func (sc *storageContext) writeClusterConfig(config *clusterConfigEntry) error { +func (sc *storageContext) writeClusterConfig(config *issuing.ClusterConfigEntry) error { entry, err := logical.StorageEntryJSON(clusterConfigPath, config) if err != nil { return err @@ -1343,3 +753,19 @@ func (sc *storageContext) writeClusterConfig(config *clusterConfigEntry) error { return sc.Storage.Put(sc.Context, entry) } + +func (sc *storageContext) fetchRevocationInfo(serial string) (*revocationInfo, error) { + var revInfo *revocationInfo + revEntry, err := fetchCertBySerial(sc, revokedPath, serial) + if err != nil { + return nil, err + } + if revEntry != nil { + err = revEntry.DecodeJSON(&revInfo) + if err != nil { + return nil, fmt.Errorf("error decoding existing revocation info: %w", err) + } + } + + return revInfo, nil +} diff --git a/builtin/logical/pki/storage_migrations.go b/builtin/logical/pki/storage_migrations.go index 9104e5c6f3af..de9b61a91587 100644 --- a/builtin/logical/pki/storage_migrations.go +++ b/builtin/logical/pki/storage_migrations.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -7,6 +10,7 @@ import ( "fmt" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -16,16 +20,16 @@ import ( // and we need to perform it again... 
 const (
 	latestMigrationVersion = 2
-	legacyBundleShimID     = issuerID("legacy-entry-shim-id")
-	legacyBundleShimKeyID  = keyID("legacy-entry-shim-key-id")
+	legacyBundleShimID    = issuing.LegacyBundleShimID
+	legacyBundleShimKeyID = issuing.LegacyBundleShimKeyID
 )
 
 type legacyBundleMigrationLog struct {
-	Hash             string    `json:"hash"`
-	Created          time.Time `json:"created"`
-	CreatedIssuer    issuerID  `json:"issuer_id"`
-	CreatedKey       keyID     `json:"key_id"`
-	MigrationVersion int       `json:"migrationVersion"`
+	Hash             string           `json:"hash"`
+	Created          time.Time        `json:"created"`
+	CreatedIssuer    issuing.IssuerID `json:"issuer_id"`
+	CreatedKey       issuing.KeyID    `json:"key_id"`
+	MigrationVersion int              `json:"migrationVersion"`
 }
 
 type migrationInfo struct {
@@ -81,27 +85,44 @@ func migrateStorage(ctx context.Context, b *backend, s logical.Storage) error {
 		return nil
 	}
 
-	var issuerIdentifier issuerID
-	var keyIdentifier keyID
+	var issuerIdentifier issuing.IssuerID
+	var keyIdentifier issuing.KeyID
 	sc := b.makeStorageContext(ctx, s)
 	if migrationInfo.legacyBundle != nil {
-		// Generate a unique name for the migrated items in case things were to be re-migrated again
-		// for some weird reason in the future...
-		migrationName := fmt.Sprintf("current-%d", time.Now().Unix())
-
-		b.Logger().Info("performing PKI migration to new keys/issuers layout")
-		anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName)
-		if err != nil {
-			return err
+		// When the legacy bundle still exists, there are three scenarios we
+		// need to worry about:
+		//
+		// 1. When we have no migration log, we definitely want to migrate.
+		haveNoLog := migrationInfo.migrationLog == nil
+		// 2. When we have an (empty) log and the version is zero, we want to
+		//    migrate.
+		haveOldVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion == 0
+		// 3. When we have a log and the version is at least 1 (where this
+		//    migration was introduced), we want to run the migration again
+		//    only if the legacy bundle hash has changed.
+		isCurrentOrBetterVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion >= 1
+		haveChange := !haveNoLog && migrationInfo.migrationLog.Hash != migrationInfo.legacyBundleHash
+		haveVersionWithChange := isCurrentOrBetterVersion && haveChange
+
+		if haveNoLog || haveOldVersion || haveVersionWithChange {
+			// Generate a unique name for the migrated items in case things were to be re-migrated again
+			// for some weird reason in the future...
+			migrationName := fmt.Sprintf("current-%d", time.Now().Unix())
+
+			b.Logger().Info("performing PKI migration to new keys/issuers layout")
+			anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName)
+			if err != nil {
+				return err
+			}
+			b.Logger().Info("Migration generated the following ids and set them as defaults",
+				"issuer id", anIssuer.ID, "key id", aKey.ID)
+			issuerIdentifier = anIssuer.ID
+			keyIdentifier = aKey.ID
+
+			// Since we do not have all the mount information available we must schedule
+			// the CRL to be rebuilt at a later time.
+			b.CrlBuilder().requestRebuildIfActiveNode(b)
 		}
-		b.Logger().Info("Migration generated the following ids and set them as defaults",
-			"issuer id", anIssuer.ID, "key id", aKey.ID)
-		issuerIdentifier = anIssuer.ID
-		keyIdentifier = aKey.ID
-
-		// Since we do not have all the mount information available we must schedule
-		// the CRL to be rebuilt at a later time.
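The three-scenario gate above can be restated as a single predicate. A sketch using the log type from this file; it is a restatement for clarity, not the code the backend runs:

```go
// shouldMigrate condenses the inline decision: a migration runs when
// there is no log at all, when the recorded version is zero, or when a
// version >= 1 log no longer matches the current legacy bundle hash.
func shouldMigrate(log *legacyBundleMigrationLog, legacyBundleHash string) bool {
	if log == nil {
		return true // scenario 1: never migrated
	}
	if log.MigrationVersion == 0 {
		return true // scenario 2: pre-versioned (empty) log
	}
	// scenario 3: hash drift on a versioned log
	return log.MigrationVersion >= 1 && log.Hash != legacyBundleHash
}
```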
- b.crlBuilder.requestRebuildIfActiveNode(b) } if migrationInfo.migrationLog != nil && migrationInfo.migrationLog.MigrationVersion == 1 { @@ -182,33 +203,6 @@ func setLegacyBundleMigrationLog(ctx context.Context, s logical.Storage, lbm *le return s.Put(ctx, json) } -func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuerEntry, *certutil.CertBundle, error) { - entry, err := s.Get(ctx, legacyCertBundlePath) - if err != nil { - return nil, nil, err - } - - if entry == nil { - return nil, nil, nil - } - - cb := &certutil.CertBundle{} - err = entry.DecodeJSON(cb) - if err != nil { - return nil, nil, err - } - - // Fake a storage entry with backwards compatibility in mind. - issuer := &issuerEntry{ - ID: legacyBundleShimID, - KeyID: legacyBundleShimKeyID, - Name: "legacy-entry-shim", - Certificate: cb.Certificate, - CAChain: cb.CAChain, - SerialNumber: cb.SerialNumber, - LeafNotAfterBehavior: certutil.ErrNotAfterBehavior, - } - issuer.Usage.ToggleUsage(AllIssuerUsages) - - return issuer, cb, nil +func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuing.IssuerEntry, *certutil.CertBundle, error) { + return issuing.GetLegacyCertBundle(ctx, s) } diff --git a/builtin/logical/pki/storage_migrations_test.go b/builtin/logical/pki/storage_migrations_test.go index 5535d1af6cd9..d5f297874f80 100644 --- a/builtin/logical/pki/storage_migrations_test.go +++ b/builtin/logical/pki/storage_migrations_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -6,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" @@ -20,7 +24,7 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") request := &logical.InitializationRequest{Storage: s} err := b.initialize(ctx, request) @@ -45,7 +49,7 @@ func Test_migrateStorageEmptyStorage(t *testing.T) { require.Empty(t, logEntry.CreatedIssuer) require.Empty(t, logEntry.CreatedKey) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") // Make sure we can re-run the migration without issues request = &logical.InitializationRequest{Storage: s} @@ -69,7 +73,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) // Clear everything except for the key @@ -103,7 +107,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { "Hash value (%s) should not have been empty", logEntry.Hash) require.True(t, startTime.Before(logEntry.Created), "created log entry time (%v) was before our start time(%v)?", logEntry.Created, startTime) - require.Equal(t, logEntry.CreatedIssuer, issuerID("")) + require.Equal(t, logEntry.CreatedIssuer, issuing.IssuerID("")) require.Equal(t, logEntry.CreatedKey, keyIds[0]) keyId := keyIds[0] @@ -123,11 +127,11 @@ func Test_migrateStorageOnlyKey(t *testing.T) { // Make sure we setup the default values keysConfig, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + require.Equal(t, &issuing.KeyConfigEntry{DefaultKeyId: keyId}, keysConfig) issuersConfig, err := sc.getIssuersConfig() require.NoError(t, err) - require.Equal(t, issuerID(""), issuersConfig.DefaultIssuerId) + require.Equal(t, issuing.IssuerID(""), issuersConfig.DefaultIssuerId) // Make sure if we attempt to re-run the migration nothing happens... err = migrateStorage(ctx, b, s) @@ -139,7 +143,7 @@ func Test_migrateStorageOnlyKey(t *testing.T) { require.Equal(t, logEntry.Created, logEntry2.Created) require.Equal(t, logEntry.Hash, logEntry2.Hash) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") } func Test_migrateStorageSimpleBundle(t *testing.T) { @@ -151,7 +155,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { // Reset the version the helper above set to 1. 
b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) @@ -201,7 +205,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.Equal(t, keyId, issuer.KeyID) require.Empty(t, issuer.ManualChain) require.Equal(t, []string{bundle.Certificate + "\n"}, issuer.CAChain) - require.Equal(t, AllIssuerUsages, issuer.Usage) + require.Equal(t, issuing.AllIssuerUsages, issuer.Usage) require.Equal(t, certutil.ErrNotAfterBehavior, issuer.LeafNotAfterBehavior) require.Equal(t, keyId, key.ID) @@ -216,7 +220,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { // Make sure we setup the default values keysConfig, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + require.Equal(t, &issuing.KeyConfigEntry{DefaultKeyId: keyId}, keysConfig) issuersConfig, err := sc.getIssuersConfig() require.NoError(t, err) @@ -232,7 +236,7 @@ func Test_migrateStorageSimpleBundle(t *testing.T) { require.Equal(t, logEntry.Created, logEntry2.Created) require.Equal(t, logEntry.Hash, logEntry2.Hash) - require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + require.False(t, b.UseLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") // Make sure we can re-process a migration from scratch for whatever reason err = s.Delete(ctx, legacyMigrationBundleLogKey) @@ -293,8 +297,8 @@ func TestMigration_OnceChainRebuild(t *testing.T) { // // Afterwards, we mutate these issuers to only point at themselves and // write back out. - var rootIssuerId issuerID - var intIssuerId issuerID + var rootIssuerId issuing.IssuerID + var intIssuerId issuing.IssuerID for _, issuerId := range issuerIds { issuer, err := sc.fetchIssuerById(issuerId) require.NoError(t, err) @@ -365,7 +369,7 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { b, s := CreateBackendWithStorage(t) // Reset the version the helper above set to 1. b.pkiStorageVersion.Store(0) - require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") bundle := genCertBundle(t, b, s) json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) @@ -590,6 +594,282 @@ func TestExpectedOpsWork_PreMigration(t *testing.T) { requireFailInMigration(t, b, s, logical.ReadOperation, "config/keys") } +func TestBackupBundle(t *testing.T) { + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + // Create an empty request and tidy configuration for us. + req := &logical.Request{ + Storage: s, + MountPoint: "pki/", + } + cfg := &tidyConfig{ + BackupBundle: true, + IssuerSafetyBuffer: 120 * time.Second, + } + + // Migration should do nothing if we're on an empty mount. 
+ err := b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, issuerIds) + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, keyIds) + + // Create a legacy CA bundle and write it out. + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // Doing another tidy should maintain the status quo since we've + // still not done our migration. + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.Empty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.Empty(t, keyIds) + + // Do a migration; this should provision an issuer and key. + initReq := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Doing another tidy should maintain the status quo since we've + // done our migration too recently relative to the safety buffer. + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Shortening our buffer should ensure the migration occurs, removing + // the legacy bundle but creating the backup one. + time.Sleep(2 * time.Second) + cfg.IssuerSafetyBuffer = 1 * time.Second + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // A new initialization should do nothing. + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 1) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 1) + + // Restoring the legacy bundles with new issuers should redo the + // migration. 
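The buffer behaviour exercised above comes down to an age check: with BackupBundle set, tidy relocates the legacy CA bundle only once the migration is older than the issuer safety buffer. A sketch of that predicate, where migratedAt is a hypothetical stand-in for the timestamp recorded in the migration log:

```go
package sketch

import "time"

// shouldMoveLegacyBundle mirrors the gate the test drives by first using
// a 120s buffer (no-op right after migration), then sleeping past a 1s
// buffer so the move happens.
func shouldMoveLegacyBundle(migratedAt time.Time, issuerSafetyBuffer time.Duration) bool {
	return time.Since(migratedAt) > issuerSafetyBuffer
}
```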
+ newBundle := genCertBundle(t, b, s) + json, err = logical.StorageEntryJSON(legacyCertBundlePath, newBundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + newLegacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // -> reinit + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 2) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 2) + + // -> when we tidy again, we'll overwrite the old backup with the new + // one. + time.Sleep(2 * time.Second) + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Finally, restoring the legacy bundle and re-migrating should redo + // the migration. + err = s.Put(ctx, json) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + + // -> overwrite the version and re-migrate + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + logEntry.MigrationVersion = 0 + err = setLegacyBundleMigrationLog(ctx, s, logEntry) + require.NoError(t, err) + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 2) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 2) + + // -> Re-tidy should remove the legacy one. + time.Sleep(2 * time.Second) + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) +} + +func TestDeletedIssuersPostMigration(t *testing.T) { + // We want to simulate the following scenario: + // + // 1.10.x: -> Create a CA. + // 1.11.0: -> Migrate to new issuer layout but version 1. + // -> Delete existing issuers, create new ones. + // (now): -> Migrate to version 2 layout, make sure we don't see + // re-migration. + + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.UseLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + // Create a legacy CA bundle and write it out. 
+ bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // Do a migration; this should provision an issuer and key. + initReq := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Hack: reset the version to 1, to simulate a pre-version-2 migration + // log. + info, err := getMigrationInfo(sc.Context, sc.Storage) + require.NoError(t, err, "failed to read migration info") + info.migrationLog.MigrationVersion = 1 + err = setLegacyBundleMigrationLog(sc.Context, sc.Storage, info.migrationLog) + require.NoError(t, err, "failed to write migration info") + + // Now delete all issuers and keys and create some new ones. + for _, issuerId := range issuerIds { + deleted, err := sc.deleteIssuer(issuerId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing issuer") + } + for _, keyId := range keyIds { + deleted, err := sc.deleteKey(keyId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing key") + } + emptyIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, emptyIssuers) + emptyKeys, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, emptyKeys) + + // Create a new issuer + key. + bundle = genCertBundle(t, b, s) + _, _, err = sc.writeCaBundle(bundle, "", "") + require.NoError(t, err) + + // List which issuers + keys we currently have. + postDeletionIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postDeletionIssuers) + postDeletionKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postDeletionKeys) + + // Now do another migration from 1->2. This should retain the newly + // created issuers+keys, but not revive any deleted ones. 
+ err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + postMigrationIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postMigrationIssuers) + require.Equal(t, postMigrationIssuers, postDeletionIssuers, "regression failed: expected second migration from v1->v2 to not introduce new issuers") + postMigrationKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postMigrationKeys) + require.Equal(t, postMigrationKeys, postDeletionKeys, "regression failed: expected second migration from v1->v2 to not introduce new keys") +} + // requireFailInMigration validate that we fail the operation with the appropriate error message to the end-user func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operation logical.Operation, path string) { resp, err := b.HandleRequest(context.Background(), &logical.Request{ @@ -605,6 +885,31 @@ func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operati "error message did not contain migration test for op:%s path:%s resp: %#v", operation, path, resp) } +func requireFileNotExists(t *testing.T, sc *storageContext, path string) { + t.Helper() + + entry, err := sc.Storage.Get(sc.Context, path) + require.NoError(t, err) + if entry != nil { + require.Empty(t, entry.Value) + } else { + require.Empty(t, entry) + } +} + +func requireFileExists(t *testing.T, sc *storageContext, path string, contents []byte) []byte { + t.Helper() + + entry, err := sc.Storage.Get(sc.Context, path) + require.NoError(t, err) + require.NotNil(t, entry) + require.NotEmpty(t, entry.Value) + if contents != nil { + require.Equal(t, entry.Value, contents) + } + return entry.Value +} + // Keys to simulate an intermediate CA mount with also-imported root (parent). const ( migIntPrivKey = `-----BEGIN RSA PRIVATE KEY----- diff --git a/builtin/logical/pki/storage_test.go b/builtin/logical/pki/storage_test.go index 17760653b7b3..3ace55e51c12 100644 --- a/builtin/logical/pki/storage_test.go +++ b/builtin/logical/pki/storage_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -5,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" @@ -19,27 +23,27 @@ func Test_ConfigsRoundTrip(t *testing.T) { sc := b.makeStorageContext(ctx, s) // Create an empty key, issuer for testing. 
- key := keyEntry{ID: genKeyId()} + key := issuing.KeyEntry{ID: genKeyId()} err := sc.writeKey(key) require.NoError(t, err) - issuer := &issuerEntry{ID: genIssuerId()} + issuer := &issuing.IssuerEntry{ID: genIssuerId()} err = sc.writeIssuer(issuer) require.NoError(t, err) // Verify we handle nothing stored properly keyConfigEmpty, err := sc.getKeysConfig() require.NoError(t, err) - require.Equal(t, &keyConfigEntry{}, keyConfigEmpty) + require.Equal(t, &issuing.KeyConfigEntry{}, keyConfigEmpty) issuerConfigEmpty, err := sc.getIssuersConfig() require.NoError(t, err) - require.Equal(t, &issuerConfigEntry{}, issuerConfigEmpty) + require.Equal(t, &issuing.IssuerConfigEntry{}, issuerConfigEmpty) // Now attempt to store and reload properly - origKeyConfig := &keyConfigEntry{ + origKeyConfig := &issuing.KeyConfigEntry{ DefaultKeyId: key.ID, } - origIssuerConfig := &issuerConfigEntry{ + origIssuerConfig := &issuing.IssuerConfigEntry{ DefaultIssuerId: issuer.ID, } @@ -95,12 +99,12 @@ func Test_IssuerRoundTrip(t *testing.T) { keys, err := sc.listKeys() require.NoError(t, err) - require.ElementsMatch(t, []keyID{key1.ID, key2.ID}, keys) + require.ElementsMatch(t, []issuing.KeyID{key1.ID, key2.ID}, keys) issuers, err := sc.listIssuers() require.NoError(t, err) - require.ElementsMatch(t, []issuerID{issuer1.ID, issuer2.ID}, issuers) + require.ElementsMatch(t, []issuing.IssuerID{issuer1.ID, issuer2.ID}, issuers) } func Test_KeysIssuerImport(t *testing.T) { @@ -180,7 +184,7 @@ func Test_IssuerUpgrade(t *testing.T) { // Make sure that we add OCSP signing to v0 issuers if CRLSigning is enabled issuer, _ := genIssuerAndKey(t, b, s) issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) + issuer.Usage.ToggleUsage(issuing.OCSPSigningUsage) err := sc.writeIssuer(&issuer) require.NoError(t, err, "failed writing out issuer") @@ -189,13 +193,13 @@ func Test_IssuerUpgrade(t *testing.T) { require.NoError(t, err, "failed fetching issuer") require.Equal(t, uint(1), newIssuer.Version) - require.True(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) + require.True(t, newIssuer.Usage.HasUsage(issuing.OCSPSigningUsage)) // If CRLSigning is not present on a v0, we should not have OCSP signing after upgrade. 
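That upgrade rule reads naturally once usages are seen as a bit set: genIssuerAndKey starts issuers with AllIssuerUsages, so ToggleUsage here flips the named flags off. A simplified stand-in for the usage type, assuming the bit-set behaviour its API suggests:

```go
// usage is a sketch of the issuing package's usage flags; the real type
// and constants live in that package.
type usage uint

const (
	crlSigningUsage usage = 1 << iota
	ocspSigningUsage
)

func (u *usage) toggle(flag usage) { *u ^= flag } // flips: set -> clear, clear -> set

func (u usage) has(flag usage) bool { return u&flag == flag }
```

Under that reading, the v0 to v1 upgrade grants OCSP signing only when CRL signing is already present, which is exactly what these two test cases pin down.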
issuer, _ = genIssuerAndKey(t, b, s) issuer.Version = 0 - issuer.Usage.ToggleUsage(OCSPSigningUsage) - issuer.Usage.ToggleUsage(CRLSigningUsage) + issuer.Usage.ToggleUsage(issuing.OCSPSigningUsage) + issuer.Usage.ToggleUsage(issuing.CRLSigningUsage) err = sc.writeIssuer(&issuer) require.NoError(t, err, "failed writing out issuer") @@ -204,15 +208,15 @@ func Test_IssuerUpgrade(t *testing.T) { require.NoError(t, err, "failed fetching issuer") require.Equal(t, uint(1), newIssuer.Version) - require.False(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) + require.False(t, newIssuer.Usage.HasUsage(issuing.OCSPSigningUsage)) } -func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, keyEntry) { +func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuing.IssuerEntry, issuing.KeyEntry) { certBundle := genCertBundle(t, b, s) keyId := genKeyId() - pkiKey := keyEntry{ + pkiKey := issuing.KeyEntry{ ID: keyId, PrivateKeyType: certBundle.PrivateKeyType, PrivateKey: strings.TrimSpace(certBundle.PrivateKey) + "\n", @@ -220,14 +224,14 @@ func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, issuerId := genIssuerId() - pkiIssuer := issuerEntry{ + pkiIssuer := issuing.IssuerEntry{ ID: issuerId, KeyID: keyId, Certificate: strings.TrimSpace(certBundle.Certificate) + "\n", CAChain: certBundle.CAChain, SerialNumber: certBundle.SerialNumber, - Usage: AllIssuerUsages, - Version: latestIssuerVersion, + Usage: issuing.AllIssuerUsages, + Version: issuing.LatestIssuerVersion, } return pkiIssuer, pkiKey diff --git a/builtin/logical/pki/storage_unified.go b/builtin/logical/pki/storage_unified.go new file mode 100644 index 000000000000..c279c26191ed --- /dev/null +++ b/builtin/logical/pki/storage_unified.go @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pki + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + unifiedRevocationReadPathPrefix = "unified-revocation/" + unifiedRevocationWritePathPrefix = unifiedRevocationReadPathPrefix + "{{clusterId}}/" +) + +type unifiedRevocationEntry struct { + SerialNumber string `json:"-"` + CertExpiration time.Time `json:"certificate_expiration_utc"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + CertificateIssuer issuing.IssuerID `json:"issuer_id"` +} + +func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRevocationEntry, error) { + clusterPaths, err := lookupUnifiedClusterPaths(sc) + if err != nil { + return nil, err + } + + for _, path := range clusterPaths { + serialPath := path + serial + entryRaw, err := sc.Storage.Get(sc.Context, serialPath) + if err != nil { + return nil, err + } + + if entryRaw != nil { + var revEntry unifiedRevocationEntry + if err := entryRaw.DecodeJSON(&revEntry); err != nil { + return nil, fmt.Errorf("failed json decoding of unified entry at path %s: %w", serialPath, err) + } + revEntry.SerialNumber = serial + return &revEntry, nil + } + } + + return nil, nil +} + +func writeUnifiedRevocationEntry(sc *storageContext, ure *unifiedRevocationEntry) error { + json, err := logical.StorageEntryJSON(unifiedRevocationWritePathPrefix+normalizeSerial(ure.SerialNumber), ure) + if err != nil { + return err + } + + return sc.Storage.Put(sc.Context, json) +} + +// listClusterSpecificUnifiedRevokedCerts returns a list of revoked certificates from a given cluster +func listClusterSpecificUnifiedRevokedCerts(sc *storageContext, clusterId string) ([]string, error) { + path := unifiedRevocationReadPathPrefix + clusterId + "/" + serials, err := sc.Storage.List(sc.Context, path) + if err != nil { + return nil, err + } + + return serials, nil +} + +// lookupUnifiedClusterPaths returns a map of cluster id to the prefix storage path for that given cluster's +// unified revoked certificates +func lookupUnifiedClusterPaths(sc *storageContext) (map[string]string, error) { + fullPaths := map[string]string{} + + clusterPaths, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix) + if err != nil { + return nil, err + } + + for _, clusterIdWithSlash := range clusterPaths { + // Only include folder listings, if a file were to be stored under this path ignore it. + if strings.HasSuffix(clusterIdWithSlash, "/") { + clusterId := clusterIdWithSlash[:len(clusterIdWithSlash)-1] // remove trailing / + fullPaths[clusterId] = unifiedRevocationReadPathPrefix + clusterIdWithSlash + } + } + + return fullPaths, nil +} diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go index d73217f3bf3c..2806a5dcafd2 100644 --- a/builtin/logical/pki/test_helpers.go +++ b/builtin/logical/pki/test_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( @@ -7,20 +10,28 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "encoding/pem" "fmt" "io" + "math" + "math/big" + http2 "net/http" "strings" "testing" + "time" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" ) // Setup helpers func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { + t.Helper() + config := logical.TestBackendConfig() config.StorageView = &logical.InmemStorage{} @@ -36,6 +47,8 @@ func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { } func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { + t.Helper() + err := client.Sys().Mount(path, &api.MountInput{ Type: "pki", Config: api.MountConfigInput{ @@ -46,15 +59,47 @@ func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { require.NoError(t, err, "failed mounting pki endpoint") } +func mountCertEndpoint(t testing.TB, client *api.Client, path string) { + t.Helper() + + err := client.Sys().EnableAuthWithOptions(path, &api.MountInput{ + Type: "cert", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err, "failed mounting cert endpoint") +} + // Signing helpers func requireSignedBy(t *testing.T, cert *x509.Certificate, signingCert *x509.Certificate) { + t.Helper() + if err := cert.CheckSignatureFrom(signingCert); err != nil { t.Fatalf("signature verification failed: %v", err) } } +func requireSignedByAtPath(t *testing.T, client *api.Client, leaf *x509.Certificate, path string) { + t.Helper() + + resp, err := client.Logical().Read(path) + require.NoError(t, err, "got unexpected error fetching parent certificate") + require.NotNil(t, resp, "missing response when fetching parent certificate") + require.NotNil(t, resp.Data, "missing data from parent certificate response") + require.NotNil(t, resp.Data["certificate"], "missing certificate field on parent read response") + + parentCert := resp.Data["certificate"].(string) + parent := parseCert(t, parentCert) + + requireSignedBy(t, leaf, parent) +} + // Certificate helper func parseCert(t *testing.T, pemCert string) *x509.Certificate { + t.Helper() + block, _ := pem.Decode([]byte(pemCert)) require.NotNil(t, block, "failed to decode PEM block") @@ -64,6 +109,8 @@ func parseCert(t *testing.T, pemCert string) *x509.Certificate { } func requireMatchingPublicKeys(t *testing.T, cert *x509.Certificate, key crypto.PublicKey) { + t.Helper() + certPubKey := cert.PublicKey areEqual, err := certutil.ComparePublicKeysAndType(certPubKey, key) require.NoError(t, err, "failed comparing public keys: %#v", err) @@ -89,17 +136,25 @@ func getSelfSigned(t *testing.T, subject, issuer *x509.Certificate, key *rsa.Pri // CRL related helpers func getCrlCertificateList(t *testing.T, client *api.Client, mountPoint string) pkix.TBSCertificateList { + t.Helper() + path := fmt.Sprintf("/v1/%s/crl", mountPoint) return getParsedCrlAtPath(t, client, path).TBSCertList } func parseCrlPemBytes(t *testing.T, crlPem []byte) pkix.TBSCertificateList { + t.Helper() + certList, err := x509.ParseCRL(crlPem) require.NoError(t, err) return certList.TBSCertList } func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, serialNum string) bool { + if t != nil { + t.Helper() + } + serialsInList := make([]string, 0, len(revokeList.RevokedCertificates)) for _, 
revokeEntry := range revokeList.RevokedCertificates { formattedSerial := certutil.GetHexFormatted(revokeEntry.SerialNumber.Bytes(), ":") @@ -117,11 +172,15 @@ func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, } func getParsedCrl(t *testing.T, client *api.Client, mountPoint string) *pkix.CertificateList { + t.Helper() + path := fmt.Sprintf("/v1/%s/crl", mountPoint) return getParsedCrlAtPath(t, client, path) } func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.CertificateList { + t.Helper() + req := client.NewRequest("GET", path) resp, err := client.RawRequest(req) if err != nil { @@ -145,6 +204,8 @@ func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.Cer } func getParsedCrlFromBackend(t *testing.T, b *backend, s logical.Storage, path string) *pkix.CertificateList { + t.Helper() + resp, err := CBRead(b, s, path) if err != nil { t.Fatal(err) @@ -180,6 +241,10 @@ func CBReq(b *backend, s logical.Storage, operation logical.Operation, path stri return resp, nil } +func CBHeader(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.HeaderOperation, path, make(map[string]interface{})) +} + func CBRead(b *backend, s logical.Storage, path string) (*logical.Response, error) { return CBReq(b, s, logical.ReadOperation, path, make(map[string]interface{})) } @@ -201,6 +266,8 @@ func CBDelete(b *backend, s logical.Storage, path string) (*logical.Response, er } func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { + t.Helper() + var missingFields []string for _, field := range fields { value, ok := resp.Data[field] @@ -213,6 +280,8 @@ func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...stri } func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + t.Helper() + require.NoError(t, err, msgAndArgs...) if resp.IsError() { errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) @@ -222,6 +291,8 @@ func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err erro } func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + t.Helper() + require.NoError(t, err, msgAndArgs...) if resp.IsError() { errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) @@ -232,3 +303,173 @@ func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, require.Nilf(t, resp, msg, msgAndArgs...) 
 	}
 }
+
+func getCRLNumber(t *testing.T, crl pkix.TBSCertificateList) int {
+	t.Helper()
+
+	for _, extension := range crl.Extensions {
+		if extension.Id.Equal(certutil.CRLNumberOID) {
+			bigInt := new(big.Int)
+			leftOver, err := asn1.Unmarshal(extension.Value, &bigInt)
+			require.NoError(t, err, "Failed unmarshalling crl number extension")
+			require.Empty(t, leftOver, "leftover bytes from unmarshalling crl number extension")
+			require.True(t, bigInt.IsInt64(), "parsed crl number integer is not an int64")
+			require.False(t, math.MaxInt <= bigInt.Int64(), "parsed crl number integer can not fit in an int")
+			return int(bigInt.Int64())
+		}
+	}
+
+	t.Fatalf("failed to find crl number extension")
+	return 0
+}
+
+func getCrlReferenceFromDelta(t *testing.T, crl pkix.TBSCertificateList) int {
+	t.Helper()
+
+	for _, extension := range crl.Extensions {
+		if extension.Id.Equal(certutil.DeltaCRLIndicatorOID) {
+			bigInt := new(big.Int)
+			leftOver, err := asn1.Unmarshal(extension.Value, &bigInt)
+			require.NoError(t, err, "Failed unmarshalling delta crl indicator extension")
+			require.Empty(t, leftOver, "leftover bytes from unmarshalling delta crl indicator extension")
+			require.True(t, bigInt.IsInt64(), "parsed delta crl integer is not an int64")
+			require.False(t, math.MaxInt <= bigInt.Int64(), "parsed delta crl integer can not fit in an int")
+			return int(bigInt.Int64())
+		}
+	}
+
+	t.Fatalf("failed to find delta crl indicator extension")
+	return 0
+}
+
+// waitForUpdatedCrl waits up to maxWait for the CRL at the provided path to
+// be reloaded and fails the test if the timeout is reached. If a negative
+// value for lastSeenCRLNumber is provided, the method loads the current CRL
+// and waits for a newer CRL to be generated.
+func waitForUpdatedCrl(t *testing.T, client *api.Client, crlPath string, lastSeenCRLNumber int, maxWait time.Duration) pkix.TBSCertificateList {
+	t.Helper()
+
+	newCrl, didTimeOut := waitForUpdatedCrlUntil(t, client, crlPath, lastSeenCRLNumber, maxWait)
+	if didTimeOut {
+		t.Fatalf("Timed out waiting for new CRL rebuild on path %s", crlPath)
+	}
+	return newCrl.TBSCertList
+}
+
+// waitForUpdatedCrlUntil is a helper method that waits for a CRL to be
+// updated, up to maxWait, or gives up and returns the last CRL it loaded.
+// Unlike waitForUpdatedCrl, it does not fail the test if no new CRL appears
+// within the max duration. Returns the last loaded CRL at the provided path
+// and a boolean indicating whether we hit the maxWait duration.
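+//
+// Example (hypothetical path and values): poll the default CRL for up to
+// ten seconds, tolerating a timeout:
+//
+//	crl, timedOut := waitForUpdatedCrlUntil(t, client, "/v1/pki/crl", -1, 10*time.Second)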
+func waitForUpdatedCrlUntil(t *testing.T, client *api.Client, crlPath string, lastSeenCrlNumber int, maxWait time.Duration) (*pkix.CertificateList, bool) { + t.Helper() + + crl := getParsedCrlAtPath(t, client, crlPath) + initialCrlRevision := getCRLNumber(t, crl.TBSCertList) + newCrlRevision := initialCrlRevision + + // Short circuit the fetches if we have a version of the CRL we want + if lastSeenCrlNumber > 0 && getCRLNumber(t, crl.TBSCertList) > lastSeenCrlNumber { + return crl, false + } + + start := time.Now() + iteration := 0 + for { + iteration++ + + if time.Since(start) > maxWait { + t.Logf("Timed out waiting for new CRL on path %s after iteration %d, delay: %v", + crlPath, iteration, time.Now().Sub(start)) + return crl, true + } + + crl = getParsedCrlAtPath(t, client, crlPath) + newCrlRevision = getCRLNumber(t, crl.TBSCertList) + if newCrlRevision > initialCrlRevision { + t.Logf("Got new revision of CRL %s from %d to %d after iteration %d, delay %v", + crlPath, initialCrlRevision, newCrlRevision, iteration, time.Now().Sub(start)) + return crl, false + } + + time.Sleep(100 * time.Millisecond) + } +} + +// A quick CRL to string to provide better test error messages +func summarizeCrl(t *testing.T, crl pkix.TBSCertificateList) string { + version := getCRLNumber(t, crl) + serials := []string{} + for _, cert := range crl.RevokedCertificates { + serials = append(serials, normalizeSerialFromBigInt(cert.SerialNumber)) + } + return fmt.Sprintf("CRL Version: %d\n"+ + "This Update: %s\n"+ + "Next Update: %s\n"+ + "Revoked Serial Count: %d\n"+ + "Revoked Serials: %v", version, crl.ThisUpdate, crl.NextUpdate, len(serials), serials) +} + +// OCSP helpers +func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { + t.Helper() + + opts := &ocsp.RequestOptions{Hash: requestHash} + ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) + require.NoError(t, err, "Failed generating OCSP request") + return ocspRequestDer +} + +func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { + t.Helper() + + err := ocspResp.CheckSignatureFrom(issuer) + require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) +} + +func performOcspPost(t *testing.T, cert *x509.Certificate, issuerCert *x509.Certificate, client *api.Client, ocspPath string) *ocsp.Response { + t.Helper() + + baseClient := client.WithNamespace("") + + ocspReq := generateRequest(t, crypto.SHA256, cert, issuerCert) + ocspPostReq := baseClient.NewRequest(http2.MethodPost, ocspPath) + ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") + ocspPostReq.BodyBytes = ocspReq + rawResp, err := baseClient.RawRequest(ocspPostReq) + require.NoError(t, err, "failed sending unified-ocsp post request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader := rawResp.Body + respDer, err := io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) + require.NoError(t, err, "parsing ocsp get response") + return ocspResp +} + +func requireCertMissingFromStorage(t *testing.T, client *api.Client, cert *x509.Certificate) { + serial := serialFromCert(cert) + requireSerialMissingFromStorage(t, client, serial) +} + +func requireSerialMissingFromStorage(t *testing.T, client *api.Client, serial string) { + resp, err := 
client.Logical().ReadWithContext(context.Background(), "pki/cert/"+serial) + require.NoErrorf(t, err, "failed reading certificate with serial %s", serial) + require.Nilf(t, resp, "expected a nil response looking up serial %s got: %v", serial, resp) +} + +func requireCertInStorage(t *testing.T, client *api.Client, cert *x509.Certificate) { + serial := serialFromCert(cert) + requireSerialInStorage(t, client, serial) +} + +func requireSerialInStorage(t *testing.T, client *api.Client, serial string) { + resp, err := client.Logical().ReadWithContext(context.Background(), "pki/cert/"+serial) + require.NoErrorf(t, err, "failed reading certificate with serial %s", serial) + require.NotNilf(t, resp, "reading certificate returned a nil response for serial: %s", serial) + require.NotNilf(t, resp.Data, "reading certificate returned a nil data response for serial: %s", serial) + require.NotEmpty(t, resp.Data["certificate"], "certificate field was empty for serial: %s", serial) +} diff --git a/builtin/logical/pki/tidystatusstate_enumer.go b/builtin/logical/pki/tidystatusstate_enumer.go new file mode 100644 index 000000000000..11db8e64c429 --- /dev/null +++ b/builtin/logical/pki/tidystatusstate_enumer.go @@ -0,0 +1,53 @@ +// Code generated by "enumer -type=tidyStatusState -trimprefix=tidyStatus"; DO NOT EDIT. + +package pki + +import ( + "fmt" +) + +const _tidyStatusStateName = "InactiveStartedFinishedErrorCancellingCancelled" + +var _tidyStatusStateIndex = [...]uint8{0, 8, 15, 23, 28, 38, 47} + +func (i tidyStatusState) String() string { + if i < 0 || i >= tidyStatusState(len(_tidyStatusStateIndex)-1) { + return fmt.Sprintf("tidyStatusState(%d)", i) + } + return _tidyStatusStateName[_tidyStatusStateIndex[i]:_tidyStatusStateIndex[i+1]] +} + +var _tidyStatusStateValues = []tidyStatusState{0, 1, 2, 3, 4, 5} + +var _tidyStatusStateNameToValueMap = map[string]tidyStatusState{ + _tidyStatusStateName[0:8]: 0, + _tidyStatusStateName[8:15]: 1, + _tidyStatusStateName[15:23]: 2, + _tidyStatusStateName[23:28]: 3, + _tidyStatusStateName[28:38]: 4, + _tidyStatusStateName[38:47]: 5, +} + +// tidyStatusStateString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func tidyStatusStateString(s string) (tidyStatusState, error) { + if val, ok := _tidyStatusStateNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to tidyStatusState values", s) +} + +// tidyStatusStateValues returns all values of the enum +func tidyStatusStateValues() []tidyStatusState { + return _tidyStatusStateValues +} + +// IsAtidyStatusState returns "true" if the value is listed in the enum definition. "false" otherwise +func (i tidyStatusState) IsAtidyStatusState() bool { + for _, v := range _tidyStatusStateValues { + if i == v { + return true + } + } + return false +} diff --git a/builtin/logical/pki/util.go b/builtin/logical/pki/util.go index 23c3111fbecb..b40be575a95a 100644 --- a/builtin/logical/pki/util.go +++ b/builtin/logical/pki/util.go @@ -1,15 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pki import ( - "crypto" "crypto/x509" "fmt" "math/big" "net/http" "regexp" "strings" + "sync" "time" + "github.com/hashicorp/vault/builtin/logical/pki/issuing" + "github.com/hashicorp/vault/builtin/logical/pki/managed_key" + "github.com/hashicorp/vault/builtin/logical/pki/parsing" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -20,7 +26,7 @@ import ( const ( managedKeyNameArg = "managed_key_name" managedKeyIdArg = "managed_key_id" - defaultRef = "default" + defaultRef = issuing.DefaultRef // Constants for If-Modified-Since operation headerIfModifiedSince = "If-Modified-Since" @@ -28,9 +34,10 @@ const ( ) var ( - nameMatcher = regexp.MustCompile("^" + framework.GenericNameRegex(issuerRefParam) + "$") - errIssuerNameInUse = errutil.UserError{Err: "issuer name already in use"} - errKeyNameInUse = errutil.UserError{Err: "key name already in use"} + nameMatcher = regexp.MustCompile("^" + framework.GenericNameRegex(issuerRefParam) + "$") + errIssuerNameInUse = errutil.UserError{Err: "issuer name already in use"} + errIssuerNameIsEmpty = errutil.UserError{Err: "expected non-empty issuer name"} + errKeyNameInUse = errutil.UserError{Err: "key name already in use"} ) func serialFromCert(cert *x509.Certificate) string { @@ -41,14 +48,24 @@ func serialFromBigInt(serial *big.Int) string { return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) } +func normalizeSerialFromBigInt(serial *big.Int) string { + return parsing.NormalizeSerialForStorageFromBigInt(serial) +} + func normalizeSerial(serial string) string { - return strings.ReplaceAll(strings.ToLower(serial), ":", "-") + return parsing.NormalizeSerialForStorage(serial) } func denormalizeSerial(serial string) string { return strings.ReplaceAll(strings.ToLower(serial), "-", ":") } +func serialToBigInt(serial string) (*big.Int, bool) { + norm := normalizeSerial(serial) + hex := strings.ReplaceAll(norm, "-", "") + return big.NewInt(0).SetString(hex, 16) +} + func kmsRequested(input *inputBundle) bool { return kmsRequestedFromFieldData(input.apiData) } @@ -77,26 +94,6 @@ type managedKeyId interface { String() string } -type ( - UUIDKey string - NameKey string -) - -func (u UUIDKey) String() string { - return string(u) -} - -func (n NameKey) String() string { - return string(n) -} - -type managedKeyInfo struct { - publicKey crypto.PublicKey - keyType certutil.PrivateKeyType - name NameKey - uuid UUIDKey -} - // getManagedKeyId returns a NameKey or a UUIDKey, whichever was specified in the // request API data. 
func getManagedKeyId(data *framework.FieldData) (managedKeyId, error) { @@ -105,9 +102,9 @@ func getManagedKeyId(data *framework.FieldData) (managedKeyId, error) { return nil, err } - var keyId managedKeyId = NameKey(name) + var keyId managedKeyId = managed_key.NameKey(name) if len(UUID) > 0 { - keyId = UUIDKey(UUID) + keyId = managed_key.UUIDKey(UUID) } return keyId, nil @@ -159,11 +156,12 @@ func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error issuerNameIface, ok := data.GetOk("issuer_name") if ok { issuerName = strings.TrimSpace(issuerNameIface.(string)) - + if len(issuerName) == 0 { + return issuerName, errIssuerNameIsEmpty + } if strings.ToLower(issuerName) == defaultRef { return issuerName, errutil.UserError{Err: "reserved keyword 'default' can not be used as issuer name"} } - if !nameMatcher.MatchString(issuerName) { return issuerName, errutil.UserError{Err: "issuer name contained invalid characters"} } @@ -172,7 +170,7 @@ func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error return issuerName, errIssuerNameInUse } - if err != nil && issuerId != IssuerRefNotFound { + if err != nil && issuerId != issuing.IssuerRefNotFound { return issuerName, errutil.InternalError{Err: err.Error()} } } @@ -197,14 +195,14 @@ func getKeyName(sc *storageContext, data *framework.FieldData) (string, error) { return "", errKeyNameInUse } - if err != nil && keyId != KeyRefNotFound { + if err != nil && keyId != issuing.KeyRefNotFound { return "", errutil.InternalError{Err: err.Error()} } } return keyName, nil } -func getIssuerRef(data *framework.FieldData) string { +func GetIssuerRef(data *framework.FieldData) string { return extractRef(data, issuerRefParam) } @@ -256,19 +254,22 @@ func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { return headerTimeValue, nil } +//go:generate enumer -type=ifModifiedReqType -trimprefix=ifModified type ifModifiedReqType int const ( - ifModifiedUnknown ifModifiedReqType = iota - ifModifiedCA = iota - ifModifiedCRL = iota - ifModifiedDeltaCRL = iota + ifModifiedUnknown ifModifiedReqType = iota + ifModifiedCA + ifModifiedCRL + ifModifiedDeltaCRL + ifModifiedUnifiedCRL + ifModifiedUnifiedDeltaCRL ) type IfModifiedSinceHelper struct { req *logical.Request reqType ifModifiedReqType - issuerRef issuerID + issuerRef issuing.IssuerID } func sendNotModifiedResponseIfNecessary(helper *IfModifiedSinceHelper, sc *storageContext, resp *logical.Response) (bool, error) { @@ -308,7 +309,7 @@ func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModified switch helper.reqType { case ifModifiedCRL, ifModifiedDeltaCRL: - if sc.Backend.crlBuilder.invalidate.Load() { + if sc.Backend.CrlBuilder().invalidate.Load() { // When we see the CRL is invalidated, respond with false // regardless of what the local CRL state says. We've likely // renamed some issuers or are about to rebuild a new CRL.... @@ -327,6 +328,26 @@ func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModified if helper.reqType == ifModifiedDeltaCRL { lastModified = crlConfig.DeltaLastModified } + case ifModifiedUnifiedCRL, ifModifiedUnifiedDeltaCRL: + if sc.Backend.CrlBuilder().invalidate.Load() { + // When we see the CRL is invalidated, respond with false + // regardless of what the local CRL state says. We've likely + // renamed some issuers or are about to rebuild a new CRL.... + // + // We do this earlier, ahead of config load, as it saves us a + // potential error condition. 
+ return false, nil + } + + crlConfig, err := sc.getUnifiedCRLConfig() + if err != nil { + return false, err + } + + lastModified = crlConfig.LastModified + if helper.reqType == ifModifiedUnifiedDeltaCRL { + lastModified = crlConfig.DeltaLastModified + } case ifModifiedCA: issuerId, err := sc.resolveIssuerReference(string(helper.issuerRef)) if err != nil { @@ -357,3 +378,109 @@ func addWarnings(resp *logical.Response, warnings []string) *logical.Response { } return resp } + +// revocationQueue is a type for allowing invalidateFunc to continue operating +// quickly, while letting periodicFunc slowly sort through all open +// revocations to process. In particular, we do not wish to be holding this +// lock while periodicFunc is running, so iteration returns a full copy of +// the data in this queue. We use a map from serial->[]clusterId, allowing us +// to quickly insert and remove items, without using a slice of tuples. One +// serial might be present on two clusters, if two clusters both have the cert +// stored locally (e.g., via BYOC), which would result in two confirmation +// entries and thus dictates the need for []clusterId. This also lets us +// avoid having duplicate entries. +type revocationQueue struct { + _l sync.Mutex + queue map[string][]string +} + +func newRevocationQueue() *revocationQueue { + return &revocationQueue{ + queue: make(map[string][]string), + } +} + +func (q *revocationQueue) Add(items ...*revocationQueueEntry) { + q._l.Lock() + defer q._l.Unlock() + + for _, item := range items { + var found bool + for _, cluster := range q.queue[item.Serial] { + if cluster == item.Cluster { + found = true + break + } + } + + if !found { + q.queue[item.Serial] = append(q.queue[item.Serial], item.Cluster) + } + } +} + +func (q *revocationQueue) Remove(item *revocationQueueEntry) { + q._l.Lock() + defer q._l.Unlock() + + clusters, present := q.queue[item.Serial] + if !present { + return + } + + if len(clusters) == 0 || (len(clusters) == 1 && clusters[0] == item.Cluster) { + delete(q.queue, item.Serial) + return + } + + result := clusters + for index, cluster := range clusters { + if cluster == item.Cluster { + result = append(clusters[0:index], clusters[index+1:]...) + break + } + } + + q.queue[item.Serial] = result +} + +// As this doesn't depend on any internal state, it should not be called +// unless it is OK to remove any items added since the last Iterate() +// function call. +func (q *revocationQueue) RemoveAll() { + q._l.Lock() + defer q._l.Unlock() + + q.queue = make(map[string][]string) +} + +func (q *revocationQueue) Iterate() []*revocationQueueEntry { + q._l.Lock() + defer q._l.Unlock() + + // Heuristic: by storing by serial, occasionally we'll get double entries + // if it was already revoked, but otherwise we'll be off by fewer when + // building this list. + ret := make([]*revocationQueueEntry, 0, len(q.queue)) + + for serial, clusters := range q.queue { + for _, cluster := range clusters { + ret = append(ret, &revocationQueueEntry{ + Serial: serial, + Cluster: cluster, + }) + } + } + + return ret +} + +// sliceToMapKey returns a map whose keys are the entries of the given slice.
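A brief usage sketch of the revocationQueue above (the serial and cluster IDs are made up): the important property is that Iterate hands back a flattened copy, so periodicFunc can process entries without holding the queue's lock.

```
q := newRevocationQueue()

// invalidateFunc side: cheap inserts, lock held only briefly.
q.Add(&revocationQueueEntry{Serial: "aa-bb-cc", Cluster: "cluster-1"})
q.Add(&revocationQueueEntry{Serial: "aa-bb-cc", Cluster: "cluster-2"}) // same serial, second cluster
q.Add(&revocationQueueEntry{Serial: "aa-bb-cc", Cluster: "cluster-1"}) // duplicate, ignored

// periodicFunc side: work from a snapshot, then remove what was handled.
for _, entry := range q.Iterate() { // yields two entries, one per cluster
	// ... confirm the revocation ...
	q.Remove(entry)
}
```

(The sliceToMapKey helper that follows is unrelated plumbing: it simply turns a slice into a set-shaped map.)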
+func sliceToMapKey(s []string) map[string]struct{} { + var empty struct{} + myMap := make(map[string]struct{}, len(s)) + for _, s := range s { + myMap[s] = empty + } + return myMap +} diff --git a/builtin/logical/pkiext/nginx_test.go b/builtin/logical/pkiext/nginx_test.go index 9992627c8448..e7d3ab42ed7e 100644 --- a/builtin/logical/pkiext/nginx_test.go +++ b/builtin/logical/pkiext/nginx_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pkiext import ( @@ -14,11 +17,9 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/helper/testhelpers/docker" - "github.com/hashicorp/go-uuid" - + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/stretchr/testify/require" ) @@ -229,7 +230,7 @@ func CheckWithClients(t *testing.T, network string, address string, url string, // Start our service with a random name to not conflict with other // threads. ctx := context.Background() - ctr, _, _, err := cwRunner.Start(ctx, true, false) + result, err := cwRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for wget/curl checks: %s", err) } @@ -255,14 +256,14 @@ func CheckWithClients(t *testing.T, network string, address string, url string, wgetCmd = []string{"wget", "--verbose", "--ca-certificate=/root.pem", "--certificate=/client-cert.pem", "--private-key=/client-privkey.pem", url} curlCmd = []string{"curl", "--verbose", "--cacert", "/root.pem", "--cert", "/client-cert.pem", "--key", "/client-privkey.pem", url} } - if err := cwRunner.CopyTo(ctr.ID, "/", certCtx); err != nil { + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { t.Fatalf("Could not copy certificate and key into container: %v", err) } for _, cmd := range [][]string{hostPrimeCmd, wgetCmd, curlCmd} { t.Logf("Running client connection command: %v", cmd) - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, ctr.ID, cmd) + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } @@ -292,7 +293,7 @@ func CheckDeltaCRL(t *testing.T, network string, address string, url string, roo // Start our service with a random name to not conflict with other // threads. 
ctx := context.Background() - ctr, _, _, err := cwRunner.Start(ctx, true, false) + result, err := cwRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for wget2 delta CRL checks: %s", err) } @@ -310,14 +311,14 @@ func CheckDeltaCRL(t *testing.T, network string, address string, url string, roo certCtx := docker.NewBuildContext() certCtx["root.pem"] = docker.PathContentsFromString(rootCert) certCtx["crls.pem"] = docker.PathContentsFromString(crls) - if err := cwRunner.CopyTo(ctr.ID, "/", certCtx); err != nil { + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { t.Fatalf("Could not copy certificate and key into container: %v", err) } for index, cmd := range [][]string{hostPrimeCmd, wgetCmd} { t.Logf("Running client connection command: %v", cmd) - stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, ctr.ID, cmd) + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } diff --git a/builtin/logical/pkiext/pkiext_binary/acme_test.go b/builtin/logical/pkiext/pkiext_binary/acme_test.go new file mode 100644 index 000000000000..f4a7be0c1d83 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/acme_test.go @@ -0,0 +1,1116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pkiext_binary + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + _ "embed" + "encoding/hex" + "errors" + "fmt" + "html/template" + "net" + "net/http" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/pkiext" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/certutil" + hDocker "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" +) + +//go:embed testdata/caddy_http.json +var caddyConfigTemplateHTTP string + +//go:embed testdata/caddy_http_eab.json +var caddyConfigTemplateHTTPEAB string + +//go:embed testdata/caddy_tls_alpn.json +var caddyConfigTemplateTLSALPN string + +// Test_ACME will start a Vault cluster using the docker based binary, and execute +// a bunch of sub-tests against that cluster. It is up to each sub-test to run/configure +// a new pki mount within the cluster to not interfere with each other. +func Test_ACME(t *testing.T) { + cluster := NewVaultPkiClusterWithDNS(t) + defer cluster.Cleanup() + + tc := map[string]func(t *testing.T, cluster *VaultPkiCluster){ + "caddy http": SubtestACMECaddy(caddyConfigTemplateHTTP, false), + "caddy http eab": SubtestACMECaddy(caddyConfigTemplateHTTPEAB, true), + "caddy tls-alpn": SubtestACMECaddy(caddyConfigTemplateTLSALPN, false), + "certbot": SubtestACMECertbot, + "certbot eab": SubtestACMECertbotEab, + "acme ip sans": SubtestACMEIPAndDNS, + "acme wildcard": SubtestACMEWildcardDNS, + "acme prevents ica": SubtestACMEPreventsICADNS, + } + + // Wrap the tests within an outer group, so that we run all tests + // in parallel, but still wait for all tests to finish before completing + // and running the cleanup of the Vault cluster. 
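The grouping idiom the comment above refers to is a stock Go testing pattern: an outer t.Run does not return until every parallel subtest registered inside it has finished, which is what keeps the deferred cluster cleanup from firing early. A self-contained sketch of just the pattern:

```
package example

import "testing"

func TestGroupPattern(t *testing.T) {
	defer t.Log("cleanup would run here, after all subtests")

	// The outer Run blocks until all parallel subtests inside complete.
	t.Run("group", func(gt *testing.T) {
		for _, name := range []string{"a", "b", "c"} {
			name := name // per-iteration capture, as the diff does with testFunc
			gt.Run(name, func(st *testing.T) {
				st.Parallel() // pauses this subtest until siblings are registered
				st.Log("running", name)
			})
		}
	})
	// Only now is it safe to tear down shared fixtures.
}
```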
+ t.Run("group", func(gt *testing.T) { + for testName := range tc { + // Trap the function to be embedded later in the run so it + // doesn't get clobbered on the next for iteration + testFunc := tc[testName] + + gt.Run(testName, func(st *testing.T) { + st.Parallel() + testFunc(st, cluster) + }) + } + }) + + // Do not run these tests in parallel. + t.Run("step down", func(gt *testing.T) { SubtestACMEStepDownNode(gt, cluster) }) +} + +// caddyConfig contains information used to render a Caddy configuration file from a template. +type caddyConfig struct { + Hostname string + Directory string + CACert string + EABID string + EABKey string +} + +// SubtestACMECaddy returns an ACME test for Caddy using the provided template. +func SubtestACMECaddy(configTemplate string, enableEAB bool) func(*testing.T, *VaultPkiCluster) { + return func(t *testing.T, cluster *VaultPkiCluster) { + ctx := context.Background() + logger := corehelpers.NewTestLogger(t) + + // Roll a random run ID for mount and hostname uniqueness. + runID, err := uuid.GenerateUUID() + require.NoError(t, err, "failed to generate a unique ID for test run") + runID = strings.Split(runID, "-")[0] + + // Create the PKI mount with ACME enabled + pki, err := cluster.CreateAcmeMount(runID) + require.NoError(t, err, "failed to set up ACME mount") + + // Conditionally enable EAB and retrieve the key. + var eabID, eabKey string + if enableEAB { + err = pki.UpdateAcmeConfig(true, map[string]interface{}{ + "eab_policy": "new-account-required", + }) + require.NoError(t, err, "failed to configure EAB policy in PKI mount") + + eabID, eabKey, err = pki.GetEabKey("acme/") + require.NoError(t, err, "failed to retrieve EAB key from PKI mount") + } + + directory := fmt.Sprintf("https://%s:8200/v1/%s/acme/directory", pki.GetActiveContainerIP(), runID) + vaultNetwork := pki.GetContainerNetworkName() + logger.Trace("dir", "dir", directory) + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + sleepTimer := "45" + + // Kick off Caddy container. + logger.Trace("creating on network", "network", vaultNetwork) + caddyRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/library/caddy", + ImageTag: "2.6.4", + ContainerName: fmt.Sprintf("caddy_test_%s", runID), + NetworkName: vaultNetwork, + Ports: []string{"80/tcp", "443/tcp", "443/udp"}, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating caddy service runner") + + caddyResult, err := caddyRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start Caddy container") + require.NotNil(t, caddyResult, "could not start Caddy container") + + defer caddyRunner.Stop(ctx, caddyResult.Container.ID) + + networks, err := caddyRunner.GetNetworkAndAddresses(caddyResult.Container.ID) + require.NoError(t, err, "could not read caddy container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := fmt.Sprintf("%s.dadgarcorp.com", runID) + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Render the Caddy configuration from the specified template. 
+ tmpl, err := template.New("config").Parse(configTemplate) + require.NoError(t, err, "failed to parse Caddy config template") + var b strings.Builder + err = tmpl.Execute( + &b, + caddyConfig{ + Hostname: hostname, + Directory: directory, + CACert: "/tmp/vault_ca_cert.crt", + EABID: eabID, + EABKey: eabKey, + }, + ) + require.NoError(t, err, "failed to render Caddy config template") + + // Push the Caddy config and the cluster listener's CA certificate over to the docker container. + cpCtx := hDocker.NewBuildContext() + cpCtx["caddy_config.json"] = hDocker.PathContentsFromString(b.String()) + cpCtx["vault_ca_cert.crt"] = hDocker.PathContentsFromString(string(cluster.GetListenerCACertPEM())) + err = caddyRunner.CopyTo(caddyResult.Container.ID, "/tmp/", cpCtx) + require.NoError(t, err, "failed to copy Caddy config and Vault listener CA certificate to container") + + // Start the Caddy server. + caddyCmd := []string{ + "caddy", + "start", + "--config", "/tmp/caddy_config.json", + } + stdout, stderr, retcode, err := caddyRunner.RunCmdWithOutput(ctx, caddyResult.Container.ID, caddyCmd) + logger.Trace("Caddy Start Command", "cmd", caddyCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.NoError(t, err, "got error running Caddy start command") + require.Equal(t, 0, retcode, "expected zero retcode Caddy start command result") + + // Start a cURL container. + curlRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/curlimages/curl", + ImageTag: "8.4.0", + ContainerName: fmt.Sprintf("curl_test_%s", runID), + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating cURL service runner") + + curlResult, err := curlRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start cURL container") + require.NotNil(t, curlResult, "could not start cURL container") + + // Retrieve the PKI mount CA cert and copy it over to the cURL container. + mountCACert, err := pki.GetCACertPEM() + require.NoError(t, err, "failed to retrieve PKI mount CA certificate") + + mountCACertCtx := hDocker.NewBuildContext() + mountCACertCtx["ca_cert.crt"] = hDocker.PathContentsFromString(mountCACert) + err = curlRunner.CopyTo(curlResult.Container.ID, "/tmp/", mountCACertCtx) + require.NoError(t, err, "failed to copy PKI mount CA certificate to cURL container") + + // Use cURL to hit the Caddy server and validate that a certificate was retrieved successfully. 
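The check the cURL container performs below could equally be done natively with crypto/tls; this hedged sketch (not part of the diff; mountCACert, hostname, and ipAddr as above) mirrors cURL's --cacert and --resolve flags:

```
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// verifyCaddyCert dials the Caddy container by IP while still verifying
// the presented chain against the PKI mount CA and the expected hostname.
func verifyCaddyCert(mountCACert, hostname, ipAddr string) error {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM([]byte(mountCACert)) {
		return fmt.Errorf("failed to parse CA PEM")
	}
	conn, err := tls.Dial("tcp", ipAddr+":443", &tls.Config{
		RootCAs:    pool,
		ServerName: hostname, // SNI + hostname verification, like --resolve
	})
	if err != nil {
		return err
	}
	return conn.Close()
}
```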
+ curlCmd := []string{ + "curl", + "-L", + "--cacert", "/tmp/ca_cert.crt", + "--resolve", hostname + ":443:" + ipAddr, + "https://" + hostname + "/", + } + stdout, stderr, retcode, err = curlRunner.RunCmdWithOutput(ctx, curlResult.Container.ID, curlCmd) + logger.Trace("cURL Command", "cmd", curlCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.NoError(t, err, "got error running cURL command") + require.Equal(t, 0, retcode, "expected zero retcode cURL command result") + } +} + +func SubtestACMECertbot(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki") + require.NoError(t, err, "failed setting up acme mount") + + directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/pki/acme/directory" + vaultNetwork := pki.GetContainerNetworkName() + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + // Default to 45 second timeout, but bump to 120 when running locally or if nightly regression + // flag is provided. + sleepTimer := "45" + if testhelpers.IsLocalOrRegressionTests() { + sleepTimer = "120" + } + + logger.Trace("creating on network", "network", vaultNetwork) + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", + ImageTag: "latest", + ContainerName: "vault_pki_certbot_test", + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + defer runner.Stop(context.Background(), result.Container.ID) + + networks, err := runner.GetNetworkAndAddresses(result.Container.ID) + require.NoError(t, err, "could not read container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := "certbot-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Sinkhole a domain that's invalid just in case it's registered in the future. + cluster.Dns.AddDomain("armoncorp.com") + cluster.Dns.AddRecord("armoncorp.com", "A", "127.0.0.1") + + certbotCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + } + logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} + + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) + logger.Trace("Certbot Issue Command", "cmd", certbotCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.Equal(t, 0, retcode, "expected zero retcode issue command result") + + // N.B. We're using the `certonly` subcommand here because it seems as though the `renew` command + // attempts to install the cert for you. 
This ends up hanging and getting killed by docker, but is + // also not desired behavior. The certbot docs suggest using `certonly` to renew as seen here: + // https://eff-certbot.readthedocs.io/en/stable/using.html#renewing-certificates + certbotRenewCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + "--cert-name", hostname, + "--force-renewal", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) + logger.Trace("Certbot Renew Command", "cmd", certbotRenewCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running renew command") + require.Equal(t, 0, retcode, "expected zero retcode renew command result") + + certbotRevokeCmd := []string{ + "certbot", + "revoke", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--non-interactive", + "--no-delete-after-revoke", + "--cert-name", hostname, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running revoke command") + require.Equal(t, 0, retcode, "expected zero retcode revoke command result") + + // Revoking twice should fail. + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Double Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode == 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + + require.NoError(t, err, "got error running double revoke command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") + + // Attempt to issue against a domain that doesn't match the challenge. + // N.B. This test only runs locally or when the nightly regression env var is provided to CI. 
+ if testhelpers.IsLocalOrRegressionTests() { + certbotInvalidIssueCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", "armoncorp.com", + "--issuance-timeout", "10", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotInvalidIssueCmd) + logger.Trace("Certbot Invalid Issue Command", "cmd", certbotInvalidIssueCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode issue command result") + } + + // Attempt to close out our ACME account + certbotUnregisterCmd := []string{ + "certbot", + "unregister", + "--no-verify-ssl", + "--non-interactive", + "--server", directory, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd) + logger.Trace("Certbot Unregister Command", "cmd", certbotUnregisterCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running unregister command") + require.Equal(t, 0, retcode, "expected zero retcode unregister command result") + + // Attempting to close out our ACME account twice should fail + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd) + logger.Trace("Certbot Double Unregister Command", "cmd", certbotUnregisterCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running double unregister command") + require.Equal(t, 1, retcode, "expected retcode 1 from double unregister command") +} + +func SubtestACMECertbotEab(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + mountName := "pki-certbot-eab" + pki, err := cluster.CreateAcmeMount(mountName) + require.NoError(t, err, "failed setting up acme mount") + + err = pki.UpdateAcmeConfig(true, map[string]interface{}{ + "eab_policy": "new-account-required", + }) + require.NoError(t, err) + + eabId, base64EabKey, err := pki.GetEabKey("acme/") + require.NoError(t, err, "failed to retrieve EAB key from PKI mount") + + directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/" + mountName + "/acme/directory" + vaultNetwork := pki.GetContainerNetworkName() + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + logger.Trace("creating on network", "network", vaultNetwork) + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", + ImageTag: "latest", + ContainerName: "vault_pki_certbot_eab_test", + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", "45"}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service
runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + defer runner.Stop(context.Background(), result.Container.ID) + + networks, err := runner.GetNetworkAndAddresses(result.Container.ID) + require.NoError(t, err, "could not read container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := "certbot-eab-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + certbotCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--eab-kid", eabId, + "--eab-hmac-key='" + base64EabKey + "'", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + } + logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} + + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) + logger.Trace("Certbot Issue Command", "cmd", certbotCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.Equal(t, 0, retcode, "expected zero retcode issue command result") + + certbotRenewCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + "--cert-name", hostname, + "--force-renewal", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) + logger.Trace("Certbot Renew Command", "cmd", certbotRenewCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running renew command") + require.Equal(t, 0, retcode, "expected zero retcode renew command result") + + certbotRevokeCmd := []string{ + "certbot", + "revoke", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--non-interactive", + "--no-delete-after-revoke", + "--cert-name", hostname, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + require.NoError(t, err, "got error running revoke command") + require.Equal(t, 0, retcode, "expected zero retcode revoke command result") + + // Revoking twice should fail. 
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + logger.Trace("Certbot Double Revoke Command", "cmd", certbotRevokeCmd, "stdout", string(stdout), "stderr", string(stderr)) + if err != nil || retcode == 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + logger.Trace("Certbot logs", "stdout", string(logsStdout), "stderr", string(logsStderr)) + } + + require.NoError(t, err, "got error running double revoke command") + require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result") +} + +func SubtestACMEIPAndDNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-ip-dns-sans") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + logConsumer, logStdout, logStderr := getDockerLog(logger) + + // Set up an nginx container that can respond to HTTP-01 challenge queries for the IP identifiers + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/nginx", + ImageTag: "latest", + ContainerName: "vault_pki_ipsans_test", + NetworkName: pki.GetContainerNetworkName(), + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + nginxContainerId := result.Container.ID + defer runner.Stop(context.Background(), nginxContainerId) + networks, err := runner.GetNetworkAndAddresses(nginxContainerId) + require.NoError(t, err, "could not read nginx container's IP address") + + challengeFolder := "/usr/share/nginx/html/.well-known/acme-challenge/" + createChallengeFolderCmd := []string{ + "sh", "-c", + "mkdir -p '" + challengeFolder + "'", + } + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, nginxContainerId, createChallengeFolderCmd) + require.NoError(t, err, "failed to create folder in nginx container") + logger.Trace("Create challenge folder command", "cmd", createChallengeFolderCmd, "stdout", string(stdout), "stderr", string(stderr)) + require.Equal(t, 0, retcode, "expected zero retcode from mkdir in nginx container") + + ipAddr := networks[pki.GetContainerNetworkName()] + hostname := "go-lang-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Perform an ACME lifecycle with an order that contains both an IP and a DNS name identifier + err = pki.UpdateRole("ip-dns-sans", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "dadgarcorp.com", + "allow_subdomains": true, + "allow_wildcard_certificates": false, + }) + require.NoError(t, err, "failed creating role ip-dns-sans") + + directoryUrl := basePath + "/roles/ip-dns-sans/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "ip", Value: ipAddr}, + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: hostname}, + DNSNames: []string{hostname}, + IPAddresses: []net.IP{net.ParseIP(ipAddr)}, + } + + provisioningFunc := func(acmeClient *acme.Client, auths
[]*acme.Authorization) []*acme.Challenge { + // For each http-01 challenge, generate the file to place underneath the nginx challenge folder + acmeCtx := hDocker.NewBuildContext() + var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "http-01" { + challengeBody, err := acmeClient.HTTP01ChallengeResponse(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + challengePath := acmeClient.HTTP01ChallengePath(challenge.Token) + require.NoError(t, err, "failed generating challenge path") + + challengeFile := path.Base(challengePath) + + acmeCtx[challengeFile] = hDocker.PathContentsFromString(challengeBody) + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + + // Copy all challenges within the nginx container + err = runner.CopyTo(nginxContainerId, challengeFolder, acmeCtx) + require.NoError(t, err, "failed copying challenges to container") + + return challengesToAccept + } + + acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + + require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") + require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) + require.Equal(t, []string{hostname}, acmeCert.DNSNames) + require.Equal(t, hostname, acmeCert.Subject.CommonName) + + // Perform an ACME lifecycle with an order that contains just an IP identifier + err = pki.UpdateRole("ip-sans", map[string]interface{}{ + "key_type": "any", + "use_csr_common_name": false, + "require_cn": false, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role ip-sans") + + directoryUrl = basePath + "/roles/ip-sans/acme/directory" + acmeOrderIdentifiers = []acme.AuthzID{ + {Type: "ip", Value: ipAddr}, + } + cr = &x509.CertificateRequest{ + IPAddresses: []net.IP{net.ParseIP(ipAddr)}, + } + + acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + + require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert") + require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String()) + require.Empty(t, acmeCert.DNSNames, "acme cert dns name field should have been empty") + require.Equal(t, "", acmeCert.Subject.CommonName) +} + +type acmeGoValidatorProvisionerFunc func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge + +func doAcmeValidationWithGoLibrary(t *testing.T, directoryUrl string, acmeOrderIdentifiers []acme.AuthzID, cr *x509.CertificateRequest, provisioningFunc acmeGoValidatorProvisionerFunc, expectedFailure string) *x509.Certificate { + logger := corehelpers.NewTestLogger(t) + + // Since we are contacting Vault through the host ip/port, the certificate will not validate properly + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + httpClient := &http.Client{Transport: tr} + + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa account key") + logger.Trace("Using the following url for the ACME directory", "url", directoryUrl) + acmeClient := &acme.Client{ + Key: accountKey, + HTTPClient: httpClient, + DirectoryURL: directoryUrl, + } + + testCtx, cancelFunc := 
context.WithTimeout(context.Background(), 2*time.Minute) + defer cancelFunc() + + // Create new account + _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, + func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an ACME order + order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers) + require.NoError(t, err, "failed creating ACME order") + + var auths []*acme.Authorization + for _, authUrl := range order.AuthzURLs { + authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + auths = append(auths, authorization) + } + + // Handle the validation using the external validation mechanism. + challengesToAccept := provisioningFunc(acmeClient, auths) + require.NotEmpty(t, challengesToAccept, "provisioning function failed to return any challenges to accept") + + // Tell the ACME server that it can now validate those challenges. + for _, challenge := range challengesToAccept { + _, err = acmeClient.Accept(testCtx, challenge) + require.NoError(t, err, "failed to accept challenge: %v", challenge) + } + + // Wait for the order/challenges to be validated. + _, err = acmeClient.WaitOrder(testCtx, order.URI) + require.NoError(t, err, "failed waiting for order to be ready") + + // Create/sign the CSR and ask the ACME server to sign it, returning the final certificate + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating csr key") + csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) + require.NoError(t, err, "failed generating csr") + + logger.Trace("[TEST-LOG] Created CSR", "csr", hex.EncodeToString(csr)) + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) + if err != nil { + if expectedFailure != "" { + require.Contains(t, err.Error(), expectedFailure, "got an unexpected failure not matching expected value") + return nil + } + + require.NoError(t, err, "failed to get a certificate back from ACME") + } else if expectedFailure != "" { + t.Fatalf("expected failure containing: %s got none", expectedFailure) + } + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + return acmeCert +} + +func SubtestACMEWildcardDNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-dns-wildcards") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + hostname := "go-lang-wildcard-client.dadgarcorp.com" + wildcard := "*." + hostname + + // Do validation without a role first. + directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + {Type: "dns", Value: wildcard}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: wildcard}, + DNSNames: []string{hostname, wildcard}, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver.
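As background on what DNS01ChallengeRecord computes in this provisioning step: per RFC 8555 §8.4 the TXT value is the base64url-encoded SHA-256 of the key authorization, i.e. the challenge token joined with the account key's JWK thumbprint. A rough sketch of just that derivation (x/crypto/acme performs the thumbprint computation internally):

```
import (
	"crypto/sha256"
	"encoding/base64"
)

// dns01Record derives the TXT record value for a dns-01 challenge.
func dns01Record(token, accountKeyThumbprint string) string {
	keyAuth := token + "." + accountKeyThumbprint
	sum := sha256.Sum256([]byte(keyAuth))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}
```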
+ var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("wildcard", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-wildcard-client.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role wildcard") + directoryUrl = basePath + "/roles/wildcard/acme/directory" + + acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) +} + +func SubtestACMEPreventsICADNS(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("pki-dns-ica") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + hostname := "go-lang-intermediate-ca-cert.dadgarcorp.com" + + // Do validation without a role first. + directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: hostname}, + DNSNames: []string{hostname}, + ExtraExtensions: []pkix.Extension{ + // Basic Constraint with IsCA asserted to true. + { + Id: certutil.ExtensionBasicConstraintsOID, + Critical: true, + Value: []byte{0x30, 0x03, 0x01, 0x01, 0xFF}, + }, + }, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver. 
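An aside on the ExtraExtensions value above: {0x30, 0x03, 0x01, 0x01, 0xFF} is just the DER encoding of SEQUENCE { BOOLEAN TRUE }, a Basic Constraints payload with cA asserted and pathLen omitted. encoding/asn1 reproduces the same bytes:

```
package main

import (
	"encoding/asn1"
	"fmt"
)

// basicConstraints mirrors the Basic Constraints ASN.1 SEQUENCE,
// with the optional pathLenConstraint field left out.
type basicConstraints struct {
	IsCA bool
}

func main() {
	der, err := asn1.Marshal(basicConstraints{IsCA: true})
	if err != nil {
		panic(err)
	}
	fmt.Printf("% X\n", der) // prints: 30 03 01 01 FF
}
```

The dns-01 provisioning that follows is the same flow as in the wildcard test.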
+ var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("ica", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-intermediate-ca-cert.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role wildcard") + directoryUrl = basePath + "/roles/ica/acme/directory" + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) +} + +// SubtestACMEStepDownNode Verify that we can properly run an ACME session through a +// secondary node, and midway through the challenge verification process, seal the +// active node and make sure we can complete the ACME session on the new active node. +func SubtestACMEStepDownNode(t *testing.T, cluster *VaultPkiCluster) { + logger := corehelpers.NewTestLogger(t) + + pki, err := cluster.CreateAcmeMount("stepdown-test") + require.NoError(t, err) + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. We also grab the non-active node here on purpose to verify + // ACME related APIs are properly forwarded across standby hosts. 
+ nonActiveNodes := pki.GetNonActiveNodes() + require.GreaterOrEqual(t, len(nonActiveNodes), 1, "Need at least one non-active node") + + nonActiveNode := nonActiveNodes[0] + + basePath := fmt.Sprintf("https://%s/v1/%s", nonActiveNode.HostPort, pki.mount) + err = pki.UpdateClusterConfig(map[string]interface{}{ + "path": basePath, + }) + + hostname := "go-lang-stepdown-client.dadgarcorp.com" + + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + DNSNames: []string{hostname, hostname}, + } + + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa account key") + + acmeClient := &acme.Client{ + Key: accountKey, + HTTPClient: &http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }}, + DirectoryURL: basePath + "/acme/directory", + } + + testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancelFunc() + + // Create new account + _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, + func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an ACME order + order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers) + require.NoError(t, err, "failed creating ACME order") + + require.Len(t, order.AuthzURLs, 1, "expected a single authz url") + authUrl := order.AuthzURLs[0] + + authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + + dnsTxtRecordsToAdd := map[string]string{} + + var challengesToAccept []*acme.Challenge + for _, challenge := range authorization.Challenges { + if challenge.Status != acme.StatusPending { + logger.Trace("ignoring challenge not in status pending", "challenge", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + // Collect the challenges for us to add the DNS records after step-down + dnsTxtRecordsToAdd["_acme-challenge."+authorization.Identifier.Value] = challengeBody + challengesToAccept = append(challengesToAccept, challenge) + } + } + + // Tell the ACME server, that they can now validate those challenges, this will cause challenge + // verification failures on the main node as the DNS records do not exist. + for _, challenge := range challengesToAccept { + _, err = acmeClient.Accept(testCtx, challenge) + require.NoError(t, err, "failed to accept challenge: %v", challenge) + } + + // Now wait till we start seeing the challenge engine start failing the lookups. 
+ testhelpers.RetryUntil(t, 10*time.Second, func() error { + myAuth, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + + for _, challenge := range myAuth.Challenges { + if challenge.Error != nil { + // The engine failed on one of the challenges, we are done waiting + return nil + } + } + + return fmt.Errorf("no challenges for auth %v contained any errors", myAuth.Identifier) + }) + + // Seal the active node now and wait for the next node to appear + previousActiveNode := pki.GetActiveClusterNode() + logger.Trace("Stepping down node", "node_id", previousActiveNode.NodeID) + + haStatus, _ := previousActiveNode.APIClient().Sys().HAStatus() + logger.Trace("HA Status", "node", previousActiveNode.NodeID, "ha_status", haStatus) + + testhelpers.RetryUntil(t, 2*time.Minute, func() error { + state, err := previousActiveNode.APIClient().Sys().RaftAutopilotState() + if err != nil { + return err + } + + logger.Trace("Raft AutoPilotState", "node", previousActiveNode.NodeID, "state", state) + if !state.Healthy { + return fmt.Errorf("raft auto pilot state is not healthy") + } + + // Make sure that we have at least one node that can take over prior to sealing the current active node. + if state.FailureTolerance < 1 { + msg := fmt.Sprintf("there is no fault tolerance within raft state yet: %d", state.FailureTolerance) + logger.Trace(msg) + return errors.New(msg) + } + + return nil + }) + + logger.Trace("Sealing active node") + err = previousActiveNode.APIClient().Sys().Seal() + require.NoError(t, err, "failed stepping down node") + + // Add our DNS records now + logger.Trace("Adding DNS records") + for dnsHost, dnsValue := range dnsTxtRecordsToAdd { + err = pki.AddDNSRecord(dnsHost, "TXT", dnsValue) + require.NoError(t, err, "failed adding DNS record: %s:%s", dnsHost, dnsValue) + } + + // Wait for our new active node to come up + testhelpers.RetryUntil(t, 2*time.Minute, func() error { + newNode := pki.GetActiveClusterNode() + if newNode.NodeID == previousActiveNode.NodeID { + return fmt.Errorf("existing node is still the leader after stepdown: %s", newNode.NodeID) + } + + logger.Trace("New active node", "node_id", newNode.NodeID) + return nil + }) + + // Wait for the order/challenges to be validated. 
+ _, err = acmeClient.WaitOrder(testCtx, order.URI) + if err != nil { + // We failed waiting for the order to become ready; print out the current challenge statuses to help debugging + myAuth, authErr := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, authErr, "failed to lookup authorization at url: %s and wait order failed with: %v", authUrl, err) + + logger.Trace("Authorization Status", "status", myAuth.Status) + for _, challenge := range myAuth.Challenges { + // Log each challenge's type, status, and error to aid debugging + logger.Trace("challenge", "type", challenge.Type, "status", challenge.Status, "error", challenge.Error) + } + + require.NoError(t, err, "failed waiting for order to be ready") + } + + // Create/sign the CSR and ask the ACME server to sign it, returning the final certificate + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating csr key") + csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false) + require.NoError(t, err, "failed to get a certificate back from ACME") + + _, err = x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") +} + +func getDockerLog(logger hclog.Logger) (func(s string), *pkiext.LogConsumerWriter, *pkiext.LogConsumerWriter) { + logConsumer := func(s string) { + logger.Trace(s) + } + + logStdout := &pkiext.LogConsumerWriter{logConsumer} + logStderr := &pkiext.LogConsumerWriter{logConsumer} + return logConsumer, logStdout, logStderr +} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_cluster.go b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go new file mode 100644 index 000000000000..4462f5103879 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go @@ -0,0 +1,316 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pkiext_binary + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + dockhelper "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/sdk/helper/testcluster/docker" +) + +type VaultPkiCluster struct { + cluster *docker.DockerCluster + Dns *dnstest.TestServer +} + +func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster { + binary := os.Getenv("VAULT_BINARY") + if binary == "" { + t.Skip("only running docker test when $VAULT_BINARY present") + } + + opts := &docker.DockerClusterOptions{ + ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault", + // We're replacing the binary anyway, so we're not too particular about + // the docker image version tag.
+ ImageTag: "latest", + VaultBinary: binary, + ClusterOptions: testcluster.ClusterOptions{ + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + NumCores: 3, + }, + } + + cluster := docker.NewTestDockerCluster(t, opts) + + return &VaultPkiCluster{cluster: cluster} +} + +func NewVaultPkiClusterWithDNS(t *testing.T) *VaultPkiCluster { + cluster := NewVaultPkiCluster(t) + dns := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", cluster.GetContainerNetworkName()) + cluster.Dns = dns + return cluster +} + +func (vpc *VaultPkiCluster) Cleanup() { + vpc.cluster.Cleanup() + if vpc.Dns != nil { + vpc.Dns.Cleanup() + } +} + +func (vpc *VaultPkiCluster) GetActiveClusterNode() *docker.DockerClusterNode { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + node, err := testcluster.WaitForActiveNode(ctx, vpc.cluster) + if err != nil { + panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) + } + + return vpc.cluster.ClusterNodes[node] +} + +func (vpc *VaultPkiCluster) GetNonActiveNodes() []*docker.DockerClusterNode { + nodes := []*docker.DockerClusterNode{} + for _, node := range vpc.cluster.ClusterNodes { + leader, err := node.APIClient().Sys().Leader() + if err != nil { + continue + } + + if !leader.IsSelf { + nodes = append(nodes, node) + } + } + + return nodes +} + +func (vpc *VaultPkiCluster) GetActiveContainerHostPort() string { + return vpc.GetActiveClusterNode().HostPort +} + +func (vpc *VaultPkiCluster) GetContainerNetworkName() string { + return vpc.cluster.ClusterNodes[0].ContainerNetworkName +} + +func (vpc *VaultPkiCluster) GetActiveContainerIP() string { + return vpc.GetActiveClusterNode().ContainerIPAddress +} + +func (vpc *VaultPkiCluster) GetActiveContainerID() string { + return vpc.GetActiveClusterNode().Container.ID +} + +func (vpc *VaultPkiCluster) GetActiveNode() *api.Client { + return vpc.GetActiveClusterNode().APIClient() +} + +// GetListenerCACertPEM returns the Vault cluster's PEM-encoded CA certificate. 
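Typical consumption of this PEM, as in the Caddy subtest that copies it into the container, is to seed a certificate pool for TLS verification; a quick sketch:

```
pool := x509.NewCertPool()
if !pool.AppendCertsFromPEM(cluster.GetListenerCACertPEM()) {
	t.Fatal("failed to parse listener CA PEM")
}
tlsCfg := &tls.Config{RootCAs: pool} // trust only the cluster's listener CA
```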
+func (vpc *VaultPkiCluster) GetListenerCACertPEM() []byte { + return vpc.cluster.CACertPEM +} + +func (vpc *VaultPkiCluster) AddHostname(hostname, ip string) error { + if vpc.Dns != nil { + vpc.Dns.AddRecord(hostname, "A", ip) + vpc.Dns.PushConfig() + return nil + } else { + return vpc.AddNameToHostFiles(hostname, ip) + } +} + +func (vpc *VaultPkiCluster) AddNameToHostFiles(hostname, ip string) error { + updateHostsCmd := []string{ + "sh", "-c", + "echo '" + ip + " " + hostname + "' >> /etc/hosts", + } + for _, node := range vpc.cluster.ClusterNodes { + containerID := node.Container.ID + _, _, retcode, err := dockhelper.RunCmdWithOutput(vpc.cluster.DockerAPI, context.Background(), containerID, updateHostsCmd) + if err != nil { + return fmt.Errorf("failed updating container %s host file: %w", containerID, err) + } + + if retcode != 0 { + return fmt.Errorf("expected zero retcode from updating vault host file in container %s got: %d", containerID, retcode) + } + } + + return nil +} + +func (vpc *VaultPkiCluster) AddDNSRecord(hostname, recordType, ip string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to provision custom records") + } + + vpc.Dns.AddRecord(hostname, recordType, ip) + vpc.Dns.PushConfig() + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecord(domain string, record string, value string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove specific record") + } + + vpc.Dns.RemoveRecord(domain, record, value) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsOfTypeForDomain(domain string, record string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records of type") + } + + vpc.Dns.RemoveRecordsOfTypeForDomain(domain, record) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsForDomain(domain string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove records for domain") + } + + vpc.Dns.RemoveRecordsForDomain(domain) + return nil +} + +func (vpc *VaultPkiCluster) RemoveAllDNSRecords() error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records") + } + + vpc.Dns.RemoveAllRecords() + return nil +} + +func (vpc *VaultPkiCluster) CreateMount(name string) (*VaultPkiMount, error) { + err := vpc.GetActiveNode().Sys().Mount(name, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + AllowedResponseHeaders: []string{ + "Last-Modified", "Replay-Nonce", + "Link", "Location", + }, + }, + }) + if err != nil { + return nil, err + } + + return &VaultPkiMount{ + vpc, + name, + }, nil +} + +func (vpc *VaultPkiCluster) CreateAcmeMount(mountName string) (*VaultPkiMount, error) { + pki, err := vpc.CreateMount(mountName) + if err != nil { + return nil, fmt.Errorf("failed creating mount %s: %w", mountName, err) + } + + err = pki.UpdateClusterConfig(nil) + if err != nil { + return nil, fmt.Errorf("failed updating cluster config: %w", err) + } + + cfg := map[string]interface{}{ + "eab_policy": "not-required", + } + if vpc.Dns != nil { + cfg["dns_resolver"] = vpc.Dns.GetRemoteAddr() + } + + err = pki.UpdateAcmeConfig(true, cfg) + if err != nil { + return nil, fmt.Errorf("failed updating acme config: %w", err) + } + + // Setup root+intermediate CA hierarchy within 
this mount. + resp, err := pki.GenerateRootInternal(map[string]interface{}{ + "common_name": "Root X1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + "issuer_name": "root", + }) + if err != nil { + return nil, fmt.Errorf("failed generating root internal: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating root internal: nil or empty response but no error") + } + + resp, err = pki.GenerateIntermediateInternal(map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + }) + if err != nil { + return nil, fmt.Errorf("failed generating int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating int csr: nil or empty response but no error") + } + + resp, err = pki.SignIntermediary("default", resp.Data["csr"], map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "csr": resp.Data["csr"], + }) + if err != nil { + return nil, fmt.Errorf("failed signing int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed signing int csr: nil or empty response but no error") + } + intCert := resp.Data["certificate"].(string) + + resp, err = pki.ImportBundle(intCert, nil) + if err != nil { + return nil, fmt.Errorf("failed importing signed cert: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed importing signed cert: nil or empty response but no error") + } + + err = pki.UpdateDefaultIssuer(resp.Data["imported_issuers"].([]interface{})[0].(string), nil) + if err != nil { + return nil, fmt.Errorf("failed to set intermediate as default: %w", err) + } + + err = pki.UpdateIssuer("default", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update intermediate ttl behavior: %w", err) + } + + err = pki.UpdateIssuer("root", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update root ttl behavior: %w", err) + } + + return pki, nil +} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_mount.go b/builtin/logical/pkiext/pkiext_binary/pki_mount.go new file mode 100644 index 000000000000..15ce16b2a3c4 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/pki_mount.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package pkiext_binary + +import ( + "context" + "encoding/base64" + "fmt" + "path" + + "github.com/hashicorp/vault/api" +) + +type VaultPkiMount struct { + *VaultPkiCluster + mount string +} + +func (vpm *VaultPkiMount) UpdateClusterConfig(config map[string]interface{}) error { + defaultPath := "https://" + vpm.cluster.ClusterNodes[0].ContainerIPAddress + ":8200/v1/" + vpm.mount + defaults := map[string]interface{}{ + "path": defaultPath, + "aia_path": defaultPath, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/cluster", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) UpdateClusterConfigLocalAddr() (string, error) { + basePath := fmt.Sprintf("https://%s/v1/%s", vpm.GetActiveContainerHostPort(), vpm.mount) + return basePath, vpm.UpdateClusterConfig(map[string]interface{}{ + "path": basePath, + }) +} + +func (vpm *VaultPkiMount) UpdateAcmeConfig(enable bool, config map[string]interface{}) error { + defaults := map[string]interface{}{ + "enabled": enable, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/acme", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) GenerateRootInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "root-test.com", + "key_type": "ec", + "issuer_name": "root", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/root/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) GenerateIntermediateInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "intermediary-test.com", + "key_type": "ec", + "issuer_name": "intermediary", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/intermediate/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) SignIntermediary(signingIssuer string, csr interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "csr": csr, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuer/"+signingIssuer+"/sign-intermediate", + mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) ImportBundle(pemBundle interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "pem_bundle": pemBundle, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuers/import/bundle", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) UpdateDefaultIssuer(issuerId string, props map[string]interface{}) error { + defaults := map[string]interface{}{ + "default": issuerId, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/issuers", mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateIssuer(issuerRef string, props map[string]interface{}) error { + defaults := map[string]interface{}{} + + _, err := vpm.GetActiveNode().Logical().JSONMergePatch(context.Background(), + vpm.mount+"/issuer/"+issuerRef, mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateRole(roleName string, config map[string]interface{}) error { + defaults := 
map[string]interface{}{}
+
+	_, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(),
+		vpm.mount+"/roles/"+roleName, mergeWithDefaults(config, defaults))
+
+	return err
+}
+
+func (vpm *VaultPkiMount) GetEabKey(acmeDirectory string) (string, string, error) {
+	eabPath := path.Join(vpm.mount, acmeDirectory, "/new-eab")
+	resp, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), eabPath, map[string]interface{}{})
+	if err != nil {
+		return "", "", fmt.Errorf("failed fetching eab from %s: %w", eabPath, err)
+	}
+	eabId := resp.Data["id"].(string)
+	base64EabKey := resp.Data["key"].(string)
+	// Sanity-check that the server returned a valid base64 key; we still hand
+	// the base64 form back to the caller.
+	_, err = base64.RawURLEncoding.DecodeString(base64EabKey)
+	if err != nil {
+		return "", "", fmt.Errorf("failed decoding key response field: %s: %w", base64EabKey, err)
+	}
+	return eabId, base64EabKey, nil
+}
+
+// GetCACertPEM retrieves the PKI mount's PEM-encoded CA certificate.
+func (vpm *VaultPkiMount) GetCACertPEM() (string, error) {
+	caCertPath := path.Join(vpm.mount, "/cert/ca")
+	resp, err := vpm.GetActiveNode().Logical().ReadWithContext(context.Background(), caCertPath)
+	if err != nil {
+		return "", err
+	}
+	return resp.Data["certificate"].(string), nil
+}
+
+func mergeWithDefaults(config map[string]interface{}, defaults map[string]interface{}) map[string]interface{} {
+	myConfig := config
+	if myConfig == nil {
+		myConfig = map[string]interface{}{}
+	}
+	// Values explicitly set by the caller win; only missing keys fall back to
+	// the provided defaults.
+	for key, value := range defaults {
+		if _, exists := myConfig[key]; !exists {
+			myConfig[key] = value
+		}
+	}
+
+	return myConfig
+}
diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json
new file mode 100644
index 000000000000..272ecd102575
--- /dev/null
+++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json
@@ -0,0 +1,66 @@
+{
+  "apps": {
+    "http": {
+      "servers": {
+        "srv0": {
+          "listen": [
+            ":80",
+            ":443"
+          ],
+          "routes": [
+            {
+              "match": [
+                {
+                  "host": [
+                    "{{.Hostname}}"
+                  ]
+                }
+              ],
+              "handle": [
+                {
+                  "handler": "subroute",
+                  "routes": [
+                    {
+                      "handle": [
+                        {
+                          "body": "Hello!",
+                          "handler": "static_response"
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ],
+              "terminal": true
+            }
+          ]
+        }
+      }
+    },
+    "tls": {
+      "automation": {
+        "policies": [
+          {
+            "subjects": [
+              "{{.Hostname}}"
+            ],
+            "issuers": [
+              {
+                "ca": "{{.Directory}}",
+                "module": "acme",
+                "challenges": {
+                  "tls-alpn": {
+                    "disabled": true
+                  }
+                },
+                "trusted_roots_pem_files": [
+                  "{{.CACert}}"
+                ]
+              }
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json
new file mode 100644
index 000000000000..61cab8894958
--- /dev/null
+++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json
@@ -0,0 +1,70 @@
+{
+  "apps": {
+    "http": {
+      "servers": {
+        "srv0": {
+          "listen": [
+            ":80",
+            ":443"
+          ],
+          "routes": [
+            {
+              "match": [
+                {
+                  "host": [
+                    "{{.Hostname}}"
+                  ]
+                }
+              ],
+              "handle": [
+                {
+                  "handler": "subroute",
+                  "routes": [
+                    {
+                      "handle": [
+                        {
+                          "body": "Hello!",
+                          "handler": "static_response"
+                        }
+                      ]
+                    }
+                  ]
+                }
+              ],
+              "terminal": true
+            }
+          ]
+        }
+      }
+    },
+    "tls": {
+      "automation": {
+        "policies": [
+          {
+            "subjects": [
+              "{{.Hostname}}"
+            ],
+            "issuers": [
+              {
+                "ca": "{{.Directory}}",
+                "module": "acme",
+                "external_account": {
+                  "key_id": "{{.EABID}}",
+ "mac_key": "{{.EABKey}}" + }, + "challenges": { + "tls-alpn": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json new file mode 100644 index 000000000000..0bc0ea9112e8 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json @@ -0,0 +1,66 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "challenges": { + "http": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/test_helpers.go b/builtin/logical/pkiext/test_helpers.go index 942c37a4a381..7f6abe36c10b 100644 --- a/builtin/logical/pkiext/test_helpers.go +++ b/builtin/logical/pkiext/test_helpers.go @@ -1,6 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pkiext import ( + "bufio" + "bytes" "crypto" "crypto/x509" "encoding/pem" @@ -63,3 +68,19 @@ func parseKey(t *testing.T, pemKey string) crypto.Signer { require.NoError(t, err) return key } + +type LogConsumerWriter struct { + Consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. + scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.Consumer(scanner.Text()) + } + return len(p), nil +} diff --git a/builtin/logical/pkiext/zlint_test.go b/builtin/logical/pkiext/zlint_test.go index bf0d1e636b53..206d23cb03a5 100644 --- a/builtin/logical/pkiext/zlint_test.go +++ b/builtin/logical/pkiext/zlint_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pkiext import ( @@ -7,8 +10,7 @@ import ( "testing" "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/helper/testhelpers/docker" - + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/stretchr/testify/require" ) @@ -64,11 +66,12 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { buildZLintContainer(t) }) + ctx := context.Background() // We don't actually care about the address, we just want to start the // container so we can run commands in it. We'd ideally like to skip this // step and only build a new image, but the zlint output would be // intermingled with container build stages, so its not that useful. - ctr, _, _, err := zRunner.Start(context.Background(), true, false) + result, err := zRunner.Start(ctx, true, false) if err != nil { t.Fatalf("Could not start golang container for zlint: %s", err) } @@ -76,13 +79,13 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { // Copy the cert into the newly running container. 
certCtx := docker.NewBuildContext() certCtx["cert.pem"] = docker.PathContentsFromBytes([]byte(certificate)) - if err := zRunner.CopyTo(ctr.ID, "/go/", certCtx); err != nil { + if err := zRunner.CopyTo(result.Container.ID, "/go/", certCtx); err != nil { t.Fatalf("Could not copy certificate into container: %v", err) } // Run the zlint command and save the output. cmd := []string{"/go/bin/zlint", "/go/cert.pem"} - stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(context.Background(), ctr.ID, cmd) + stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) if err != nil { t.Fatalf("Could not run command in container: %v", err) } @@ -97,7 +100,7 @@ func RunZLintContainer(t *testing.T, certificate string) []byte { } // Clean up after ourselves. - if err := zRunner.Stop(context.Background(), ctr.ID); err != nil { + if err := zRunner.Stop(context.Background(), result.Container.ID); err != nil { t.Fatalf("failed to stop container: %v", err) } diff --git a/builtin/logical/postgresql/backend.go b/builtin/logical/postgresql/backend.go deleted file mode 100644 index 8763cbda4ef3..000000000000 --- a/builtin/logical/postgresql/backend.go +++ /dev/null @@ -1,171 +0,0 @@ -package postgresql - -import ( - "context" - "database/sql" - "fmt" - "strings" - "sync" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend(conf) - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend(conf *logical.BackendConfig) *backend { - var b backend - b.Backend = &framework.Backend{ - Help: strings.TrimSpace(backendHelp), - - PathsSpecial: &logical.Paths{ - SealWrapStorage: []string{ - "config/connection", - }, - }, - - Paths: []*framework.Path{ - pathConfigConnection(&b), - pathConfigLease(&b), - pathListRoles(&b), - pathRoles(&b), - pathRoleCreate(&b), - }, - - Secrets: []*framework.Secret{ - secretCreds(&b), - }, - - Clean: b.ResetDB, - Invalidate: b.invalidate, - BackendType: logical.TypeLogical, - } - - b.logger = conf.Logger - return &b -} - -type backend struct { - *framework.Backend - - db *sql.DB - lock sync.Mutex - - logger log.Logger -} - -// DB returns the database connection. -func (b *backend) DB(ctx context.Context, s logical.Storage) (*sql.DB, error) { - b.logger.Debug("postgres/db: enter") - defer b.logger.Debug("postgres/db: exit") - - b.lock.Lock() - defer b.lock.Unlock() - - // If we already have a DB, we got it! 
- if b.db != nil { - if err := b.db.Ping(); err == nil { - return b.db, nil - } - // If the ping was unsuccessful, close it and ignore errors as we'll be - // reestablishing anyways - b.db.Close() - } - - // Otherwise, attempt to make connection - entry, err := s.Get(ctx, "config/connection") - if err != nil { - return nil, err - } - if entry == nil { - return nil, - fmt.Errorf("configure the DB connection with config/connection first") - } - - var connConfig connectionConfig - if err := entry.DecodeJSON(&connConfig); err != nil { - return nil, err - } - - conn := connConfig.ConnectionURL - if len(conn) == 0 { - conn = connConfig.ConnectionString - } - - // Ensure timezone is set to UTC for all the connections - if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { - if strings.Contains(conn, "?") { - conn += "&timezone=utc" - } else { - conn += "?timezone=utc" - } - } else { - conn += "&timezone=utc" - } - - b.db, err = sql.Open("pgx", conn) - if err != nil { - return nil, err - } - - // Set some connection pool settings. We don't need much of this, - // since the request rate shouldn't be high. - b.db.SetMaxOpenConns(connConfig.MaxOpenConnections) - b.db.SetMaxIdleConns(connConfig.MaxIdleConnections) - - return b.db, nil -} - -// ResetDB forces a connection next time DB() is called. -func (b *backend) ResetDB(_ context.Context) { - b.logger.Debug("postgres/db: enter") - defer b.logger.Debug("postgres/db: exit") - - b.lock.Lock() - defer b.lock.Unlock() - - if b.db != nil { - b.db.Close() - } - - b.db = nil -} - -func (b *backend) invalidate(ctx context.Context, key string) { - switch key { - case "config/connection": - b.ResetDB(ctx) - } -} - -// Lease returns the lease information -func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { - entry, err := s.Get(ctx, "config/lease") - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result configLease - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -const backendHelp = ` -The PostgreSQL backend dynamically generates database users. - -After mounting this backend, configure it using the endpoints within -the "config/" path. 
-` diff --git a/builtin/logical/postgresql/backend_test.go b/builtin/logical/postgresql/backend_test.go deleted file mode 100644 index 0c1dd46111d0..000000000000 --- a/builtin/logical/postgresql/backend_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package postgresql - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "log" - "path" - "reflect" - "testing" - - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -func TestBackend_config_connection(t *testing.T) { - var resp *logical.Response - var err error - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - configData := map[string]interface{}{ - "connection_url": "sample_connection_url", - "max_open_connections": 9, - "max_idle_connections": 7, - "verify_connection": false, - } - - configReq := &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Storage: config.StorageView, - Data: configData, - } - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - configReq.Operation = logical.ReadOperation - resp, err = b.HandleRequest(context.Background(), configReq) - if err != nil || (resp != nil && resp.IsError()) { - t.Fatalf("err:%s resp:%#v\n", err, resp) - } - - delete(configData, "verify_connection") - delete(configData, "connection_url") - if !reflect.DeepEqual(configData, resp.Data) { - t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data) - } -} - -func TestBackend_basic(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepCreateRole(t, "web", testRole, false), - testAccStepReadCreds(t, b, config.StorageView, "web", connURL), - }, - }) -} - -func TestBackend_roleCrud(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepCreateRole(t, "web", testRole, false), - testAccStepReadRole(t, "web", testRole), - testAccStepDeleteRole(t, "web"), - testAccStepReadRole(t, "web", ""), - }, - }) -} - -func TestBackend_BlockStatements(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - jsonBlockStatement, err := 
json.Marshal(testBlockStatementRoleSlice) - if err != nil { - t.Fatal(err) - } - - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - // This will also validate the query - testAccStepCreateRole(t, "web-block", testBlockStatementRole, true), - testAccStepCreateRole(t, "web-block", string(jsonBlockStatement), false), - }, - }) -} - -func TestBackend_roleReadOnly(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepCreateRole(t, "web", testRole, false), - testAccStepCreateRole(t, "web-readonly", testReadOnlyRole, false), - testAccStepReadRole(t, "web-readonly", testReadOnlyRole), - testAccStepCreateTable(t, b, config.StorageView, "web", connURL), - testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL), - testAccStepDropTable(t, b, config.StorageView, "web", connURL), - testAccStepDeleteRole(t, "web-readonly"), - testAccStepDeleteRole(t, "web"), - testAccStepReadRole(t, "web-readonly", ""), - }, - }) -} - -func TestBackend_roleReadOnly_revocationSQL(t *testing.T) { - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - b, err := Factory(context.Background(), config) - if err != nil { - t.Fatal(err) - } - - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") - defer cleanup() - - connData := map[string]interface{}{ - "connection_url": connURL, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{ - testAccStepConfig(t, connData, false), - testAccStepCreateRoleWithRevocationSQL(t, "web", testRole, defaultRevocationSQL, false), - testAccStepCreateRoleWithRevocationSQL(t, "web-readonly", testReadOnlyRole, defaultRevocationSQL, false), - testAccStepReadRole(t, "web-readonly", testReadOnlyRole), - testAccStepCreateTable(t, b, config.StorageView, "web", connURL), - testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL), - testAccStepDropTable(t, b, config.StorageView, "web", connURL), - testAccStepDeleteRole(t, "web-readonly"), - testAccStepDeleteRole(t, "web"), - testAccStepReadRole(t, "web-readonly", ""), - }, - }) -} - -func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "config/connection", - Data: d, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if expectError { - if resp.Data == nil { - return fmt.Errorf("data is nil") - } - var e struct { - Error string `mapstructure:"error"` - } - if err := mapstructure.Decode(resp.Data, &e); err != nil { - return err - } - if len(e.Error) == 0 { - return fmt.Errorf("expected error, but write succeeded") - } - return nil - } else if resp != nil && resp.IsError() { - return fmt.Errorf("got an error response: %v", resp.Error()) - } - return nil - }, - } -} - -func testAccStepCreateRole(t *testing.T, name string, sql string, expectFail bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: path.Join("roles", name), 
- Data: map[string]interface{}{ - "sql": sql, - }, - ErrorOk: expectFail, - } -} - -func testAccStepCreateRoleWithRevocationSQL(t *testing.T, name, sql, revocationSQL string, expectFail bool) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: path.Join("roles", name), - Data: map[string]interface{}{ - "sql": sql, - "revocation_sql": revocationSQL, - }, - ErrorOk: expectFail, - } -} - -func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: path.Join("roles", name), - } -} - -func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: path.Join("creds", name), - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[TRACE] Generated credentials: %v", d) - - db, err := sql.Open("pgx", connURL+"&timezone=utc") - if err != nil { - t.Fatal(err) - } - - returnedRows := func() int { - stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');") - if err != nil { - return -1 - } - defer stmt.Close() - - rows, err := stmt.Query(d.Username) - if err != nil { - return -1 - } - defer rows.Close() - - i := 0 - for rows.Next() { - i++ - } - return i - } - - // minNumPermissions is the minimum number of permissions that will always be present. - const minNumPermissions = 2 - - userRows := returnedRows() - if userRows < minNumPermissions { - t.Fatalf("did not get expected number of rows, got %d", userRows) - } - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.RevokeOperation, - Storage: s, - Secret: &logical.Secret{ - InternalData: map[string]interface{}{ - "secret_type": "creds", - "username": d.Username, - "role": name, - }, - }, - }) - if err != nil { - return err - } - if resp != nil { - if resp.IsError() { - return fmt.Errorf("error on resp: %#v", *resp) - } - } - - userRows = returnedRows() - // User shouldn't exist so returnedRows() should encounter an error and exit with -1 - if userRows != -1 { - t.Fatalf("did not get expected number of rows, got %d", userRows) - } - - return nil - }, - } -} - -func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: path.Join("creds", name), - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[TRACE] Generated credentials: %v", d) - - db, err := sql.Open("pgx", connURL+"&timezone=utc") - if err != nil { - t.Fatal(err) - } - - _, err = db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);") - if err != nil { - t.Fatal(err) - } - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.RevokeOperation, - Storage: s, - Secret: &logical.Secret{ - InternalData: map[string]interface{}{ - "secret_type": "creds", - "username": d.Username, - }, - }, - }) - if err != nil { - return err - } - if 
resp != nil { - if resp.IsError() { - return fmt.Errorf("error on resp: %#v", *resp) - } - } - - return nil - }, - } -} - -func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: path.Join("creds", name), - Check: func(resp *logical.Response) error { - var d struct { - Username string `mapstructure:"username"` - Password string `mapstructure:"password"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - log.Printf("[TRACE] Generated credentials: %v", d) - - db, err := sql.Open("pgx", connURL+"&timezone=utc") - if err != nil { - t.Fatal(err) - } - - _, err = db.Exec("DROP TABLE test;") - if err != nil { - t.Fatal(err) - } - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.RevokeOperation, - Storage: s, - Secret: &logical.Secret{ - InternalData: map[string]interface{}{ - "secret_type": "creds", - "username": d.Username, - }, - }, - }) - if err != nil { - return err - } - if resp != nil { - if resp.IsError() { - return fmt.Errorf("error on resp: %#v", *resp) - } - } - - return nil - }, - } -} - -func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.ReadOperation, - Path: "roles/" + name, - Check: func(resp *logical.Response) error { - if resp == nil { - if sql == "" { - return nil - } - - return fmt.Errorf("bad: %#v", resp) - } - - var d struct { - SQL string `mapstructure:"sql"` - } - if err := mapstructure.Decode(resp.Data, &d); err != nil { - return err - } - - if d.SQL != sql { - return fmt.Errorf("bad: %#v", resp) - } - - return nil - }, - } -} - -const testRole = ` -CREATE ROLE "{{name}}" WITH - LOGIN - PASSWORD '{{password}}' - VALID UNTIL '{{expiration}}'; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; -` - -const testReadOnlyRole = ` -CREATE ROLE "{{name}}" WITH - LOGIN - PASSWORD '{{password}}' - VALID UNTIL '{{expiration}}'; -GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}"; -GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}"; -` - -const testBlockStatementRole = ` -DO $$ -BEGIN - IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN - CREATE ROLE "foo-role"; - CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; - ALTER ROLE "foo-role" SET search_path = foo; - GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; - GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; - GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; - GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; - END IF; -END -$$ - -CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; -GRANT "foo-role" TO "{{name}}"; -ALTER ROLE "{{name}}" SET search_path = foo; -GRANT CONNECT ON DATABASE "postgres" TO "{{name}}"; -` - -var testBlockStatementRoleSlice = []string{ - ` -DO $$ -BEGIN - IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN - CREATE ROLE "foo-role"; - CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; - ALTER ROLE "foo-role" SET search_path = foo; - GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; - GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; - GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO 
"foo-role"; - GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; - END IF; -END -$$ -`, - `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`, - `GRANT "foo-role" TO "{{name}}";`, - `ALTER ROLE "{{name}}" SET search_path = foo;`, - `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`, -} - -const defaultRevocationSQL = ` -REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; -REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; -REVOKE USAGE ON SCHEMA public FROM {{name}}; - -DROP ROLE IF EXISTS {{name}}; -` diff --git a/builtin/logical/postgresql/cmd/postgresql/main.go b/builtin/logical/postgresql/cmd/postgresql/main.go deleted file mode 100644 index 6610b975769d..000000000000 --- a/builtin/logical/postgresql/cmd/postgresql/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "os" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/logical/postgresql" - "github.com/hashicorp/vault/sdk/plugin" -) - -func main() { - apiClientMeta := &api.PluginAPIClientMeta{} - flags := apiClientMeta.FlagSet() - flags.Parse(os.Args[1:]) - - tlsConfig := apiClientMeta.GetTLSConfig() - tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - - if err := plugin.Serve(&plugin.ServeOpts{ - BackendFactoryFunc: postgresql.Factory, - TLSProviderFunc: tlsProviderFunc, - }); err != nil { - logger := hclog.New(&hclog.LoggerOptions{}) - - logger.Error("plugin shutting down", "error", err) - os.Exit(1) - } -} diff --git a/builtin/logical/postgresql/path_config_connection.go b/builtin/logical/postgresql/path_config_connection.go deleted file mode 100644 index 6f7b0f719e34..000000000000 --- a/builtin/logical/postgresql/path_config_connection.go +++ /dev/null @@ -1,168 +0,0 @@ -package postgresql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - _ "github.com/jackc/pgx/v4/stdlib" -) - -func pathConfigConnection(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/connection", - Fields: map[string]*framework.FieldSchema{ - "connection_url": { - Type: framework.TypeString, - Description: "DB connection string", - }, - - "value": { - Type: framework.TypeString, - Description: `DB connection string. Use 'connection_url' instead. -This will be deprecated.`, - }, - - "verify_connection": { - Type: framework.TypeBool, - Default: true, - Description: `If set, connection_url is verified by actually connecting to the database`, - }, - - "max_open_connections": { - Type: framework.TypeInt, - Description: `Maximum number of open connections to the database; -a zero uses the default value of two and a -negative value means unlimited`, - }, - - "max_idle_connections": { - Type: framework.TypeInt, - Description: `Maximum number of idle connections to the database; -a zero uses the value of max_open_connections -and a negative value disables idle connections. 
-If larger than max_open_connections it will be -reduced to the same size.`, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConnectionWrite, - logical.ReadOperation: b.pathConnectionRead, - }, - - HelpSynopsis: pathConfigConnectionHelpSyn, - HelpDescription: pathConfigConnectionHelpDesc, - } -} - -// pathConnectionRead reads out the connection configuration -func (b *backend) pathConnectionRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - entry, err := req.Storage.Get(ctx, "config/connection") - if err != nil { - return nil, fmt.Errorf("failed to read connection configuration") - } - if entry == nil { - return nil, nil - } - - var config connectionConfig - if err := entry.DecodeJSON(&config); err != nil { - return nil, err - } - - return &logical.Response{ - Data: map[string]interface{}{ - "max_open_connections": config.MaxOpenConnections, - "max_idle_connections": config.MaxIdleConnections, - }, - }, nil -} - -func (b *backend) pathConnectionWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - connValue := data.Get("value").(string) - connURL := data.Get("connection_url").(string) - if connURL == "" { - if connValue == "" { - return logical.ErrorResponse("connection_url parameter must be supplied"), nil - } else { - connURL = connValue - } - } - - maxOpenConns := data.Get("max_open_connections").(int) - if maxOpenConns == 0 { - maxOpenConns = 2 - } - - maxIdleConns := data.Get("max_idle_connections").(int) - if maxIdleConns == 0 { - maxIdleConns = maxOpenConns - } - if maxIdleConns > maxOpenConns { - maxIdleConns = maxOpenConns - } - - // Don't check the connection_url if verification is disabled - verifyConnection := data.Get("verify_connection").(bool) - if verifyConnection { - // Verify the string - db, err := sql.Open("pgx", connURL) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - defer db.Close() - if err := db.Ping(); err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error validating connection info: %s", err)), nil - } - } - - // Store it - entry, err := logical.StorageEntryJSON("config/connection", connectionConfig{ - ConnectionString: connValue, - ConnectionURL: connURL, - MaxOpenConnections: maxOpenConns, - MaxIdleConnections: maxIdleConns, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - // Reset the DB connection - b.ResetDB(ctx) - - resp := &logical.Response{} - resp.AddWarning("Read access to this endpoint should be controlled via ACLs as it will return the connection string or URL as it is, including passwords, if any.") - - return resp, nil -} - -type connectionConfig struct { - ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"` - // Deprecate "value" in coming releases - ConnectionString string `json:"value" structs:"value" mapstructure:"value"` - MaxOpenConnections int `json:"max_open_connections" structs:"max_open_connections" mapstructure:"max_open_connections"` - MaxIdleConnections int `json:"max_idle_connections" structs:"max_idle_connections" mapstructure:"max_idle_connections"` -} - -const pathConfigConnectionHelpSyn = ` -Configure the connection string to talk to PostgreSQL. -` - -const pathConfigConnectionHelpDesc = ` -This path configures the connection string used to connect to PostgreSQL. 
-The value of the string can be a URL, or a PG style string in the -format of "user=foo host=bar" etc. - -The URL looks like: -"postgresql://user:pass@host:port/dbname" - -When configuring the connection string, the backend will verify its validity. -` diff --git a/builtin/logical/postgresql/path_config_lease.go b/builtin/logical/postgresql/path_config_lease.go deleted file mode 100644 index 1a8605926022..000000000000 --- a/builtin/logical/postgresql/path_config_lease.go +++ /dev/null @@ -1,101 +0,0 @@ -package postgresql - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathConfigLease(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "config/lease", - Fields: map[string]*framework.FieldSchema{ - "lease": { - Type: framework.TypeString, - Description: "Default lease for roles.", - }, - - "lease_max": { - Type: framework.TypeString, - Description: "Maximum time a credential is valid for.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseWrite, - }, - - HelpSynopsis: pathConfigLeaseHelpSyn, - HelpDescription: pathConfigLeaseHelpDesc, - } -} - -func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - leaseRaw := d.Get("lease").(string) - leaseMaxRaw := d.Get("lease_max").(string) - - lease, err := time.ParseDuration(leaseRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid lease: %s", err)), nil - } - leaseMax, err := time.ParseDuration(leaseMaxRaw) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Invalid lease: %s", err)), nil - } - - // Store it - entry, err := logical.StorageEntryJSON("config/lease", &configLease{ - Lease: lease, - LeaseMax: leaseMax, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - if lease == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "lease": lease.Lease.String(), - "lease_max": lease.LeaseMax.String(), - }, - }, nil -} - -type configLease struct { - Lease time.Duration - LeaseMax time.Duration -} - -const pathConfigLeaseHelpSyn = ` -Configure the default lease information for generated credentials. -` - -const pathConfigLeaseHelpDesc = ` -This configures the default lease information used for credentials -generated by this backend. The lease specifies the duration that a -credential will be valid for, as well as the maximum session for -a set of credentials. - -The format for the lease is "1h" or integer and then unit. The longest -unit is hour. 
-` diff --git a/builtin/logical/postgresql/path_role_create.go b/builtin/logical/postgresql/path_role_create.go deleted file mode 100644 index 18162db1733a..000000000000 --- a/builtin/logical/postgresql/path_role_create.go +++ /dev/null @@ -1,149 +0,0 @@ -package postgresql - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/strutil" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/logical" - _ "github.com/jackc/pgx/v4/stdlib" -) - -func pathRoleCreate(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "creds/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleCreateRead, - }, - - HelpSynopsis: pathRoleCreateReadHelpSyn, - HelpDescription: pathRoleCreateReadHelpDesc, - } -} - -func (b *backend) pathRoleCreateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - - // Get the role - role, err := b.Role(ctx, req.Storage, name) - if err != nil { - return nil, err - } - if role == nil { - return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil - } - - // Determine if we have a lease - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - // Unlike some other backends we need a lease here (can't leave as 0 and - // let core fill it in) because Postgres also expires users as a safety - // measure, so cannot be zero - if lease == nil { - lease = &configLease{ - Lease: b.System().DefaultLeaseTTL(), - } - } - - // Generate the username, password and expiration. PG limits user to 63 characters - displayName := req.DisplayName - if len(displayName) > 26 { - displayName = displayName[:26] - } - userUUID, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - username := fmt.Sprintf("%s-%s", displayName, userUUID) - if len(username) > 63 { - username = username[:63] - } - password, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - ttl, _, err := framework.CalculateTTL(b.System(), 0, lease.Lease, 0, lease.LeaseMax, 0, time.Time{}) - if err != nil { - return nil, err - } - expiration := time.Now(). - Add(ttl). 
- Format("2006-01-02 15:04:05-0700") - - // Get our handle - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Start a transaction - tx, err := db.Begin() - if err != nil { - return nil, err - } - defer func() { - tx.Rollback() - }() - - // Execute each query - for _, query := range strutil.ParseArbitraryStringSlice(role.SQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - m := map[string]string{ - "name": username, - "password": password, - "expiration": expiration, - } - - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { - return nil, err - } - } - - // Commit the transaction - - if err := tx.Commit(); err != nil { - return nil, err - } - - // Return the secret - - resp := b.Secret(SecretCredsType).Response(map[string]interface{}{ - "username": username, - "password": password, - }, map[string]interface{}{ - "username": username, - "role": name, - }) - resp.Secret.TTL = lease.Lease - resp.Secret.MaxTTL = lease.LeaseMax - return resp, nil -} - -const pathRoleCreateReadHelpSyn = ` -Request database credentials for a certain role. -` - -const pathRoleCreateReadHelpDesc = ` -This path reads database credentials for a certain role. The -database credentials will be generated on demand and will be automatically -revoked when the lease is up. -` diff --git a/builtin/logical/postgresql/path_roles.go b/builtin/logical/postgresql/path_roles.go deleted file mode 100644 index b1af8328f928..000000000000 --- a/builtin/logical/postgresql/path_roles.go +++ /dev/null @@ -1,197 +0,0 @@ -package postgresql - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathListRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/?$", - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ListOperation: b.pathRoleList, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func pathRoles(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "roles/" + framework.GenericNameRegex("name"), - Fields: map[string]*framework.FieldSchema{ - "name": { - Type: framework.TypeString, - Description: "Name of the role.", - }, - - "sql": { - Type: framework.TypeString, - Description: "SQL string to create a user. See help for more info.", - }, - - "revocation_sql": { - Type: framework.TypeString, - Description: `SQL statements to be executed to revoke a user. Must be a semicolon-separated -string, a base64-encoded semicolon-separated string, a serialized JSON string -array, or a base64-encoded serialized JSON string array. 
The '{{name}}' value -will be substituted.`, - }, - }, - - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathRoleRead, - logical.UpdateOperation: b.pathRoleCreate, - logical.DeleteOperation: b.pathRoleDelete, - }, - - HelpSynopsis: pathRoleHelpSyn, - HelpDescription: pathRoleHelpDesc, - } -} - -func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { - entry, err := s.Get(ctx, "role/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result roleEntry - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - - return &result, nil -} - -func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) - if err != nil { - return nil, err - } - - return nil, nil -} - -func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) - if err != nil { - return nil, err - } - if role == nil { - return nil, nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - "sql": role.SQL, - "revocation_sql": role.RevocationSQL, - }, - }, nil -} - -func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - entries, err := req.Storage.List(ctx, "role/") - if err != nil { - return nil, err - } - - return logical.ListResponse(entries), nil -} - -func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - name := data.Get("name").(string) - sql := data.Get("sql").(string) - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Test the query by trying to prepare it - for _, query := range strutil.ParseArbitraryStringSlice(sql, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - stmt, err := db.Prepare(Query(query, map[string]string{ - "name": "foo", - "password": "bar", - "expiration": "", - })) - if err != nil { - return logical.ErrorResponse(fmt.Sprintf( - "Error testing query: %s", err)), nil - } - stmt.Close() - } - - // Store it - entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ - SQL: sql, - RevocationSQL: data.Get("revocation_sql").(string), - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - return nil, nil -} - -type roleEntry struct { - SQL string `json:"sql" mapstructure:"sql" structs:"sql"` - RevocationSQL string `json:"revocation_sql" mapstructure:"revocation_sql" structs:"revocation_sql"` -} - -const pathRoleHelpSyn = ` -Manage the roles that can be created with this backend. -` - -const pathRoleHelpDesc = ` -This path lets you manage the roles that can be created with this backend. - -The "sql" parameter customizes the SQL string used to create the role. -This can be a sequence of SQL queries. Some substitution will be done to the -SQL string for certain keys. The names of the variables must be surrounded -by "{{" and "}}" to be replaced. - - * "name" - The random username generated for the DB user. - - * "password" - The random password generated for the DB user. - - * "expiration" - The timestamp when this user will expire. 
- -Example of a decent SQL query to use: - - CREATE ROLE "{{name}}" WITH - LOGIN - PASSWORD '{{password}}' - VALID UNTIL '{{expiration}}'; - GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; - -Note the above user would be able to access everything in schema public. -For more complex GRANT clauses, see the PostgreSQL manual. - -The "revocation_sql" parameter customizes the SQL string used to revoke a user. -Example of a decent revocation SQL query to use: - - REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; - REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; - REVOKE USAGE ON SCHEMA public FROM {{name}}; - DROP ROLE IF EXISTS {{name}}; -` diff --git a/builtin/logical/postgresql/query.go b/builtin/logical/postgresql/query.go deleted file mode 100644 index e250a6fe3bda..000000000000 --- a/builtin/logical/postgresql/query.go +++ /dev/null @@ -1,15 +0,0 @@ -package postgresql - -import ( - "fmt" - "strings" -) - -// Query templates a query for us. -func Query(tpl string, data map[string]string) string { - for k, v := range data { - tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) - } - - return tpl -} diff --git a/builtin/logical/postgresql/secret_creds.go b/builtin/logical/postgresql/secret_creds.go deleted file mode 100644 index 74e8f2fffd92..000000000000 --- a/builtin/logical/postgresql/secret_creds.go +++ /dev/null @@ -1,269 +0,0 @@ -package postgresql - -import ( - "context" - "database/sql" - "fmt" - "strings" - "time" - - "github.com/hashicorp/vault/sdk/database/helper/dbutil" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/dbtxn" - "github.com/hashicorp/vault/sdk/logical" -) - -const SecretCredsType = "creds" - -func secretCreds(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretCredsType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username", - }, - - "password": { - Type: framework.TypeString, - Description: "Password", - }, - }, - - Renew: b.secretCredsRenew, - Revoke: b.secretCredsRevoke, - } -} - -func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - if !ok { - return nil, fmt.Errorf("usernameRaw is not a string") - } - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - // Get the lease information - lease, err := b.Lease(ctx, req.Storage) - if err != nil { - return nil, err - } - if lease == nil { - lease = &configLease{} - } - - // Make sure we increase the VALID UNTIL endpoint for this user. 
- ttl, _, err := framework.CalculateTTL(b.System(), req.Secret.Increment, lease.Lease, 0, lease.LeaseMax, 0, req.Secret.IssueTime) - if err != nil { - return nil, err - } - if ttl > 0 { - expireTime := time.Now().Add(ttl) - // Adding a small buffer since the TTL will be calculated again after this call - // to ensure the database credential does not expire before the lease - expireTime = expireTime.Add(5 * time.Second) - expiration := expireTime.Format("2006-01-02 15:04:05-0700") - - query := fmt.Sprintf( - "ALTER ROLE %s VALID UNTIL '%s';", - dbutil.QuoteIdentifier(username), - expiration) - stmt, err := db.Prepare(query) - if err != nil { - return nil, err - } - defer stmt.Close() - if _, err := stmt.Exec(); err != nil { - return nil, err - } - } - - resp := &logical.Response{Secret: req.Secret} - resp.Secret.TTL = lease.Lease - resp.Secret.MaxTTL = lease.LeaseMax - return resp, nil -} - -func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - // Get the username from the internal data - usernameRaw, ok := req.Secret.InternalData["username"] - if !ok { - return nil, fmt.Errorf("secret is missing username internal data") - } - username, ok := usernameRaw.(string) - if !ok { - return nil, fmt.Errorf("usernameRaw is not a string") - } - var revocationSQL string - var resp *logical.Response - - roleNameRaw, ok := req.Secret.InternalData["role"] - if ok { - role, err := b.Role(ctx, req.Storage, roleNameRaw.(string)) - if err != nil { - return nil, err - } - if role == nil { - if resp == nil { - resp = &logical.Response{} - } - resp.AddWarning(fmt.Sprintf("Role %q cannot be found. Using default revocation SQL.", roleNameRaw.(string))) - } else { - revocationSQL = role.RevocationSQL - } - } - - // Get our connection - db, err := b.DB(ctx, req.Storage) - if err != nil { - return nil, err - } - - switch revocationSQL { - - // This is the default revocation logic. If revocation SQL is provided it - // is simply executed as-is. 
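To make the custom-SQL path (the switch's default: case further below) concrete, here is a minimal sketch of how a role's revocation_sql is split and templated; the SQL and username are illustrative, and the real code hands each statement, together with the name mapping, to dbtxn.ExecuteTxQueryDirect inside a transaction:

    // Hypothetical revocation_sql stored on a role; {{name}} is bound to the
    // database username being revoked.
    revocationSQL := `
    REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
    DROP ROLE IF EXISTS {{name}};`
    for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") {
        query = strings.TrimSpace(query)
        if len(query) == 0 {
            continue
        }
        // Each non-empty statement runs with "name" substituted.
        _ = Query(query, map[string]string{"name": "v-token-readonly-abc123"})
    }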
- case "": - // Check if the role exists - var exists bool - err = db.QueryRow("SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) - if err != nil && err != sql.ErrNoRows { - return nil, err - } - - if !exists { - return resp, nil - } - - // Query for permissions; we need to revoke permissions before we can drop - // the role - // This isn't done in a transaction because even if we fail along the way, - // we want to remove as much access as possible - stmt, err := db.Prepare("SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;") - if err != nil { - return nil, err - } - defer stmt.Close() - - rows, err := stmt.Query(username) - if err != nil { - return nil, err - } - defer rows.Close() - - const initialNumRevocations = 16 - revocationStmts := make([]string, 0, initialNumRevocations) - for rows.Next() { - var schema string - err = rows.Scan(&schema) - if err != nil { - // keep going; remove as many permissions as possible right now - continue - } - revocationStmts = append(revocationStmts, fmt.Sprintf( - `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, - dbutil.QuoteIdentifier(schema), - dbutil.QuoteIdentifier(username))) - - revocationStmts = append(revocationStmts, fmt.Sprintf( - `REVOKE USAGE ON SCHEMA %s FROM %s;`, - dbutil.QuoteIdentifier(schema), - dbutil.QuoteIdentifier(username))) - } - - // for good measure, revoke all privileges and usage on schema public - revocationStmts = append(revocationStmts, fmt.Sprintf( - `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, - dbutil.QuoteIdentifier(username))) - - revocationStmts = append(revocationStmts, fmt.Sprintf( - "REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;", - dbutil.QuoteIdentifier(username))) - - revocationStmts = append(revocationStmts, fmt.Sprintf( - "REVOKE USAGE ON SCHEMA public FROM %s;", - dbutil.QuoteIdentifier(username))) - - // get the current database name so we can issue a REVOKE CONNECT for - // this username - var dbname sql.NullString - if err := db.QueryRow("SELECT current_database();").Scan(&dbname); err != nil { - return nil, err - } - - if dbname.Valid { - revocationStmts = append(revocationStmts, fmt.Sprintf( - `REVOKE CONNECT ON DATABASE %s FROM %s;`, - dbutil.QuoteIdentifier(dbname.String), - dbutil.QuoteIdentifier(username))) - } - - // again, here, we do not stop on error, as we want to remove as - // many permissions as possible right now - var lastStmtError error - for _, query := range revocationStmts { - if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { - lastStmtError = err - } - } - - // can't drop if not all privileges are revoked - if rows.Err() != nil { - return nil, fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err()) - } - if lastStmtError != nil { - return nil, fmt.Errorf("could not perform all revocation statements: %w", lastStmtError) - } - - // Drop this user - stmt, err = db.Prepare(fmt.Sprintf( - `DROP ROLE IF EXISTS %s;`, dbutil.QuoteIdentifier(username))) - if err != nil { - return nil, err - } - defer stmt.Close() - if _, err := stmt.Exec(); err != nil { - return nil, err - } - - // We have revocation SQL, execute directly, within a transaction - default: - tx, err := db.Begin() - if err != nil { - return nil, err - } - defer func() { - tx.Rollback() - }() - - for _, query := range strutil.ParseArbitraryStringSlice(revocationSQL, ";") { - query = strings.TrimSpace(query) - if len(query) == 0 { - continue - } - - m 
:= map[string]string{ - "name": username, - } - if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { - return nil, err - } - } - - if err := tx.Commit(); err != nil { - return nil, err - } - } - - return resp, nil -} diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go index d1f223810a65..e6a093d54264 100644 --- a/builtin/logical/rabbitmq/backend.go +++ b/builtin/logical/rabbitmq/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -11,6 +14,8 @@ import ( rabbithole "github.com/michaelklishin/rabbit-hole/v2" ) +const operationPrefixRabbitMQ = "rabbit-mq" + // Factory creates and configures the backend func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go index 7df1384fefa4..21510b2f9881 100644 --- a/builtin/logical/rabbitmq/backend_test.go +++ b/builtin/logical/rabbitmq/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -8,8 +11,8 @@ import ( "testing" "github.com/hashicorp/go-secure-stdlib/base62" - "github.com/hashicorp/vault/helper/testhelpers/docker" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" rabbithole "github.com/michaelklishin/rabbit-hole/v2" diff --git a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go index 516f699eaee6..942db13dc141 100644 --- a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go +++ b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: rabbitmq.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/rabbitmq/passwords.go b/builtin/logical/rabbitmq/passwords.go index 01bfd41f0db2..ee6b9d02fcac 100644 --- a/builtin/logical/rabbitmq/passwords.go +++ b/builtin/logical/rabbitmq/passwords.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go index 51abe2547aa1..d586ffc035a7 100644 --- a/builtin/logical/rabbitmq/path_config_connection.go +++ b/builtin/logical/rabbitmq/path_config_connection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -17,6 +20,13 @@ const ( func pathConfigConnection(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/connection", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "configure", + OperationSuffix: "connection", + }, + Fields: map[string]*framework.FieldSchema{ "connection_uri": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/path_config_connection_test.go b/builtin/logical/rabbitmq/path_config_connection_test.go index dddee8f0c9b8..8e7de881c07a 100644 --- a/builtin/logical/rabbitmq/path_config_connection_test.go +++ b/builtin/logical/rabbitmq/path_config_connection_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_config_lease.go b/builtin/logical/rabbitmq/path_config_lease.go index 0b6bb572188d..cf82a2024cc1 100644 --- a/builtin/logical/rabbitmq/path_config_lease.go +++ b/builtin/logical/rabbitmq/path_config_lease.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -12,6 +15,11 @@ import ( func pathConfigLease(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + }, + Fields: map[string]*framework.FieldSchema{ "ttl": { Type: framework.TypeDurationSecond, @@ -25,9 +33,21 @@ func pathConfigLease(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathLeaseRead, - logical.UpdateOperation: b.pathLeaseUpdate, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, }, HelpSynopsis: pathConfigLeaseHelpSyn, diff --git a/builtin/logical/rabbitmq/path_config_lease_test.go b/builtin/logical/rabbitmq/path_config_lease_test.go index ec7e7e169c08..542a5d284e79 100644 --- a/builtin/logical/rabbitmq/path_config_lease_test.go +++ b/builtin/logical/rabbitmq/path_config_lease_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go index 5ad1ff6bdf41..956a01672086 100644 --- a/builtin/logical/rabbitmq/path_role_create.go +++ b/builtin/logical/rabbitmq/path_role_create.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -18,6 +21,13 @@ const ( func pathCreds(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "request", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/path_role_create_test.go b/builtin/logical/rabbitmq/path_role_create_test.go index 2c3d5f4b86d6..0f2591caf7dc 100644 --- a/builtin/logical/rabbitmq/path_role_create_test.go +++ b/builtin/logical/rabbitmq/path_role_create_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( diff --git a/builtin/logical/rabbitmq/path_roles.go b/builtin/logical/rabbitmq/path_roles.go index 2031c7d99ec5..9164d5780b68 100644 --- a/builtin/logical/rabbitmq/path_roles.go +++ b/builtin/logical/rabbitmq/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( @@ -13,6 +16,10 @@ import ( func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "roles", + }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -24,6 +31,10 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "role", + }, Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/rabbitmq/secret_creds.go b/builtin/logical/rabbitmq/secret_creds.go index b31dfc7188aa..eaaf2afd0a62 100644 --- a/builtin/logical/rabbitmq/secret_creds.go +++ b/builtin/logical/rabbitmq/secret_creds.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package rabbitmq import ( diff --git a/builtin/logical/ssh/backend.go b/builtin/logical/ssh/backend.go index fe4f40b334c9..f750e79fac56 100644 --- a/builtin/logical/ssh/backend.go +++ b/builtin/logical/ssh/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -10,6 +13,8 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) +const operationPrefixSSH = "ssh" + type backend struct { *framework.Backend view logical.Storage @@ -47,13 +52,12 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { SealWrapStorage: []string{ caPrivateKey, caPrivateKeyStoragePath, - "keys/", + keysStoragePrefix, }, }, Paths: []*framework.Path{ pathConfigZeroAddress(&b), - pathKeys(&b), pathListRoles(&b), pathRoles(&b), pathCredsCreate(&b), @@ -63,10 +67,10 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { pathSign(&b), pathIssue(&b), pathFetchPublicKey(&b), + pathCleanupKeys(&b), }, Secrets: []*framework.Secret{ - secretDynamicKey(&b), secretOTP(&b), }, @@ -112,8 +116,8 @@ const backendHelp = ` The SSH backend generates credentials allowing clients to establish SSH connections to remote hosts. 
-There are three variants of the backend, which generate different types of -credentials: dynamic keys, One-Time Passwords (OTPs) and certificate authority. The desired behavior +There are two variants of the backend, which generate different types of +credentials: One-Time Passwords (OTPs) and certificate authority. The desired behavior is role-specific and chosen at role creation time with the 'key_type' parameter. diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go index 4ad4a9f3c97f..ad2c048d5e95 100644 --- a/builtin/logical/ssh/backend_test.go +++ b/builtin/logical/ssh/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -13,30 +16,28 @@ import ( "time" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/logical" - - "golang.org/x/crypto/ssh" - "github.com/hashicorp/vault/builtin/credential/userpass" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" ) const ( - testIP = "127.0.0.1" - testUserName = "vaultssh" - testMultiUserName = "vaultssh,otherssh" - testAdminUser = "vaultssh" - testCaKeyType = "ca" - testOTPKeyType = "otp" - testDynamicKeyType = "dynamic" - testCIDRList = "127.0.0.1/32" - testAtRoleName = "test@RoleName" - testDynamicRoleName = "testDynamicRoleName" - testOTPRoleName = "testOTPRoleName" + testIP = "127.0.0.1" + testUserName = "vaultssh" + testMultiUserName = "vaultssh,otherssh" + testAdminUser = "vaultssh" + testCaKeyType = "ca" + testOTPKeyType = "otp" + testCIDRList = "127.0.0.1/32" + testAtRoleName = "test@RoleName" + testOTPRoleName = "testOTPRoleName" // testKeyName is the name of the entry that will be written to SSHMOUNTPOINT/ssh/keys testKeyName = "testKeyName" // testSharedPrivateKey is the value of the entry that will be written to SSHMOUNTPOINT/ssh/keys @@ -131,6 +132,8 @@ SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48" ) +var ctx = context.Background() + func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) { if tag == "" { tag = dockerImageTagSupportsNoRSA1 @@ -521,7 +524,7 @@ func newTestingFactory(t *testing.T) func(ctx context.Context, conf *logical.Bac defaultLeaseTTLVal := 2 * time.Minute maxLeaseTTLVal := 10 * time.Minute return Factory(context.Background(), &logical.BackendConfig{ - Logger: vault.NewTestLogger(t), + Logger: corehelpers.NewTestLogger(t), StorageView: &logical.InmemStorage{}, System: &logical.StaticSystemView{ DefaultLeaseTTLVal: defaultLeaseTTLVal, @@ -537,36 +540,22 @@ func TestSSHBackend_Lookup(t *testing.T) { "default_user": testUserName, "cidr_list": testCIDRList, } - testDynamicRoleData := map[string]interface{}{ - "key_type": testDynamicKeyType, - "key": testKeyName, - "admin_user": testAdminUser, - "default_user": testAdminUser, - "cidr_list": testCIDRList, - } data := map[string]interface{}{ "ip": testIP, } resp1 := []string(nil) resp2 := []string{testOTPRoleName} - resp3 := []string{testDynamicRoleName, testOTPRoleName} - resp4 := []string{testDynamicRoleName} - resp5 := []string{testAtRoleName} + resp3 := 
[]string{testAtRoleName} logicaltest.Test(t, logicaltest.TestCase{ LogicalFactory: newTestingFactory(t), Steps: []logicaltest.TestStep{ testLookupRead(t, data, resp1), testRoleWrite(t, testOTPRoleName, testOTPRoleData), testLookupRead(t, data, resp2), - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), - testLookupRead(t, data, resp3), testRoleDelete(t, testOTPRoleName), - testLookupRead(t, data, resp4), - testRoleDelete(t, testDynamicRoleName), testLookupRead(t, data, resp1), - testRoleWrite(t, testAtRoleName, testDynamicRoleData), - testLookupRead(t, data, resp5), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testLookupRead(t, data, resp3), testRoleDelete(t, testAtRoleName), testLookupRead(t, data, resp1), }, @@ -615,39 +604,6 @@ func TestSSHBackend_RoleList(t *testing.T) { }) } -func TestSSHBackend_DynamicKeyCreate(t *testing.T) { - cleanup, sshAddress := prepareTestContainer(t, "", "") - defer cleanup() - - host, port, err := net.SplitHostPort(sshAddress) - if err != nil { - t.Fatal(err) - } - - testDynamicRoleData := map[string]interface{}{ - "key_type": testDynamicKeyType, - "key": testKeyName, - "admin_user": testAdminUser, - "default_user": testAdminUser, - "cidr_list": host + "/32", - "port": port, - } - data := map[string]interface{}{ - "username": testUserName, - "ip": host, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalFactory: newTestingFactory(t), - Steps: []logicaltest.TestStep{ - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), - testCredsWrite(t, testDynamicRoleName, data, false, sshAddress), - testRoleWrite(t, testAtRoleName, testDynamicRoleData), - testCredsWrite(t, testAtRoleName, data, false, sshAddress), - }, - }) -} - func TestSSHBackend_OTPRoleCrud(t *testing.T) { testOTPRoleData := map[string]interface{}{ "key_type": testOTPKeyType, @@ -675,50 +631,6 @@ func TestSSHBackend_OTPRoleCrud(t *testing.T) { }) } -func TestSSHBackend_DynamicRoleCrud(t *testing.T) { - testDynamicRoleData := map[string]interface{}{ - "key_type": testDynamicKeyType, - "key": testKeyName, - "admin_user": testAdminUser, - "default_user": testAdminUser, - "cidr_list": testCIDRList, - } - respDynamicRoleData := map[string]interface{}{ - "cidr_list": testCIDRList, - "port": 22, - "install_script": DefaultPublicKeyInstallScript, - "key_bits": 1024, - "key": testKeyName, - "admin_user": testUserName, - "default_user": testUserName, - "key_type": testDynamicKeyType, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalFactory: newTestingFactory(t), - Steps: []logicaltest.TestStep{ - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), - testRoleRead(t, testDynamicRoleName, respDynamicRoleData), - testRoleDelete(t, testDynamicRoleName), - testRoleRead(t, testDynamicRoleName, nil), - testRoleWrite(t, testAtRoleName, testDynamicRoleData), - testRoleRead(t, testAtRoleName, respDynamicRoleData), - testRoleDelete(t, testAtRoleName), - testRoleRead(t, testAtRoleName, nil), - }, - }) -} - -func TestSSHBackend_NamedKeysCrud(t *testing.T) { - logicaltest.Test(t, logicaltest.TestCase{ - LogicalFactory: newTestingFactory(t), - Steps: []logicaltest.TestStep{ - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testNamedKeysDelete(t), - }, - }) -} - func TestSSHBackend_OTPCreate(t *testing.T) { cleanup, sshAddress := prepareTestContainer(t, "", "") defer func() { @@ -772,24 
+684,14 @@ func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) { "default_user": testUserName, "cidr_list": testCIDRList, } - testDynamicRoleData := map[string]interface{}{ - "key_type": testDynamicKeyType, - "key": testKeyName, - "admin_user": testAdminUser, - "default_user": testAdminUser, - "cidr_list": testCIDRList, - } req1 := map[string]interface{}{ "roles": testOTPRoleName, } resp1 := map[string]interface{}{ "roles": []string{testOTPRoleName}, } - req2 := map[string]interface{}{ - "roles": fmt.Sprintf("%s,%s", testOTPRoleName, testDynamicRoleName), - } resp2 := map[string]interface{}{ - "roles": []string{testOTPRoleName, testDynamicRoleName}, + "roles": []string{testOTPRoleName}, } resp3 := map[string]interface{}{ "roles": []string{}, @@ -801,11 +703,7 @@ func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) { testRoleWrite(t, testOTPRoleName, testOTPRoleData), testConfigZeroAddressWrite(t, req1), testConfigZeroAddressRead(t, resp1), - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testRoleWrite(t, testDynamicRoleName, testDynamicRoleData), - testConfigZeroAddressWrite(t, req2), testConfigZeroAddressRead(t, resp2), - testRoleDelete(t, testDynamicRoleName), testConfigZeroAddressRead(t, resp1), testRoleDelete(t, testOTPRoleName), testConfigZeroAddressRead(t, resp3), @@ -839,43 +737,6 @@ func TestSSHBackend_CredsForZeroAddressRoles_otp(t *testing.T) { }) } -func TestSSHBackend_CredsForZeroAddressRoles_dynamic(t *testing.T) { - cleanup, sshAddress := prepareTestContainer(t, "", "") - defer cleanup() - - host, port, err := net.SplitHostPort(sshAddress) - if err != nil { - t.Fatal(err) - } - - dynamicRoleData := map[string]interface{}{ - "key_type": testDynamicKeyType, - "key": testKeyName, - "admin_user": testAdminUser, - "default_user": testAdminUser, - "port": port, - } - data := map[string]interface{}{ - "username": testUserName, - "ip": host, - } - req2 := map[string]interface{}{ - "roles": testDynamicRoleName, - } - logicaltest.Test(t, logicaltest.TestCase{ - LogicalFactory: newTestingFactory(t), - Steps: []logicaltest.TestStep{ - testNamedKeysWrite(t, testKeyName, testSharedPrivateKey), - testRoleWrite(t, testDynamicRoleName, dynamicRoleData), - testCredsWrite(t, testDynamicRoleName, data, true, sshAddress), - testConfigZeroAddressWrite(t, req2), - testCredsWrite(t, testDynamicRoleName, data, false, sshAddress), - testConfigZeroAddressDelete(t), - testCredsWrite(t, testDynamicRoleName, data, true, sshAddress), - }, - }) -} - func TestSSHBackend_CA(t *testing.T) { testCases := []struct { name string @@ -1087,12 +948,63 @@ cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== return nil }, }, + testIssueCert("testcarole", "ec", testUserName, sshAddress, expectError), + testIssueCert("testcarole", "ed25519", testUserName, sshAddress, expectError), + testIssueCert("testcarole", "rsa", testUserName, sshAddress, expectError), }, } logicaltest.Test(t, testCase) } +func testIssueCert(role string, keyType string, testUserName string, sshAddress string, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "issue/" + role, + ErrorOk: expectError, + Data: map[string]interface{}{ + "key_type": keyType, + "valid_principals": testUserName, + }, + + Check: func(resp *logical.Response) error { + // Tolerate nil response if an error was expected + if expectError && resp == nil { + return nil + } + + signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) + if signedKey == "" { + return errors.New("no signed key 
in response") + } + + privKey, err := ssh.ParsePrivateKey([]byte(resp.Data["private_key"].(string))) + if err != nil { + return fmt.Errorf("error parsing private key: %v", err) + } + + parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey)) + if err != nil { + return fmt.Errorf("error parsing signed key: %v", err) + } + certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey) + if err != nil { + return err + } + + err = testSSH(testUserName, sshAddress, ssh.PublicKeys(certSigner), "date") + if expectError && err == nil { + return fmt.Errorf("expected error but got none") + } + if !expectError && err != nil { + return err + } + + return nil + }, + } +} + func TestSSHBackend_CAUpgradeAlgorithmSigner(t *testing.T) { cleanup, sshAddress := prepareTestContainer(t, dockerImageTagSupportsRSA1, testCAPublicKey) defer cleanup() @@ -2414,23 +2326,6 @@ func testVerifyWrite(t *testing.T, data map[string]interface{}, expected map[str } } -func testNamedKeysWrite(t *testing.T, name, key string) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: fmt.Sprintf("keys/%s", name), - Data: map[string]interface{}{ - "key": key, - }, - } -} - -func testNamedKeysDelete(t *testing.T) logicaltest.TestStep { - return logicaltest.TestStep{ - Operation: logical.DeleteOperation, - Path: fmt.Sprintf("keys/%s", testKeyName), - } -} - func testLookupRead(t *testing.T, data map[string]interface{}, expected []string) logicaltest.TestStep { return logicaltest.TestStep{ Operation: logical.UpdateOperation, @@ -2495,10 +2390,6 @@ func testRoleRead(t *testing.T, roleName string, expected map[string]interface{} if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] { return fmt.Errorf("data mismatch. bad: %#v", resp) } - case "dynamic": - if d.AdminUser != expected["admin_user"] || d.CIDRList != expected["cidr_list"] || d.KeyName != expected["key"] || d.KeyType != expected["key_type"] { - return fmt.Errorf("data mismatch. bad: %#v", resp) - } default: return fmt.Errorf("unknown key type. bad: %#v", resp) } @@ -2539,7 +2430,7 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, } return nil } - if roleName == testDynamicRoleName || roleName == testAtRoleName { + if roleName == testAtRoleName { var d struct { Key string `mapstructure:"key"` } @@ -2569,3 +2460,383 @@ func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, }, } } + +func TestBackend_CleanupDynamicHostKeys(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Running on a clean mount shouldn't do anything. + cleanRequest := &logical.Request{ + Operation: logical.DeleteOperation, + Path: "tidy/dynamic-keys", + Storage: config.StorageView, + } + + resp, err := b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "0 of 0") + + // Write a bunch of bogus entries. 
+ for i := 0; i < 15; i++ { + data := map[string]interface{}{ + "host": "localhost", + "key": "nothing-to-see-here", + } + entry, err := logical.StorageEntryJSON(fmt.Sprintf("%vexample-%v", keysStoragePrefix, i), &data) + require.NoError(t, err) + err = config.StorageView.Put(context.Background(), entry) + require.NoError(t, err) + } + + // Should now have 15 + resp, err = b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "15 of 15") + + // Should have none left. + resp, err = b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "0 of 0") +} + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation") +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. 
+ client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. + resp, err = client.Logical().ReadWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. 
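// (This checker backs write-only paths such as verify: writes must succeed
// with or without a token, while read, list, delete, and patch stay denied
// in both cases.)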
+ client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while authed: %v / %v", path, err, resp) + } + + // These should all be denied. + resp, err = client.Logical().ReadWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "ssh": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount SSH. + err := client.Sys().MountWithContext(ctx, "ssh", &api.MountInput{ + Type: "ssh", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. 
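// (The writes below provision a CA signing key, a CA-backed role with an
// issued certificate, and an OTP role with generated credentials, so that
// every authed path probed later exists on the mount.)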
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/config/ca", map[string]interface{}{ + "generate_signing_key": true, + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-ca", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/issue/test-ca", map[string]interface{}{ + "username": "toor", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-otp", map[string]interface{}{ + "key_type": "otp", + "default_user": "toor", + "cidr_list": "127.0.0.0/24", + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "ssh/creds/test-otp", map[string]interface{}{ + "username": "toor", + "ip": "127.0.0.1", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + // key := resp.Data["key"].(string) + + paths := map[string]pathAuthChecker{ + "config/ca": shouldBeAuthed, + "config/zeroaddress": shouldBeAuthed, + "creds/test-otp": shouldBeAuthed, + "issue/test-ca": shouldBeAuthed, + "lookup": shouldBeAuthed, + "public_key": shouldBeUnauthedReadList, + "roles/test-ca": shouldBeAuthed, + "roles/test-otp": shouldBeAuthed, + "roles/": shouldBeAuthed, + "sign/test-ca": shouldBeAuthed, + "tidy/dynamic-keys": shouldBeAuthed, + "verify": shouldBeUnauthedWriteOnly, + } + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "ssh/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + if len(openAPIResp.Data["paths"].(map[string]interface{})) == 0 { + t.Fatalf("expected to get response from OpenAPI; got empty path list") + } + + validatedPath := false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/ssh/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + + // Substitute values in from our testing map. + raw_path := openapi_path[5:] + if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "roles/") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") + } + if strings.Contains(raw_path, "{role}") && (strings.Contains(raw_path, "sign/") || strings.Contains(raw_path, "issue/")) { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca") + } + if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "creds") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test-otp") + } + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports SSH mount contains %v -> %v but was not tested to be authed or not authed.", + openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is exclusive from GET on the same endpoint usually. 
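// (A "list" query parameter on the GET spec marks the endpoint as a LIST
// endpoint, so it is not counted as having a plain GET.)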
+ hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities") + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} diff --git a/builtin/logical/ssh/cmd/ssh/main.go b/builtin/logical/ssh/cmd/ssh/main.go index d04bd30af67e..4a2163d99b68 100644 --- a/builtin/logical/ssh/cmd/ssh/main.go +++ b/builtin/logical/ssh/cmd/ssh/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: ssh.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/ssh/communicator.go b/builtin/logical/ssh/communicator.go deleted file mode 100644 index 8950c41e1cec..000000000000 --- a/builtin/logical/ssh/communicator.go +++ /dev/null @@ -1,350 +0,0 @@ -package ssh - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - - log "github.com/hashicorp/go-hclog" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" -) - -type comm struct { - client *ssh.Client - config *SSHCommConfig - conn net.Conn - address string -} - -// SSHCommConfig is the structure used to configure the SSH communicator. -type SSHCommConfig struct { - // The configuration of the Go SSH connection - SSHConfig *ssh.ClientConfig - - // Connection returns a new connection. The current connection - // in use will be closed as part of the Close method, or in the - // case an error occurs. - Connection func() (net.Conn, error) - - // Pty, if true, will request a pty from the remote end. - Pty bool - - // DisableAgent, if true, will not forward the SSH agent. - DisableAgent bool - - // Logger for output - Logger log.Logger -} - -// Creates a new communicator implementation over SSH. This takes -// an already existing TCP connection and SSH configuration. -func SSHCommNew(address string, config *SSHCommConfig) (result *comm, err error) { - // Establish an initial connection and connect - result = &comm{ - config: config, - address: address, - } - - if err = result.reconnect(); err != nil { - result = nil - return - } - - return -} - -func (c *comm) Close() error { - var err error - if c.conn != nil { - err = c.conn.Close() - } - c.conn = nil - c.client = nil - return err -} - -func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error { - // The target directory and file for talking the SCP protocol - target_dir := filepath.Dir(path) - target_file := filepath.Base(path) - - // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). - // This does not work when the target host is unix. 
Switch to forward slash - // which works for unix and windows - target_dir = filepath.ToSlash(target_dir) - - scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { - return scpUploadFile(target_file, input, w, stdoutR, fi) - } - - return c.scpSession("scp -vt "+target_dir, scpFunc) -} - -func (c *comm) NewSession() (session *ssh.Session, err error) { - if c.client == nil { - err = errors.New("client not available") - } else { - session, err = c.client.NewSession() - } - - if err != nil { - c.config.Logger.Error("ssh session open error, attempting reconnect", "error", err) - if err := c.reconnect(); err != nil { - c.config.Logger.Error("reconnect attempt failed", "error", err) - return nil, err - } - - return c.client.NewSession() - } - - return session, nil -} - -func (c *comm) reconnect() error { - // Close previous connection. - if c.conn != nil { - c.Close() - } - - var err error - c.conn, err = c.config.Connection() - if err != nil { - // Explicitly set this to the REAL nil. Connection() can return - // a nil implementation of net.Conn which will make the - // "if c.conn == nil" check fail above. Read here for more information - // on this psychotic language feature: - // - // http://golang.org/doc/faq#nil_error - c.conn = nil - c.config.Logger.Error("reconnection error", "error", err) - return err - } - - sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) - if err != nil { - c.config.Logger.Error("handshake error", "error", err) - c.Close() - return err - } - if sshConn != nil { - c.client = ssh.NewClient(sshConn, sshChan, req) - } - c.connectToAgent() - - return nil -} - -func (c *comm) connectToAgent() { - if c.client == nil { - return - } - - if c.config.DisableAgent { - return - } - - // open connection to the local agent - socketLocation := os.Getenv("SSH_AUTH_SOCK") - if socketLocation == "" { - return - } - agentConn, err := net.Dial("unix", socketLocation) - if err != nil { - c.config.Logger.Error("could not connect to local agent socket", "socket_path", socketLocation) - return - } - defer agentConn.Close() - - // create agent and add in auth - forwardingAgent := agent.NewClient(agentConn) - if forwardingAgent == nil { - c.config.Logger.Error("could not create agent client") - return - } - - // add callback for forwarding agent to SSH config - // XXX - might want to handle reconnects appending multiple callbacks - auth := ssh.PublicKeysCallback(forwardingAgent.Signers) - c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth) - agent.ForwardToAgent(c.client, forwardingAgent) - - // Setup a session to request agent forwarding - session, err := c.NewSession() - if err != nil { - return - } - defer session.Close() - - err = agent.RequestAgentForwarding(session) - if err != nil { - c.config.Logger.Error("error requesting agent forwarding", "error", err) - return - } - return -} - -func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error { - session, err := c.NewSession() - if err != nil { - return err - } - defer session.Close() - - // Get a pipe to stdin so that we can send data down - stdinW, err := session.StdinPipe() - if err != nil { - return err - } - - // We only want to close once, so we nil w after we close it, - // and only close in the defer if it hasn't been closed already. 
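// (The explicit Close later on the success path sets stdinW to nil, which
// turns this deferred close into a no-op there.)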
- defer func() { - if stdinW != nil { - stdinW.Close() - } - }() - - // Get a pipe to stdout so that we can get responses back - stdoutPipe, err := session.StdoutPipe() - if err != nil { - return err - } - stdoutR := bufio.NewReader(stdoutPipe) - - // Set stderr to a bytes buffer - stderr := new(bytes.Buffer) - session.Stderr = stderr - - // Start the sink mode on the other side - if err := session.Start(scpCommand); err != nil { - return err - } - - // Call our callback that executes in the context of SCP. We ignore - // EOF errors if they occur because it usually means that SCP prematurely - // ended on the other side. - if err := f(stdinW, stdoutR); err != nil && err != io.EOF { - return err - } - - // Close the stdin, which sends an EOF, and then set w to nil so that - // our defer func doesn't close it again since that is unsafe with - // the Go SSH package. - stdinW.Close() - stdinW = nil - - // Wait for the SCP connection to close, meaning it has consumed all - // our data and has completed, or has errored. - err = session.Wait() - if err != nil { - if exitErr, ok := err.(*ssh.ExitError); ok { - // We have an ExitError, meaning we can just read - // the exit status - c.config.Logger.Error("got non-zero exit status", "exit_status", exitErr.ExitStatus()) - - // If we exited with status 127, it means SCP isn't available. - // Return a more descriptive error for that. - if exitErr.ExitStatus() == 127 { - return errors.New( - "SCP failed to start. This usually means that SCP is not\n" + - "properly installed on the remote system.") - } - } - - return err - } - return nil -} - -// checkSCPStatus checks that a prior command sent to SCP completed -// successfully. If it did not complete successfully, an error will -// be returned. -func checkSCPStatus(r *bufio.Reader) error { - code, err := r.ReadByte() - if err != nil { - return err - } - - if code != 0 { - // Treat any non-zero (really 1 and 2) as fatal errors - message, _, err := r.ReadLine() - if err != nil { - return fmt.Errorf("error reading error message: %w", err) - } - - return errors.New(string(message)) - } - - return nil -} - -func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error { - var mode os.FileMode - var size int64 - - if fi != nil && (*fi).Mode().IsRegular() { - mode = (*fi).Mode().Perm() - size = (*fi).Size() - } else { - // Create a temporary file where we can copy the contents of the src - // so that we can determine the length, since SCP is length-prefixed. - tf, err := ioutil.TempFile("", "vault-ssh-upload") - if err != nil { - return fmt.Errorf("error creating temporary file for upload: %w", err) - } - defer os.Remove(tf.Name()) - defer tf.Close() - - mode = 0o644 - - if _, err := io.Copy(tf, src); err != nil { - return err - } - - // Sync the file so that the contents are definitely on disk, then - // read the length of it. 
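// (SCP's sink side needs the exact byte count up front; it is sent in the
// "C<mode> <size> <name>" header written near the end of this function.)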
- if err := tf.Sync(); err != nil { - return fmt.Errorf("error creating temporary file for upload: %w", err) - } - - // Seek the file to the beginning so we can re-read all of it - if _, err := tf.Seek(0, 0); err != nil { - return fmt.Errorf("error creating temporary file for upload: %w", err) - } - - tfi, err := tf.Stat() - if err != nil { - return fmt.Errorf("error creating temporary file for upload: %w", err) - } - - size = tfi.Size() - src = tf - } - - // Start the protocol - perms := fmt.Sprintf("C%04o", mode) - - fmt.Fprintln(w, perms, size, dst) - if err := checkSCPStatus(r); err != nil { - return err - } - - if _, err := io.CopyN(w, src, size); err != nil { - return err - } - - fmt.Fprint(w, "\x00") - if err := checkSCPStatus(r); err != nil { - return err - } - - return nil -} diff --git a/builtin/logical/ssh/linux_install_script.go b/builtin/logical/ssh/linux_install_script.go deleted file mode 100644 index a2228b2fc2e0..000000000000 --- a/builtin/logical/ssh/linux_install_script.go +++ /dev/null @@ -1,71 +0,0 @@ -package ssh - -const ( - // This is a constant representing a script to install and uninstall public - // key in remote hosts. - DefaultPublicKeyInstallScript = ` -#!/bin/bash -# -# This is a default script which installs or uninstalls an RSA public key to/from -# authorized_keys file in a typical linux machine. -# -# If the platform differs or if the binaries used in this script are not available -# in target machine, use the 'install_script' parameter with 'roles/' endpoint to -# register a custom script (applicable for Dynamic type only). -# -# Vault server runs this script on the target machine with the following params: -# -# $1:INSTALL_OPTION: "install" or "uninstall" -# -# $2:PUBLIC_KEY_FILE: File name containing public key to be installed. Vault server -# uses UUID as name to avoid collisions with public keys generated for other requests. -# -# $3:AUTH_KEYS_FILE: Absolute path of the authorized_keys file. -# Currently, vault uses /home/<username>/.ssh/authorized_keys as the path. -# -# [Note: This script will be run by Vault using the registered admin username. -# Notice that some commands below are run as 'sudo'. For graceful execution of -# this script there should not be any password prompts. So, disable password -# prompt for the admin username registered with Vault. - -set -e - -# Storing arguments into variables, to increase readability of the script. -INSTALL_OPTION=$1 -PUBLIC_KEY_FILE=$2 -AUTH_KEYS_FILE=$3 - -# Delete the public key file and the temporary file -function cleanup -{ - rm -f "$PUBLIC_KEY_FILE" temp_$PUBLIC_KEY_FILE -} - -# 'cleanup' will be called if the script ends or if any command fails. -trap cleanup EXIT - -# Return if the option is anything other than 'install' or 'uninstall'. -if [ "$INSTALL_OPTION" != "install" ] && [ "$INSTALL_OPTION" != "uninstall" ]; then - exit 1 -fi - -# use locking to avoid parallel script execution -( - flock --timeout 10 200 - # Create the .ssh directory and authorized_keys file if it does not exist - SSH_DIR=$(dirname $AUTH_KEYS_FILE) - sudo mkdir -p "$SSH_DIR" - sudo touch "$AUTH_KEYS_FILE" - # Remove the key from authorized_keys file if it is already present. - # This step is common for both install and uninstall. 
Note that grep's - # return code is ignored, thus if grep fails all keys will be removed - # rather than none and it fails secure - sudo grep -vFf "$PUBLIC_KEY_FILE" "$AUTH_KEYS_FILE" > temp_$PUBLIC_KEY_FILE || true - cat temp_$PUBLIC_KEY_FILE | sudo tee "$AUTH_KEYS_FILE" - # Append the new public key to authorized_keys file - if [ "$INSTALL_OPTION" == "install" ]; then - cat "$PUBLIC_KEY_FILE" | sudo tee --append "$AUTH_KEYS_FILE" - fi -) 200> ${AUTH_KEYS_FILE}.lock -` -) diff --git a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go new file mode 100644 index 000000000000..7d028def309a --- /dev/null +++ b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package ssh + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const keysStoragePrefix = "keys/" + +func pathCleanupKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy/dynamic-keys", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "tidy", + OperationSuffix: "dynamic-host-keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.handleCleanupKeys, + }, + HelpSynopsis: `This endpoint removes the stored host keys used for the removed Dynamic Key feature, if present.`, + HelpDescription: `For more information, refer to the API documentation.`, + } +} + +func (b *backend) handleCleanupKeys(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + names, err := req.Storage.List(ctx, keysStoragePrefix) + if err != nil { + return nil, fmt.Errorf("unable to list keys for removal: %w", err) + } + + for index, name := range names { + keyPath := keysStoragePrefix + name + if err := req.Storage.Delete(ctx, keyPath); err != nil { + return nil, fmt.Errorf("unable to delete key %v of %v: %w", index+1, len(names), err) + } + } + + return &logical.Response{ + Data: map[string]interface{}{ + "message": fmt.Sprintf("Removed %v of %v host keys.", len(names), len(names)), + }, + }, nil +} diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go index 5b759393870a..f33c98d8a3c6 100644 --- a/builtin/logical/ssh/path_config_ca.go +++ b/builtin/logical/ssh/path_config_ca.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -38,6 +41,11 @@ type keyStorageEntry struct { func pathConfigCA(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/ca", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + }, + Fields: map[string]*framework.FieldSchema{ "private_key": { Type: framework.TypeString, @@ -64,10 +72,26 @@ func pathConfigCA(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigCAUpdate, - logical.DeleteOperation: b.pathConfigCADelete, - logical.ReadOperation: b.pathConfigCARead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCAUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "ca", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigCADelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "ca-configuration", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigCARead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "ca-configuration", + }, + }, }, HelpSynopsis: `Set the SSH private key used for signing certificates.`, @@ -333,7 +357,7 @@ func generateSSHKeyPair(randomSource io.Reader, keyType string, keyBits int) (st case 521: curve = elliptic.P521() default: - return "", "", fmt.Errorf("unknown ECDSA key pair algorithm: %v", keyType) + return "", "", fmt.Errorf("unknown ECDSA key pair algorithm and bits: %v / %v", keyType, keyBits) } } diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go index 651ed42ce0fb..a096073c6963 100644 --- a/builtin/logical/ssh/path_config_ca_test.go +++ b/builtin/logical/ssh/path_config_ca_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go index d1e31e234df1..2d463ab1bde3 100644 --- a/builtin/logical/ssh/path_config_zeroaddress.go +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -18,6 +21,11 @@ type zeroAddressRoles struct { func pathConfigZeroAddress(b *backend) *framework.Path { return &framework.Path{ Pattern: "config/zeroaddress", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + }, + Fields: map[string]*framework.FieldSchema{ "roles": { Type: framework.TypeCommaStringSlice, @@ -26,10 +34,27 @@ func pathConfigZeroAddress(b *backend) *framework.Path { previously registered under these roles will be ignored.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigZeroAddressWrite, - logical.ReadOperation: b.pathConfigZeroAddressRead, - logical.DeleteOperation: b.pathConfigZeroAddressDelete, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "zero-address", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "zero-address-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "zero-address-configuration", + }, + }, }, HelpSynopsis: pathConfigZeroAddressSyn, HelpDescription: pathConfigZeroAddressDesc, diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go index 6a644ab881f9..781ce056f9dc 100644 --- a/builtin/logical/ssh/path_creds_create.go +++ b/builtin/logical/ssh/path_creds_create.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -20,6 +23,13 @@ type sshOTP struct { func pathCredsCreate(b *backend) *framework.Path { return &framework.Path{ Pattern: "creds/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -34,9 +44,11 @@ func pathCredsCreate(b *backend) *framework.Path { Description: "[Required] IP of the remote host", }, }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathCredsCreateWrite, }, + HelpSynopsis: pathCredsCreateHelpSyn, HelpDescription: pathCredsCreateHelpDesc, } @@ -135,30 +147,7 @@ func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request "otp": otp, }) } else if role.KeyType == KeyTypeDynamic { - // Generate an RSA key pair. This also installs the newly generated - // public key in the remote host. - dynamicPublicKey, dynamicPrivateKey, err := b.GenerateDynamicCredential(ctx, req, role, username, ip) - if err != nil { - return nil, err - } - - // Return the information relevant to user of dynamic type and save - // information required for later use in internal section of secret. 
- result = b.Secret(SecretDynamicKeyType).Response(map[string]interface{}{ - "key": dynamicPrivateKey, - "key_type": role.KeyType, - "username": username, - "ip": ip, - "port": role.Port, - }, map[string]interface{}{ - "admin_user": role.AdminUser, - "username": username, - "ip": ip, - "host_key_name": role.KeyName, - "dynamic_public_key": dynamicPublicKey, - "port": role.Port, - "install_script": role.InstallScript, - }) + return nil, fmt.Errorf("dynamic key types have been removed") } else { return nil, fmt.Errorf("key type unknown") } @@ -166,41 +155,6 @@ func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request return result, nil } -// Generates a RSA key pair and installs it in the remote target -func (b *backend) GenerateDynamicCredential(ctx context.Context, req *logical.Request, role *sshRole, username, ip string) (string, string, error) { - // Fetch the host key to be used for dynamic key installation - keyEntry, err := req.Storage.Get(ctx, fmt.Sprintf("keys/%s", role.KeyName)) - if err != nil { - return "", "", fmt.Errorf("key %q not found: %w", role.KeyName, err) - } - - if keyEntry == nil { - return "", "", fmt.Errorf("key %q not found", role.KeyName) - } - - var hostKey sshHostKey - if err := keyEntry.DecodeJSON(&hostKey); err != nil { - return "", "", fmt.Errorf("error reading the host key: %w", err) - } - - // Generate a new RSA key pair with the given key length. - dynamicPublicKey, dynamicPrivateKey, err := generateRSAKeys(role.KeyBits) - if err != nil { - return "", "", fmt.Errorf("error generating key: %w", err) - } - - if len(role.KeyOptionSpecs) != 0 { - dynamicPublicKey = fmt.Sprintf("%s %s", role.KeyOptionSpecs, dynamicPublicKey) - } - - // Add the public key to authorized_keys file in target machine - err = b.installPublicKeyInTarget(ctx, role.AdminUser, username, ip, role.Port, hostKey.Key, dynamicPublicKey, role.InstallScript, true) - if err != nil { - return "", "", fmt.Errorf("failed to add public key to authorized_keys file in target: %w", err) - } - return dynamicPublicKey, dynamicPrivateKey, nil -} - // Generates a UUID OTP and its salted value based on the salt of the backend. func (b *backend) GenerateSaltedOTP(ctx context.Context) (string, string, error) { str, err := uuid.GenerateUUID() @@ -319,12 +273,8 @@ Creates a credential for establishing SSH connection with the remote host. const pathCredsCreateHelpDesc = ` This path will generate a new key for establishing SSH session with -target host. The key can either be a long lived dynamic key or a One -Time Password (OTP), using 'key_type' parameter being 'dynamic' or -'otp' respectively. For dynamic keys, a named key should be supplied. -Create named key using the 'keys/' endpoint, and this represents the -shared SSH key of target host. If this backend is mounted at 'ssh', -then "ssh/creds/web" would generate a key for 'web' role. +target host. The key is a One Time Password (OTP), generated when the +role's 'key_type' is 'otp'. Keys will have a lease associated with them. The access keys can be revoked by using the lease ID. diff --git a/builtin/logical/ssh/path_fetch.go b/builtin/logical/ssh/path_fetch.go index de5a3e60dd74..da5935bbd44c 100644 --- a/builtin/logical/ssh/path_fetch.go +++ b/builtin/logical/ssh/path_fetch.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
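To make the OTP-only flow concrete, a hedged sketch of requesting a credential, reusing the client from the earlier sketch; the role name "otp_key_role" and the "ssh/" mount path are assumptions, not part of this change:

```
// An update to creds/:role reaches pathCredsCreateWrite; for an OTP role
// the response carries the generated one-time password under "otp".
secret, err := client.Logical().Write("ssh/creds/otp_key_role", map[string]interface{}{
	"username": "ubuntu",   // optional; defaults to the role's default_user
	"ip":       "10.0.0.5", // required; must fall within the role's CIDR list
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(secret.Data["otp"])
```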
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -11,12 +14,17 @@ func pathFetchPublicKey(b *backend) *framework.Path { return &framework.Path{ Pattern: `public_key`, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "public-key", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathFetchPublicKey, }, HelpSynopsis: `Retrieve the public key.`, - HelpDescription: `This allows the public key, that this backend has been configured with, to be fetched. This is a raw response endpoint without JSON encoding; use -format=raw or an external tool (e.g., curl) to fetch this value.`, + HelpDescription: `This allows the public key of the SSH CA certificate that this backend has been configured with to be fetched. This is a raw response endpoint without JSON encoding; use -format=raw or an external tool (e.g., curl) to fetch this value.`, } } diff --git a/builtin/logical/ssh/path_issue.go b/builtin/logical/ssh/path_issue.go index 77b644590fd0..422f6621ee0b 100644 --- a/builtin/logical/ssh/path_issue.go +++ b/builtin/logical/ssh/path_issue.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -19,6 +22,12 @@ func pathIssue(b *backend) *framework.Path { return &framework.Path{ Pattern: "issue/" + framework.GenericNameWithAtRegex("role"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "issue", + OperationSuffix: "certificate", + }, + Operations: map[logical.Operation]framework.OperationHandler{ logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathIssue, diff --git a/builtin/logical/ssh/path_issue_sign.go b/builtin/logical/ssh/path_issue_sign.go index 0ce45d518991..91fca5a40855 100644 --- a/builtin/logical/ssh/path_issue_sign.go +++ b/builtin/logical/ssh/path_issue_sign.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -498,7 +501,7 @@ func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) { // prepare certificate for signing nonce := make([]byte, 32) if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce") + return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce: %w", err) } certificate := &ssh.Certificate{ Serial: serialNumber.Uint64(), diff --git a/builtin/logical/ssh/path_keys.go b/builtin/logical/ssh/path_keys.go deleted file mode 100644 index 6f0f7c9b2bdb..000000000000 --- a/builtin/logical/ssh/path_keys.go +++ /dev/null @@ -1,110 +0,0 @@ -package ssh - -import ( - "context" - "fmt" - - "golang.org/x/crypto/ssh" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" -) - -type sshHostKey struct { - Key string `json:"key"` -} - -func pathKeys(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "keys/" + framework.GenericNameRegex("key_name"), - Fields: map[string]*framework.FieldSchema{ - "key_name": { - Type: framework.TypeString, - Description: "[Required] Name of the key", - }, - "key": { - Type: framework.TypeString, - Description: "[Required] SSH private key with super user privileges in host", - }, - }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathKeysWrite, - logical.DeleteOperation: b.pathKeysDelete, - }, - HelpSynopsis: pathKeysSyn, - HelpDescription: pathKeysDesc, - } -} - -func (b *backend) getKey(ctx context.Context, s logical.Storage, n string) (*sshHostKey, error) { - entry, err := s.Get(ctx, "keys/"+n) - if err != nil { - return nil, err - } - if entry == nil { - return nil, nil - } - - var result sshHostKey - if err := entry.DecodeJSON(&result); err != nil { - return nil, err - } - return &result, nil -} - -func (b *backend) pathKeysDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - keyName := d.Get("key_name").(string) - keyPath := fmt.Sprintf("keys/%s", keyName) - err := req.Storage.Delete(ctx, keyPath) - if err != nil { - return nil, err - } - return nil, nil -} - -func (b *backend) pathKeysWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - keyName := d.Get("key_name").(string) - if keyName == "" { - return logical.ErrorResponse("Missing key_name"), nil - } - - keyString := d.Get("key").(string) - - // Check if the key provided is infact a private key - signer, err := ssh.ParsePrivateKey([]byte(keyString)) - if err != nil || signer == nil { - return logical.ErrorResponse("Invalid key"), nil - } - - if keyString == "" { - return logical.ErrorResponse("Missing key"), nil - } - - keyPath := fmt.Sprintf("keys/%s", keyName) - - // Store the key - entry, err := logical.StorageEntryJSON(keyPath, map[string]interface{}{ - "key": keyString, - }) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - return nil, nil -} - -const pathKeysSyn = ` -Register a shared private key with Vault. -` - -const pathKeysDesc = ` -Vault uses this key to install and uninstall dynamic keys in remote hosts. This -key should have sudoer privileges in remote hosts. This enables installing keys -for unprivileged usernames. - -If this backend is mounted as "ssh", then the endpoint for registering shared -key is "ssh/keys/". 
The name given here can be associated with any number -of roles via the endpoint "ssh/roles/". -` diff --git a/builtin/logical/ssh/path_lookup.go b/builtin/logical/ssh/path_lookup.go index 05b62af96afd..f3a6b58ecc3e 100644 --- a/builtin/logical/ssh/path_lookup.go +++ b/builtin/logical/ssh/path_lookup.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -12,15 +15,24 @@ import ( func pathLookup(b *backend) *framework.Path { return &framework.Path{ Pattern: "lookup", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "list", + OperationSuffix: "roles-by-ip", + }, + Fields: map[string]*framework.FieldSchema{ "ip": { Type: framework.TypeString, Description: "[Required] IP address of remote host", }, }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathLookupWrite, }, + HelpSynopsis: pathLookupSyn, HelpDescription: pathLookupDesc, } diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go index 6e525c42bce3..5e1a00194a95 100644 --- a/builtin/logical/ssh/path_roles.go +++ b/builtin/logical/ssh/path_roles.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -17,7 +20,7 @@ import ( const ( // KeyTypeOTP is a key of type OTP KeyTypeOTP = "otp" - // KeyTypeDynamic is dynamic key type + // KeyTypeDynamic is the dynamic key type; now removed. KeyTypeDynamic = "dynamic" // KeyTypeCA is a key of type CA KeyTypeCA = "ca" @@ -32,24 +35,19 @@ const ( ) // Structure that represents a role in SSH backend. This is a common role structure -// for both OTP and Dynamic roles. Not all the fields are mandatory for both type. +// for both OTP and CA roles. Not all the fields are mandatory for both types. // Some are applicable for one and not for the other. It doesn't matter.
type sshRole struct { KeyType string `mapstructure:"key_type" json:"key_type"` - KeyName string `mapstructure:"key" json:"key"` - KeyBits int `mapstructure:"key_bits" json:"key_bits"` - AdminUser string `mapstructure:"admin_user" json:"admin_user"` DefaultUser string `mapstructure:"default_user" json:"default_user"` DefaultUserTemplate bool `mapstructure:"default_user_template" json:"default_user_template"` CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` Port int `mapstructure:"port" json:"port"` - InstallScript string `mapstructure:"install_script" json:"install_script"` AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` AllowedDomainsTemplate bool `mapstructure:"allowed_domains_template" json:"allowed_domains_template"` - KeyOptionSpecs string `mapstructure:"key_option_specs" json:"key_option_specs"` MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` TTL string `mapstructure:"ttl" json:"ttl"` DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"` @@ -74,6 +72,11 @@ func pathListRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathRoleList, }, @@ -86,6 +89,12 @@ func pathListRoles(b *backend) *framework.Path { func pathRoles(b *backend) *framework.Path { return &framework.Path{ Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ "role": { Type: framework.TypeString, @@ -93,30 +102,10 @@ func pathRoles(b *backend) *framework.Path { [Required for all types] Name of the role being created.`, }, - "key": { - Type: framework.TypeString, - Description: ` - [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] - Name of the registered key in Vault. Before creating the role, use the - 'keys/' endpoint to create a named key.`, - }, - "admin_user": { - Type: framework.TypeString, - Description: ` - [Required for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] - Admin user at remote host. The shared key being registered should be - for this user and should have root privileges. Everytime a dynamic - credential is being generated for other users, Vault uses this admin - username to login to remote host and install the generated credential - for the other user.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Admin Username", - }, - }, "default_user": { Type: framework.TypeString, Description: ` - [Required for Dynamic type] [Required for OTP type] [Optional for CA type] + [Required for OTP type] [Optional for CA type] Default username for which a credential will be generated. 
When the endpoint 'creds/' is used without a username, this value will be used as default username.`, @@ -127,7 +116,7 @@ func pathRoles(b *backend) *framework.Path { "default_user_template": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, Default user can be specified using identity template policies. Non-templated users are also permitted. `, @@ -136,7 +125,7 @@ func pathRoles(b *backend) *framework.Path { "cidr_list": { Type: framework.TypeString, Description: ` - [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] + [Optional for OTP type] [Not applicable for CA type] Comma separated list of CIDR blocks for which the role is applicable for. CIDR blocks can belong to more than one role.`, DisplayAttrs: &framework.DisplayAttributes{ @@ -146,7 +135,7 @@ func pathRoles(b *backend) *framework.Path { "exclude_cidr_list": { Type: framework.TypeString, Description: ` - [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] + [Optional for OTP type] [Not applicable for CA type] Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not accepted by the role. This is particularly useful when big CIDR blocks are being used by the role and certain parts of it needs to be kept out.`, @@ -157,7 +146,7 @@ func pathRoles(b *backend) *framework.Path { "port": { Type: framework.TypeInt, Description: ` - [Optional for Dynamic type] [Optional for OTP type] [Not applicable for CA type] + [Optional for OTP type] [Not applicable for CA type] Port number for SSH connection. Default is '22'. Port number does not play any role in creation of OTP. For 'otp' type, this is just a way to inform client about the port number to use. Port number will be @@ -170,27 +159,13 @@ func pathRoles(b *backend) *framework.Path { Type: framework.TypeString, Description: ` [Required for all types] - Type of key used to login to hosts. It can be either 'otp', 'dynamic' or 'ca'. + Type of key used to login to hosts. It can be either 'otp' or 'ca'. 'otp' type requires agent to be installed in remote hosts.`, - AllowedValues: []interface{}{"otp", "dynamic", "ca"}, + AllowedValues: []interface{}{"otp", "ca"}, DisplayAttrs: &framework.DisplayAttributes{ Value: "ca", }, }, - "key_bits": { - Type: framework.TypeInt, - Description: ` - [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] - Length of the RSA dynamic key in bits. It is 1024 by default or it can be 2048.`, - }, - "install_script": { - Type: framework.TypeString, - Description: ` - [Optional for Dynamic type] [Not-applicable for OTP type] [Not applicable for CA type] - Script used to install and uninstall public keys in the target machine. - The inbuilt default install script will be for Linux hosts. For sample - script, refer the project documentation website.`, - }, "allowed_users": { Type: framework.TypeString, Description: ` @@ -210,7 +185,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_users_template": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, Allowed users can be specified using identity template policies. Non-templated users are also permitted. 
`, @@ -219,7 +194,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_domains": { Type: framework.TypeString, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If this option is not specified, client can request for a signed certificate for any valid host. If only certain domains are allowed, then this list enforces it. `, @@ -227,25 +202,16 @@ func pathRoles(b *backend) *framework.Path { "allowed_domains_template": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, Allowed domains can be specified using identity template policies. Non-templated domains are also permitted. `, Default: false, }, - "key_option_specs": { - Type: framework.TypeString, - Description: ` - [Optional for Dynamic type] [Not applicable for OTP type] [Not applicable for CA type] - Comma separated option specifications which will be prefixed to RSA key in - authorized_keys file. Options should be valid and comply with authorized_keys - file format and should not contain spaces. - `, - }, "ttl": { Type: framework.TypeDurationSecond, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] The lease duration if no specific lease duration is requested. The lease duration controls the expiration of certificates issued by this backend. Defaults to @@ -257,7 +223,7 @@ func pathRoles(b *backend) *framework.Path { "max_ttl": { Type: framework.TypeDurationSecond, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] The maximum allowed lease duration `, DisplayAttrs: &framework.DisplayAttributes{ @@ -267,7 +233,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_critical_options": { Type: framework.TypeString, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] A comma-separated list of critical options that certificates can have when signed. To allow any critical options, set this to an empty string. `, @@ -275,7 +241,7 @@ func pathRoles(b *backend) *framework.Path { "allowed_extensions": { Type: framework.TypeString, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] A comma-separated list of extensions that certificates can have when signed. An empty list means that no extension overrides are allowed by an end-user; explicitly specify '*' to allow any extensions to be set. @@ -284,8 +250,8 @@ func pathRoles(b *backend) *framework.Path { "default_critical_options": { Type: framework.TypeMap, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] - [Optional for CA type] Critical options certificates should + [Not applicable for OTP type] [Optional for CA type] + Critical options certificates should have if none are provided when signing. This field takes in key value pairs in JSON format. Note that these are not restricted by "allowed_critical_options". Defaults to none. 
@@ -294,8 +260,8 @@ func pathRoles(b *backend) *framework.Path { "default_extensions": { Type: framework.TypeMap, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] - [Optional for CA type] Extensions certificates should have if + [Not applicable for OTP type] [Optional for CA type] + Extensions certificates should have if none are provided when signing. This field takes in key value pairs in JSON format. Note that these are not restricted by "allowed_extensions". Defaults to none. @@ -304,7 +270,7 @@ func pathRoles(b *backend) *framework.Path { "default_extensions_template": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, Default extension values can be specified using identity template policies. Non-templated extension values are also permitted. `, @@ -313,7 +279,7 @@ func pathRoles(b *backend) *framework.Path { "allow_user_certificates": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, certificates are allowed to be signed for use as a 'user'. `, Default: false, @@ -321,7 +287,7 @@ func pathRoles(b *backend) *framework.Path { "allow_host_certificates": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, certificates are allowed to be signed for use as a 'host'. `, Default: false, @@ -329,7 +295,7 @@ func pathRoles(b *backend) *framework.Path { "allow_bare_domains": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, host certificates that are requested are allowed to use the base domains listed in "allowed_domains", e.g. "example.com". This is a separate option as in some cases this can be considered a security threat. @@ -338,14 +304,14 @@ func pathRoles(b *backend) *framework.Path { "allow_subdomains": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains". `, }, "allow_user_key_ids": { Type: framework.TypeBool, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] If true, users can override the key ID for a signed certificate with the "key_id" field. When false, the key ID will always be the token display name. The key ID is logged by the SSH server and can be useful for auditing. @@ -357,7 +323,7 @@ func pathRoles(b *backend) *framework.Path { "key_id_format": { Type: framework.TypeString, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] + [Not applicable for OTP type] [Optional for CA type] When supplied, this value specifies a custom format for the key id of a signed certificate. The following variables are available for use: '{{token_display_name}}' - The display name of the token used to make the request. '{{role_name}}' - The name of the role signing the request. 
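Since every dynamic-only field is gone, an OTP role now needs only the handful of fields that remain. A minimal sketch of creating one, as a follow-on to the client sketches above; the role name and "ssh/" mount path are assumptions, and the fields are the ones defined in this file:

```
// Writes to roles/:role reach pathRoleWrite; key_type selects the branch.
_, err := client.Logical().Write("ssh/roles/otp_key_role", map[string]interface{}{
	"key_type":     "otp",         // 'dynamic' is now rejected
	"default_user": "ubuntu",      // required for OTP type
	"cidr_list":    "10.0.0.0/24", // optional; restricts target IPs
	"port":         22,            // informational only for OTP roles
})
if err != nil {
	log.Fatal(err)
}
```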
@@ -370,17 +336,18 @@ func pathRoles(b *backend) *framework.Path { "allowed_user_key_lengths": { Type: framework.TypeMap, Description: ` - [Not applicable for Dynamic type] [Not applicable for OTP type] [Optional for CA type] - If set, allows the enforcement of key types and minimum key sizes to be signed. - `, + [Not applicable for OTP type] [Optional for CA type] + If set, allows the enforcement of key types and minimum key sizes to be signed. + `, }, "algorithm_signer": { Type: framework.TypeString, Description: ` + [Not applicable for OTP type] [Optional for CA type] When supplied, this value specifies a signing algorithm for the key. Possible values: ssh-rsa, rsa-sha2-256, rsa-sha2-512, default, or the empty string. `, - AllowedValues: []interface{}{"", ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512}, + AllowedValues: []interface{}{"", DefaultAlgorithmSigner, ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512}, DisplayAttrs: &framework.DisplayAttributes{ Name: "Signing Algorithm", }, @@ -389,6 +356,7 @@ func pathRoles(b *backend) *framework.Path { Type: framework.TypeDurationSecond, Default: 30, Description: ` + [Not applicable for OTP type] [Optional for CA type] The duration that the SSH certificate should be backdated by at issuance.`, DisplayAttrs: &framework.DisplayAttributes{ Name: "Not before duration", @@ -414,7 +382,7 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("missing role name"), nil } - // Allowed users is an optional field, applicable for both OTP and Dynamic types. + // Allowed users is an optional field, applicable for both OTP and CA types. allowedUsers := d.Get("allowed_users").(string) // Validate the CIDR blocks @@ -459,13 +427,6 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse("missing default user"), nil } - // Admin user is not used if OTP key type is used because there is - // no need to login to remote machine. - adminUser := d.Get("admin_user").(string) - if adminUser != "" { - return logical.ErrorResponse("admin user not required for OTP type"), nil - } - // Below are the only fields used from the role structure for OTP type. roleEntry = sshRole{ DefaultUser: defaultUser, @@ -477,59 +438,7 @@ func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *fr Version: roleEntryVersion, } } else if keyType == KeyTypeDynamic { - defaultUser := d.Get("default_user").(string) - if defaultUser == "" { - return logical.ErrorResponse("missing default user"), nil - } - // Key name is required by dynamic type and not by OTP type. - keyName := d.Get("key").(string) - if keyName == "" { - return logical.ErrorResponse("missing key name"), nil - } - keyEntry, err := req.Storage.Get(ctx, fmt.Sprintf("keys/%s", keyName)) - if err != nil || keyEntry == nil { - return logical.ErrorResponse(fmt.Sprintf("invalid 'key': %q", keyName)), nil - } - - installScript := d.Get("install_script").(string) - keyOptionSpecs := d.Get("key_option_specs").(string) - - // Setting the default script here. The script will install the - // generated public key in the authorized_keys file of linux host. - if installScript == "" { - installScript = DefaultPublicKeyInstallScript - } - - adminUser := d.Get("admin_user").(string) - if adminUser == "" { - return logical.ErrorResponse("missing admin username"), nil - } - - // This defaults to 2048, but it can also be 1024, 3072, 4096, or 8192. 
- // In the near future, we should disallow 1024-bit SSH keys. - keyBits := d.Get("key_bits").(int) - if keyBits == 0 { - keyBits = 2048 - } - if keyBits != 1024 && keyBits != 2048 && keyBits != 3072 && keyBits != 4096 && keyBits != 8192 { - return logical.ErrorResponse("invalid key_bits field"), nil - } - - // Store all the fields required by dynamic key type - roleEntry = sshRole{ - KeyName: keyName, - AdminUser: adminUser, - DefaultUser: defaultUser, - CIDRList: cidrList, - ExcludeCIDRList: excludeCidrList, - Port: port, - KeyType: KeyTypeDynamic, - KeyBits: keyBits, - InstallScript: installScript, - AllowedUsers: allowedUsers, - KeyOptionSpecs: keyOptionSpecs, - Version: roleEntryVersion, - } + return logical.ErrorResponse("dynamic key type roles are no longer supported"), nil } else if keyType == KeyTypeCA { algorithmSigner := DefaultAlgorithmSigner algorithmSignerRaw, ok := d.GetOk("algorithm_signer") @@ -776,7 +685,6 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { "allow_user_key_ids": role.AllowUserKeyIDs, "key_id_format": role.KeyIDFormat, "key_type": role.KeyType, - "key_bits": role.KeyBits, "default_critical_options": role.DefaultCriticalOptions, "default_extensions": role.DefaultExtensions, "default_extensions_template": role.DefaultExtensionsTemplate, @@ -785,23 +693,7 @@ func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { "not_before_duration": int64(role.NotBeforeDuration.Seconds()), } case KeyTypeDynamic: - result = map[string]interface{}{ - "key": role.KeyName, - "admin_user": role.AdminUser, - "default_user": role.DefaultUser, - "cidr_list": role.CIDRList, - "exclude_cidr_list": role.ExcludeCIDRList, - "port": role.Port, - "key_type": role.KeyType, - "key_bits": role.KeyBits, - "allowed_users": role.AllowedUsers, - "key_option_specs": role.KeyOptionSpecs, - // Returning install script will make the output look messy. - // But this is one way for clients to see the script that is - // being used to install the key. If there is some problem, - // the script can be modified and configured by clients. - "install_script": role.InstallScript, - } + return nil, fmt.Errorf("dynamic key type roles are no longer supported") default: return nil, fmt.Errorf("invalid key type: %v", role.KeyType) } diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go index 19196013e6d5..20bd2259a34d 100644 --- a/builtin/logical/ssh/path_sign.go +++ b/builtin/logical/ssh/path_sign.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -12,6 +15,12 @@ func pathSign(b *backend) *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameWithAtRegex("role"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "sign", + OperationSuffix: "certificate", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathSign, }, diff --git a/builtin/logical/ssh/path_verify.go b/builtin/logical/ssh/path_verify.go index 7d9814751fd1..323fecd02791 100644 --- a/builtin/logical/ssh/path_verify.go +++ b/builtin/logical/ssh/path_verify.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( @@ -11,6 +14,11 @@ import ( func pathVerify(b *backend) *framework.Path { return &framework.Path{ Pattern: "verify", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "verify", + OperationSuffix: "otp", + }, Fields: map[string]*framework.FieldSchema{ "otp": { Type: framework.TypeString, diff --git a/builtin/logical/ssh/secret_dynamic_key.go b/builtin/logical/ssh/secret_dynamic_key.go deleted file mode 100644 index 80b9c5ca0e7c..000000000000 --- a/builtin/logical/ssh/secret_dynamic_key.go +++ /dev/null @@ -1,70 +0,0 @@ -package ssh - -import ( - "context" - "fmt" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/logical" - "github.com/mitchellh/mapstructure" -) - -const SecretDynamicKeyType = "secret_dynamic_key_type" - -func secretDynamicKey(b *backend) *framework.Secret { - return &framework.Secret{ - Type: SecretDynamicKeyType, - Fields: map[string]*framework.FieldSchema{ - "username": { - Type: framework.TypeString, - Description: "Username in host", - }, - "ip": { - Type: framework.TypeString, - Description: "IP address of host", - }, - }, - - Renew: b.secretDynamicKeyRenew, - Revoke: b.secretDynamicKeyRevoke, - } -} - -func (b *backend) secretDynamicKeyRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - return &logical.Response{Secret: req.Secret}, nil -} - -func (b *backend) secretDynamicKeyRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - type sec struct { - AdminUser string `mapstructure:"admin_user"` - Username string `mapstructure:"username"` - IP string `mapstructure:"ip"` - HostKeyName string `mapstructure:"host_key_name"` - DynamicPublicKey string `mapstructure:"dynamic_public_key"` - InstallScript string `mapstructure:"install_script"` - Port int `mapstructure:"port"` - } - - intSec := &sec{} - err := mapstructure.Decode(req.Secret.InternalData, intSec) - if err != nil { - return nil, fmt.Errorf("secret internal data could not be decoded: %w", err) - } - - // Fetch the host key using the key name - hostKey, err := b.getKey(ctx, req.Storage, intSec.HostKeyName) - if err != nil { - return nil, fmt.Errorf("key %q not found error: %w", intSec.HostKeyName, err) - } - if hostKey == nil { - return nil, fmt.Errorf("key %q not found", intSec.HostKeyName) - } - - // Remove the public key from authorized_keys file in target machine - // The last param 'false' indicates that the key should be uninstalled. - err = b.installPublicKeyInTarget(ctx, intSec.AdminUser, intSec.Username, intSec.IP, intSec.Port, hostKey.Key, intSec.DynamicPublicKey, intSec.InstallScript, false) - if err != nil { - return nil, fmt.Errorf("error removing public key from authorized_keys file in target") - } - return nil, nil -} diff --git a/builtin/logical/ssh/secret_otp.go b/builtin/logical/ssh/secret_otp.go index 72e9903f16bb..522c60e2dfdb 100644 --- a/builtin/logical/ssh/secret_otp.go +++ b/builtin/logical/ssh/secret_otp.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go index 1923caa346b3..89980ada0132 100644 --- a/builtin/logical/ssh/util.go +++ b/builtin/logical/ssh/util.go @@ -1,7 +1,9 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package ssh import ( - "bytes" "context" "crypto/rand" "crypto/rsa" @@ -11,9 +13,7 @@ import ( "fmt" "net" "strings" - "time" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/logical" "golang.org/x/crypto/ssh" @@ -40,69 +40,6 @@ func generateRSAKeys(keyBits int) (publicKeyRsa string, privateKeyRsa string, er return } -// Public key and the script to install the key are uploaded to remote machine. -// Public key is either added or removed from authorized_keys file using the -// script. Default script is for a Linux machine and hence the path of the -// authorized_keys file is hard coded to resemble Linux. -// -// The last param 'install' if false, uninstalls the key. -func (b *backend) installPublicKeyInTarget(ctx context.Context, adminUser, username, ip string, port int, hostkey, dynamicPublicKey, installScript string, install bool) error { - // Transfer the newly generated public key to remote host under a random - // file name. This is to avoid name collisions from other requests. - _, publicKeyFileName, err := b.GenerateSaltedOTP(ctx) - if err != nil { - return err - } - - comm, err := createSSHComm(b.Logger(), adminUser, ip, port, hostkey) - if err != nil { - return err - } - defer comm.Close() - - err = comm.Upload(publicKeyFileName, bytes.NewBufferString(dynamicPublicKey), nil) - if err != nil { - return fmt.Errorf("error uploading public key: %w", err) - } - - // Transfer the script required to install or uninstall the key to the remote - // host under a random file name as well. This is to avoid name collisions - // from other requests. - scriptFileName := fmt.Sprintf("%s.sh", publicKeyFileName) - err = comm.Upload(scriptFileName, bytes.NewBufferString(installScript), nil) - if err != nil { - return fmt.Errorf("error uploading install script: %w", err) - } - - // Create a session to run remote command that triggers the script to install - // or uninstall the key. - session, err := comm.NewSession() - if err != nil { - return fmt.Errorf("unable to create SSH Session using public keys: %w", err) - } - if session == nil { - return fmt.Errorf("invalid session object") - } - defer session.Close() - - authKeysFileName := fmt.Sprintf("/home/%s/.ssh/authorized_keys", username) - - var installOption string - if install { - installOption = "install" - } else { - installOption = "uninstall" - } - - // Give execute permissions to install script, run and delete it. - chmodCmd := fmt.Sprintf("chmod +x %s", scriptFileName) - scriptCmd := fmt.Sprintf("./%s %s %s %s", scriptFileName, installOption, publicKeyFileName, authKeysFileName) - rmCmd := fmt.Sprintf("rm -f %s", scriptFileName) - targetCmd := fmt.Sprintf("%s;%s;%s", chmodCmd, scriptCmd, rmCmd) - - return session.Run(targetCmd) -} - // Takes an IP address and role name and checks if the IP is part // of CIDR blocks belonging to the role. 
func roleContainsIP(ctx context.Context, s logical.Storage, roleName string, ip string) (bool, error) { @@ -152,52 +89,6 @@ func cidrListContainsIP(ip, cidrList string) (bool, error) { return false, nil } -func insecureIgnoreHostWarning(logger log.Logger) ssh.HostKeyCallback { - return func(hostname string, remote net.Addr, key ssh.PublicKey) error { - logger.Warn("cannot verify server key: host key validation disabled") - return nil - } -} - -func createSSHComm(logger log.Logger, username, ip string, port int, hostkey string) (*comm, error) { - signer, err := ssh.ParsePrivateKey([]byte(hostkey)) - if err != nil { - return nil, err - } - - clientConfig := &ssh.ClientConfig{ - User: username, - Auth: []ssh.AuthMethod{ - ssh.PublicKeys(signer), - }, - HostKeyCallback: insecureIgnoreHostWarning(logger), - Timeout: 1 * time.Minute, - } - - connfunc := func() (net.Conn, error) { - c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), 15*time.Second) - if err != nil { - return nil, err - } - - if tcpConn, ok := c.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(5 * time.Second) - } - - return c, nil - } - config := &SSHCommConfig{ - SSHConfig: clientConfig, - Connection: connfunc, - Pty: false, - DisableAgent: true, - Logger: logger, - } - - return SSHCommNew(fmt.Sprintf("%s:%d", ip, port), config) -} - func parsePublicSSHKey(key string) (ssh.PublicKey, error) { keyParts := strings.Split(key, " ") if len(keyParts) > 1 { diff --git a/builtin/logical/totp/backend.go b/builtin/logical/totp/backend.go index d2494b499549..08cbe385a01d 100644 --- a/builtin/logical/totp/backend.go +++ b/builtin/logical/totp/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package totp import ( @@ -10,6 +13,8 @@ import ( cache "github.com/patrickmn/go-cache" ) +const operationPrefixTOTP = "totp" + func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b := Backend() if err := b.Setup(ctx, conf); err != nil { diff --git a/builtin/logical/totp/backend_test.go b/builtin/logical/totp/backend_test.go index 0b68599df64c..1d3ba4d4f9ca 100644 --- a/builtin/logical/totp/backend_test.go +++ b/builtin/logical/totp/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package totp import ( diff --git a/builtin/logical/totp/cmd/totp/main.go b/builtin/logical/totp/cmd/totp/main.go index 4c96df7f3146..c051e133a4c6 100644 --- a/builtin/logical/totp/cmd/totp/main.go +++ b/builtin/logical/totp/cmd/totp/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: totp.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/totp/path_code.go b/builtin/logical/totp/path_code.go index af56f37da689..7e7278c10f87 100644 --- a/builtin/logical/totp/path_code.go +++ b/builtin/logical/totp/path_code.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package totp import ( @@ -14,6 +17,12 @@ import ( func pathCode(b *backend) *framework.Path { return &framework.Path{ Pattern: "code/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "code", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -25,9 +34,19 @@ func pathCode(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathReadCode, - logical.UpdateOperation: b.pathValidateCode, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathReadCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "generate", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathValidateCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "validate", + }, + }, }, HelpSynopsis: pathCodeHelpSyn, diff --git a/builtin/logical/totp/path_keys.go b/builtin/logical/totp/path_keys.go index d7f7f2abe323..049ddddad3e5 100644 --- a/builtin/logical/totp/path_keys.go +++ b/builtin/logical/totp/path_keys.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package totp import ( @@ -21,6 +24,11 @@ func pathListKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeyList, }, @@ -33,6 +41,12 @@ func pathListKeys(b *backend) *framework.Path { func pathKeys(b *backend) *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -108,10 +122,25 @@ func pathKeys(b *backend) *framework.Path { }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.ReadOperation: b.pathKeyRead, - logical.UpdateOperation: b.pathKeyCreate, - logical.DeleteOperation: b.pathKeyDelete, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathKeyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathKeyCreate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathKeyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, }, HelpSynopsis: pathKeyHelpSyn, diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go index 668254747962..e30d7660496b 100644 --- a/builtin/logical/transit/backend.go +++ b/builtin/logical/transit/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
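The read/update split on the TOTP code path above (generate on read, validate on update) can be exercised as follows; a sketch under the assumption that the backend is mounted at "totp/", a key named "my-key" already exists, and the client from the earlier sketches is in scope:

```
// ReadOperation -> pathReadCode: generate the current code for the key.
gen, err := client.Logical().Read("totp/code/my-key")
if err != nil {
	log.Fatal(err)
}
code := gen.Data["code"].(string)

// UpdateOperation -> pathValidateCode: check a code against the key.
val, err := client.Logical().Write("totp/code/my-key", map[string]interface{}{
	"code": code,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(val.Data["valid"]) // true if the code is currently valid
```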
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -16,8 +19,12 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -// Minimum cache size for transit backend -const minCacheSize = 10 +const ( + operationPrefixTransit = "transit" + + // Minimum cache size for transit backend + minCacheSize = 10 +) func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { b, err := Backend(ctx, conf) @@ -50,6 +57,7 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathImportVersion(), b.pathKeys(), b.pathListKeys(), + b.pathBYOKExportKeys(), b.pathExportKeys(), b.pathKeysConfig(), b.pathEncrypt(), @@ -65,6 +73,8 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) b.pathTrim(), b.pathCacheConfig(), b.pathConfigKeys(), + b.pathCreateCsr(), + b.pathImportCertChain(), }, Secrets: []*framework.Secret{}, @@ -73,6 +83,8 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) PeriodicFunc: b.periodicFunc, } + b.backendUUID = conf.BackendUUID + // determine cacheSize to use. Defaults to 0 which means unlimited cacheSize := 0 useCache := !conf.System.CachingDisabled() @@ -106,6 +118,7 @@ type backend struct { cacheSizeChanged bool checkAutoRotateAfter time.Time autoRotateOnce sync.Once + backendUUID string } func GetCacheSizeFromStorage(ctx context.Context, s logical.Storage) (int, error) { @@ -235,6 +248,7 @@ func (b *backend) autoRotateKeys(ctx context.Context, req *logical.Request) erro continue } + // rotateIfRequired properly acquires/releases the lock on p err = b.rotateIfRequired(ctx, req, key, p) if err != nil { errs = multierror.Append(errs, err) @@ -262,6 +276,11 @@ func (b *backend) rotateIfRequired(ctx context.Context, req *logical.Request, ke return nil } + // We can't auto-rotate managed keys + if p.Type == keysutil.KeyType_MANAGED_KEY { + return nil + } + // Retrieve the latest version of the policy and determine if it is time to rotate. latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] if time.Now().After(latestKey.CreationTime.Add(p.AutoRotatePeriod)) { diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go index 71cbfb641d82..b7d5cc8b4bf7 100644 --- a/builtin/logical/transit/backend_test.go +++ b/builtin/logical/transit/backend_test.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "crypto" + "crypto/ed25519" cryptoRand "crypto/rand" "crypto/x509" "encoding/base64" @@ -1068,9 +1072,7 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated response") p, err := keysutil.LoadPolicy(context.Background(), storage, path.Join("policy", "testkey")) if err != nil { @@ -1151,10 +1153,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Now test encrypting the same value twice req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "b25ldHdvdGhyZWVl" // "onetwothreee" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } @@ -1185,11 +1189,10 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // For sanity, also check a different nonce value... req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } - if ver < 2 { + if ver == 0 { req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" } else { req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S" } @@ -1228,10 +1231,12 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // ...and a different context value req.Data = map[string]interface{}{ - "plaintext": "emlwIHphcA==", // "zip zap" - "nonce": "dHdvdGhyZWVmb3Vy", // "twothreefour" + "plaintext": "emlwIHphcA==", // "zip zap" "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -1343,9 +1348,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Finally, check operations on empty values // First, check without setting a plaintext at all req.Data = map[string]interface{}{ - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err == nil { t.Fatal("expected error, got nil") } @@ -1360,9 +1367,11 @@ func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyT // Now set plaintext to empty req.Data = map[string]interface{}{ "plaintext": "", - "nonce": "b25ldHdvdGhyZWVl", // "onetwothreee" "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } resp, err = b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) } @@ -1556,9 +1565,7 @@ func TestBadInput(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated response") req.Path = "decrypt/test" req.Data = map[string]interface{}{ @@ -1647,9 +1654,7 @@ func
TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated response") // Write a key with an auto rotate value one day in the future req = &logical.Request{ @@ -1664,9 +1669,7 @@ func TestTransit_AutoRotateKeys(t *testing.T) { if err != nil { t.Fatal(err) } - if resp != nil { - t.Fatal("expected nil response") - } + require.NotNil(t, resp, "expected populated response") // Run the rotation check and ensure none of the keys have rotated b.checkAutoRotateAfter = time.Now() @@ -2019,3 +2022,284 @@ func TestTransitPKICSR(t *testing.T) { t.Logf("root: %v", rootCertPEM) t.Logf("leaf: %v", leafCertPEM) } + +func TestTransit_ReadPublicKeyImported(t *testing.T) { + testTransit_ReadPublicKeyImported(t, "rsa-2048") + testTransit_ReadPublicKeyImported(t, "ecdsa-p256") + testTransit_ReadPublicKeyImported(t, "ed25519") +} + +func testTransit_ReadPublicKeyImported(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) + } + + // Read key + readReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/" + keyID, + Storage: s, + } + + readResp, err := b.HandleRequest(context.Background(), readReq) + if err != nil || (readResp != nil && readResp.IsError()) { + t.Fatalf("failed to read key. err: %s\nresp: %#v", err, readResp) + } +} + +func TestTransit_SignWithImportedPublicKey(t *testing.T) { + testTransit_SignWithImportedPublicKey(t, "rsa-2048") + testTransit_SignWithImportedPublicKey(t, "ecdsa-p256") + testTransit_SignWithImportedPublicKey(t, "ed25519") +} + +func testTransit_SignWithImportedPublicKey(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key.
err: %s\nresp: %#v", err, importResp) + } + + // Sign text + signReq := &logical.Request{ + Path: "sign/" + keyID, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + }, + } + + _, err = b.HandleRequest(context.Background(), signReq) + if err == nil { + t.Fatalf("expected error, should have failed to sign input") + } +} + +func TestTransit_VerifyWithImportedPublicKey(t *testing.T) { + generateKeys(t) + keyType := "rsa-2048" + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + // generate ciphertext + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + + // Import private key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import key. err: %s\nresp: %#v", err, importResp) + } + + // Sign text + signReq := &logical.Request{ + Storage: s, + Path: "sign/" + keyID, + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + }, + } + + signResp, err := b.HandleRequest(context.Background(), signReq) + if err != nil || (signResp != nil && signResp.IsError()) { + t.Fatalf("failed to sign plaintext. err: %s\nresp: %#v", err, signResp) + } + + // Get signature + signature := signResp.Data["signature"].(string) + + // Import new key as public key + importPubReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", "public-key-rsa"), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importPubResp, err := b.HandleRequest(context.Background(), importPubReq) + if err != nil || (importPubResp != nil && importPubResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importPubResp) + } + + // Verify signed text + verifyReq := &logical.Request{ + Path: "verify/public-key-rsa", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + "signature": signature, + }, + } + + verifyResp, err := b.HandleRequest(context.Background(), verifyReq) + if err != nil || (verifyResp != nil && verifyResp.IsError()) { + t.Fatalf("failed to verify signed data.
err: %s\nresp: %#v", err, verifyResp)
+ }
+}
+
+func TestTransit_ExportPublicKeyImported(t *testing.T) {
+ testTransit_ExportPublicKeyImported(t, "rsa-2048")
+ testTransit_ExportPublicKeyImported(t, "ecdsa-p256")
+ testTransit_ExportPublicKeyImported(t, "ed25519")
+}
+
+func testTransit_ExportPublicKeyImported(t *testing.T, keyType string) {
+ generateKeys(t)
+ b, s := createBackendWithStorage(t)
+ keyID, err := uuid.GenerateUUID()
+ if err != nil {
+ t.Fatalf("failed to generate key ID: %s", err)
+ }
+
+ // Get key
+ privateKey := getKey(t, keyType)
+ publicKeyBytes, err := getPublicKey(privateKey, keyType)
+ if err != nil {
+ t.Fatalf("failed to extract the public key: %s", err)
+ }
+
+ t.Logf("generated key: %v", string(publicKeyBytes))
+
+ // Import key
+ importReq := &logical.Request{
+ Storage: s,
+ Operation: logical.UpdateOperation,
+ Path: fmt.Sprintf("keys/%s/import", keyID),
+ Data: map[string]interface{}{
+ "public_key": publicKeyBytes,
+ "type": keyType,
+ "exportable": true,
+ },
+ }
+ importResp, err := b.HandleRequest(context.Background(), importReq)
+ if err != nil || (importResp != nil && importResp.IsError()) {
+ t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp)
+ }
+
+ t.Logf("importing key: %v", importResp)
+
+ // Export key
+ exportReq := &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: fmt.Sprintf("export/public-key/%s/latest", keyID),
+ Storage: s,
+ }
+
+ exportResp, err := b.HandleRequest(context.Background(), exportReq)
+ if err != nil || (exportResp != nil && exportResp.IsError()) {
+ t.Fatalf("failed to export key. err: %v\nresp: %#v", err, exportResp)
+ }
+
+ t.Logf("exporting key: %v", exportResp)
+
+ responseKeys, exist := exportResp.Data["keys"]
+ if !exist {
+ t.Fatal("expected response data to hold a 'keys' field")
+ }
+
+ exportedKeyBytes := responseKeys.(map[string]string)["1"]
+
+ if keyType != "ed25519" {
+ exportedKeyBlock, _ := pem.Decode([]byte(exportedKeyBytes))
+ publicKeyBlock, _ := pem.Decode(publicKeyBytes)
+
+ if !reflect.DeepEqual(publicKeyBlock.Bytes, exportedKeyBlock.Bytes) {
+ t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKeyBlock.Bytes, publicKeyBlock.Bytes)
+ }
+ } else {
+ exportedKey, err := base64.StdEncoding.DecodeString(exportedKeyBytes)
+ if err != nil {
+ t.Fatalf("error decoding exported key bytes (%v) from base64 for key type %v: %v", exportedKeyBytes, keyType, err)
+ }
+
+ publicKeyBlock, _ := pem.Decode(publicKeyBytes)
+ publicKeyParsed, err := x509.ParsePKIXPublicKey(publicKeyBlock.Bytes)
+ if err != nil {
+ t.Fatalf("error decoding source key bytes (%v) from PKIX marshaling for key type %v: %v", publicKeyBlock.Bytes, keyType, err)
+ }
+
+ if !reflect.DeepEqual([]byte(publicKeyParsed.(ed25519.PublicKey)), exportedKey) {
+ t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKey, publicKeyParsed)
+ }
+ }
+}
diff --git a/builtin/logical/transit/cmd/transit/main.go b/builtin/logical/transit/cmd/transit/main.go
index 25d4675b9083..701f7f00e763 100644
--- a/builtin/logical/transit/cmd/transit/main.go
+++ b/builtin/logical/transit/cmd/transit/main.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package main import ( @@ -17,9 +20,11 @@ func main() { tlsConfig := apiClientMeta.GetTLSConfig() tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) - if err := plugin.Serve(&plugin.ServeOpts{ + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ BackendFactoryFunc: transit.Factory, - TLSProviderFunc: tlsProviderFunc, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, }); err != nil { logger := hclog.New(&hclog.LoggerOptions{}) diff --git a/builtin/logical/transit/managed_key_util.go b/builtin/logical/transit/managed_key_util.go new file mode 100644 index 000000000000..ccc1c324f2ec --- /dev/null +++ b/builtin/logical/transit/managed_key_util.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package transit + +import ( + "context" + "errors" +) + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func GetManagedKeyUUID(ctx context.Context, b *backend, keyName string, keyId string) (uuid string, err error) { + return "", errEntOnly +} diff --git a/builtin/logical/transit/path_backup.go b/builtin/logical/transit/path_backup.go index ef13f0aab88c..019a28408e67 100644 --- a/builtin/logical/transit/path_backup.go +++ b/builtin/logical/transit/path_backup.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -10,6 +13,13 @@ import ( func (b *backend) pathBackup() *framework.Path { return &framework.Path{ Pattern: "backup/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "back-up", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_backup_test.go b/builtin/logical/transit/path_backup_test.go index 89c5c3db597f..05d7a10f600b 100644 --- a/builtin/logical/transit/path_backup_test.go +++ b/builtin/logical/transit/path_backup_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -36,6 +39,7 @@ func TestTransit_BackupRestore(t *testing.T) { testBackupRestore(t, "rsa-2048", "hmac-verify") testBackupRestore(t, "rsa-3072", "hmac-verify") testBackupRestore(t, "rsa-4096", "hmac-verify") + testBackupRestore(t, "hmac", "hmac-verify") } func testBackupRestore(t *testing.T, keyType, feature string) { @@ -54,6 +58,9 @@ func testBackupRestore(t *testing.T, keyType, feature string) { "exportable": true, }, } + if keyType == "hmac" { + keyReq.Data["key_size"] = 32 + } resp, err = b.HandleRequest(context.Background(), keyReq) if err != nil || (resp != nil && resp.IsError()) { t.Fatalf("resp: %#v\nerr: %v", resp, err) diff --git a/builtin/logical/transit/path_byok.go b/builtin/logical/transit/path_byok.go new file mode 100644 index 000000000000..4826203352cc --- /dev/null +++ b/builtin/logical/transit/path_byok.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathBYOKExportKeys() *framework.Path { + return &framework.Path{ + Pattern: "byok-export/" + framework.GenericNameRegex("destination") + "/" + framework.GenericNameRegex("source") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "byok", + OperationSuffix: "key|key-version", + }, + + Fields: map[string]*framework.FieldSchema{ + "destination": { + Type: framework.TypeString, + Description: "Destination key to export to; usually the public wrapping key of another Transit instance.", + }, + "source": { + Type: framework.TypeString, + Description: "Source key to export; could be any present key within Transit.", + }, + "version": { + Type: framework.TypeString, + Description: "Optional version of the key to export, else all key versions are exported.", + }, + "hash": { + Type: framework.TypeString, + Description: "Hash function to use for inner OAEP encryption. Defaults to SHA256.", + Default: "SHA256", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathPolicyBYOKExportRead, + }, + + HelpSynopsis: pathBYOKExportHelpSyn, + HelpDescription: pathBYOKExportHelpDesc, + } +} + +func (b *backend) pathPolicyBYOKExportRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + dst := d.Get("destination").(string) + src := d.Get("source").(string) + version := d.Get("version").(string) + hash := d.Get("hash").(string) + + dstP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: dst, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if dstP == nil { + return nil, fmt.Errorf("no such destination key to export to") + } + if !b.System().CachingDisabled() { + dstP.Lock(false) + } + defer dstP.Unlock() + + srcP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: src, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if srcP == nil { + return nil, fmt.Errorf("no such source key for export") + } + if !b.System().CachingDisabled() { + srcP.Lock(false) + } + defer srcP.Unlock() + + if !srcP.Exportable { + return logical.ErrorResponse("key is not exportable"), nil + } + + retKeys := map[string]string{} + switch version { + case "": + for k, v := range srcP.Keys { + exportKey, err := getBYOKExportKey(dstP, srcP, &v, hash) + if err != nil { + return nil, err + } + retKeys[k] = exportKey + } + + default: + var versionValue int + if version == "latest" { + versionValue = srcP.LatestVersion + } else { + version = strings.TrimPrefix(version, "v") + versionValue, err = strconv.Atoi(version) + if err != nil { + return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest + } + } + + if versionValue < srcP.MinDecryptionVersion { + return logical.ErrorResponse("version for export is below minimum decryption version"), logical.ErrInvalidRequest + } + key, ok := srcP.Keys[strconv.Itoa(versionValue)] + if !ok { + return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest + } + + exportKey, err := getBYOKExportKey(dstP, srcP, &key, hash) + 
if err != nil {
+ return nil, err
+ }
+
+ retKeys[strconv.Itoa(versionValue)] = exportKey
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": srcP.Name,
+ "type": srcP.Type.String(),
+ "keys": retKeys,
+ },
+ }
+
+ return resp, nil
+}
+
+func getBYOKExportKey(dstP *keysutil.Policy, srcP *keysutil.Policy, key *keysutil.KeyEntry, hash string) (string, error) {
+ if dstP == nil || srcP == nil {
+ return "", errors.New("nil policy provided")
+ }
+
+ var targetKey interface{}
+ switch srcP.Type {
+ case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305, keysutil.KeyType_HMAC:
+ targetKey = key.Key
+ case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096:
+ targetKey = key.RSAKey
+ case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521:
+ var curve elliptic.Curve
+ switch srcP.Type {
+ case keysutil.KeyType_ECDSA_P384:
+ curve = elliptic.P384()
+ case keysutil.KeyType_ECDSA_P521:
+ curve = elliptic.P521()
+ default:
+ curve = elliptic.P256()
+ }
+ pubKey := ecdsa.PublicKey{
+ Curve: curve,
+ X: key.EC_X,
+ Y: key.EC_Y,
+ }
+ targetKey = &ecdsa.PrivateKey{
+ PublicKey: pubKey,
+ D: key.EC_D,
+ }
+ case keysutil.KeyType_ED25519:
+ targetKey = ed25519.PrivateKey(key.Key)
+ default:
+ return "", fmt.Errorf("unable to export to unknown key type: %v", srcP.Type)
+ }
+
+ hasher, err := parseHashFn(hash)
+ if err != nil {
+ return "", err
+ }
+
+ return dstP.WrapKey(0, targetKey, srcP.Type, hasher)
+}
+
+const pathBYOKExportHelpSyn = `Securely export named encryption or signing key`
+
+const pathBYOKExportHelpDesc = `
+This path is used to export the named keys that are configured as
+exportable.
+
+Unlike the regular /export/:name[/:version] paths, this path uses
+the same encryption specification as /import, allowing secure migration
+of keys between clusters to enable workloads to communicate between
+them.
+
+Presently this only works for RSA destination keys.
+`
diff --git a/builtin/logical/transit/path_byok_test.go b/builtin/logical/transit/path_byok_test.go
new file mode 100644
index 000000000000..44dbafa18be8
--- /dev/null
+++ b/builtin/logical/transit/path_byok_test.go
@@ -0,0 +1,229 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package transit
+
+import (
+ "context"
+ "testing"
+
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestTransit_BYOKExportImport(t *testing.T) {
+ // Test encryption/decryption after a restore for supported keys
+ testBYOKExportImport(t, "aes128-gcm96", "encrypt-decrypt")
+ testBYOKExportImport(t, "aes256-gcm96", "encrypt-decrypt")
+ testBYOKExportImport(t, "chacha20-poly1305", "encrypt-decrypt")
+ testBYOKExportImport(t, "rsa-2048", "encrypt-decrypt")
+ testBYOKExportImport(t, "rsa-3072", "encrypt-decrypt")
+ testBYOKExportImport(t, "rsa-4096", "encrypt-decrypt")
+
+ // Test signing/verification after a restore for supported keys
+ testBYOKExportImport(t, "ecdsa-p256", "sign-verify")
+ testBYOKExportImport(t, "ecdsa-p384", "sign-verify")
+ testBYOKExportImport(t, "ecdsa-p521", "sign-verify")
+ testBYOKExportImport(t, "ed25519", "sign-verify")
+ testBYOKExportImport(t, "rsa-2048", "sign-verify")
+ testBYOKExportImport(t, "rsa-3072", "sign-verify")
+ testBYOKExportImport(t, "rsa-4096", "sign-verify")
+
+ // Test HMAC sign/verify after a restore for supported keys. 
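+ // (HMAC keys must be created with an explicit key_size; the helper below sets key_size = 32 for this case.)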
+ testBYOKExportImport(t, "hmac", "hmac-verify") +} + +func testBYOKExportImport(t *testing.T, keyType, feature string) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create a key + keyReq := &logical.Request{ + Path: "keys/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + "exportable": true, + }, + } + if keyType == "hmac" { + keyReq.Data["key_size"] = 32 + } + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Read the wrapping key. + wrapKeyReq := &logical.Request{ + Path: "wrapping_key", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Import the wrapping key. + wrapKeyImportReq := &logical.Request{ + Path: "keys/wrapper/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "public_key": resp.Data["public_key"], + "type": "rsa-4096", + }, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyImportReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Export the key + backupReq := &logical.Request{ + Path: "byok-export/wrapper/test-source", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), backupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + keys := resp.Data["keys"].(map[string]string) + + // Import the key to a new name. + restoreReq := &logical.Request{ + Path: "keys/test/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": keys["1"], + "type": keyType, + }, + } + resp, err = b.HandleRequest(context.Background(), restoreReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + plaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + // Perform encryption, signing or hmac-ing based on the set 'feature' + var encryptReq, signReq, hmacReq *logical.Request + var ciphertext, signature, hmac string + switch feature { + case "encrypt-decrypt": + encryptReq = &logical.Request{ + Path: "encrypt/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + ciphertext = resp.Data["ciphertext"].(string) + + case "sign-verify": + signReq = &logical.Request{ + Path: "sign/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + signature = resp.Data["signature"].(string) + + case "hmac-verify": + hmacReq = &logical.Request{ + Path: "hmac/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + 
t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + hmac = resp.Data["hmac"].(string) + } + + // validationFunc verifies the ciphertext, signature or hmac based on the + // set 'feature' + validationFunc := func(keyName string) { + var decryptReq *logical.Request + var verifyReq *logical.Request + switch feature { + case "encrypt-decrypt": + decryptReq = &logical.Request{ + Path: "decrypt/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": ciphertext, + }, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + if resp.Data["plaintext"].(string) != plaintextB64 { + t.Fatalf("bad: plaintext; expected: %q, actual: %q", plaintextB64, resp.Data["plaintext"].(string)) + } + case "sign-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "signature": signature, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: signature verification failed for key type %q", keyType) + } + + case "hmac-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "hmac": hmac, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: HMAC verification failed for key type %q", keyType) + } + } + } + + // Ensure that the restored key is functional + validationFunc("test") + + // Ensure the original key is functional + validationFunc("test-source") +} diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go index e7692997668d..941f98250ad2 100644 --- a/builtin/logical/transit/path_cache_config.go +++ b/builtin/logical/transit/path_cache_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -11,6 +14,11 @@ import ( func (b *backend) pathCacheConfig() *framework.Path { return &framework.Path{ Pattern: "cache-config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + }, + Fields: map[string]*framework.FieldSchema{ "size": { Type: framework.TypeInt, @@ -24,16 +32,18 @@ func (b *backend) pathCacheConfig() *framework.Path { logical.ReadOperation: &framework.PathOperation{ Callback: b.pathCacheConfigRead, Summary: "Returns the size of the active cache", + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "cache-configuration", + }, }, logical.UpdateOperation: &framework.PathOperation{ Callback: b.pathCacheConfigWrite, Summary: "Configures a new cache of the specified size", - }, - - logical.CreateOperation: &framework.PathOperation{ - Callback: b.pathCacheConfigWrite, - Summary: "Configures a new cache of the specified size", + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "cache", + }, }, }, @@ -65,7 +75,11 @@ func (b *backend) pathCacheConfigWrite(ctx context.Context, req *logical.Request return nil, err } - return nil, nil + return &logical.Response{ + Data: map[string]interface{}{ + "size": cacheSize, + }, + }, nil } type configCache struct { diff --git a/builtin/logical/transit/path_cache_config_test.go b/builtin/logical/transit/path_cache_config_test.go index d8e0a7b56d87..0141f6a32def 100644 --- a/builtin/logical/transit/path_cache_config_test.go +++ b/builtin/logical/transit/path_cache_config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_certificates.go b/builtin/logical/transit/path_certificates.go new file mode 100644 index 000000000000..bf61b8425e52 --- /dev/null +++ b/builtin/logical/transit/path_certificates.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/helper/errutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathCreateCsr() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/csr", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate-csr-for-key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Required: true, + Description: "Name of the key", + }, + "version": { + Type: framework.TypeInt, + Required: false, + Description: "Optional version of key, 'latest' if not set", + }, + "csr": { + Type: framework.TypeString, + Required: false, + Description: `PEM encoded CSR template. The information attributes +will be used as a basis for the CSR with the key in transit. If not set, an empty CSR is returned.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCreateCsrWrite, + }, + }, + HelpSynopsis: pathCreateCsrHelpSyn, + HelpDescription: pathCreateCsrHelpDesc, + } +} + +func (b *backend) pathImportCertChain() *framework.Path { + return &framework.Path{ + // NOTE: `set-certificate` or `set_certificate`? 
Paths seem to use different
+ // case, such as `transit/wrapping_key` and `transit/cache-config`.
+ Pattern: "keys/" + framework.GenericNameRegex("name") + "/set-certificate",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixTransit,
+ OperationVerb: "set-certificate-for-key",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "name": {
+ Type: framework.TypeString,
+ Required: true,
+ Description: "Name of the key",
+ },
+ "version": {
+ Type: framework.TypeInt,
+ Required: false,
+ Description: "Optional version of key, 'latest' if not set",
+ },
+ "certificate_chain": {
+ Type: framework.TypeString,
+ Required: true,
+ Description: `PEM encoded certificate chain. It should be composed
+of one or more concatenated PEM blocks and ordered starting from the end-entity certificate.`,
+ },
+ },
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathImportCertChainWrite,
+ },
+ },
+ HelpSynopsis: pathImportCertChainHelpSyn,
+ HelpDescription: pathImportCertChainHelpDesc,
+ }
+}
+
+func (b *backend) pathCreateCsrWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ }, b.GetRandomReader())
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse(fmt.Sprintf("key with provided name '%s' not found", name)), logical.ErrInvalidRequest
+ }
+ if !b.System().CachingDisabled() {
+ p.Lock(false) // NOTE: No lock on "read" operations?
+ }
+ defer p.Unlock()
+
+ // Check if transit key supports signing
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type '%s' does not support signing", p.Type)), logical.ErrInvalidRequest
+ }
+
+ // Check if key can be derived
+ if p.Derived {
+ return logical.ErrorResponse("operation not supported on keys with derivation enabled"), logical.ErrInvalidRequest
+ }
+
+ // Transit key version
+ signingKeyVersion := p.LatestVersion
+ // NOTE: BYOK endpoints seem to remove "v" prefix from version,
+ // are versions like that also supported? 
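+ // (The "version" field on this endpoint is declared as framework.TypeInt, so "v"-prefixed strings are not accepted here; omitting the field selects the latest version.)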
+ if version, ok := d.GetOk("version"); ok {
+ signingKeyVersion = version.(int)
+ }
+
+ // Read and parse CSR template
+ pemCsrTemplate := d.Get("csr").(string)
+ csrTemplate, err := parseCsr(pemCsrTemplate)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ pemCsr, err := p.CreateCsr(signingKeyVersion, csrTemplate)
+ if err != nil {
+ prefixedErr := fmt.Errorf("could not create the csr: %w", err)
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(prefixedErr.Error()), logical.ErrInvalidRequest
+ default:
+ return nil, prefixedErr
+ }
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "csr": string(pemCsr),
+ },
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathImportCertChainWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ name := d.Get("name").(string)
+
+ p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
+ Storage: req.Storage,
+ Name: name,
+ }, b.GetRandomReader())
+ if err != nil {
+ return nil, err
+ }
+ if p == nil {
+ return logical.ErrorResponse(fmt.Sprintf("key with provided name '%s' not found", name)), logical.ErrInvalidRequest
+ }
+ if !b.System().CachingDisabled() {
+ p.Lock(true) // NOTE: Lock as we might write to the policy
+ }
+ defer p.Unlock()
+
+ // Check if transit key supports signing
+ if !p.Type.SigningSupported() {
+ return logical.ErrorResponse(fmt.Sprintf("key type %s does not support signing", p.Type)), logical.ErrInvalidRequest
+ }
+
+ // Check if key can be derived
+ if p.Derived {
+ return logical.ErrorResponse("operation not supported on keys with derivation enabled"), logical.ErrInvalidRequest
+ }
+
+ // Transit key version
+ keyVersion := p.LatestVersion
+ if version, ok := d.GetOk("version"); ok {
+ keyVersion = version.(int)
+ }
+
+ // Get certificate chain
+ pemCertChain := d.Get("certificate_chain").(string)
+ certChain, err := parseCertificateChain(pemCertChain)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+ }
+
+ err = p.ValidateAndPersistCertificateChain(ctx, keyVersion, certChain, req.Storage)
+ if err != nil {
+ prefixedErr := fmt.Errorf("failed to persist certificate chain: %w", err)
+ switch err.(type) {
+ case errutil.UserError:
+ return logical.ErrorResponse(prefixedErr.Error()), logical.ErrInvalidRequest
+ default:
+ return nil, prefixedErr
+ }
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "name": p.Name,
+ "type": p.Type.String(),
+ "certificate-chain": pemCertChain,
+ },
+ }
+
+ return resp, nil
+}
+
+func parseCsr(csrStr string) (*x509.CertificateRequest, error) {
+ if csrStr == "" {
+ return &x509.CertificateRequest{}, nil
+ }
+
+ block, _ := pem.Decode([]byte(csrStr))
+ if block == nil {
+ return nil, errors.New("could not decode PEM certificate request")
+ }
+
+ csr, err := x509.ParseCertificateRequest(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return csr, nil
+}
+
+func parseCertificateChain(certChainString string) ([]*x509.Certificate, error) {
+ var certificates []*x509.Certificate
+
+ var pemCertBlocks []*pem.Block
+ pemBytes := []byte(strings.TrimSpace(certChainString))
+ for len(pemBytes) > 0 {
+ var pemCertBlock *pem.Block
+ pemCertBlock, pemBytes = pem.Decode(pemBytes)
+ if pemCertBlock == nil {
+ return nil, errors.New("could not decode PEM block in certificate chain")
+ }
+
+ switch pemCertBlock.Type {
+ case "CERTIFICATE", "X509 CERTIFICATE": 
pemCertBlocks = append(pemCertBlocks, pemCertBlock)
+ default:
+ // Ignore any other entries
+ }
+ }
+
+ if len(pemCertBlocks) == 0 {
+ return nil, errors.New("provided certificate chain did not contain any valid PEM certificate")
+ }
+
+ for _, certBlock := range pemCertBlocks {
+ cert, err := x509.ParseCertificate(certBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate in certificate chain: %w", err)
+ }
+
+ certificates = append(certificates, cert)
+ }
+
+ return certificates, nil
+}
+
+const pathCreateCsrHelpSyn = `Create a CSR from a key in transit`
+
+const pathCreateCsrHelpDesc = `This path is used to create a CSR from a key in
+transit. If a CSR template is provided, its significant information, except key
+related data, is included in the CSR; otherwise an empty CSR is returned.
+`
+
+const pathImportCertChainHelpSyn = `Imports an externally-signed certificate
+chain into an existing key version`
+
+const pathImportCertChainHelpDesc = `This path is used to import an externally-
+signed certificate chain into a key in transit. The leaf certificate's key has to
+match the selected key in transit.
+`
diff --git a/builtin/logical/transit/path_certificates_test.go b/builtin/logical/transit/path_certificates_test.go
new file mode 100644
index 000000000000..9a6305e7a048
--- /dev/null
+++ b/builtin/logical/transit/path_certificates_test.go
@@ -0,0 +1,379 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package transit
+
+import (
+ "context"
+ cryptoRand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/hashicorp/vault/vault"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransit_Certs_CreateCsr(t *testing.T) {
+ // NOTE: Use an existing CSR or generate one here? 
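+ // (A fixed, pre-generated CSR template is used below; per the endpoint contract only its attributes feed into the transit-signed CSR, never its key.)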
+ templateCsr := ` +-----BEGIN CERTIFICATE REQUEST----- +MIICRTCCAS0CAQAwADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM49 +McW7u3ILuAJfSFLUtGOMGBytHmMFcjTiX+5JcajFj0Uszb+HQ7eIsJJNXhVc/7fg +Z01DZvcCqb9ChEWE3xi4GEkPMXay7p7G1ooSLnQp6Z0lL5CuIFfMVOTvjfhTwRaJ +l9v2mMlm80BeiAUBqeoyGVrIh5fKASxaE0jrhjAxhGzqrXdDnL8A4na6ArprV4iS +aEAziODd2WmplSKgUwEaFdeG1t1bJf3o5ZQRCnKNtQcAk8UmgtvFEO8ohGMln/Fj +O7u7s6iRhOGf1g1NCAP5pGqxNx3bjz5f/CUcTSIGAReEomg41QTIhD9muCTL8qnm +6lS87wkGTv7qbeIGB7sCAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAfjE+jNqIk +4V1tL3g5XPjxr2+QcwddPf8opmbAzgt0+TiIHcDGBAxsXyi7sC9E5AFfFp7W07Zv +r5+v4i529K9q0BgGtHFswoEnhd4dC8Ye53HtSoEtXkBpZMDrtbS7eZa9WccT6zNx +4taTkpptZVrmvPj+jLLFkpKJJ3d+Gbrp6hiORPadT+igLKkqvTeocnhOdAtt427M +RXTVgN14pV3tqO+5MXzNw5tGNPcwWARWwPH9eCRxLwLUuxE4Qu73pUeEFjDEfGkN +iBnlTsTXBOMqSGryEkmRaZslWDvblvYeObYw+uc3kCbJ7jRy9soVwkbb5FueF/yC +O1aQIm23HrrG +-----END CERTIFICATE REQUEST----- +` + + testTransit_CreateCsr(t, "rsa-2048", templateCsr) + testTransit_CreateCsr(t, "rsa-3072", templateCsr) + testTransit_CreateCsr(t, "rsa-4096", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p256", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p384", templateCsr) + testTransit_CreateCsr(t, "ecdsa-p521", templateCsr) + testTransit_CreateCsr(t, "ed25519", templateCsr) + testTransit_CreateCsr(t, "aes256-gcm96", templateCsr) +} + +func testTransit_CreateCsr(t *testing.T, keyType, pemTemplateCsr string) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/test-key", + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + }, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + csrSignReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/test-key/csr", + Storage: s, + Data: map[string]interface{}{ + "csr": pemTemplateCsr, + }, + } + + resp, err = b.HandleRequest(context.Background(), csrSignReq) + + switch keyType { + case "rsa-2048", "rsa-3072", "rsa-4096", "ecdsa-p256", "ecdsa-p384", "ecdsa-p521", "ed25519": + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to sign CSR, err:%v resp:%#v", err, resp) + } + + signedCsrBytes, ok := resp.Data["csr"] + if !ok { + t.Fatal("expected response data to hold a 'csr' field") + } + + signedCsr, err := parseCsr(signedCsrBytes.(string)) + if err != nil { + t.Fatalf("failed to parse returned csr, err:%v", err) + } + + templateCsr, err := parseCsr(pemTemplateCsr) + if err != nil { + t.Fatalf("failed to parse returned template csr, err:%v", err) + } + + // NOTE: Check other fields? 
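+ // (Only the Subject is asserted against the template for now; other CSR fields such as SANs and extensions are left unchecked.)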
+ if !reflect.DeepEqual(signedCsr.Subject, templateCsr.Subject) { + t.Fatalf("subjects should have matched, err:%v", err) + } + + default: + if err == nil || (resp != nil && !resp.IsError()) { + t.Fatalf("should have failed to sign CSR, provided key type does not support signing") + } + } +} + +func TestTransit_Certs_ImportCertChain(t *testing.T) { + // Create Cluster + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + "pki": pki.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + // Mount transit backend + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + require.NoError(t, err) + + // Mount PKI backend + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }) + require.NoError(t, err) + + testTransit_ImportCertChain(t, client, "rsa-2048") + testTransit_ImportCertChain(t, client, "rsa-3072") + testTransit_ImportCertChain(t, client, "rsa-4096") + testTransit_ImportCertChain(t, client, "ecdsa-p256") + testTransit_ImportCertChain(t, client, "ecdsa-p384") + testTransit_ImportCertChain(t, client, "ecdsa-p521") + testTransit_ImportCertChain(t, client, "ed25519") +} + +func testTransit_ImportCertChain(t *testing.T, apiClient *api.Client, keyType string) { + keyName := fmt.Sprintf("%s", keyType) + issuerName := fmt.Sprintf("%s-issuer", keyType) + + // Create transit key + _, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{ + "type": keyType, + }) + require.NoError(t, err) + + // Setup a new CSR + privKey, err := rsa.GenerateKey(cryptoRand.Reader, 3072) + require.NoError(t, err) + + var csrTemplate x509.CertificateRequest + csrTemplate.Subject.CommonName = "example.com" + reqCsrBytes, err := x509.CreateCertificateRequest(cryptoRand.Reader, &csrTemplate, privKey) + require.NoError(t, err) + + pemTemplateCsr := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: reqCsrBytes, + }) + t.Logf("csr: %v", string(pemTemplateCsr)) + + // Create CSR from template CSR fields and key in transit + resp, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/csr", keyName), map[string]interface{}{ + "csr": string(pemTemplateCsr), + }) + require.NoError(t, err) + require.NotNil(t, resp) + pemCsr := resp.Data["csr"].(string) + + // Generate PKI root + resp, err = apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "issuer_name": issuerName, + "common_name": "PKI Root X1", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + rootCertPEM := resp.Data["certificate"].(string) + pemBlock, _ := pem.Decode([]byte(rootCertPEM)) + require.NotNil(t, pemBlock) + + rootCert, err := x509.ParseCertificate(pemBlock.Bytes) + require.NoError(t, err) + + // Create role to be used in the certificate issuing + resp, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "allowed_domains": "example.com", + "allow_bare_domains": true, + "basic_constraints_valid_for_non_ca": true, + "key_type": "any", + }) + require.NoError(t, err) + + // Sign the CSR + resp, err = apiClient.Logical().Write("pki/sign/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "csr": pemCsr, + "ttl": "10m", + }) + require.NoError(t, err) + 
require.NotNil(t, resp)
+
+ leafCertPEM := resp.Data["certificate"].(string)
+ pemBlock, _ = pem.Decode([]byte(leafCertPEM))
+ require.NotNil(t, pemBlock)
+
+ leafCert, err := x509.ParseCertificate(pemBlock.Bytes)
+ require.NoError(t, err)
+
+ require.NoError(t, leafCert.CheckSignatureFrom(rootCert))
+ t.Logf("root: %v", rootCertPEM)
+ t.Logf("leaf: %v", leafCertPEM)
+
+ certificateChain := strings.Join([]string{leafCertPEM, rootCertPEM}, "\n")
+ // Import certificate chain to transit key version
+ resp, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{
+ "certificate_chain": certificateChain,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ resp, err = apiClient.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ keys, ok := resp.Data["keys"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("could not cast Keys value")
+ }
+ keyData, ok := keys["1"].(map[string]interface{})
+ if !ok {
+ t.Fatalf("could not cast key version 1 from keys")
+ }
+ _, present := keyData["certificate_chain"]
+ if !present {
+ t.Fatalf("certificate chain not present in key version 1")
+ }
+}
+
+func TestTransit_Certs_ImportInvalidCertChain(t *testing.T) {
+ // Create Cluster
+ coreConfig := &vault.CoreConfig{
+ LogicalBackends: map[string]logical.Factory{
+ "transit": Factory,
+ "pki": pki.Factory,
+ },
+ }
+
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ cores := cluster.Cores
+ vault.TestWaitActive(t, cores[0].Core)
+ client := cores[0].Client
+
+ // Mount transit backend
+ err := client.Sys().Mount("transit", &api.MountInput{
+ Type: "transit",
+ })
+ require.NoError(t, err)
+
+ // Mount PKI backend
+ err = client.Sys().Mount("pki", &api.MountInput{
+ Type: "pki",
+ })
+ require.NoError(t, err)
+
+ testTransit_ImportInvalidCertChain(t, client, "rsa-2048")
+ testTransit_ImportInvalidCertChain(t, client, "rsa-3072")
+ testTransit_ImportInvalidCertChain(t, client, "rsa-4096")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p256")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p384")
+ testTransit_ImportInvalidCertChain(t, client, "ecdsa-p521")
+ testTransit_ImportInvalidCertChain(t, client, "ed25519")
+}
+
+func testTransit_ImportInvalidCertChain(t *testing.T, apiClient *api.Client, keyType string) {
+ keyName := fmt.Sprintf("%s", keyType)
+ issuerName := fmt.Sprintf("%s-issuer", keyType)
+
+ // Create transit key
+ _, err := apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{
+ "type": keyType,
+ })
+ require.NoError(t, err)
+
+ // Generate PKI root
+ resp, err := apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+ "issuer_name": issuerName,
+ "common_name": "PKI Root X1",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+
+ rootCertPEM := resp.Data["certificate"].(string)
+ pemBlock, _ := pem.Decode([]byte(rootCertPEM))
+ require.NotNil(t, pemBlock)
+
+ rootCert, err := x509.ParseCertificate(pemBlock.Bytes)
+ require.NoError(t, err)
+
+ pkiKeyType := "rsa"
+ pkiKeyBits := "0"
+ if strings.HasPrefix(keyType, "rsa") {
+ pkiKeyBits = keyType[4:]
+ } else if strings.HasPrefix(keyType, "ecdsa") {
+ pkiKeyType = "ec"
+ pkiKeyBits = keyType[7:]
+ } else if keyType == "ed25519" {
+ pkiKeyType = "ed25519"
+ pkiKeyBits = "0"
+ }
+
+ // Create role to be used in the certificate issuing
+ 
resp, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "allowed_domains": "example.com", + "allow_bare_domains": true, + "basic_constraints_valid_for_non_ca": true, + "key_type": pkiKeyType, + "key_bits": pkiKeyBits, + }) + require.NoError(t, err) + + // XXX -- Note subtle error: we issue a certificate with a new key, + // not using a CSR from Transit. + resp, err = apiClient.Logical().Write("pki/issue/example-dot-com", map[string]interface{}{ + "common_name": "example.com", + "issuer_ref": issuerName, + "ttl": "10m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + leafCertPEM := resp.Data["certificate"].(string) + pemBlock, _ = pem.Decode([]byte(leafCertPEM)) + require.NotNil(t, pemBlock) + + leafCert, err := x509.ParseCertificate(pemBlock.Bytes) + require.NoError(t, err) + + require.NoError(t, leafCert.CheckSignatureFrom(rootCert)) + t.Logf("root: %v", rootCertPEM) + t.Logf("leaf: %v", leafCertPEM) + + certificateChain := strings.Join([]string{leafCertPEM, rootCertPEM}, "\n") + + // Import certificate chain to transit key version + resp, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{ + "certificate_chain": certificateChain, + }) + require.Error(t, err) +} diff --git a/builtin/logical/transit/path_config_keys.go b/builtin/logical/transit/path_config_keys.go index 2294636e3951..45c38ac49f83 100644 --- a/builtin/logical/transit/path_config_keys.go +++ b/builtin/logical/transit/path_config_keys.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -21,6 +24,11 @@ var defaultKeysConfig = keysConfig{ func (b *backend) pathConfigKeys() *framework.Path { return &framework.Path{ Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + }, + Fields: map[string]*framework.FieldSchema{ "disable_upsert": { Type: framework.TypeBool, @@ -29,9 +37,20 @@ keys on the encrypt endpoint.`, }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathConfigKeysWrite, - logical.ReadOperation: b.pathConfigKeysRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, + }, }, HelpSynopsis: pathConfigKeysHelpSyn, diff --git a/builtin/logical/transit/path_config_keys_test.go b/builtin/logical/transit/path_config_keys_test.go index 8d8f9f940f28..d5aa12b9cfdf 100644 --- a/builtin/logical/transit/path_config_keys_test.go +++ b/builtin/logical/transit/path_config_keys_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go index 42da16191639..53aff54690bb 100644 --- a/builtin/logical/transit/path_datakey.go +++ b/builtin/logical/transit/path_datakey.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "crypto/rand" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/helper/constants" @@ -16,6 +20,13 @@ import ( func (b *backend) pathDatakey() *framework.Path { return &framework.Path{ Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "data-key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -131,7 +142,23 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d return nil, err } - ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey)) + var managedKeyFactory ManagedKeyFactory + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } + + managedKeyFactory = ManagedKeyFactory{ + managedKeyParams: keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + }, + } + } + + ciphertext, err := p.EncryptWithFactory(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey), nil, managedKeyFactory) if err != nil { switch err.(type) { case errutil.UserError: @@ -160,6 +187,10 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d }, } + if len(nonce) > 0 && !nonceAllowed(p) { + return nil, ErrNonceNotAllowed + } + if constants.IsFIPS() && shouldWarnAboutNonceUsage(p, nonce) { resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") } diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go index 429279e657d5..1daf74daf5d1 100644 --- a/builtin/logical/transit/path_decrypt.go +++ b/builtin/logical/transit/path_decrypt.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/sdk/framework" @@ -28,6 +32,12 @@ type DecryptBatchResponseItem struct { func (b *backend) pathDecrypt() *framework.Path { return &framework.Path{ Pattern: "decrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "decrypt", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -74,6 +84,15 @@ plaintext. On successful decryption, both the ciphertext and the associated data are attested not to have been tampered with. `, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be decrypted in a single batch. When this +parameter is set, if the parameters 'ciphertext', 'context' and 'nonce' are +also set, they will be ignored. 
Any batch output will preserve the order +of the batch input.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -165,6 +184,7 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() successesInBatch := false for i, item := range batchInputItems { @@ -182,7 +202,23 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d factory = AssocDataFactory{item.AssociatedData} } - plaintext, err := p.DecryptWithFactory(item.DecodedContext, item.DecodedNonce, item.Ciphertext, factory) + var managedKeyFactory ManagedKeyFactory + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + batchResponseItems[i].Error = errors.New("unsupported system view").Error() + } + + managedKeyFactory = ManagedKeyFactory{ + managedKeyParams: keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + }, + } + } + + plaintext, err := p.DecryptWithFactory(item.DecodedContext, item.DecodedNonce, item.Ciphertext, factory, managedKeyFactory) if err != nil { switch err.(type) { case errutil.InternalError: @@ -208,8 +244,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d } } else { if batchResponseItems[0].Error != "" { - p.Unlock() - if internalErrorInBatch { return nil, errutil.InternalError{Err: batchResponseItems[0].Error} } @@ -221,8 +255,6 @@ func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d } } - p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) } diff --git a/builtin/logical/transit/path_decrypt_bench_test.go b/builtin/logical/transit/path_decrypt_bench_test.go index 67d4bc3b5d5a..d0816fdb6444 100644 --- a/builtin/logical/transit/path_decrypt_bench_test.go +++ b/builtin/logical/transit/path_decrypt_bench_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go index 928439dd35de..a61d85ddc2aa 100644 --- a/builtin/logical/transit/path_decrypt_test.go +++ b/builtin/logical/transit/path_decrypt_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go index 5c2b029d90d4..05199cae09b1 100644 --- a/builtin/logical/transit/path_encrypt.go +++ b/builtin/logical/transit/path_encrypt.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "net/http" "reflect" @@ -74,9 +78,23 @@ func (a AssocDataFactory) GetAssociatedData() ([]byte, error) { return base64.StdEncoding.DecodeString(a.Encoded) } +type ManagedKeyFactory struct { + managedKeyParams keysutil.ManagedKeyParameters +} + +func (m ManagedKeyFactory) GetManagedKeyParameters() keysutil.ManagedKeyParameters { + return m.managedKeyParams +} + func (b *backend) pathEncrypt() *framework.Path { return &framework.Path{ Pattern: "encrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "encrypt", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -152,6 +170,14 @@ plaintext. On successful decryption, both the ciphertext and the associated data are attested not to have been tampered with. `, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be encrypted in a single batch. When this parameter +is set, if the parameters 'plaintext', 'context' and 'nonce' are also set, they +will be ignored. Any batch output will preserve the order of the batch input.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -412,6 +438,8 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d polReq.KeyType = keysutil.KeyType_ChaCha20_Poly1305 case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest + case "managed_key": + polReq.KeyType = keysutil.KeyType_MANAGED_KEY default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -432,6 +460,7 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() // Process batch request items. 
If encryption of any request // item fails, respectively mark the error in the response @@ -440,6 +469,13 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d successesInBatch := false for i, item := range batchInputItems { if batchResponseItems[i].Error != "" { + userErrorInBatch = true + continue + } + + if item.Nonce != "" && !nonceAllowed(p) { + userErrorInBatch = true + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() continue } @@ -457,7 +493,23 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d factory = AssocDataFactory{item.AssociatedData} } - ciphertext, err := p.EncryptWithFactory(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext, factory) + var managedKeyFactory ManagedKeyFactory + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + batchResponseItems[i].Error = errors.New("unsupported system view").Error() + } + + managedKeyFactory = ManagedKeyFactory{ + managedKeyParams: keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + }, + } + } + + ciphertext, err := p.EncryptWithFactory(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext, factory, managedKeyFactory) if err != nil { switch err.(type) { case errutil.InternalError: @@ -496,8 +548,6 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d } } else { if batchResponseItems[0].Error != "" { - p.Unlock() - if internalErrorInBatch { return nil, errutil.InternalError{Err: batchResponseItems[0].Error} } @@ -519,11 +569,28 @@ func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d resp.AddWarning("Attempted creation of the key during the encrypt operation, but it was created beforehand") } - p.Unlock() - return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) } +func nonceAllowed(p *keysutil.Policy) bool { + var supportedKeyType bool + switch p.Type { + case keysutil.KeyType_MANAGED_KEY: + return true + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: + supportedKeyType = true + default: + supportedKeyType = false + } + + if supportedKeyType && p.ConvergentEncryption && p.ConvergentVersion == 1 { + // We only use the user supplied nonce for v1 convergent encryption keys + return true + } + + return false +} + // Depending on the errors in the batch, different status codes should be returned. User errors // will return a 400 and precede internal errors which return a 500. The reasoning behind this is // that user errors are non-retryable without making changes to the request, and should be surfaced diff --git a/builtin/logical/transit/path_encrypt_bench_test.go b/builtin/logical/transit/path_encrypt_bench_test.go index e648c6e02fc3..a57c90fa7d63 100644 --- a/builtin/logical/transit/path_encrypt_bench_test.go +++ b/builtin/logical/transit/path_encrypt_bench_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go index 5846ac13b3bd..4f5088e8e669 100644 --- a/builtin/logical/transit/path_encrypt_test.go +++ b/builtin/logical/transit/path_encrypt_test.go @@ -1,14 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "encoding/json" + "fmt" + "net/http" "reflect" "strings" "testing" "github.com/hashicorp/vault/sdk/helper/keysutil" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/logical" "github.com/mitchellh/mapstructure" ) @@ -649,13 +655,26 @@ func TestTransit_BatchEncryptionCase12(t *testing.T) { } // Case13: Incorrect input for nonce when we aren't in convergent encryption should fail the operation -func TestTransit_BatchEncryptionCase13(t *testing.T) { +func TestTransit_EncryptionCase13(t *testing.T) { var err error b, s := createBackendWithStorage(t) + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + batchInput := []interface{}{ - map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "YmFkbm9uY2U="}, + map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"}, } batchData := map[string]interface{}{ @@ -667,10 +686,71 @@ func TestTransit_BatchEncryptionCase13(t *testing.T) { Storage: s, Data: batchData, } - _, err = b.HandleRequest(context.Background(), batchReq) + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } +} + +// Case14: Incorrect input for nonce when we are in convergent version 3 should fail +func TestTransit_EncryptionCase14(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + cReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/my-key", + Storage: s, + Data: map[string]interface{}{ + "convergent_encryption": "true", + "derived": "true", + }, + } + resp, err := b.HandleRequest(context.Background(), cReq) + if err != nil { + t.Fatal(err) + } + + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "context": "SGVsbG8sIFdvcmxkCg==", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + + batchInput := []interface{}{ + data, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) if err != nil { t.Fatal(err) } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } } // Test that the fast path function decodeBatchRequestItems behave like mapstructure.Decode() to decode []BatchRequestItem. 
@@ -941,3 +1021,48 @@ func TestShouldWarnAboutNonceUsage(t *testing.T) { } } } + +func TestTransit_EncryptWithRSAPublicKey(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyType := "rsa-2048" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("encrypt/%s", keyID), + Storage: s, + Data: map[string]interface{}{ + "plaintext": "bXkgc2VjcmV0IGRhdGE=", + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } +} diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go index 3b0d97e15e73..c47b2b673543 100644 --- a/builtin/logical/transit/path_export.go +++ b/builtin/logical/transit/path_export.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" + "crypto" "crypto/ecdsa" "crypto/elliptic" - "crypto/rsa" "crypto/x509" "encoding/base64" "encoding/pem" @@ -19,18 +22,27 @@ import ( ) const ( - exportTypeEncryptionKey = "encryption-key" - exportTypeSigningKey = "signing-key" - exportTypeHMACKey = "hmac-key" + exportTypeEncryptionKey = "encryption-key" + exportTypeSigningKey = "signing-key" + exportTypeHMACKey = "hmac-key" + exportTypePublicKey = "public-key" + exportTypeCertificateChain = "certificate-chain" ) func (b *backend) pathExportKeys() *framework.Path { return &framework.Path{ Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "export", + OperationSuffix: "key|key-version", + }, + Fields: map[string]*framework.FieldSchema{ "type": { Type: framework.TypeString, - Description: "Type of key to export (encryption-key, signing-key, hmac-key)", + Description: "Type of key to export (encryption-key, signing-key, hmac-key, public-key)", }, "name": { Type: framework.TypeString, @@ -60,6 +72,8 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request case exportTypeEncryptionKey: case exportTypeSigningKey: case exportTypeHMACKey: + case exportTypePublicKey: + case exportTypeCertificateChain: default: return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest } @@ -79,8 +93,8 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request } defer p.Unlock() - if !p.Exportable { - return logical.ErrorResponse("key is not exportable"), nil + if !p.Exportable && exportType != exportTypePublicKey && exportType != exportTypeCertificateChain { + return logical.ErrorResponse("private key material is not exportable"), nil } switch exportType { @@ -92,6 +106,10 @@ func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request if !p.Type.SigningSupported() { return 
logical.ErrorResponse("signing not supported for the key"), logical.ErrInvalidRequest } + case exportTypeCertificateChain: + if !p.Type.SigningSupported() { + return logical.ErrorResponse("certificate chain not supported for keys that do not support signing"), logical.ErrInvalidRequest + } } retKeys := map[string]string{} @@ -151,7 +169,11 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st switch exportType { case exportTypeHMACKey: - return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.HMACKey)), nil + src := key.HMACKey + if policy.Type == keysutil.KeyType_HMAC { + src = key.Key + } + return strings.TrimSpace(base64.StdEncoding.EncodeToString(src)), nil case exportTypeEncryptionKey: switch policy.Type { @@ -159,7 +181,11 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - return encodeRSAPrivateKey(key.RSAKey), nil + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil } case exportTypeSigningKey: @@ -181,26 +207,122 @@ func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType st return ecKey, nil case keysutil.KeyType_ED25519: + if len(key.Key) == 0 { + return "", nil + } + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: - return encodeRSAPrivateKey(key.RSAKey), nil + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + case exportTypePublicKey: + switch policy.Type { + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch policy.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + ecKey, err := keyEntryToECPublicKey(key, curve) + if err != nil { + return "", err + } + return ecKey, nil + + case keysutil.KeyType_ED25519: + return strings.TrimSpace(key.FormattedPublicKey), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + rsaKey, err := encodeRSAPublicKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + case exportTypeCertificateChain: + if key.CertificateChain == nil { + return "", errors.New("selected key version does not have a certificate chain imported") } + + var pemCerts []string + for _, derCertBytes := range key.CertificateChain { + pemCert := strings.TrimSpace(string(pem.EncodeToMemory( + &pem.Block{ + Type: "CERTIFICATE", + Bytes: derCertBytes, + }))) + pemCerts = append(pemCerts, pemCert) + } + certChain := strings.Join(pemCerts, "\n") + + return certChain, nil } - return "", fmt.Errorf("unknown key type %v", policy.Type) + return "", fmt.Errorf("unknown key type %v for export type %v", policy.Type, exportType) } -func encodeRSAPrivateKey(key *rsa.PrivateKey) string { +func encodeRSAPrivateKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + if key.IsPrivateKeyMissing() { + return "", nil + } + // When encoding PKCS1, the PEM header should be `RSA PRIVATE KEY`. When Go // has PKCS8 encoding support, we may want to change this. 
- derBytes := x509.MarshalPKCS1PrivateKey(key) + blockType := "RSA PRIVATE KEY" + derBytes := x509.MarshalPKCS1PrivateKey(key.RSAKey) + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + pemBytes := pem.EncodeToMemory(&pemBlock) + return string(pemBytes), nil +} + +func encodeRSAPublicKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + var publicKey crypto.PublicKey + publicKey = key.RSAPublicKey + if key.RSAKey != nil { + // Prefer the private key if it exists + publicKey = key.RSAKey.Public() + } + + if publicKey == nil { + return "", errors.New("requested to encode an RSA public key with no RSA key present") + } + + // Encode the RSA public key in PEM format to return over the API + derBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return "", fmt.Errorf("error marshaling RSA public key: %w", err) + } pemBlock := &pem.Block{ - Type: "RSA PRIVATE KEY", + Type: "PUBLIC KEY", Bytes: derBytes, } pemBytes := pem.EncodeToMemory(pemBlock) - return string(pemBytes) + if pemBytes == nil || len(pemBytes) == 0 { + return "", fmt.Errorf("failed to PEM-encode RSA public key") + } + + return string(pemBytes), nil } func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { @@ -208,27 +330,57 @@ func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, return "", errors.New("nil KeyEntry provided") } + if k.IsPrivateKeyMissing() { + return "", nil + } + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "EC PRIVATE KEY" privKey := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: k.EC_X, - Y: k.EC_Y, - }, - D: k.EC_D, + PublicKey: pubKey, + D: k.EC_D, } - ecder, err := x509.MarshalECPrivateKey(privKey) + derBytes, err := x509.MarshalECPrivateKey(privKey) if err != nil { return "", err } - if ecder == nil { - return "", errors.New("no data returned when marshalling to private key") + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, } - block := pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: ecder, + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil +} + +func keyEntryToECPublicKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { + if k == nil { + return "", errors.New("nil KeyEntry provided") } - return strings.TrimSpace(string(pem.EncodeToMemory(&block))), nil + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "PUBLIC KEY" + derBytes, err := x509.MarshalPKIXPublicKey(&pubKey) + if err != nil { + return "", err + } + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil } const pathExportHelpSyn = `Export named encryption or signing key` diff --git a/builtin/logical/transit/path_export_test.go b/builtin/logical/transit/path_export_test.go index 6d44894e64e5..2ea603463548 100644 --- a/builtin/logical/transit/path_export_test.go +++ b/builtin/logical/transit/path_export_test.go @@ -1,23 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" + cryptoRand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" "fmt" "reflect" "strconv" + "strings" "testing" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" ) +func TestTransit_Export_Unknown_ExportType(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + keyType := "ed25519" + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "exportable": true, + "type": keyType, + }, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/bad-export-type/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("did not error on bad export type got: %v", rsp) + } + if rsp == nil || !rsp.IsError() { + t.Fatalf("response did not contain an error on bad export type got: %v", rsp) + } + if !strings.Contains(rsp.Error().Error(), "invalid export type") { + t.Fatalf("failed with unexpected error: %v", err) + } +} + func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { + t.Parallel() + verifyExportsCorrectVersion(t, "encryption-key", "aes128-gcm96") verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "encryption-key", "chacha20-poly1305") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-2048") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-3072") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-4096") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p384") verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p521") verifyExportsCorrectVersion(t, "signing-key", "ed25519") + verifyExportsCorrectVersion(t, "signing-key", "rsa-2048") + verifyExportsCorrectVersion(t, "signing-key", "rsa-3072") + verifyExportsCorrectVersion(t, "signing-key", "rsa-4096") verifyExportsCorrectVersion(t, "hmac-key", "aes128-gcm96") verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96") verifyExportsCorrectVersion(t, "hmac-key", "chacha20-poly1305") @@ -25,6 +83,14 @@ func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p384") verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p521") verifyExportsCorrectVersion(t, "hmac-key", "ed25519") + verifyExportsCorrectVersion(t, "hmac-key", "hmac") + verifyExportsCorrectVersion(t, "public-key", "rsa-2048") + verifyExportsCorrectVersion(t, "public-key", "rsa-3072") + verifyExportsCorrectVersion(t, "public-key", "rsa-4096") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p384") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p521") + verifyExportsCorrectVersion(t, "public-key", "ed25519") } func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { @@ -40,6 +106,9 @@ func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { "exportable": true, "type": keyType, } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } _, err := 
b.HandleRequest(context.Background(), req) if err != nil { t.Fatal(err) @@ -118,6 +187,8 @@ func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { } func TestTransit_Export_ValidVersionsOnly(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) // First create a key, v1 @@ -218,6 +289,8 @@ func TestTransit_Export_ValidVersionsOnly(t *testing.T) { } func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -248,6 +321,8 @@ func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) { } func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -276,6 +351,8 @@ func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) } func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256") testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p384") testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p521") @@ -306,11 +383,55 @@ func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testi } _, err = b.HandleRequest(context.Background(), req) if err == nil { - t.Fatal("Key does not support encryption but was exported without error.") + t.Fatalf("Key %s does not support encryption but was exported without error.", keyType) + } +} + +func TestTransit_Export_PublicKeyDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "chacha20-poly1305") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes128-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes256-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "hmac") +} + +func testTransit_Export_PublicKeyNotSupported_ReturnsError(t *testing.T, keyType string) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": keyType, + }, + } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/public-key/foo", + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("Key %s does not support public key exporting but was exported without error.", keyType) + } + if !strings.Contains(err.Error(), fmt.Sprintf("unknown key type %s for export type public-key", keyType)) { + t.Fatalf("unexpected error value for key type: %s: %v", keyType, err) } } func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -326,6 +447,8 @@ func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) { } func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) { + t.Parallel() + b, storage := createBackendWithSysView(t) req := &logical.Request{ @@ -374,3 +497,135 @@ func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) { t.Fatal("Encryption key data matched 
hmac key data") } } + +func TestTransit_Export_CertificateChain(t *testing.T) { + t.Parallel() + + generateKeys(t) + + // Create Cluster + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + "pki": pki.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + // Mount transit backend + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + require.NoError(t, err) + + // Mount PKI backend + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }) + require.NoError(t, err) + + testTransit_exportCertificateChain(t, client, "rsa-2048") + testTransit_exportCertificateChain(t, client, "rsa-3072") + testTransit_exportCertificateChain(t, client, "rsa-4096") + testTransit_exportCertificateChain(t, client, "ecdsa-p256") + testTransit_exportCertificateChain(t, client, "ecdsa-p384") + testTransit_exportCertificateChain(t, client, "ecdsa-p521") + testTransit_exportCertificateChain(t, client, "ed25519") +} + +func testTransit_exportCertificateChain(t *testing.T, apiClient *api.Client, keyType string) { + keyName := fmt.Sprintf("%s", keyType) + issuerName := fmt.Sprintf("%s-issuer", keyType) + + // Get key to be imported + privKey := getKey(t, keyType) + privKeyBytes, err := x509.MarshalPKCS8PrivateKey(privKey) + require.NoError(t, err, fmt.Sprintf("failed to marshal private key: %s", err)) + + // Create CSR + var csrTemplate x509.CertificateRequest + csrTemplate.Subject.CommonName = "example.com" + csrBytes, err := x509.CreateCertificateRequest(cryptoRand.Reader, &csrTemplate, privKey) + require.NoError(t, err, fmt.Sprintf("failed to create CSR: %s", err)) + + pemCsr := string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + })) + + // Generate PKI root + _, err = apiClient.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "issuer_name": issuerName, + "common_name": "PKI Root X1", + }) + require.NoError(t, err) + + // Create role to be used in the certificate issuing + _, err = apiClient.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "allowed_domains": "example.com", + "allow_bare_domains": true, + "basic_constraints_valid_for_non_ca": true, + "key_type": "any", + }) + require.NoError(t, err) + + // Sign the CSR + resp, err := apiClient.Logical().Write("pki/sign/example-dot-com", map[string]interface{}{ + "issuer_ref": issuerName, + "csr": pemCsr, + "ttl": "10m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + leafCertPEM := resp.Data["certificate"].(string) + + // Get wrapping key + resp, err = apiClient.Logical().Read("transit/wrapping_key") + require.NoError(t, err) + require.NotNil(t, resp) + + pubWrappingKeyString := strings.TrimSpace(resp.Data["public_key"].(string)) + wrappingKeyPemBlock, _ := pem.Decode([]byte(pubWrappingKeyString)) + + pubWrappingKey, err := x509.ParsePKIXPublicKey(wrappingKeyPemBlock.Bytes) + require.NoError(t, err, "failed to parse wrapping key") + + blob := wrapTargetPKCS8ForImport(t, pubWrappingKey.(*rsa.PublicKey), privKeyBytes, "SHA256") + + // Import key + _, err = apiClient.Logical().Write(fmt.Sprintf("/transit/keys/%s/import", keyName), map[string]interface{}{ + "ciphertext": blob, + "type": keyType, + }) + require.NoError(t, err) + + // Import 
cert chain + _, err = apiClient.Logical().Write(fmt.Sprintf("transit/keys/%s/set-certificate", keyName), map[string]interface{}{ + "certificate_chain": leafCertPEM, + }) + require.NoError(t, err) + + // Export cert chain + resp, err = apiClient.Logical().Read(fmt.Sprintf("transit/export/certificate-chain/%s", keyName)) + require.NoError(t, err) + require.NotNil(t, resp) + + exportedKeys := resp.Data["keys"].(map[string]interface{}) + exportedCertChainPEM := exportedKeys["1"].(string) + + if exportedCertChainPEM != leafCertPEM { + t.Fatalf("expected exported cert chain to match with imported value") + } +} diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go index 51ca37daa231..dfebe7e6a3c0 100644 --- a/builtin/logical/transit/path_hash.go +++ b/builtin/logical/transit/path_hash.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -18,6 +21,13 @@ import ( func (b *backend) pathHash() *framework.Path { return &framework.Path{ Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "hash", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "input": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_hash_test.go b/builtin/logical/transit/path_hash_test.go index 3e5dce95c299..9ded6721a8d5 100644 --- a/builtin/logical/transit/path_hash_test.go +++ b/builtin/logical/transit/path_hash_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go index 2376f4926727..f71c9516ea5f 100644 --- a/builtin/logical/transit/path_hmac.go +++ b/builtin/logical/transit/path_hmac.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "crypto/hmac" "encoding/base64" + "errors" "fmt" "strconv" "strings" @@ -44,6 +48,13 @@ type batchResponseHMACItem struct { func (b *backend) pathHMAC() *framework.Path { return &framework.Path{ Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "hmac|hmac-with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -83,6 +94,14 @@ Defaults to "sha2-256".`, Must be 0 (for latest) or a value greater than or equal to the min_encryption_version configured on the key.`, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be processed in a single batch. When this parameter +is set, if the parameter 'input' is also set, it will be ignored. 
+Any batch output will preserve the order of the batch input.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -117,6 +136,7 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() switch { case ver == 0: @@ -126,23 +146,19 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr case ver == p.LatestVersion: // Allowed case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: - p.Unlock() return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest } key, err := p.HMACKey(ver) if err != nil { - p.Unlock() return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if key == nil { - p.Unlock() + if key == nil && p.Type != keysutil.KeyType_MANAGED_KEY { return nil, fmt.Errorf("HMAC key value could not be computed") } hashAlgorithm, ok := keysutil.HashTypeMap[algorithm] if !ok { - p.Unlock() return logical.ErrorResponse("unsupported algorithm %q", hashAlgorithm), nil } @@ -153,18 +169,15 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr if batchInputRaw != nil { err = mapstructure.Decode(batchInputRaw, &batchInputItems) if err != nil { - p.Unlock() return nil, fmt.Errorf("failed to parse batch input: %w", err) } if len(batchInputItems) == 0 { - p.Unlock() return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest } } else { valueRaw, ok := d.GetOk("input") if !ok { - p.Unlock() return logical.ErrorResponse("missing input for HMAC"), logical.ErrInvalidRequest } @@ -191,17 +204,29 @@ func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *fr continue } - hf := hmac.New(hashAlg, key) - hf.Write(input) - retBytes := hf.Sum(nil) + var retBytes []byte + + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + response[i].err = errors.New("unsupported system view") + } + + retBytes, err = p.HMACWithManagedKey(ctx, ver, managedKeySystemView, b.backendUUID, algorithm, input) + if err != nil { + response[i].err = err + } + } else { + hf := hmac.New(hashAlg, key) + hf.Write(input) + retBytes = hf.Sum(nil) + } retStr := base64.StdEncoding.EncodeToString(retBytes) retStr = fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr) response[i].HMAC = retStr } - p.Unlock() - // Generate the response resp := &logical.Response{} if batchInputRaw != nil { @@ -232,7 +257,19 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f name := d.Get("name").(string) algorithm := d.Get("urlalgorithm").(string) if algorithm == "" { - algorithm = d.Get("algorithm").(string) + hashAlgorithmRaw, hasHashAlgorithm := d.GetOk("hash_algorithm") + algorithmRaw, hasAlgorithm := d.GetOk("algorithm") + + // As `algorithm` is deprecated, make sure we only read it if + // `hash_algorithm` is not present. 
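The switch that follows resolves which algorithm name to use, giving the new `hash_algorithm` field precedence over the deprecated `algorithm` field, with the URL path segment checked before either. A standalone sketch of that precedence order, with a plain map standing in for `framework.FieldData` (the default value is the schema default, assumed here to be `sha2-256`):

```
package main

import "fmt"

// resolveAlgorithm mirrors the precedence in the switch that follows: the
// URL path algorithm wins, then hash_algorithm, then the deprecated
// algorithm field, then the schema default.
func resolveAlgorithm(urlAlg string, raw map[string]interface{}) string {
	if urlAlg != "" {
		return urlAlg
	}
	if v, ok := raw["hash_algorithm"]; ok {
		return v.(string)
	}
	if v, ok := raw["algorithm"]; ok {
		return v.(string)
	}
	return "sha2-256" // schema default
}

func main() {
	fmt.Println(resolveAlgorithm("", map[string]interface{}{"algorithm": "sha2-512"}))                          // sha2-512
	fmt.Println(resolveAlgorithm("", map[string]interface{}{"algorithm": "xxx", "hash_algorithm": "sha2-256"})) // sha2-256
	fmt.Println(resolveAlgorithm("sha2-384", map[string]interface{}{"hash_algorithm": "sha2-256"}))             // sha2-384
}
```

This is also what the updated HMAC test exercises: setting `hash_algorithm` to the real value and `algorithm` to a junk value must still verify successfully.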
+ switch { + case hasHashAlgorithm: + algorithm = hashAlgorithmRaw.(string) + case hasAlgorithm: + algorithm = algorithmRaw.(string) + default: + algorithm = d.Get("hash_algorithm").(string) + } } // Get the policy @@ -249,10 +286,10 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() hashAlgorithm, ok := keysutil.HashTypeMap[algorithm] if !ok { - p.Unlock() return logical.ErrorResponse("unsupported algorithm %q", hashAlgorithm), nil } @@ -263,12 +300,10 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f if batchInputRaw != nil { err := mapstructure.Decode(batchInputRaw, &batchInputItems) if err != nil { - p.Unlock() return nil, fmt.Errorf("failed to parse batch input: %w", err) } if len(batchInputItems) == 0 { - p.Unlock() return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest } } else { @@ -365,8 +400,6 @@ func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *f response[i].Valid = hmac.Equal(retBytes, verBytes) } - p.Unlock() - // Generate the response resp := &logical.Response{} if batchInputRaw != nil { diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go index 204e94ec04f8..3f21106c4cc9 100644 --- a/builtin/logical/transit/path_hmac_test.go +++ b/builtin/logical/transit/path_hmac_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -91,17 +94,40 @@ func TestTransit_HMAC(t *testing.T) { } // Now verify + verify := func() { + t.Helper() + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errStr, ok := resp.Data["error"]; ok { + t.Fatalf("error validating hmac: %s", errStr) + } + if resp.Data["valid"].(bool) == false { + t.Fatalf(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) + } + } req.Path = strings.ReplaceAll(req.Path, "hmac", "verify") req.Data["hmac"] = value.(string) - resp, err = b.HandleRequest(context.Background(), req) - if err != nil { - t.Fatalf("%v: %v", err, resp) - } - if resp == nil { - t.Fatal("expected non-nil response") - } - if resp.Data["valid"].(bool) == false { - panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) + verify() + + // If `algorithm` parameter is used, try with `hash_algorithm` as well + if algorithm, ok := req.Data["algorithm"]; ok { + // Note that `hash_algorithm` takes precedence over `algorithm`, since the + // latter is deprecated. + req.Data["hash_algorithm"] = algorithm + req.Data["algorithm"] = "xxx" + defer func() { + // Restore the req fields, since it is re-used by the tests below + delete(req.Data, "hash_algorithm") + req.Data["algorithm"] = algorithm + }() + + verify() } } diff --git a/builtin/logical/transit/path_import.go b/builtin/logical/transit/path_import.go index 817cf5fc5ddf..355c4e7b83fe 100644 --- a/builtin/logical/transit/path_import.go +++ b/builtin/logical/transit/path_import.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -25,6 +28,13 @@ const EncryptedKeyBytes = 512 func (b *backend) pathImport() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -49,6 +59,10 @@ ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext PEM public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, "allow_rotation": { Type: framework.TypeBool, Description: "True if the imported key may be rotated within Vault; false otherwise.", @@ -101,6 +115,13 @@ key.`, func (b *backend) pathImportVersion() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/import_version", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key-version", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -111,12 +132,21 @@ func (b *backend) pathImportVersion() *framework.Path { Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, "hash_function": { Type: framework.TypeString, Default: "SHA256", Description: `The hash function used as a random oracle in the OAEP wrapping of the user-generated, ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", or "SHA512"`, }, + "version": { + Type: framework.TypeInt, + Description: `Key version to be updated, if left empty, a new version will be created unless +a private key is specified and the 'Latest' key is missing a private key.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.UpdateOperation: b.pathImportVersionWrite, @@ -130,11 +160,9 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * name := d.Get("name").(string) derived := d.Get("derived").(bool) keyType := d.Get("type").(string) - hashFnStr := d.Get("hash_function").(string) exportable := d.Get("exportable").(bool) allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) - ciphertextString := d.Get("ciphertext").(string) allowRotation := d.Get("allow_rotation").(bool) // Ensure the caller didn't supply "convergent_encryption" as a field, since it's not supported on import. 
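The `ciphertext` field description above fixes the wire format for wrapped imports: a fresh ephemeral AES key is OAEP-wrapped under transit's wrapping key, then concatenated with the target key wrapped under that AES key. A client-side sketch, assuming the inner wrap is AES-KWP (as the test helper `wrapTargetKeyForImport` uses) via tink's key-wrap primitive, with locally generated keys standing in for the real wrapping key and import target:

```
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/base64"
	"fmt"

	"github.com/google/tink/go/kwp/subtle"
)

func main() {
	// Stand-in for transit's wrapping key (normally fetched from the
	// transit/wrapping_key endpoint; the real one is RSA-4096).
	wrappingKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// The key to import, as PKCS#8 DER.
	_, targetPriv, _ := ed25519.GenerateKey(rand.Reader)
	targetDER, err := x509.MarshalPKCS8PrivateKey(targetPriv)
	if err != nil {
		panic(err)
	}

	// 1. Generate an ephemeral AES key.
	ephemeral := make([]byte, 32)
	if _, err := rand.Read(ephemeral); err != nil {
		panic(err)
	}

	// 2. Wrap the target key under the AES key with AES-KWP.
	kwp, err := subtle.NewKWP(ephemeral)
	if err != nil {
		panic(err)
	}
	wrappedTarget, err := kwp.Wrap(targetDER)
	if err != nil {
		panic(err)
	}

	// 3. OAEP-wrap the AES key under the wrapping key, using the hash
	// named by the hash_function field (SHA256 here).
	wrappedAES, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &wrappingKey.PublicKey, ephemeral, []byte{})
	if err != nil {
		panic(err)
	}

	// 4. Concatenate and base64-encode; this is the "ciphertext" value.
	fmt.Println(base64.StdEncoding.EncodeToString(append(wrappedAES, wrappedTarget...)))
}
```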
@@ -146,6 +174,12 @@ func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d * return nil, errors.New("allow_rotation must be set to true if auto-rotation is enabled") } + // Ensure that at least one `key` field has been set + isCiphertextSet, err := checkKeyFieldsSet(d) + if err != nil { + return nil, err + } + polReq := keysutil.PolicyRequest{ Storage: req.Storage, Name: name, @@ -154,6 +188,7 @@ AllowPlaintextBackup: allowPlaintextBackup, AutoRotatePeriod: autoRotatePeriod, AllowImportedKeyRotation: allowRotation, + IsPrivateKey: isCiphertextSet, } switch strings.ToLower(keyType) { @@ -183,11 +218,6 @@ return logical.ErrorResponse(fmt.Sprintf("unknown key type: %v", keyType)), logical.ErrInvalidRequest } - hashFn, err := parseHashFn(hashFnStr) - if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } - p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -200,14 +230,9 @@ return nil, errors.New("the import path cannot be used with an existing key; use import-version to rotate an existing imported key") } - ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + key, resp, err := b.extractKeyFromFields(ctx, req, d, polReq.KeyType, isCiphertextSet) if err != nil { - return nil, err - } - - key, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) - if err != nil { - return nil, err + return resp, err } err = b.lm.ImportPolicy(ctx, polReq, key, b.GetRandomReader()) @@ -220,20 +245,18 @@ func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) - hashFnStr := d.Get("hash_function").(string) - ciphertextString := d.Get("ciphertext").(string) - - polReq := keysutil.PolicyRequest{ - Storage: req.Storage, - Name: name, - Upsert: false, - } - hashFn, err := parseHashFn(hashFnStr) + isCiphertextSet, err := checkKeyFieldsSet(d) if err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + return nil, err } + polReq := keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + Upsert: false, + IsPrivateKey: isCiphertextSet, + } p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) if err != nil { return nil, err @@ -253,15 +276,24 @@ } defer p.Unlock() - ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + key, resp, err := b.extractKeyFromFields(ctx, req, d, p.Type, isCiphertextSet) if err != nil { - return nil, err + return resp, err } - importKey, err := b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) - if err != nil { - return nil, err + + // Use the 'version' param if set; otherwise import a new version. 
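The branch that follows routes an explicit version through `KeyVersionCanBeUpdated` and `ImportPrivateKeyForVersion`, and everything else through `ImportPublicOrPrivate`. A toy sketch of the update-in-place rule, with hypothetical types standing in for the real keysutil keyring:

```
package main

import (
	"errors"
	"fmt"
)

type keyVersion struct {
	public  string
	private string // empty when only the public half was imported
}

// applyImport sketches the update-vs-append decision made by
// pathImportVersionWrite (hypothetical types, not the real keysutil API).
func applyImport(ring []keyVersion, version int, isPrivateKey bool, material string) ([]keyVersion, error) {
	if version > 0 {
		if version > len(ring) {
			return ring, errors.New("version does not exist")
		}
		if !isPrivateKey {
			return ring, errors.New("only a private key can update an existing version")
		}
		if ring[version-1].private != "" {
			return ring, errors.New("version already has a private key")
		}
		ring[version-1].private = material
		return ring, nil
	}
	// No version given: append a new version.
	return append(ring, keyVersion{public: "derived", private: material}), nil
}

func main() {
	ring := []keyVersion{{public: "pub-v1"}}
	ring, err := applyImport(ring, 1, true, "priv-v1")
	fmt.Println(len(ring), err) // 1 <nil>: v1 completed in place, no new version
}
```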
+ if version, ok := d.GetOk("version"); ok { + versionToUpdate := version.(int) + + // Check whether the given version can be updated with the given input + err = p.KeyVersionCanBeUpdated(versionToUpdate, isCiphertextSet) + if err == nil { + err = p.ImportPrivateKeyForVersion(ctx, req.Storage, versionToUpdate, key) + } + } else { + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, isCiphertextSet, b.GetRandomReader()) } - err = p.Import(ctx, req.Storage, importKey, b.GetRandomReader()) + if err != nil { return nil, err } @@ -319,6 +351,36 @@ func (b *backend) decryptImportedKey(ctx context.Context, storage logical.Storag return importKey, nil } +func (b *backend) extractKeyFromFields(ctx context.Context, req *logical.Request, d *framework.FieldData, keyType keysutil.KeyType, isPrivateKey bool) ([]byte, *logical.Response, error) { + var key []byte + if isPrivateKey { + hashFnStr := d.Get("hash_function").(string) + hashFn, err := parseHashFn(hashFnStr) + if err != nil { + return key, logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + ciphertextString := d.Get("ciphertext").(string) + ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + if err != nil { + return key, nil, err + } + + key, err = b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) + if err != nil { + return key, nil, err + } + } else { + publicKeyString := d.Get("public_key").(string) + if !keyType.ImportPublicKeySupported() { + return key, nil, errors.New("provided type does not support public_key import") + } + key = []byte(publicKeyString) + } + + return key, nil, nil +} + func parseHashFn(hashFn string) (hash.Hash, error) { switch strings.ToUpper(hashFn) { case "SHA1": @@ -336,6 +398,29 @@ } } +// checkKeyFieldsSet: Checks which key fields are set. If both are set, an error is returned +func checkKeyFieldsSet(d *framework.FieldData) (bool, error) { + ciphertextSet := isFieldSet("ciphertext", d) + publicKeySet := isFieldSet("public_key", d) + + if ciphertextSet && publicKeySet { + return false, errors.New("only one of the following fields, ciphertext and public_key, can be set") + } else if ciphertextSet { + return true, nil + } else { + return false, nil + } +} + +func isFieldSet(fieldName string, d *framework.FieldData) bool { + _, fieldSet := d.Raw[fieldName] + if !fieldSet { + return false + } + + return true +} + const ( pathImportWriteSyn = "Imports an externally-generated key into a new transit key" pathImportWriteDesc = "This path is used to import an externally-generated " + diff --git a/builtin/logical/transit/path_import_test.go b/builtin/logical/transit/path_import_test.go index d31b12b454e4..ab471f0ff38b 100644 --- a/builtin/logical/transit/path_import_test.go +++ b/builtin/logical/transit/path_import_test.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" + "crypto" "crypto/ecdsa" "crypto/ed25519" "crypto/elliptic" @@ -9,6 +13,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/base64" + "encoding/pem" "fmt" "strconv" "sync" @@ -46,7 +51,10 @@ var ( keys = map[string]interface{}{} ) -const nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" +const ( + nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" + rsaPSSFormattedKey = "MIIEvAIBADALBgkqhkiG9w0BAQoEggSoMIIEpAIBAAKCAQEAiFXSBaicB534+2qMZTVzQHMjuhb4NM9hi5H4EAFiYHEBuvm2BAk58NdBK3wiMq/p7Ewu5NQI0gJ7GlcV1MBU94U6MEmWNd0ztmlz37esEDuaCDhmLEBHKRzs8Om0bY9vczcNwcnRIYusP2KMxon3Gv2C86M2Jahig70AIq0E9C7esfrlYxFnoxUfO09XyYfiHlZY59+/dhyULp/RDIvaQ0/DqSSnYmXw8vRQ1gp6DqIzxx3j8ikUrpE7MK6348keFQj1eb83Z5w8qgIdceHHH4wbIAW7qWCPJ/vIJp8Pe1NEanlef61pDut2YcljvN79ccjX/QyqwqYv6xX2uzSlpQIDAQABAoIBACtpBCAoIVJtkv9e3EhHniR55PjWYn7SP5GEz3MtNalWokHqS/H6DBhrOcWCV5NDHx1N3qqe9xYDkzX+X6Wn/gX4RmBkte79uX8OEca8wY1DpRaT+riBWQc2vh0xlPFDuC177KX1QGFJi3V9SCzZdjSCXyV7pPyVopSm4/mmlMq5ANfN8bcHAtcArP7vPzEdckJqurjwHyzsUZJa9sk3OL3rBkKy5bmoPebE1ZQ7C+9eA4u9MKSy95WpTiqMe3rRhvr6zj4bzEvzS9M4r2EdwgAn4FyDwtGdOqtfbtSLTikb73f4MSINnWbt3YPBfRC4PGjWXIN2sMG5XYC3KH+RKbsCgYEAu0HOFInH8OtWiUY0aqRKZuo7lrBczNa5gnce3ZYnNkfrPlu1Xp0SjUkEWukznBLO0N9lvG9j3ksUDTQlPoKarJb9uf/1H0tYHhHm6mP8mH87yfVn2bLb3VPeIQYb+MXnDrwNVCAtxhuHlpnXJPldeuVKeRigHUNIEs76UMiiLqMCgYEAumJxm5NrKk0LXUQmeZolLh0lM/shg8zW7Vi3Ksz5Pe4Pcmg+hTbHjZuJwK6HesljEA0JDNkS0+5hkqiS5UDnj94XfDbi08/kKbPYA12GPVSRNTJxL8q70rFnEUZuMBeL0SKMPhEfR2z5TDDZUBoO6HBUUwgJAij1EsXrBAb0BxcCgYBKS3eKKohLi/PPjy0oynpCjtiJlvuawe7kVoLGg9aW8L3jBdvV6Bf+OmQh9bhmSggIUzo4IzHKdptECdZlEMhxhY6xh14nxmr1s0Cc6oLDtmdwX4+OjioxjB7rl1Ltxwc/j1jycbn3ieCn3e3AW7e9FNARb7XHJnSoEbq65n+CZQKBgQChLPozYAL/HIrkR0fCRmM6gmemkNeFo0CFFP+oWoJ6ZIAlHjJafmmIcmVoI0TzEG3C9pLJ8nmOnYjxCyekakEUryi9+LSkGBWlXmlBV8H7DUNYrlskyfssEs8fKDmnCuWUn3yJO8NBv+HBWkjCNRaJOIIjH0KzBHoRludJnz2tVwKBgQCsQF5lvcXefNfQojbhF+9NfyhvAc7EsMTXQhP9HEj0wVqTuuqyGyu8meXEkcQPRl6yD/yZKuMREDNNck4KV2fdGekBsh8zBgpxdHQ2DcbfxZfNgv3yoX3f0grb/ApQNJb3DVW9FVRigue8XPzFOFX/demJmkUnTg3zGFnXLXjgxg==" +) func generateKeys(t *testing.T) { t.Helper() @@ -114,6 +122,39 @@ func TestTransit_ImportNSSEd25519Key(t *testing.T) { } } +func TestTransit_ImportRSAPSS(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + rawPKCS8, err := base64.StdEncoding.DecodeString(rsaPSSFormattedKey) + if err != nil { + t.Fatalf("failed to parse rsa-pss base64: %v", err) + } + + blob := wrapTargetPKCS8ForImport(t, pubWrappingKey, rawPKCS8, "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: "keys/rsa-pss/import", + Data: map[string]interface{}{ + "ciphertext": blob, + "type": "rsa-2048", + }, + } + + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import RSA-PSS private key: %v", err) + } +} + func TestTransit_Import(t *testing.T) { generateKeys(t) b, s := createBackendWithStorage(t) @@ -388,6 +429,70 @@ func TestTransit_Import(t 
*testing.T) { } }, ) + + t.Run( + "import public key ed25519", + func(t *testing.T) { + keyType := "ed25519" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import ed25519 key: %v", err) + } + }) + + t.Run( + "import public key ecdsa", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + }) } func TestTransit_ImportVersion(t *testing.T) { @@ -534,6 +639,313 @@ func TestTransit_ImportVersion(t *testing.T) { } }, ) + + t.Run( + "import rsa public key and update version with private counterpart", + func(t *testing.T) { + keyType := "rsa-2048" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import RSA public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // Update version - import RSA private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + }, + ) +} + +func TestTransit_ImportVersionWithPublicKeys(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + // Import a public key then import private should give us one key + t.Run( + "import rsa public key and update version with private counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + 
} + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // Update version - import EC private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // We should have one key on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 1 { + t.Fatalf("expected 1 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) + + // Import a private and then public should give us two keys + t.Run( + "import ec private key and then its public counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC private key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // Update version - Import EC public key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // We should have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) + + // Import a public and another public should allow us to insert two private key. 
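The next subtest covers the remaining ordering; taken together, the three subtests in this function pin down the version-count semantics, summarized in this sketch:

```
package main

import "fmt"

// Expected key-version counts for the import orderings exercised by the
// surrounding tests (a summary sketch, not the real backend logic).
func main() {
	scenarios := []struct {
		order    string
		versions int
	}{
		{"public then private (same key pair)", 1}, // private fills in version 1
		{"private then public", 2},                 // public appends version 2
		{"public then public", 2},                  // each public appends a version
	}
	for _, s := range scenarios {
		fmt.Printf("%-38s -> %d version(s)\n", s.order, s.versions)
	}
}
```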
+ t.Run( + "import two public keys and two private keys in reverse order", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey1 := getKey(t, keyType) + importBlob1 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey1, keyType, "SHA256") + publicKeyBytes1, err := getPublicKey(privateKey1, keyType) + if err != nil { + t.Fatal(err) + } + + privateKey2, err := generateKey(keyType) + if err != nil { + t.Fatal(err) + } + importBlob2 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey2, keyType, "SHA256") + publicKeyBytes2, err := getPublicKey(privateKey2, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes1, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // Update version - Import second EC public key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes2, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // We should have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + + // Import second private key first, with no options. 
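The request that follows imports the second private key without a `version`. Per the `version` field's description earlier in this diff, a private-key import with no version completes the latest version when its private half is missing, rather than appending a new one; a sketch of that selection rule (hypothetical helper, 1-indexed versions):

```
package main

import "fmt"

// targetVersion sketches the rule from the "version" field description:
// with no version given, a private-key import fills the latest version when
// that version lacks a private half; otherwise a new version is appended.
func targetVersion(latest int, latestHasPrivate, importIsPrivate bool) (version int, appends bool) {
	if importIsPrivate && !latestHasPrivate {
		return latest, false // complete the existing latest version in place
	}
	return latest + 1, true
}

func main() {
	v, appends := targetVersion(2, false, true)
	fmt.Println(v, appends) // 2 false: private key completes public-only v2
	v, appends = targetVersion(2, true, true)
	fmt.Println(v, appends) // 3 true: latest already complete, append v3
}
```

The explicit `"version": 1` in the request after it then fills the remaining public-only version, which is why the export still reports two keys at the end.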
+ req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob2, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import private key: %s", err) + } + + // Import first private key second, with a version + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob1, + "version": 1, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import private key: %s", err) + } + + // We should still have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) } func wrapTargetKeyForImport(t *testing.T, wrappingKey *rsa.PublicKey, targetKey interface{}, targetKeyType string, hashFnName string) string { @@ -624,3 +1036,40 @@ func generateKey(keyType string) (interface{}, error) { return nil, fmt.Errorf("failed to generate unsupported key type: %s", keyType) } } + +func getPublicKey(privateKey crypto.PrivateKey, keyType string) ([]byte, error) { + var publicKey crypto.PublicKey + var publicKeyBytes []byte + switch keyType { + case "rsa-2048", "rsa-3072", "rsa-4096": + publicKey = privateKey.(*rsa.PrivateKey).Public() + case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": + publicKey = privateKey.(*ecdsa.PrivateKey).Public() + case "ed25519": + publicKey = privateKey.(ed25519.PrivateKey).Public() + default: + return publicKeyBytes, fmt.Errorf("failed to get public key from %s key", keyType) + } + + publicKeyBytes, err := publicKeyToBytes(publicKey) + if err != nil { + return publicKeyBytes, err + } + + return publicKeyBytes, nil +} + +func publicKeyToBytes(publicKey crypto.PublicKey) ([]byte, error) { + var publicKeyBytesPem []byte + publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return publicKeyBytesPem, fmt.Errorf("failed to marshal public key: %s", err) + } + + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: publicKeyBytes, + } + + return pem.EncodeToMemory(pemBlock), nil +} diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go index e8edabc1769c..63e6019a5ced 100644 --- a/builtin/logical/transit/path_keys.go +++ b/builtin/logical/transit/path_keys.go @@ -1,13 +1,16 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "crypto/elliptic" - "crypto/x509" "encoding/base64" "encoding/pem" "fmt" "strconv" + "strings" "time" "golang.org/x/crypto/ed25519" @@ -22,6 +25,11 @@ func (b *backend) pathListKeys() *framework.Path { return &framework.Path{ Pattern: "keys/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ListOperation: b.pathKeysList, }, @@ -34,6 +42,12 @@ func (b *backend) pathListKeys() *framework.Path { func (b *backend) pathKeys() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -108,12 +122,35 @@ key.`, Default: 0, Description: fmt.Sprintf("The key size in bytes for the algorithm. Only applies to HMAC and must be no fewer than %d bytes and no more than %d", keysutil.HmacMinKeySize, keysutil.HmacMaxKeySize), }, + "managed_key_name": { + Type: framework.TypeString, + Description: "The name of the managed key to use for this transit key", + }, + "managed_key_id": { + Type: framework.TypeString, + Description: "The UUID of the managed key to use for this transit key", + }, }, - Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.UpdateOperation: b.pathPolicyWrite, - logical.DeleteOperation: b.pathPolicyDelete, - logical.ReadOperation: b.pathPolicyRead, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathPolicyWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathPolicyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathPolicyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, }, HelpSynopsis: pathPolicyHelpSyn, @@ -139,6 +176,8 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * exportable := d.Get("exportable").(bool) allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) + managedKeyName := d.Get("managed_key_name").(string) + managedKeyId := d.Get("managed_key_id").(string) if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil @@ -182,6 +221,8 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeyType = keysutil.KeyType_RSA4096 case "hmac": polReq.KeyType = keysutil.KeyType_HMAC + case "managed_key": + polReq.KeyType = keysutil.KeyType_MANAGED_KEY default: return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest } @@ -195,6 +236,15 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * polReq.KeySize = keySize } + if polReq.KeyType == keysutil.KeyType_MANAGED_KEY { + keyId, err := GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId) + if err != nil { + return nil, err + } + + polReq.ManagedKeyUUID = keyId + } + p, upserted, err := b.GetPolicy(ctx, polReq, 
b.GetRandomReader()) if err != nil { return nil, err @@ -206,19 +256,22 @@ func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d * p.Unlock() } - resp := &logical.Response{} + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } if !upserted { resp.AddWarning(fmt.Sprintf("key %s already existed", name)) } - - return nil, nil + return resp, nil } // Built-in helper type for returning asymmetric keys type asymKey struct { - Name string `json:"name" structs:"name" mapstructure:"name"` - PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` - CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` + Name string `json:"name" structs:"name" mapstructure:"name"` + PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` + CertificateChain string `json:"certificate_chain" structs:"certificate_chain" mapstructure:"certificate_chain"` + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` } func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { @@ -239,6 +292,19 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } defer p.Unlock() + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + return b.formatKeyPolicy(p, context) +} + +func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical.Response, error) { // Return the response resp := &logical.Response{ Data: map[string]interface{}{ @@ -295,15 +361,6 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } } - contextRaw := d.Get("context").(string) - var context []byte - if len(contextRaw) != 0 { - context, err = base64.StdEncoding.DecodeString(contextRaw) - if err != nil { - return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest - } - } - switch p.Type { case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: retKeys := map[string]int64{} @@ -322,6 +379,18 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f if key.CreationTime.IsZero() { key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0) } + if v.CertificateChain != nil { + var pemCerts []string + for _, derCertBytes := range v.CertificateChain { + pemCert := strings.TrimSpace(string(pem.EncodeToMemory( + &pem.Block{ + Type: "CERTIFICATE", + Bytes: derCertBytes, + }))) + pemCerts = append(pemCerts, pemCert) + } + key.CertificateChain = strings.Join(pemCerts, "\n") + } switch p.Type { case keysutil.KeyType_ECDSA_P256: @@ -341,7 +410,7 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f } derived, err := p.GetKey(context, ver, 32) if err != nil { - return nil, fmt.Errorf("failed to derive key to return public component") + return nil, fmt.Errorf("failed to derive key to return public component: %w", err) } pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey) key.PublicKey = base64.StdEncoding.EncodeToString(pubKey) @@ -358,21 +427,11 @@ func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *f key.Name = "rsa-4096" } - // Encode the RSA public key in 
PEM format to return over the - // API - derBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public()) + pubKey, err := encodeRSAPublicKey(&v) if err != nil { - return nil, fmt.Errorf("error marshaling RSA public key: %w", err) - } - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: derBytes, - } - pemBytes := pem.EncodeToMemory(pemBlock) - if pemBytes == nil || len(pemBytes) == 0 { - return nil, fmt.Errorf("failed to PEM-encode RSA public key") + return nil, err } - key.PublicKey = string(pemBytes) + key.PublicKey = pubKey } retKeys[k] = structs.New(key).Map() diff --git a/builtin/logical/transit/path_keys_config.go b/builtin/logical/transit/path_keys_config.go index f2628e4f0309..ed91d236dc92 100644 --- a/builtin/logical/transit/path_keys_config.go +++ b/builtin/logical/transit/path_keys_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -13,6 +16,13 @@ import ( func (b *backend) pathKeysConfig() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "configure", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -87,6 +97,8 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, } defer p.Unlock() + var warning string + originalMinDecryptionVersion := p.MinDecryptionVersion originalMinEncryptionVersion := p.MinEncryptionVersion originalDeletionAllowed := p.DeletionAllowed @@ -103,8 +115,6 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, } }() - resp = &logical.Response{} - persistNeeded := false minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version") @@ -117,7 +127,7 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, if minDecryptionVersion == 0 { minDecryptionVersion = 1 - resp.AddWarning("since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1") + warning = "since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1" } if minDecryptionVersion != p.MinDecryptionVersion { @@ -208,10 +218,21 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, p.AutoRotatePeriod = autoRotatePeriod persistNeeded = true } + + if p.Type == keysutil.KeyType_MANAGED_KEY && autoRotatePeriod != 0 { + return logical.ErrorResponse("Auto rotation can not be set for managed keys"), nil + } } if !persistNeeded { - return nil, nil + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil } switch { @@ -221,11 +242,18 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, return logical.ErrorResponse("min decryption version should not be less then min available version"), nil } - if len(resp.Warnings) == 0 { - return nil, p.Persist(ctx, req.Storage) + if err := p.Persist(ctx, req.Storage); err != nil { + return nil, err } - return resp, p.Persist(ctx, req.Storage) + resp, err = b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil } const pathKeysConfigHelpSyn = `Configure a named encryption key` diff --git a/builtin/logical/transit/path_keys_config_test.go b/builtin/logical/transit/path_keys_config_test.go index f6dee45090dc..98bcbe448539 100644 
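The `formatKeyPolicy` refactor above means key config writes now answer with the key's read payload instead of an empty body. A minimal sketch of what a caller sees, assuming transit is mounted at `transit/` and the key already exists; the key name is hypothetical.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With the formatKeyPolicy change, a config write now returns the same
	// data a read of transit/keys/my-key would.
	resp, err := client.Logical().Write("transit/keys/my-key/config", map[string]interface{}{
		"min_decryption_version": 2,
		"deletion_allowed":       true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data["min_decryption_version"], resp.Data["deletion_allowed"])
}
```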
--- a/builtin/logical/transit/path_keys_config_test.go +++ b/builtin/logical/transit/path_keys_config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_keys_test.go b/builtin/logical/transit/path_keys_test.go index 04c1d8da092d..3a0abfeb0bb3 100644 --- a/builtin/logical/transit/path_keys_test.go +++ b/builtin/logical/transit/path_keys_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit_test import ( diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go index 3b903e0b37fe..6e057a975fd8 100644 --- a/builtin/logical/transit/path_random.go +++ b/builtin/logical/transit/path_random.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -11,6 +14,13 @@ import ( func (b *backend) pathRandom() *framework.Path { return &framework.Path{ Pattern: "random(/" + framework.GenericNameRegex("source") + ")?" + framework.OptionalParamRegex("urlbytes"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "random|random-with-source|random-with-bytes|random-with-source-and-bytes", + }, + Fields: map[string]*framework.FieldSchema{ "urlbytes": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go index 037a00b55f08..a58820b987b3 100644 --- a/builtin/logical/transit/path_random_test.go +++ b/builtin/logical/transit/path_random_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_restore.go b/builtin/logical/transit/path_restore.go index fa8c142bbab3..1b3b34599909 100644 --- a/builtin/logical/transit/path_restore.go +++ b/builtin/logical/transit/path_restore.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -12,6 +15,13 @@ import ( func (b *backend) pathRestore() *framework.Path { return &framework.Path{ Pattern: "restore" + framework.OptionalParamRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "restore", + OperationSuffix: "key|and-rename-key", + }, + Fields: map[string]*framework.FieldSchema{ "backup": { Type: framework.TypeString, diff --git a/builtin/logical/transit/path_restore_test.go b/builtin/logical/transit/path_restore_test.go index 6e13b985ee65..1cd0dcd61eb4 100644 --- a/builtin/logical/transit/path_restore_test.go +++ b/builtin/logical/transit/path_restore_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go index 24e772eaecee..49b69c7255e1 100644 --- a/builtin/logical/transit/path_rewrap.go +++ b/builtin/logical/transit/path_rewrap.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "encoding/base64" + "errors" "fmt" "github.com/hashicorp/vault/helper/constants" @@ -13,9 +17,17 @@ import ( "github.com/mitchellh/mapstructure" ) +var ErrNonceNotAllowed = errors.New("provided nonce not allowed for this key") + func (b *backend) pathRewrap() *framework.Path { return &framework.Path{ Pattern: "rewrap/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "rewrap", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -43,6 +55,14 @@ func (b *backend) pathRewrap() *framework.Path { Must be 0 (for latest) or a value greater than or equal to the min_encryption_version configured on the key.`, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be re-encrypted in a single batch. When this parameter is set, +if the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. +Any batch output will preserve the order of the batch input.`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -128,6 +148,7 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() warnAboutNonceUsage := false for i, item := range batchInputItems { @@ -135,6 +156,11 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * continue } + if item.Nonce != "" && !nonceAllowed(p) { + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() + continue + } + plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext) if err != nil { switch err.(type) { @@ -142,7 +168,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * batchResponseItems[i].Error = err.Error() continue default: - p.Unlock() return nil, err } } @@ -158,16 +183,13 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * batchResponseItems[i].Error = err.Error() continue case errutil.InternalError: - p.Unlock() return nil, err default: - p.Unlock() return nil, err } } if ciphertext == "" { - p.Unlock() return nil, fmt.Errorf("empty ciphertext returned for input item %d", i) } @@ -191,7 +213,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * } } else { if batchResponseItems[0].Error != "" { - p.Unlock() return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest } resp.Data = map[string]interface{}{ @@ -204,7 +225,6 @@ func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d * resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") } - p.Unlock() return resp, nil } diff --git a/builtin/logical/transit/path_rewrap_test.go b/builtin/logical/transit/path_rewrap_test.go index 04281a183752..55f28874656e 100644 --- a/builtin/logical/transit/path_rewrap_test.go +++ b/builtin/logical/transit/path_rewrap_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_rotate.go b/builtin/logical/transit/path_rotate.go index a74e69980512..c024aede4bc0 100644 --- a/builtin/logical/transit/path_rotate.go +++ b/builtin/logical/transit/path_rotate.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
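To illustrate the new `batch_input` parameter on the rewrap endpoint above, here is a hedged sketch of a batch rewrap call; the mount path, key name, and ciphertext values are hypothetical placeholders.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The ciphertexts are hypothetical values previously produced by
	// transit/encrypt/my-key; rewrap re-encrypts them under the newest key
	// version without returning plaintext to the caller.
	resp, err := client.Logical().Write("transit/rewrap/my-key", map[string]interface{}{
		"batch_input": []map[string]interface{}{
			{"ciphertext": "vault:v1:..."},
			{"ciphertext": "vault:v1:..."},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per-item failures (for example a nonce supplied for a key that does not
	// allow one) surface in each result's "error" field, in input order.
	fmt.Println(resp.Data["batch_results"])
}
```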
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -11,11 +14,26 @@ import ( func (b *backend) pathRotate() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "rotate", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, Description: "Name of the key", }, + "managed_key_name": { + Type: framework.TypeString, + Description: "The name of the managed key to use for the new version of this transit key", + }, + "managed_key_id": { + Type: framework.TypeString, + Description: "The UUID of the managed key to use for the new version of this transit key", + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -29,6 +47,8 @@ func (b *backend) pathRotate() *framework.Path { func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { name := d.Get("name").(string) + managedKeyName := d.Get("managed_key_name").(string) + managedKeyId := d.Get("managed_key_id").(string) // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ @@ -44,12 +64,26 @@ func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d * if !b.System().CachingDisabled() { p.Lock(true) } + defer p.Unlock() + + if p.Type == keysutil.KeyType_MANAGED_KEY { + var keyId string + keyId, err = GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId) + if err != nil { + p.Unlock() + return nil, err + } + err = p.RotateManagedKey(ctx, req.Storage, keyId) + } else { + // Rotate the policy + err = p.Rotate(ctx, req.Storage, b.GetRandomReader()) + } - // Rotate the policy - err = p.Rotate(ctx, req.Storage, b.GetRandomReader()) + if err != nil { + return nil, err + } - p.Unlock() - return nil, err + return b.formatKeyPolicy(p, nil) } const pathRotateHelpSyn = `Rotate named encryption key` diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go index 8a983eb5bcd4..3307c5ca99b9 100644 --- a/builtin/logical/transit/path_sign_verify.go +++ b/builtin/logical/transit/path_sign_verify.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
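Tying the rotate changes above together, a sketch of rotating a managed_key-typed transit key via the API; the key and managed key names are hypothetical, and for non-managed key types the new fields are simply ignored.

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// For a managed_key-typed transit key, one of managed_key_name or
	// managed_key_id selects the managed key backing the new version
	// (resolved through GetManagedKeyUUID in the handler above).
	resp, err := client.Logical().Write("transit/keys/my-key/rotate", map[string]interface{}{
		"managed_key_name": "payments-hsm-key",
	})
	if err != nil {
		log.Fatal(err)
	}

	// The rotate handler now returns the formatted key policy rather than an
	// empty response.
	fmt.Println(resp.Data["latest_version"])
}
```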
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( "context" "crypto/rsa" "encoding/base64" + "errors" "fmt" "strconv" "strings" @@ -25,12 +29,12 @@ type batchResponseSignItem struct { // request item Signature string `json:"signature,omitempty" mapstructure:"signature"` - // The key version to be used for encryption + // The key version to be used for signing KeyVersion int `json:"key_version" mapstructure:"key_version"` PublicKey []byte `json:"publickey,omitempty" mapstructure:"publickey"` - // Error, if set represents a failure encountered while encrypting a + // Error, if set represents a failure encountered while signing a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -54,7 +58,7 @@ type batchResponseVerifyItem struct { // Valid indicates whether signature matches the signature derived from the input string Valid bool `json:"valid" mapstructure:"valid"` - // Error, if set represents a failure encountered while encrypting a + // Error, if set represents a failure encountered while verifying a // corresponding batch request item Error string `json:"error,omitempty" mapstructure:"error"` @@ -74,6 +78,13 @@ const defaultHashAlgorithm = "sha2-256" func (b *backend) pathSign() *framework.Path { return &framework.Path{ Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "sign", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -154,6 +165,14 @@ Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, Description: `The salt length used to sign. Currently only applies to the RSA PSS signature scheme. Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: `Specifies a list of items for processing. When this parameter is set, +any supplied 'input' or 'context' parameters will be ignored. Responses are returned in the +'batch_results' array component of the 'data' element of the response. Any batch output will +preserve the order of the batch input`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -168,6 +187,13 @@ Options are 'auto' (the default used by Golang, causing the salt to be as large func (b *backend) pathVerify() *framework.Path { return &framework.Path{ Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "verify", + OperationSuffix: "|with-algorithm", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -233,7 +259,7 @@ none on signing path.`, "signature_algorithm": { Type: framework.TypeString, - Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. + Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, }, @@ -249,6 +275,14 @@ Options are 'pss' or 'pkcs1v15'. 
Defaults to 'pss'`, Description: `The salt length used to sign. Currently only applies to the RSA PSS signature scheme. Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, }, + + "batch_input": { + Type: framework.TypeSlice, + Description: `Specifies a list of items for processing. When this parameter is set, +any supplied 'input', 'hmac' or 'signature' parameters will be ignored. Responses are returned in the +'batch_results' array component of the 'data' element of the response. Any batch output will +preserve the order of the batch input`, + }, }, Callbacks: map[logical.Operation]framework.OperationFunc{ @@ -319,10 +353,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -332,28 +362,33 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr return nil, err } if p == nil { - return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("signing key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() if !p.Type.SigningSupported() { - p.Unlock() return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest } + // Allow managed keys to specify no hash algo without additional conditions. 
+ if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + batchInputRaw := d.Raw["batch_input"] var batchInputItems []batchRequestSignItem if batchInputRaw != nil { err = mapstructure.Decode(batchInputRaw, &batchInputItems) if err != nil { - p.Unlock() return nil, fmt.Errorf("failed to parse batch input: %w", err) } if len(batchInputItems) == 0 { - p.Unlock() return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest } } else { @@ -366,7 +401,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } response := make([]batchResponseSignItem, len(batchInputItems)) - for i, item := range batchInputItems { rawInput, ok := item["input"] @@ -385,8 +419,10 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr if p.Type.HashSignatureInput() && !prehashed { hf := keysutil.HashFuncMap[hashAlgorithm]() - hf.Write(input) - input = hf.Sum(nil) + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } } contextRaw := item["context"] @@ -400,11 +436,26 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } + var managedKeyParameters keysutil.ManagedKeyParameters + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } + + managedKeyParameters = keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + } + } + sig, err := p.SignWithOptions(ver, context, input, &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, + ManagedKeyParams: managedKeyParameters, }) if err != nil { if batchInputRaw != nil { @@ -437,7 +488,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } else { if response[0].Error != "" || response[0].err != nil { - p.Unlock() if response[0].Error != "" { return logical.ErrorResponse(response[0].Error), response[0].err } @@ -455,7 +505,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr } } - p.Unlock() return resp, nil } @@ -557,10 +606,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest } - if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") { - return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest - } - // Get the policy p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ Storage: req.Storage, @@ -570,17 +615,24 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * return nil, err } if p == nil { - return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + return logical.ErrorResponse("signature verification key not found"), logical.ErrInvalidRequest } if !b.System().CachingDisabled() { p.Lock(false) } + defer p.Unlock() if !p.Type.SigningSupported() { - p.Unlock() return 
logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest } + // Allow managed keys to specify no hash algo without additional conditions. + if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + response := make([]batchResponseVerifyItem, len(batchInputItems)) for i, item := range batchInputItems { @@ -608,8 +660,10 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * if p.Type.HashSignatureInput() && !prehashed { hf := keysutil.HashFuncMap[hashAlgorithm]() - hf.Write(input) - input = hf.Sum(nil) + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } } contextRaw := item["context"] @@ -622,13 +676,29 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * continue } } + var managedKeyParameters keysutil.ManagedKeyParameters + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } - valid, err := p.VerifySignatureWithOptions(context, input, sig, &keysutil.SigningOptions{ - HashAlgorithm: hashAlgorithm, - Marshaling: marshaling, - SaltLength: saltLength, - SigAlgorithm: sigAlgorithm, - }) + managedKeyParameters = keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + } + } + + signingOptions := &keysutil.SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, + ManagedKeyParams: managedKeyParameters, + } + + valid, err := p.VerifySignatureWithOptions(context, input, sig, signingOptions) if err != nil { switch err.(type) { case errutil.UserError: @@ -657,7 +727,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * } } else { if response[0].Error != "" || response[0].err != nil { - p.Unlock() if response[0].Error != "" { return logical.ErrorResponse(response[0].Error), response[0].err } @@ -668,7 +737,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d * } } - p.Unlock() return resp, nil } diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go index e679a089729c..a7abf6be45ba 100644 --- a/builtin/logical/transit/path_sign_verify_test.go +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_trim.go b/builtin/logical/transit/path_trim.go index 60d6ef9dda6d..3f0a4df0ad24 100644 --- a/builtin/logical/transit/path_trim.go +++ b/builtin/logical/transit/path_trim.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
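The sign and verify endpoints above gain the same `batch_input` shape as the other transit batch operations. A minimal batch-sign sketch, assuming transit is mounted at `transit/`; the key name is hypothetical and must be a signing-capable key type (e.g. ed25519).

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	input := base64.StdEncoding.EncodeToString([]byte("hello"))
	resp, err := client.Logical().Write("transit/sign/sig-key", map[string]interface{}{
		"batch_input": []map[string]interface{}{
			{"input": input},
			{"input": input},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// batch_results preserves the order of batch_input; each item carries a
	// "signature" or a per-item "error".
	fmt.Println(resp.Data["batch_results"])
}
```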
+// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -11,6 +14,13 @@ import ( func (b *backend) pathTrim() *framework.Path { return &framework.Path{ Pattern: "keys/" + framework.GenericNameRegex("name") + "/trim", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "trim", + OperationSuffix: "key", + }, + Fields: map[string]*framework.FieldSchema{ "name": { Type: framework.TypeString, @@ -90,7 +100,7 @@ func (b *backend) pathTrimUpdate() framework.OperationFunc { return nil, err } - return nil, nil + return b.formatKeyPolicy(p, nil) } } diff --git a/builtin/logical/transit/path_trim_test.go b/builtin/logical/transit/path_trim_test.go index db38aad938ad..448d0fba34b0 100644 --- a/builtin/logical/transit/path_trim_test.go +++ b/builtin/logical/transit/path_trim_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/path_wrapping_key.go b/builtin/logical/transit/path_wrapping_key.go index 1a08318db339..42ccb888a245 100644 --- a/builtin/logical/transit/path_wrapping_key.go +++ b/builtin/logical/transit/path_wrapping_key.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -17,6 +20,10 @@ const WrappingKeyName = "wrapping-key" func (b *backend) pathWrappingKey() *framework.Path { return &framework.Path{ Pattern: "wrapping_key", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "wrapping-key", + }, Callbacks: map[logical.Operation]framework.OperationFunc{ logical.ReadOperation: b.pathWrappingKeyRead, }, diff --git a/builtin/logical/transit/path_wrapping_key_test.go b/builtin/logical/transit/path_wrapping_key_test.go index da90585a4ac9..9ed58e45c284 100644 --- a/builtin/logical/transit/path_wrapping_key_test.go +++ b/builtin/logical/transit/path_wrapping_key_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( diff --git a/builtin/logical/transit/stepwise_test.go b/builtin/logical/transit/stepwise_test.go index b64aca9861e9..77cf093b991a 100644 --- a/builtin/logical/transit/stepwise_test.go +++ b/builtin/logical/transit/stepwise_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package transit import ( @@ -18,7 +21,7 @@ func TestAccBackend_basic_docker(t *testing.T) { decryptData := make(map[string]interface{}) envOptions := stepwise.MountOptions{ RegistryName: "updatedtransit", - PluginType: stepwise.PluginTypeSecrets, + PluginType: api.PluginTypeSecrets, PluginName: "transit", MountPathPrefix: "transit_temp", } diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go index b165a10c1ec1..4ab5c593df68 100644 --- a/builtin/plugin/backend.go +++ b/builtin/plugin/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package plugin import ( @@ -83,23 +86,15 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, runningVersion = versioner.PluginVersion().Version } - external := false - if externaler, ok := raw.(logical.Externaler); ok { - external = externaler.IsExternal() - } - // Cleanup meta plugin backend raw.Cleanup(ctx) // Initialize b.Backend with placeholder backend since plugin // backends will need to be lazy loaded. 
- b.Backend = &placeholderBackend{ - Backend: framework.Backend{ - PathsSpecial: paths, - BackendType: btype, - RunningVersion: runningVersion, - }, - external: external, + b.Backend = &framework.Backend{ + PathsSpecial: paths, + BackendType: btype, + RunningVersion: runningVersion, } b.config = conf @@ -107,23 +102,6 @@ func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, return &b, nil } -// placeholderBackend is used a placeholder before a backend is lazy-loaded. -// It is mostly used to mark that the backend is an external backend. -type placeholderBackend struct { - framework.Backend - - external bool -} - -func (p *placeholderBackend) IsExternal() bool { - return p.external -} - -var ( - _ logical.Externaler = (*placeholderBackend)(nil) - _ logical.PluginVersioner = (*placeholderBackend)(nil) -) - // PluginBackend is a thin wrapper around plugin.BackendPluginClient type PluginBackend struct { Backend logical.Backend @@ -323,14 +301,4 @@ func (b *PluginBackend) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -func (b *PluginBackend) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() - } - return false -} - -var ( - _ logical.PluginVersioner = (*PluginBackend)(nil) - _ logical.Externaler = (*PluginBackend)(nil) -) +var _ logical.PluginVersioner = (*PluginBackend)(nil) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go index 4d2727037adc..094047c03f1c 100644 --- a/builtin/plugin/backend_lazyLoad_test.go +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package plugin import ( diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go index d7a678ba1f6b..713444061286 100644 --- a/builtin/plugin/backend_test.go +++ b/builtin/plugin/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package plugin_test import ( @@ -9,6 +12,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -112,7 +116,11 @@ func TestBackend_PluginMain_Multiplexed(t *testing.T) { } func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + t.Helper() + pluginDir := corehelpers.MakeTestPluginDir(t) + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + PluginDirectory: pluginDir, + }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, }) cluster.Start() @@ -132,9 +140,8 @@ func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) }, } - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - - vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, []string{}, "") + vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) return config, func() { cluster.Cleanup() diff --git a/builtin/plugin/mock_plugin_test.go b/builtin/plugin/mock_plugin_test.go index 532b7c763286..6c189a846a89 100644 --- a/builtin/plugin/mock_plugin_test.go +++ b/builtin/plugin/mock_plugin_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package plugin import ( diff --git a/builtin/plugin/v5/backend.go b/builtin/plugin/v5/backend.go index 3f7a9a884ce7..38433dff71bb 100644 --- a/builtin/plugin/v5/backend.go +++ b/builtin/plugin/v5/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package plugin import ( @@ -77,7 +80,6 @@ func (b *backend) reloadBackend(ctx context.Context, storage logical.Storage) er err = b.Backend.Initialize(ctx, &logical.InitializationRequest{ Storage: storage, }) - if err != nil { return err } diff --git a/changelog/10961.txt b/changelog/10961.txt new file mode 100644 index 000000000000..5387a53d38ae --- /dev/null +++ b/changelog/10961.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Added support for signed GET requests for authenticating to vault using the aws iam method. +``` diff --git a/changelog/12666.txt b/changelog/12666.txt new file mode 100644 index 000000000000..a6e008773a36 --- /dev/null +++ b/changelog/12666.txt @@ -0,0 +1,4 @@ + +```release-note:improvement +storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable +``` diff --git a/changelog/12684.txt b/changelog/12684.txt new file mode 100644 index 000000000000..7abf3d43d29a --- /dev/null +++ b/changelog/12684.txt @@ -0,0 +1,3 @@ +```release-note:bug +Doc: Expanded the description of "What is Vault?" to align with CSA team's content. 
+``` diff --git a/changelog/14998.txt b/changelog/14998.txt new file mode 100644 index 000000000000..64615f2e1742 --- /dev/null +++ b/changelog/14998.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update mount backend form to use selectable cards +``` diff --git a/changelog/17076.txt b/changelog/17076.txt new file mode 100644 index 000000000000..93e7c1eacbe3 --- /dev/null +++ b/changelog/17076.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core/cli: Warning related to VAULT_ADDR & -address not set with CLI requests. +``` + diff --git a/changelog/17575.txt b/changelog/17575.txt new file mode 100644 index 000000000000..f08b53ff851c --- /dev/null +++ b/changelog/17575.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint +``` diff --git a/changelog/17848.txt b/changelog/17848.txt deleted file mode 100644 index 40579e4e184c..000000000000 --- a/changelog/17848.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -autopilot: Update version to v.0.2.0 to add better support for respecting min quorum -``` \ No newline at end of file diff --git a/changelog/17893.txt b/changelog/17893.txt new file mode 100644 index 000000000000..3dddc7659b8d --- /dev/null +++ b/changelog/17893.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. +``` \ No newline at end of file diff --git a/changelog/17894.txt b/changelog/17894.txt new file mode 100644 index 000000000000..bd056cdf34e6 --- /dev/null +++ b/changelog/17894.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: allow selection of "default" for ssh algorithm_signer in web interface +``` diff --git a/changelog/17919.txt b/changelog/17919.txt new file mode 100644 index 000000000000..8fbb41db44a3 --- /dev/null +++ b/changelog/17919.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: property based testing for LifetimeWatcher sleep duration calculation +``` diff --git a/changelog/17934.txt b/changelog/17934.txt new file mode 100644 index 000000000000..7f087a915a28 --- /dev/null +++ b/changelog/17934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data +``` diff --git a/changelog/18039.txt b/changelog/18039.txt new file mode 100644 index 000000000000..ea522a7539b2 --- /dev/null +++ b/changelog/18039.txt @@ -0,0 +1,6 @@ +```release-note:improvement +plugins: Mark logical database plugins Removed and remove the plugin code. +``` +```release-note:improvement +plugins: Mark app-id auth method Removed and remove the plugin code. +``` diff --git a/changelog/18128.txt b/changelog/18128.txt new file mode 100644 index 000000000000..32dc53766471 --- /dev/null +++ b/changelog/18128.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries +``` diff --git a/changelog/18186.txt b/changelog/18186.txt new file mode 100644 index 000000000000..13710826284e --- /dev/null +++ b/changelog/18186.txt @@ -0,0 +1,6 @@ +```release-note:breaking-change +secrets/pki: Maintaining running count of certificates will be turned off by default. 
+To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config; to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics. +``` \ No newline at end of file diff --git a/changelog/18225.txt b/changelog/18225.txt new file mode 100644 index 000000000000..567c3c78da95 --- /dev/null +++ b/changelog/18225.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI +``` diff --git a/changelog/18230.txt b/changelog/18230.txt new file mode 100644 index 000000000000..335f9670db2a --- /dev/null +++ b/changelog/18230.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: allow configuration of alias dereferencing in LDAP search +``` diff --git a/changelog/18299.txt b/changelog/18299.txt new file mode 100644 index 000000000000..b340b9523af0 --- /dev/null +++ b/changelog/18299.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Do not warn about unrecognized parameter 'batch_input' +``` diff --git a/changelog/18302.txt b/changelog/18302.txt new file mode 100644 index 000000000000..1f4b69dc863a --- /dev/null +++ b/changelog/18302.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/status: Expand node-level status information +``` diff --git a/changelog/18351.txt b/changelog/18351.txt new file mode 100644 index 000000000000..07faa06d1356 --- /dev/null +++ b/changelog/18351.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/status: Add cluster-level status information +``` diff --git a/changelog/18376.txt b/changelog/18376.txt new file mode 100644 index 000000000000..1edc3df5a1ad --- /dev/null +++ b/changelog/18376.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to pki/config_*.go +``` diff --git a/changelog/18397.txt b/changelog/18397.txt new file mode 100644 index 000000000000..aafb9d71e6fd --- /dev/null +++ b/changelog/18397.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when +allowed by role +``` diff --git a/changelog/18401.txt b/changelog/18401.txt index 441e9a08b0df..8f1c148fd13a 100644 --- a/changelog/18401.txt +++ b/changelog/18401.txt @@ -1,3 +1,3 @@ ```release-note:bug -expiration: Prevent panics on perf standbys when an irrevocable release gets deleted. +expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. ``` diff --git a/changelog/18403.txt b/changelog/18403.txt new file mode 100644 index 000000000000..458f6c92633f --- /dev/null +++ b/changelog/18403.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. +``` diff --git a/changelog/18437.txt b/changelog/18437.txt new file mode 100644 index 000000000000..9ca8a8dc3bb3 --- /dev/null +++ b/changelog/18437.txt @@ -0,0 +1,3 @@ +```release-note:improvement +client/pki: Add a new command verify-sign which checks the relationship between two certificates.
+``` diff --git a/changelog/18456.txt b/changelog/18456.txt new file mode 100644 index 000000000000..ee297508f2a4 --- /dev/null +++ b/changelog/18456.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/audit endpoints +``` \ No newline at end of file diff --git a/changelog/18463.txt b/changelog/18463.txt new file mode 100644 index 000000000000..538f66eb1dc7 --- /dev/null +++ b/changelog/18463.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Add List-Intermediates functionality to pki client. +``` diff --git a/changelog/18465.txt b/changelog/18465.txt new file mode 100644 index 000000000000..928da99bc4fe --- /dev/null +++ b/changelog/18465.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/auth endpoints +``` \ No newline at end of file diff --git a/changelog/18466.txt b/changelog/18466.txt new file mode 100644 index 000000000000..220e058a16bb --- /dev/null +++ b/changelog/18466.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Allow patching issuer to set an empty issuer name. +``` diff --git a/changelog/18467.txt b/changelog/18467.txt new file mode 100644 index 000000000000..55a85a6487a2 --- /dev/null +++ b/changelog/18467.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. +``` \ No newline at end of file diff --git a/changelog/18468.txt b/changelog/18468.txt new file mode 100644 index 000000000000..362bf05018c5 --- /dev/null +++ b/changelog/18468.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/capabilities endpoints +``` \ No newline at end of file diff --git a/changelog/18472.txt b/changelog/18472.txt new file mode 100644 index 000000000000..e34d53afc2f6 --- /dev/null +++ b/changelog/18472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/config and /sys/generate-root endpoints +``` \ No newline at end of file diff --git a/changelog/18492.txt b/changelog/18492.txt new file mode 100644 index 000000000000..6b0b3b50771a --- /dev/null +++ b/changelog/18492.txt @@ -0,0 +1,3 @@ +```release-note:improvement +framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI +``` diff --git a/changelog/18499.txt b/changelog/18499.txt new file mode 100644 index 000000000000..b329ed0db08b --- /dev/null +++ b/changelog/18499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. +``` \ No newline at end of file diff --git a/changelog/18513.txt b/changelog/18513.txt new file mode 100644 index 000000000000..6b3ca2fe486f --- /dev/null +++ b/changelog/18513.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: the latest version of Chrome does not automatically redirect back to the app after authentication unless triggered by the user, so a link was added to redirect back to the app. +``` \ No newline at end of file diff --git a/changelog/18515.txt b/changelog/18515.txt new file mode 100644 index 000000000000..86eb71b19167 --- /dev/null +++ b/changelog/18515.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints.
+``` diff --git a/changelog/18521.txt b/changelog/18521.txt new file mode 100644 index 000000000000..4111aea2c98e --- /dev/null +++ b/changelog/18521.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: wait for wanted message event during OIDC callback instead of using the first message event +``` diff --git a/changelog/18542.txt b/changelog/18542.txt new file mode 100644 index 000000000000..ff4674010f4b --- /dev/null +++ b/changelog/18542.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/internal endpoints +``` diff --git a/changelog/18554.txt b/changelog/18554.txt new file mode 100644 index 000000000000..68d1d8433161 --- /dev/null +++ b/changelog/18554.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps +``` diff --git a/changelog/18556.txt b/changelog/18556.txt new file mode 100644 index 000000000000..a48dacde5ba2 --- /dev/null +++ b/changelog/18556.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters +``` diff --git a/changelog/18568.txt b/changelog/18568.txt new file mode 100644 index 000000000000..a1fbabf2545a --- /dev/null +++ b/changelog/18568.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths +``` diff --git a/changelog/18571.txt b/changelog/18571.txt new file mode 100644 index 000000000000..dd811d9fd441 --- /dev/null +++ b/changelog/18571.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token, sys: Fix path-help being unavailable for some list-only endpoints +``` diff --git a/changelog/18585.txt b/changelog/18585.txt new file mode 100644 index 000000000000..a0832e2d415e --- /dev/null +++ b/changelog/18585.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/connectivity: Only update SCADA session metadata if status changes +``` diff --git a/changelog/18587.txt b/changelog/18587.txt new file mode 100644 index 000000000000..7471d9a4c025 --- /dev/null +++ b/changelog/18587.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] +``` diff --git a/changelog/18589.txt b/changelog/18589.txt new file mode 100644 index 000000000000..2e1ef4878dab --- /dev/null +++ b/changelog/18589.txt @@ -0,0 +1,3 @@ +```release-note:improvement +vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 +``` \ No newline at end of file diff --git a/changelog/18598.txt b/changelog/18598.txt new file mode 100644 index 000000000000..62d13d0e705f --- /dev/null +++ b/changelog/18598.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: include mount counts when de-duplicating current and historical month data +``` diff --git a/changelog/18604.txt b/changelog/18604.txt new file mode 100644 index 000000000000..7645cbb40394 --- /dev/null +++ b/changelog/18604.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add `detect_deadlocks` config to optionally detect core state deadlocks +``` \ No newline at end of file diff --git a/changelog/18610.txt b/changelog/18610.txt new file mode 100644 index 000000000000..bac3add5a1a3 --- /dev/null +++ b/changelog/18610.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth: Allow naming login MFA methods and using those names instead of 
IDs in satisfying MFA requirement for requests. +Make passcode arguments consistent across login MFA method types. +``` \ No newline at end of file diff --git a/changelog/18624.txt b/changelog/18624.txt new file mode 100644 index 000000000000..91209bb46d9e --- /dev/null +++ b/changelog/18624.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/rotate endpoints +``` diff --git a/changelog/18625.txt b/changelog/18625.txt new file mode 100644 index 000000000000..526d6b63e6f6 --- /dev/null +++ b/changelog/18625.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/seal endpoints +``` \ No newline at end of file diff --git a/changelog/18626.txt b/changelog/18626.txt new file mode 100644 index 000000000000..6bb2ba0f4d89 --- /dev/null +++ b/changelog/18626.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/tool endpoints +``` \ No newline at end of file diff --git a/changelog/18627.txt b/changelog/18627.txt new file mode 100644 index 000000000000..e2a4dfb5f2e6 --- /dev/null +++ b/changelog/18627.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/wrapping endpoints +``` \ No newline at end of file diff --git a/changelog/18628.txt b/changelog/18628.txt new file mode 100644 index 000000000000..0722856c93b9 --- /dev/null +++ b/changelog/18628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req +``` \ No newline at end of file diff --git a/changelog/18632.txt b/changelog/18632.txt new file mode 100644 index 000000000000..535961367a3a --- /dev/null +++ b/changelog/18632.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/postgres: Support multiline strings for revocation statements. +``` diff --git a/changelog/18633.txt b/changelog/18633.txt new file mode 100644 index 000000000000..2048c46d914e --- /dev/null +++ b/changelog/18633.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to /sys defined endpoints. +``` \ No newline at end of file diff --git a/changelog/18635.txt b/changelog/18635.txt new file mode 100644 index 000000000000..43f3fdf67365 --- /dev/null +++ b/changelog/18635.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) +``` diff --git a/changelog/18636.txt b/changelog/18636.txt new file mode 100644 index 000000000000..9f260e2e86fc --- /dev/null +++ b/changelog/18636.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests +``` diff --git a/changelog/18638.txt b/changelog/18638.txt new file mode 100644 index 000000000000..727c85a66996 --- /dev/null +++ b/changelog/18638.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: allows some parts of config to be reloaded without requiring a restart. +``` \ No newline at end of file diff --git a/changelog/18645.txt b/changelog/18645.txt new file mode 100644 index 000000000000..0122111bae42 --- /dev/null +++ b/changelog/18645.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. 
+``` diff --git a/changelog/18651.txt b/changelog/18651.txt new file mode 100644 index 000000000000..9fc7ff8e4ec0 --- /dev/null +++ b/changelog/18651.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: cleanup unsaved auth method ember data record when navigating away from mount backend form +``` \ No newline at end of file diff --git a/changelog/18663.txt b/changelog/18663.txt new file mode 100644 index 000000000000..941b2715ef75 --- /dev/null +++ b/changelog/18663.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` +``` diff --git a/changelog/18673.txt b/changelog/18673.txt new file mode 100644 index 000000000000..73a2a8f43925 --- /dev/null +++ b/changelog/18673.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. +``` \ No newline at end of file diff --git a/changelog/18675.txt b/changelog/18675.txt new file mode 100644 index 000000000000..90a8ed64d21c --- /dev/null +++ b/changelog/18675.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from +sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. +``` \ No newline at end of file diff --git a/changelog/18682.txt b/changelog/18682.txt new file mode 100644 index 000000000000..904210903351 --- /dev/null +++ b/changelog/18682.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add experiments system and `events.alpha1` experiment. +``` + diff --git a/changelog/18684.txt b/changelog/18684.txt new file mode 100644 index 000000000000..803c7cc571f9 --- /dev/null +++ b/changelog/18684.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. +``` diff --git a/changelog/18704.txt b/changelog/18704.txt new file mode 100644 index 000000000000..bc76db9ba923 --- /dev/null +++ b/changelog/18704.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix race with follower heartbeat tracker during teardown. +``` \ No newline at end of file diff --git a/changelog/18708.txt b/changelog/18708.txt new file mode 100644 index 000000000000..1db2ba6239f0 --- /dev/null +++ b/changelog/18708.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext +``` diff --git a/changelog/18716.txt b/changelog/18716.txt new file mode 100644 index 000000000000..e3fa257f1aa0 --- /dev/null +++ b/changelog/18716.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] +``` diff --git a/changelog/18718.txt b/changelog/18718.txt new file mode 100644 index 000000000000..a5b9b133421c --- /dev/null +++ b/changelog/18718.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. 
+``` \ No newline at end of file diff --git a/changelog/18729.txt b/changelog/18729.txt new file mode 100644 index 000000000000..975d0274bc6f --- /dev/null +++ b/changelog/18729.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. +``` \ No newline at end of file diff --git a/changelog/18740.txt b/changelog/18740.txt new file mode 100644 index 000000000000..f493995d48a6 --- /dev/null +++ b/changelog/18740.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. +``` diff --git a/changelog/18743.txt b/changelog/18743.txt new file mode 100644 index 000000000000..7cdfb79a5fe4 --- /dev/null +++ b/changelog/18743.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes query parameters not passed in api explorer test requests +``` \ No newline at end of file diff --git a/changelog/18752.txt b/changelog/18752.txt new file mode 100644 index 000000000000..95346e0431d1 --- /dev/null +++ b/changelog/18752.txt @@ -0,0 +1,3 @@ +```release-note:improvement +**Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. +``` \ No newline at end of file diff --git a/changelog/18766.txt b/changelog/18766.txt new file mode 100644 index 000000000000..50743b3916f1 --- /dev/null +++ b/changelog/18766.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. +``` diff --git a/changelog/18772.txt b/changelog/18772.txt new file mode 100644 index 000000000000..55c0696de1ba --- /dev/null +++ b/changelog/18772.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go +``` diff --git a/changelog/18787.txt b/changelog/18787.txt new file mode 100644 index 000000000000..e865125de6ec --- /dev/null +++ b/changelog/18787.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. +``` diff --git a/changelog/18799.txt b/changelog/18799.txt new file mode 100644 index 000000000000..1d7159363b5b --- /dev/null +++ b/changelog/18799.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters +``` diff --git a/changelog/18808.txt b/changelog/18808.txt new file mode 100644 index 000000000000..12c80e62d0ec --- /dev/null +++ b/changelog/18808.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes keymgmt key details page +``` \ No newline at end of file diff --git a/changelog/18809.txt b/changelog/18809.txt new file mode 100644 index 000000000000..a1ec06f5799d --- /dev/null +++ b/changelog/18809.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity (enterprise): Fix misattribution of entities to no or child namespace auth methods +``` diff --git a/changelog/18811.txt b/changelog/18811.txt new file mode 100644 index 000000000000..34a155dda5a4 --- /dev/null +++ b/changelog/18811.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: Provide the IP address of requests from Vault to a Duo challenge after successful authentication.
+``` diff --git a/changelog/18817.txt b/changelog/18817.txt new file mode 100644 index 000000000000..17c93aab75c8 --- /dev/null +++ b/changelog/18817.txt @@ -0,0 +1,3 @@ +```release-note:improvement +migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. +``` \ No newline at end of file diff --git a/changelog/18842.txt b/changelog/18842.txt new file mode 100644 index 000000000000..9a69ff66f126 --- /dev/null +++ b/changelog/18842.txt @@ -0,0 +1,3 @@ +```release-note:feature +**New PKI UI**: Add beta support for new and improved PKI UI +``` \ No newline at end of file diff --git a/changelog/18859.txt b/changelog/18859.txt new file mode 100644 index 000000000000..0ee2c361e291 --- /dev/null +++ b/changelog/18859.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided +``` diff --git a/changelog/18863.txt b/changelog/18863.txt new file mode 100644 index 000000000000..c1f2800c2890 --- /dev/null +++ b/changelog/18863.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false). If set to true and the `path` option points to a symlink to a JWT, the JWT itself is removed instead of the symlink, provided the `remove_jwt_after_reading` config option is set to true (default). +``` \ No newline at end of file diff --git a/changelog/18870.txt b/changelog/18870.txt new file mode 100644 index 000000000000..1b694895fec6 --- /dev/null +++ b/changelog/18870.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: provide more descriptive error message when calling enterprise feature paths in open-source +``` \ No newline at end of file diff --git a/changelog/18874.txt b/changelog/18874.txt new file mode 100644 index 000000000000..7483c43f9060 --- /dev/null +++ b/changelog/18874.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. +``` diff --git a/changelog/18885.txt b/changelog/18885.txt new file mode 100644 index 000000000000..99878c89c103 --- /dev/null +++ b/changelog/18885.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Load config, crls from InitializeFunc to allow parallel processing. +``` diff --git a/changelog/18887.txt b/changelog/18887.txt new file mode 100644 index 000000000000..55e8600878df --- /dev/null +++ b/changelog/18887.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add transit import key helper commands for BYOK to Transit/Transform. +``` \ No newline at end of file diff --git a/changelog/18890.txt b/changelog/18890.txt new file mode 100644 index 000000000000..056e58599698 --- /dev/null +++ b/changelog/18890.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: removes strings.ToLower for alias name from pathLoginAliasLookahead function in userpass. This fixes +the storage entry for locked users by having the correct alias name in path.
+``` \ No newline at end of file diff --git a/changelog/18892.txt b/changelog/18892.txt new file mode 100644 index 000000000000..65b6ebf246a7 --- /dev/null +++ b/changelog/18892.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` +``` diff --git a/changelog/18899.txt b/changelog/18899.txt new file mode 100644 index 000000000000..92f2474ed570 --- /dev/null +++ b/changelog/18899.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: fix race between tidy's cert counting and tidy status reporting. +``` diff --git a/changelog/18916.txt b/changelog/18916.txt new file mode 100644 index 000000000000..eb2792b31e40 --- /dev/null +++ b/changelog/18916.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. +``` diff --git a/changelog/18923.txt b/changelog/18923.txt new file mode 100644 index 000000000000..2b4abae0157c --- /dev/null +++ b/changelog/18923.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted +``` diff --git a/changelog/18934.txt b/changelog/18934.txt new file mode 100644 index 000000000000..e84f6667ccb2 --- /dev/null +++ b/changelog/18934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Change gen_openapi.sh to generate schema with generic mount paths +``` diff --git a/changelog/18935.txt b/changelog/18935.txt new file mode 100644 index 000000000000..c55cda115cd8 --- /dev/null +++ b/changelog/18935.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add default values to thing_mount_path parameters +``` diff --git a/changelog/18938.txt b/changelog/18938.txt new file mode 100644 index 000000000000..de937fc1ad37 --- /dev/null +++ b/changelog/18938.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. +``` diff --git a/changelog/18939.txt b/changelog/18939.txt new file mode 100644 index 000000000000..aa7f8e7c6658 --- /dev/null +++ b/changelog/18939.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. +``` diff --git a/changelog/18945.txt b/changelog/18945.txt new file mode 100644 index 000000000000..a6f6a66305ac --- /dev/null +++ b/changelog/18945.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Address a race condition accessing the loaded crls without a lock +``` diff --git a/changelog/18951.txt b/changelog/18951.txt new file mode 100644 index 000000000000..9617c0d49d88 --- /dev/null +++ b/changelog/18951.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null +``` diff --git a/changelog/18962.txt b/changelog/18962.txt new file mode 100644 index 000000000000..322c34780a2e --- /dev/null +++ b/changelog/18962.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Remove dependency on sdk module.
+``` diff --git a/changelog/18984.txt b/changelog/18984.txt new file mode 100644 index 000000000000..4652bf299bfe --- /dev/null +++ b/changelog/18984.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: consistently use UTC for CA's notAfter exceeded error message +``` diff --git a/changelog/19002.txt b/changelog/19002.txt new file mode 100644 index 000000000000..d1a1ff5371ab --- /dev/null +++ b/changelog/19002.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs. +``` \ No newline at end of file diff --git a/changelog/19005.txt b/changelog/19005.txt new file mode 100644 index 000000000000..27e251e193dd --- /dev/null +++ b/changelog/19005.txt @@ -0,0 +1,7 @@ +```release-note:change +auth/alicloud: require the `role` field on login +``` + +```release-note:bug +auth/alicloud: fix regression in vault login command that caused login to fail +``` diff --git a/changelog/19018.txt b/changelog/19018.txt new file mode 100644 index 000000000000..bd79dbd15911 --- /dev/null +++ b/changelog/19018.txt @@ -0,0 +1,7 @@ +```release-note:feature +**GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. +``` + +```release-note:bug +secrets/gcp: fix issue where IAM bindings were not preserved during policy update +``` diff --git a/changelog/19032.txt b/changelog/19032.txt new file mode 100644 index 000000000000..a474c22ce6b7 --- /dev/null +++ b/changelog/19032.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Add max_page_size configurable to LDAP configuration +``` diff --git a/changelog/19036.txt b/changelog/19036.txt new file mode 100644 index 000000000000..ebe62a7a6b58 --- /dev/null +++ b/changelog/19036.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes logout route wrapped_token bug +``` \ No newline at end of file diff --git a/changelog/19037.txt b/changelog/19037.txt new file mode 100644 index 000000000000..2ccd65615165 --- /dev/null +++ b/changelog/19037.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) +``` diff --git a/changelog/19043.txt b/changelog/19043.txt new file mode 100644 index 000000000000..20a1a77bb789 --- /dev/null +++ b/changelog/19043.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: added ability to validate response structures against openapi schema for test clusters +``` \ No newline at end of file diff --git a/changelog/19044.txt b/changelog/19044.txt new file mode 100644 index 000000000000..7926bb66c9a7 --- /dev/null +++ b/changelog/19044.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/redis-elasticache: changed config argument names for disambiguation +``` \ No newline at end of file diff --git a/changelog/19056.txt b/changelog/19056.txt new file mode 100644 index 000000000000..b5b1ae352030 --- /dev/null +++ b/changelog/19056.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: make upgrade synchronous when no keys to upgrade +``` \ No newline at end of file diff --git a/changelog/19061.txt b/changelog/19061.txt new file mode 100644 index 000000000000..ddf794358def --- /dev/null +++ b/changelog/19061.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ad: Fix bug where updates to config would fail if password isn't provided +``` diff --git a/changelog/19063.txt b/changelog/19063.txt new file mode
100644 index 000000000000..df361111bd9f --- /dev/null +++ b/changelog/19063.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 +``` diff --git a/changelog/19068.txt b/changelog/19068.txt new file mode 100644 index 000000000000..6edb29fe1103 --- /dev/null +++ b/changelog/19068.txt @@ -0,0 +1,3 @@ +```release-note:change +sdk: Remove version package, make useragent.String versionless. +``` diff --git a/changelog/19071.txt b/changelog/19071.txt new file mode 100644 index 000000000000..ca988dbebbce --- /dev/null +++ b/changelog/19071.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode +``` diff --git a/changelog/19076.txt b/changelog/19076.txt new file mode 100644 index 000000000000..c206f44e82c3 --- /dev/null +++ b/changelog/19076.txt @@ -0,0 +1,6 @@ +```release-note:improvement +auth/oidc: Adds ability to set Google Workspace domain for groups search +``` +```release-note:improvement +auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts +``` \ No newline at end of file diff --git a/changelog/19077.txt b/changelog/19077.txt new file mode 100644 index 000000000000..604cea57b5c6 --- /dev/null +++ b/changelog/19077.txt @@ -0,0 +1,11 @@ +```release-note:feature +**Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine +``` + +```release-note:feature +**Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault +``` + +```release-note:feature +**VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication +``` \ No newline at end of file diff --git a/changelog/19084.txt b/changelog/19084.txt new file mode 100644 index 000000000000..97896d3d27bc --- /dev/null +++ b/changelog/19084.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kubernetes: add /check endpoint to determine if environment variables are set +``` diff --git a/changelog/19094.txt b/changelog/19094.txt new file mode 100644 index 000000000000..d3d872d91c96 --- /dev/null +++ b/changelog/19094.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) +``` diff --git a/changelog/19096.txt b/changelog/19096.txt new file mode 100644 index 000000000000..2cb0bbf04a68 --- /dev/null +++ b/changelog/19096.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: Adds ability to persist an application for the lifetime of a role. +``` \ No newline at end of file diff --git a/changelog/19098.txt b/changelog/19098.txt new file mode 100644 index 000000000000..df0f9c11ca3b --- /dev/null +++ b/changelog/19098.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cf: Remove incorrect usage of CreateOperation from path_config +``` diff --git a/changelog/19100.txt b/changelog/19100.txt new file mode 100644 index 000000000000..a2f1b72e18df --- /dev/null +++ b/changelog/19100.txt @@ -0,0 +1,4 @@ +```release-note:improvement +Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 +``` + diff --git a/changelog/19103.txt b/changelog/19103.txt new file mode 100644 index 000000000000..868db6226f94 --- /dev/null +++ b/changelog/19103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Adds error message requiring password on root credential rotation.
+``` \ No newline at end of file diff --git a/changelog/19111.txt b/changelog/19111.txt new file mode 100644 index 000000000000..35b7803d6974 --- /dev/null +++ b/changelog/19111.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys +``` diff --git a/changelog/19116.txt b/changelog/19116.txt new file mode 100644 index 000000000000..5dfcd9ecfada --- /dev/null +++ b/changelog/19116.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allows license-banners to be dismissed. Saves preferences in localStorage. +``` \ No newline at end of file diff --git a/changelog/19135.txt b/changelog/19135.txt new file mode 100644 index 000000000000..a3e085b5a580 --- /dev/null +++ b/changelog/19135.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui (enterprise): Fix cancel button from transform engine role creation page +``` diff --git a/changelog/19139.txt b/changelog/19139.txt new file mode 100644 index 000000000000..75e9a7847e17 --- /dev/null +++ b/changelog/19139.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes +``` diff --git a/changelog/19145.txt b/changelog/19145.txt new file mode 100644 index 000000000000..9cca8e85d634 --- /dev/null +++ b/changelog/19145.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/kv: Emit events on write if events system enabled +``` + diff --git a/changelog/19160.txt b/changelog/19160.txt new file mode 100644 index 000000000000..66a3baa15758 --- /dev/null +++ b/changelog/19160.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. +``` \ No newline at end of file diff --git a/changelog/19170.txt b/changelog/19170.txt new file mode 100644 index 000000000000..9a421dd183a2 --- /dev/null +++ b/changelog/19170.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: fix database static-user sample payload +``` diff --git a/changelog/19186.txt b/changelog/19186.txt new file mode 100644 index 000000000000..cb3b59a9f92c --- /dev/null +++ b/changelog/19186.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion +``` diff --git a/changelog/19187.txt b/changelog/19187.txt new file mode 100644 index 000000000000..c04234a1bb9b --- /dev/null +++ b/changelog/19187.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Add rotate root documentation for azure secrets engine +``` diff --git a/changelog/19190.txt b/changelog/19190.txt new file mode 100644 index 000000000000..480006b1ebc8 --- /dev/null +++ b/changelog/19190.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: show Get credentials button for static roles detail page when a user has the proper permissions. +``` diff --git a/changelog/19194.txt b/changelog/19194.txt new file mode 100644 index 000000000000..b2a5ff383f12 --- /dev/null +++ b/changelog/19194.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. 
+``` + diff --git a/changelog/19196.txt b/changelog/19196.txt new file mode 100644 index 000000000000..aab2638ceac7 --- /dev/null +++ b/changelog/19196.txt @@ -0,0 +1,5 @@ +```release-note:feature +**PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. +``` diff --git a/changelog/19215.txt b/changelog/19215.txt new file mode 100644 index 000000000000..33fea94666bd --- /dev/null +++ b/changelog/19215.txt @@ -0,0 +1,5 @@ +```release-note:feature +**Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run +as an external plugin by vault versions that support secrets/auth plugin +multiplexing (> 1.12) +``` diff --git a/changelog/19216.txt b/changelog/19216.txt new file mode 100644 index 000000000000..e03e866e08b4 --- /dev/null +++ b/changelog/19216.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_response_headers as param for secret engine mount config +``` diff --git a/changelog/19220.txt b/changelog/19220.txt new file mode 100644 index 000000000000..cbfe7e5a9336 --- /dev/null +++ b/changelog/19220.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove wizard +``` diff --git a/changelog/19244.txt b/changelog/19244.txt new file mode 100644 index 000000000000..63a663e9d6e4 --- /dev/null +++ b/changelog/19244.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config +``` diff --git a/changelog/19247.txt b/changelog/19247.txt new file mode 100644 index 000000000000..f51e8479c97f --- /dev/null +++ b/changelog/19247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. 
+``` diff --git a/changelog/19252.txt b/changelog/19252.txt new file mode 100644 index 000000000000..99121351d98c --- /dev/null +++ b/changelog/19252.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Consistently stop Vault server on exit in gen_openapi.sh +``` diff --git a/changelog/19260.txt b/changelog/19260.txt new file mode 100644 index 000000000000..77138a38607c --- /dev/null +++ b/changelog/19260.txt @@ -0,0 +1,3 @@ +```release-note:feature +**agent/auto-auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method +``` diff --git a/changelog/19265.txt b/changelog/19265.txt new file mode 100644 index 000000000000..23d957e2d594 --- /dev/null +++ b/changelog/19265.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Decode integer values properly in health-check configuration file +``` diff --git a/changelog/19269.txt b/changelog/19269.txt new file mode 100644 index 000000000000..57ff2072a18c --- /dev/null +++ b/changelog/19269.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file +``` diff --git a/changelog/19274.txt b/changelog/19274.txt new file mode 100644 index 000000000000..a7f5d8c29293 --- /dev/null +++ b/changelog/19274.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Fix path for role health-check warning messages +``` diff --git a/changelog/19276.txt b/changelog/19276.txt new file mode 100644 index 000000000000..373199478f92 --- /dev/null +++ b/changelog/19276.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Properly report permission issues within health-check mount tune checks +``` diff --git a/changelog/19290.txt b/changelog/19290.txt new file mode 100644 index 000000000000..1a4511590c69 --- /dev/null +++ b/changelog/19290.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. +``` diff --git a/changelog/19296.txt b/changelog/19296.txt new file mode 100644 index 000000000000..1ef62a0cde2e --- /dev/null +++ b/changelog/19296.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. +``` \ No newline at end of file diff --git a/changelog/19311.txt b/changelog/19311.txt new file mode 100644 index 000000000000..5ad6e2c01a81 --- /dev/null +++ b/changelog/19311.txt @@ -0,0 +1,3 @@ +```release-note:bug +server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled +``` diff --git a/changelog/19319.txt b/changelog/19319.txt new file mode 100644 index 000000000000..4702344afb08 --- /dev/null +++ b/changelog/19319.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Improve operationId/request/response naming strategy +``` diff --git a/changelog/19334.txt b/changelog/19334.txt new file mode 100644 index 000000000000..7df68268aabe --- /dev/null +++ b/changelog/19334.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated.
+``` \ No newline at end of file diff --git a/changelog/19365.txt b/changelog/19365.txt new file mode 100644 index 000000000000..774c750f4951 --- /dev/null +++ b/changelog/19365.txt @@ -0,0 +1,7 @@ +```release-note:improvement +auth/aws: Support request cancellation with AWS requests +``` + +```release-note:improvement +secrets/aws: Support request cancellation with AWS requests +``` diff --git a/changelog/19373.txt b/changelog/19373.txt new file mode 100644 index 000000000000..87751805e7d8 --- /dev/null +++ b/changelog/19373.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/transit: Fix import, import-version command invocation +``` diff --git a/changelog/19378.txt b/changelog/19378.txt new file mode 100644 index 000000000000..40a1e82fcb64 --- /dev/null +++ b/changelog/19378.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/kv: add -mount flag to kv list +``` diff --git a/changelog/19416.txt b/changelog/19416.txt new file mode 100644 index 000000000000..f2a7d3275b64 --- /dev/null +++ b/changelog/19416.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix cubbyhole and revocation for legacy service tokens +``` diff --git a/changelog/19428.txt b/changelog/19428.txt new file mode 100644 index 000000000000..c1ae6d54bbcb --- /dev/null +++ b/changelog/19428.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes crypto.randomUUID error in insecure contexts from third party ember-data library +``` \ No newline at end of file diff --git a/changelog/19429.txt b/changelog/19429.txt new file mode 100644 index 000000000000..341fbf5a7347 --- /dev/null +++ b/changelog/19429.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: pass encodeBase64 param to HMAC transit-key-actions. +``` diff --git a/changelog/19448.txt b/changelog/19448.txt new file mode 100644 index 000000000000..8c75b79f140c --- /dev/null +++ b/changelog/19448.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes SSH engine config deletion +``` diff --git a/changelog/19460.txt b/changelog/19460.txt new file mode 100644 index 000000000000..6334c7fdc5d2 --- /dev/null +++ b/changelog/19460.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url +``` diff --git a/changelog/19468.txt b/changelog/19468.txt new file mode 100644 index 000000000000..5afce90eb65f --- /dev/null +++ b/changelog/19468.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/reload: Fix a possible data race with rollback manager and plugin reload +``` diff --git a/changelog/19472.txt b/changelog/19472.txt new file mode 100644 index 000000000000..db9ec7276550 --- /dev/null +++ b/changelog/19472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +autopilot: Update version to v0.2.0 to add better support for respecting min quorum +``` diff --git a/changelog/19483.txt b/changelog/19483.txt new file mode 100644 index 000000000000..c7ba6f66d97d --- /dev/null +++ b/changelog/19483.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener.
+``` diff --git a/changelog/19495.txt b/changelog/19495.txt new file mode 100644 index 000000000000..dac2ca00dfb8 --- /dev/null +++ b/changelog/19495.txt @@ -0,0 +1,3 @@ +```release-note:bug +shamir: change mul and div implementations to be constant-time +``` \ No newline at end of file diff --git a/changelog/19519.txt b/changelog/19519.txt new file mode 100644 index 000000000000..6756f62b2d47 --- /dev/null +++ b/changelog/19519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. +``` diff --git a/changelog/19520.txt b/changelog/19520.txt new file mode 100644 index 000000000000..726be2c13a60 --- /dev/null +++ b/changelog/19520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +http: Support responding to HEAD operation from plugins +``` diff --git a/changelog/19541.txt b/changelog/19541.txt new file mode 100644 index 000000000000..9bdecc35832d --- /dev/null +++ b/changelog/19541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been entered +``` diff --git a/changelog/19545.txt b/changelog/19545.txt new file mode 100644 index 000000000000..615742cd3265 --- /dev/null +++ b/changelog/19545.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Update error messages resulting from Elasticsearch API errors +``` \ No newline at end of file diff --git a/changelog/19585.txt b/changelog/19585.txt new file mode 100644 index 000000000000..f68c0dc6f603 --- /dev/null +++ b/changelog/19585.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. +``` diff --git a/changelog/19591.txt b/changelog/19591.txt new file mode 100644 index 000000000000..f15d3979ad12 --- /dev/null +++ b/changelog/19591.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: validate name identifiers in mssql physical storage backend prior to use +``` diff --git a/changelog/19593.txt b/changelog/19593.txt new file mode 100644 index 000000000000..8f170578ec07 --- /dev/null +++ b/changelog/19593.txt @@ -0,0 +1,4 @@ +```release-note:improvement +events: Suppress log warnings triggered when events are sent but the events system is not enabled. +``` + diff --git a/changelog/19600.txt b/changelog/19600.txt new file mode 100644 index 000000000000..f2c1f71fa027 --- /dev/null +++ b/changelog/19600.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix logic for labeling unauthenticated/sudo paths. +``` diff --git a/changelog/19616.txt b/changelog/19616.txt new file mode 100644 index 000000000000..3afcc608d19a --- /dev/null +++ b/changelog/19616.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords in Vault before sending them to PostgreSQL +``` \ No newline at end of file diff --git a/changelog/19624.txt b/changelog/19624.txt new file mode 100644 index 000000000000..7bc2df63ea85 --- /dev/null +++ b/changelog/19624.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug +``` diff --git a/changelog/19640.txt b/changelog/19640.txt new file mode 100644 index 000000000000..8dcf59bf87fb --- /dev/null +++ b/changelog/19640.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed.
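The shamir entry above (19495) is about removing secret-dependent branches from GF(2^8) arithmetic. The sketch below shows the standard branchless-multiplication technique; it illustrates the idea and is not Vault's actual code. Division reduces to multiplying by the inverse, which the same primitive can compute via exponentiation.

```go
// Illustrative constant-time GF(2^8) multiplication modulo
// x^8 + x^4 + x^3 + x + 1, using 0x00/0xFF masks instead of branches.
package shamir

func gfMul(a, b byte) byte {
	var p byte
	for i := 0; i < 8; i++ {
		// Conditionally add a into the product; -(b & 1) is 0x00 or 0xFF.
		p ^= a & -(b & 1)
		// Multiply a by x, reducing by the field polynomial when the high
		// bit would overflow (again via a mask, not a branch).
		a = (a << 1) ^ (0x1b & -(a >> 7))
		b >>= 1
	}
	return p
}
```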
+``` diff --git a/changelog/19676.txt b/changelog/19676.txt new file mode 100644 index 000000000000..090dc801b2df --- /dev/null +++ b/changelog/19676.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. +``` diff --git a/changelog/19703.txt b/changelog/19703.txt new file mode 100644 index 000000000000..6bf8e5c18989 --- /dev/null +++ b/changelog/19703.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue navigating back a level using the breadcrumb from secret metadata view +``` \ No newline at end of file diff --git a/changelog/19721.txt b/changelog/19721.txt new file mode 100644 index 000000000000..9818a0facfe2 --- /dev/null +++ b/changelog/19721.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. +``` \ No newline at end of file diff --git a/changelog/19776.txt b/changelog/19776.txt new file mode 100644 index 000000000000..786cfd321673 --- /dev/null +++ b/changelog/19776.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. +``` diff --git a/changelog/19791.txt b/changelog/19791.txt new file mode 100644 index 000000000000..26722cde3133 --- /dev/null +++ b/changelog/19791.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add allowed_managed_keys field to secret engine mount options +``` diff --git a/changelog/19798.txt b/changelog/19798.txt new file mode 100644 index 000000000000..4bae8b637897 --- /dev/null +++ b/changelog/19798.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/terraform: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19799.txt b/changelog/19799.txt new file mode 100644 index 000000000000..aee76ca689aa --- /dev/null +++ b/changelog/19799.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bad link to namespace when namespace name includes `.` +``` \ No newline at end of file diff --git a/changelog/19811.txt b/changelog/19811.txt new file mode 100644 index 000000000000..49af10ccebcf --- /dev/null +++ b/changelog/19811.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/kv: Undelete now properly handles KV-V2 mount paths that are more than one layer deep. 
+``` diff --git a/changelog/19814.txt b/changelog/19814.txt new file mode 100644 index 000000000000..687527efca8a --- /dev/null +++ b/changelog/19814.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging +``` \ No newline at end of file diff --git a/changelog/19829.txt b/changelog/19829.txt new file mode 100644 index 000000000000..e8472b2717ed --- /dev/null +++ b/changelog/19829.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ad: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19846.txt b/changelog/19846.txt new file mode 100644 index 000000000000..269b11797b9e --- /dev/null +++ b/changelog/19846.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/alicloud: upgrades dependencies +``` diff --git a/changelog/19861.txt b/changelog/19861.txt new file mode 100644 index 000000000000..ee5bc703e9cb --- /dev/null +++ b/changelog/19861.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/mongodbatlas: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19862.txt b/changelog/19862.txt new file mode 100644 index 000000000000..c1ce6d8bb710 --- /dev/null +++ b/changelog/19862.txt @@ -0,0 +1,3 @@ +```release-note:improvement +build: Prefer GOBIN when set over GOPATH/bin when building the binary +``` diff --git a/changelog/19875.txt b/changelog/19875.txt new file mode 100644 index 000000000000..1167e39b3ee7 --- /dev/null +++ b/changelog/19875.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/random: Fix race condition in string generator helper +``` diff --git a/changelog/19878.txt b/changelog/19878.txt new file mode 100644 index 000000000000..4135434b7923 --- /dev/null +++ b/changelog/19878.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove the Bulma CSS framework. 
+``` \ No newline at end of file diff --git a/changelog/19891.txt b/changelog/19891.txt new file mode 100644 index 000000000000..b030151e858b --- /dev/null +++ b/changelog/19891.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): add configuration for license reporting +``` \ No newline at end of file diff --git a/changelog/19901.txt b/changelog/19901.txt new file mode 100644 index 000000000000..8e0bbbddb5ec --- /dev/null +++ b/changelog/19901.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updates UI javascript dependencies +``` \ No newline at end of file diff --git a/changelog/19913.txt b/changelog/19913.txt new file mode 100644 index 000000000000..eccdec6533ad --- /dev/null +++ b/changelog/19913.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds whitespace warning to secrets engine and auth method path inputs +``` \ No newline at end of file diff --git a/changelog/19954.txt b/changelog/19954.txt new file mode 100644 index 000000000000..e0ff45f87d22 --- /dev/null +++ b/changelog/19954.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/mongodb: upgrade mongo driver to 1.11 +``` diff --git a/changelog/19993.txt b/changelog/19993.txt new file mode 100644 index 000000000000..90650863ab88 --- /dev/null +++ b/changelog/19993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/openldap: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/20019.txt b/changelog/20019.txt new file mode 100644 index 000000000000..0483d1763fae --- /dev/null +++ b/changelog/20019.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: add an endpoint to write test activity log data, guarded by a build flag +``` \ No newline at end of file diff --git a/changelog/20034.txt b/changelog/20034.txt new file mode 100644 index 000000000000..c1050795bdc4 --- /dev/null +++ b/changelog/20034.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. +``` diff --git a/changelog/20044.txt b/changelog/20044.txt new file mode 100644 index 000000000000..014e61b46743 --- /dev/null +++ b/changelog/20044.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. +``` diff --git a/changelog/20057.txt b/changelog/20057.txt new file mode 100644 index 000000000000..585a07d91b3a --- /dev/null +++ b/changelog/20057.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. +``` diff --git a/changelog/20058.txt b/changelog/20058.txt new file mode 100644 index 000000000000..e43a1f4adf93 --- /dev/null +++ b/changelog/20058.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures.
+``` diff --git a/changelog/20064.txt b/changelog/20064.txt new file mode 100644 index 000000000000..c539119f713d --- /dev/null +++ b/changelog/20064.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes browser console formatting for help command output +``` \ No newline at end of file diff --git a/changelog/20070.txt b/changelog/20070.txt new file mode 100644 index 000000000000..34e6e5540d69 --- /dev/null +++ b/changelog/20070.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes remaining doc links to include /vault in path +``` \ No newline at end of file diff --git a/changelog/20073.txt b/changelog/20073.txt new file mode 100644 index 000000000000..10c21a58ba52 --- /dev/null +++ b/changelog/20073.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: refactor the activity log's generation of precomputed queries +``` \ No newline at end of file diff --git a/changelog/20078.txt b/changelog/20078.txt new file mode 100644 index 000000000000..8749354b315d --- /dev/null +++ b/changelog/20078.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: error when attempting to update retention configuration below the minimum +``` \ No newline at end of file diff --git a/changelog/20086.txt b/changelog/20086.txt new file mode 100644 index 000000000000..9511c97b66e3 --- /dev/null +++ b/changelog/20086.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. +``` diff --git a/changelog/20109.txt b/changelog/20109.txt new file mode 100644 index 000000000000..8c7cb3b32de1 --- /dev/null +++ b/changelog/20109.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/wrapping: Add example of how to unwrap without authentication in Vault +``` diff --git a/changelog/20125.txt b/changelog/20125.txt new file mode 100644 index 000000000000..07dd8201dba8 --- /dev/null +++ b/changelog/20125.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: updates clients configuration edit form state based on census reporting configuration +``` \ No newline at end of file diff --git a/changelog/20144.txt b/changelog/20144.txt new file mode 100644 index 000000000000..ef8b9a01810c --- /dev/null +++ b/changelog/20144.txt @@ -0,0 +1,4 @@ +```release-note:improvement +sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. +``` diff --git a/changelog/20150.txt b/changelog/20150.txt new file mode 100644 index 000000000000..0ea8259f9e66 --- /dev/null +++ b/changelog/20150.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. +``` diff --git a/changelog/20154.txt b/changelog/20154.txt new file mode 100644 index 000000000000..7bda3624fba1 --- /dev/null +++ b/changelog/20154.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Include OCSP parameters in read CA certificate role response. +```
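The sys/wrapping entry above (20109) documents unwrapping without prior authentication: a client whose token is the wrapping token itself can call unwrap directly. A minimal sketch, with a placeholder wrapping token:

```go
package main

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Use the wrapping token as the client token; no other auth is needed.
	client.SetToken("hvs.EXAMPLEWRAPPINGTOKEN") // placeholder value

	// An empty argument tells Unwrap to treat the client token as the
	// wrapping token.
	secret, err := client.Logical().Unwrap("")
	if err != nil {
		panic(err)
	}
	fmt.Println(secret.Data)
}
```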
diff --git a/changelog/20163.txt b/changelog/20163.txt new file mode 100644 index 000000000000..0b845fbae0db --- /dev/null +++ b/changelog/20163.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation +``` diff --git a/changelog/20181.txt b/changelog/20181.txt new file mode 100644 index 000000000000..121c869e4aaf --- /dev/null +++ b/changelog/20181.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. +``` diff --git a/changelog/20201.txt b/changelog/20201.txt new file mode 100644 index 000000000000..d50c9bcb9da8 --- /dev/null +++ b/changelog/20201.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. +``` diff --git a/changelog/20216.txt b/changelog/20216.txt new file mode 100644 index 000000000000..59ee78c889e3 --- /dev/null +++ b/changelog/20216.txt @@ -0,0 +1,3 @@ +```release-note:bug +website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. +``` diff --git a/changelog/20220.txt b/changelog/20220.txt new file mode 100644 index 000000000000..1cf72aa81ceb --- /dev/null +++ b/changelog/20220.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it +``` diff --git a/changelog/20224.txt b/changelog/20224.txt new file mode 100644 index 000000000000..7ec5bf612177 --- /dev/null +++ b/changelog/20224.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. +``` diff --git a/changelog/20234.txt b/changelog/20234.txt new file mode 100644 index 000000000000..1f20bdc5a920 --- /dev/null +++ b/changelog/20234.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Better return OCSP validation errors during login to the caller. +``` diff --git a/changelog/20235.txt b/changelog/20235.txt new file mode 100644 index 000000000000..d1b9f8a6e923 --- /dev/null +++ b/changelog/20235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove use of htmlSafe except when first sanitized +``` diff --git a/changelog/20243.txt b/changelog/20243.txt new file mode 100644 index 000000000000..8d5b04420b97 --- /dev/null +++ b/changelog/20243.txt @@ -0,0 +1,4 @@ +```release-note:improvement +cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. +``` diff --git a/changelog/20247.txt b/changelog/20247.txt new file mode 100644 index 000000000000..91f2f0d23fcd --- /dev/null +++ b/changelog/20247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add new docker-based cluster testing framework to the sdk. +``` diff --git a/changelog/20253.txt b/changelog/20253.txt new file mode 100644 index 000000000000..19edae1bc4f2 --- /dev/null +++ b/changelog/20253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. 
+``` diff --git a/changelog/20257.txt b/changelog/20257.txt new file mode 100644 index 000000000000..c2dba4579126 --- /dev/null +++ b/changelog/20257.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows +``` diff --git a/changelog/20261.txt b/changelog/20261.txt new file mode 100644 index 000000000000..5f4eb977cce1 --- /dev/null +++ b/changelog/20261.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/etcd: Upgrade etcd3 client to v3.5.7 +``` \ No newline at end of file diff --git a/changelog/20263.txt b/changelog/20263.txt new file mode 100644 index 000000000000..8556fe8865b3 --- /dev/null +++ b/changelog/20263.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix OIDC provider logo showing when domain doesn't match +``` diff --git a/changelog/20265.txt b/changelog/20265.txt new file mode 100644 index 000000000000..8e27875f627f --- /dev/null +++ b/changelog/20265.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. +``` \ No newline at end of file diff --git a/changelog/20276.txt b/changelog/20276.txt new file mode 100644 index 000000000000..71f288ab9a0d --- /dev/null +++ b/changelog/20276.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Include CA serial number, key UUID on issuers list endpoint. +``` diff --git a/changelog/20285.txt b/changelog/20285.txt new file mode 100644 index 000000000000..2bc2241dfe0b --- /dev/null +++ b/changelog/20285.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" +``` diff --git a/changelog/20294.txt b/changelog/20294.txt new file mode 100644 index 000000000000..92f7c291892b --- /dev/null +++ b/changelog/20294.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20341.txt b/changelog/20341.txt new file mode 100644 index 000000000000..652e5735ea7b --- /dev/null +++ b/changelog/20341.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix patching of leaf_not_after_behavior on issuers. +``` diff --git a/changelog/20354.txt b/changelog/20354.txt new file mode 100644 index 000000000000..abdacb7dac45 --- /dev/null +++ b/changelog/20354.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. +``` diff --git a/changelog/20368.txt b/changelog/20368.txt new file mode 100644 index 000000000000..bca5957d1d29 --- /dev/null +++ b/changelog/20368.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Allow updates of only the custom-metadata for entity alias.
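The `Config.TLSConfig` entry above (20265) adds a read accessor for the TLS settings derived from a client config, useful for inspecting or reusing them. A minimal sketch; the server name is a placeholder:

```go
package main

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	cfg := vault.DefaultConfig()
	if err := cfg.ConfigureTLS(&vault.TLSConfig{TLSServerName: "vault.example.com"}); err != nil {
		panic(err)
	}

	// TLSConfig returns the *tls.Config the client's HTTP transport will use.
	tlsCfg := cfg.TLSConfig()
	fmt.Println("server name:", tlsCfg.ServerName)
}
```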
+``` \ No newline at end of file diff --git a/changelog/20375.txt b/changelog/20375.txt new file mode 100644 index 000000000000..92caf1e57642 --- /dev/null +++ b/changelog/20375.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic on login after a namespace that had an mfa enforcement is deleted +``` \ No newline at end of file diff --git a/changelog/20411.txt b/changelog/20411.txt new file mode 100644 index 000000000000..093509040c02 --- /dev/null +++ b/changelog/20411.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add a `mount_point` field to audit requests and response entries +``` diff --git a/changelog/20418.txt b/changelog/20418.txt new file mode 100644 index 000000000000..596b7e461d23 --- /dev/null +++ b/changelog/20418.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: fixes panic in Vault server command when running in recovery mode +``` \ No newline at end of file diff --git a/changelog/20425.txt b/changelog/20425.txt new file mode 100644 index 000000000000..20869fc19f70 --- /dev/null +++ b/changelog/20425.txt @@ -0,0 +1,3 @@ +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for client certificate credentials +``` diff --git a/changelog/20430.txt b/changelog/20430.txt new file mode 100644 index 000000000000..5ac95f104cdb --- /dev/null +++ b/changelog/20430.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix secret render when path includes %. Resolves #11616. +``` diff --git a/changelog/20431.txt b/changelog/20431.txt new file mode 100644 index 000000000000..a0083d879ecd --- /dev/null +++ b/changelog/20431.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add download button for each secret value in KV v2 +``` diff --git a/changelog/20441.txt b/changelog/20441.txt new file mode 100644 index 000000000000..628784883f8c --- /dev/null +++ b/changelog/20441.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow determining existing issuers and keys on import. +``` diff --git a/changelog/20442.txt b/changelog/20442.txt new file mode 100644 index 000000000000..09636b69b060 --- /dev/null +++ b/changelog/20442.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. +``` diff --git a/changelog/20453.txt b/changelog/20453.txt new file mode 100644 index 000000000000..e605791bc6b5 --- /dev/null +++ b/changelog/20453.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Set default value for `max_page_size` properly +``` diff --git a/changelog/20464.txt b/changelog/20464.txt new file mode 100644 index 000000000000..6b58153fccf6 --- /dev/null +++ b/changelog/20464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path +``` diff --git a/changelog/20477.txt b/changelog/20477.txt new file mode 100644 index 000000000000..e95305a70bec --- /dev/null +++ b/changelog/20477.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: CLI should accept days as a unit of time for ttl-like flags +``` diff --git a/changelog/20481.txt b/changelog/20481.txt new file mode 100644 index 000000000000..c6f27116311b --- /dev/null +++ b/changelog/20481.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by engine type and engine name to the Secret Engine list view.
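`walkSecretsTree` from the 20464 entry above is internal to the Vault CLI, but the underlying pattern is an ordinary recursive LIST. A stand-alone sketch using only public api calls, assuming a KV v1 mount at `secret/` for simplicity:

```go
package main

import (
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

// walk prints every secret path under root, recursing into entries that end
// with "/" (folders) and printing the rest (leaves). root must end with "/".
func walk(client *vault.Client, root string) error {
	resp, err := client.Logical().List(root)
	if err != nil {
		return err
	}
	if resp == nil || resp.Data == nil {
		return nil // nothing listed under this path
	}
	keys, _ := resp.Data["keys"].([]interface{})
	for _, k := range keys {
		name, _ := k.(string)
		if len(name) > 0 && name[len(name)-1] == '/' {
			if err := walk(client, root+name); err != nil {
				return err
			}
		} else {
			fmt.Println(root + name)
		}
	}
	return nil
}

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		panic(err)
	}
	if err := walk(client, "secret/"); err != nil {
		panic(err)
	}
}
```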
+``` diff --git a/changelog/20488.txt b/changelog/20488.txt new file mode 100644 index 000000000000..5ea0f78b3928 --- /dev/null +++ b/changelog/20488.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Improve addPrefixToKVPath helper +``` diff --git a/changelog/20502.txt b/changelog/20502.txt new file mode 100644 index 000000000000..153309ab84ce --- /dev/null +++ b/changelog/20502.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: disable printing flag warning messages for the ssh command +``` diff --git a/changelog/20519.txt b/changelog/20519.txt new file mode 100644 index 000000000000..92f7c291892b --- /dev/null +++ b/changelog/20519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20530.txt b/changelog/20530.txt new file mode 100644 index 000000000000..6f6d04bf17e8 --- /dev/null +++ b/changelog/20530.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. +``` diff --git a/changelog/20536.txt b/changelog/20536.txt new file mode 100644 index 000000000000..62aa93605c38 --- /dev/null +++ b/changelog/20536.txt @@ -0,0 +1,3 @@ +```release-note:feature +**AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. +``` diff --git a/changelog/20548.txt b/changelog/20548.txt new file mode 100644 index 000000000000..fed5d2b4506e --- /dev/null +++ b/changelog/20548.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. +``` diff --git a/changelog/20559.txt b/changelog/20559.txt new file mode 100644 index 000000000000..2ff6422db0db --- /dev/null +++ b/changelog/20559.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation.
+``` diff --git a/changelog/20569.txt b/changelog/20569.txt new file mode 100644 index 000000000000..e10a4643ea7f --- /dev/null +++ b/changelog/20569.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add logic to validate env_template entries in configuration +``` diff --git a/changelog/20590.txt b/changelog/20590.txt new file mode 100644 index 000000000000..c1c7c9e2b526 --- /dev/null +++ b/changelog/20590.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata +``` diff --git a/changelog/20595.txt b/changelog/20595.txt new file mode 100644 index 000000000000..982f41498f13 --- /dev/null +++ b/changelog/20595.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add the ability to decode a generated encoded root token via the REST API +``` diff --git a/changelog/20603.txt b/changelog/20603.txt new file mode 100644 index 000000000000..c3e7e2bbe7db --- /dev/null +++ b/changelog/20603.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue creating mfa login enforcement from method enforcements tab +``` \ No newline at end of file diff --git a/changelog/20609.txt b/changelog/20609.txt new file mode 100644 index 000000000000..fe92833da52d --- /dev/null +++ b/changelog/20609.txt @@ -0,0 +1,4 @@ +```release-note:improvement +command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. +``` \ No newline at end of file diff --git a/changelog/20626.txt b/changelog/20626.txt new file mode 100644 index 000000000000..2a13cee1735d --- /dev/null +++ b/changelog/20626.txt @@ -0,0 +1,4 @@ +```release-note:improvement +activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. +``` diff --git a/changelog/20628.txt b/changelog/20628.txt new file mode 100644 index 000000000000..978814601a30 --- /dev/null +++ b/changelog/20628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: initial implementation of a process runner for injecting secrets as environment variables via Vault Agent +``` \ No newline at end of file diff --git a/changelog/20629.txt b/changelog/20629.txt new file mode 100644 index 000000000000..f5692f7691e0 --- /dev/null +++ b/changelog/20629.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys.
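The 20609 entry above wires pprof dumps to SIGUSR2. The generic mechanism looks roughly like the sketch below; this shows the pattern, not Vault's actual implementation, and the output path is made up:

```go
package main

import (
	"os"
	"os/signal"
	"runtime/pprof"
	"syscall"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGUSR2)

	go func() {
		for range sigCh {
			// Write the current heap profile on each SIGUSR2.
			f, err := os.Create("/tmp/heap.prof") // hypothetical path
			if err != nil {
				continue
			}
			pprof.Lookup("heap").WriteTo(f, 0)
			f.Close()
		}
	}()

	select {} // stand-in for the server's real work
}
```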
+``` \ No newline at end of file diff --git a/changelog/20636.txt b/changelog/20636.txt new file mode 100644 index 000000000000..6e20fcdbdfa0 --- /dev/null +++ b/changelog/20636.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Properly handle nil identity_policies in Secret Data +``` \ No newline at end of file diff --git a/changelog/20642.txt b/changelog/20642.txt new file mode 100644 index 000000000000..8b8bc40a112b --- /dev/null +++ b/changelog/20642.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: add subject key identifier to read key response +``` diff --git a/changelog/20643.txt b/changelog/20643.txt new file mode 100644 index 000000000000..340ec5b547ff --- /dev/null +++ b/changelog/20643.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: report intermediate error messages during request forwarding +``` diff --git a/changelog/20652.txt b/changelog/20652.txt new file mode 100644 index 000000000000..c41e750c0472 --- /dev/null +++ b/changelog/20652.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Respond to writes with updated key policy, cache configuration. +``` diff --git a/changelog/20654.txt b/changelog/20654.txt new file mode 100644 index 000000000000..91e567477b5b --- /dev/null +++ b/changelog/20654.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Warning when issuing leaf certificates from CSRs with basic constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. +``` diff --git a/changelog/20664.txt b/changelog/20664.txt new file mode 100644 index 000000000000..6f2b4abe61ae --- /dev/null +++ b/changelog/20664.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. +``` diff --git a/changelog/20668.txt b/changelog/20668.txt new file mode 100644 index 000000000000..f3f840c47d1d --- /dev/null +++ b/changelog/20668.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the commands 'vault transform import' and 'vault transform import-version'. +``` diff --git a/changelog/20680.txt b/changelog/20680.txt new file mode 100644 index 000000000000..ff80ac466092 --- /dev/null +++ b/changelog/20680.txt @@ -0,0 +1,6 @@ +```release-note:improvement +core (enterprise): support reloading configuration for automated reporting via SIGHUP +``` +```release-note:improvement +core (enterprise): license updates trigger a reload of reporting and the activity log +``` \ No newline at end of file diff --git a/changelog/20694.txt b/changelog/20694.txt new file mode 100644 index 000000000000..07f790a666dd --- /dev/null +++ b/changelog/20694.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period +``` diff --git a/changelog/20697.txt b/changelog/20697.txt new file mode 100644 index 000000000000..be80443714da --- /dev/null +++ b/changelog/20697.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update detail views that render ttl durations to display full unit instead of letter (i.e.
diff --git a/changelog/20701.txt b/changelog/20701.txt new file mode 100644 index 000000000000..24942d5d066c --- /dev/null +++ b/changelog/20701.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix race during runUnifiedTransfer when deciding to skip re-running a test within a short window. +``` diff --git a/changelog/20725.txt b/changelog/20725.txt new file mode 100644 index 000000000000..04399cca8f63 --- /dev/null +++ b/changelog/20725.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20731.txt b/changelog/20731.txt new file mode 100644 index 000000000000..1896c199add9 --- /dev/null +++ b/changelog/20731.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes auto_rotate_period ttl input for transit keys +``` diff --git a/changelog/20736.txt b/changelog/20736.txt new file mode 100644 index 000000000000..1c4c3d4d256e --- /dev/null +++ b/changelog/20736.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and versions across clusters. +``` diff --git a/changelog/20741.txt b/changelog/20741.txt new file mode 100644 index 000000000000..8034e456e0c6 --- /dev/null +++ b/changelog/20741.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add integration tests for agent running in process supervisor mode +``` diff --git a/changelog/20742.txt b/changelog/20742.txt new file mode 100644 index 000000000000..d91237e1d391 --- /dev/null +++ b/changelog/20742.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 +``` diff --git a/changelog/20745.txt b/changelog/20745.txt new file mode 100644 index 000000000000..57a4391ba22d --- /dev/null +++ b/changelog/20745.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/centrify: Updated plugin from v0.14.0 to v0.15.1 +``` diff --git a/changelog/20747.txt b/changelog/20747.txt new file mode 100644 index 000000000000..4c600d203fb3 --- /dev/null +++ b/changelog/20747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by auth type and auth name to the Authentication Method list view. +``` diff --git a/changelog/20750.txt b/changelog/20750.txt new file mode 100644 index 000000000000..75a3e1da364e --- /dev/null +++ b/changelog/20750.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20751.txt b/changelog/20751.txt new file mode 100644 index 000000000000..9b78b3dfe5a2 --- /dev/null +++ b/changelog/20751.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 +``` diff --git a/changelog/20752.txt b/changelog/20752.txt new file mode 100644 index 000000000000..667bc37f37b3 --- /dev/null +++ b/changelog/20752.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients.
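A minimal sketch of the recommended EAB setup for the ACME feature above; the `pki/config/acme` and `pki/acme/new-eab` paths and the `eab_policy` value are assumptions based on the release documentation, and the domain is a placeholder:

```
# Enable ACME on an existing "pki" mount and require External Account Bindings.
vault write pki/config/acme enabled=true eab_policy=always-required
vault write -f pki/acme/new-eab   # returns an EAB key ID and HMAC key
# A standard ACME client can then authenticate with those EAB credentials.
certbot certonly --standalone \
  --server "$VAULT_ADDR/v1/pki/acme/directory" \
  --eab-kid "$EAB_ID" --eab-hmac-key "$EAB_KEY" -d www.example.com
```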
+``` diff --git a/changelog/20758.txt b/changelog/20758.txt new file mode 100644 index 000000000000..7eed0b075191 --- /dev/null +++ b/changelog/20758.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20763.txt b/changelog/20763.txt new file mode 100644 index 000000000000..311dcb0a62f4 --- /dev/null +++ b/changelog/20763.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/redis: Upgrade plugin dependencies +``` diff --git a/changelog/20764.txt b/changelog/20764.txt new file mode 100644 index 000000000000..adc14e07f152 --- /dev/null +++ b/changelog/20764.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Updated plugin from v0.9.0 to v0.9.2 +``` diff --git a/changelog/20767.txt b/changelog/20767.txt new file mode 100644 index 000000000000..b6d853a63903 --- /dev/null +++ b/changelog/20767.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Upgrade plugin dependencies +``` diff --git a/changelog/20771.txt b/changelog/20771.txt new file mode 100644 index 000000000000..5cc1ee2d472c --- /dev/null +++ b/changelog/20771.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies +``` diff --git a/changelog/20777.txt b/changelog/20777.txt new file mode 100644 index 000000000000..ec3c9e42b58b --- /dev/null +++ b/changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Updated plugin from v0.15.0 to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20783.txt b/changelog/20783.txt new file mode 100644 index 000000000000..372d36cb7b1e --- /dev/null +++ b/changelog/20783.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. +``` \ No newline at end of file diff --git a/changelog/20784.txt b/changelog/20784.txt new file mode 100644 index 000000000000..b24a857a2002 --- /dev/null +++ b/changelog/20784.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies +``` diff --git a/changelog/20787.txt b/changelog/20787.txt new file mode 100644 index 000000000000..a69b90d7de82 --- /dev/null +++ b/changelog/20787.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20790.txt b/changelog/20790.txt new file mode 100644 index 000000000000..1e185e3fc317 --- /dev/null +++ b/changelog/20790.txt @@ -0,0 +1,3 @@ +```release-note:feature +**UI LDAP secrets engine**: Add LDAP secrets engine to the UI.
+``` diff --git a/changelog/20799.txt b/changelog/20799.txt new file mode 100644 index 000000000000..2e17ff921d7b --- /dev/null +++ b/changelog/20799.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20802.txt b/changelog/20802.txt new file mode 100644 index 000000000000..de8e1b90dc06 --- /dev/null +++ b/changelog/20802.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.5.0 +``` +```release-note:change +auth/kubernetes: Update plugin to v0.16.0 +``` diff --git a/changelog/20807.txt b/changelog/20807.txt new file mode 100644 index 000000000000..3a3c1f4cdad3 --- /dev/null +++ b/changelog/20807.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 +``` \ No newline at end of file diff --git a/changelog/20816.txt b/changelog/20816.txt new file mode 100644 index 000000000000..aae4b59c48dc --- /dev/null +++ b/changelog/20816.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Updated plugin from v0.13.0 to v0.15.0 +``` diff --git a/changelog/20818.txt b/changelog/20818.txt new file mode 100644 index 000000000000..885ee92ce8aa --- /dev/null +++ b/changelog/20818.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20825.txt b/changelog/20825.txt new file mode 100644 index 000000000000..da993696b048 --- /dev/null +++ b/changelog/20825.txt @@ -0,0 +1,3 @@ +```release-note:change +storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. +``` \ No newline at end of file diff --git a/changelog/20826.txt b/changelog/20826.txt new file mode 100644 index 000000000000..8a693d9fc94c --- /dev/null +++ b/changelog/20826.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. +``` \ No newline at end of file diff --git a/changelog/20834.txt b/changelog/20834.txt new file mode 100644 index 000000000000..f17f1d326b58 --- /dev/null +++ b/changelog/20834.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. +``` \ No newline at end of file diff --git a/changelog/20841.txt b/changelog/20841.txt new file mode 100644 index 000000000000..26a8d6316312 --- /dev/null +++ b/changelog/20841.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix generated types for duration strings +``` diff --git a/changelog/20864.txt b/changelog/20864.txt new file mode 100644 index 000000000000..7193c6b81fb9 --- /dev/null +++ b/changelog/20864.txt @@ -0,0 +1,5 @@ +```release-note:bug +secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
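For the transit HMAC export fix above (20864), a sketch of the remediation path the note describes; the key name and version are placeholders:

```
# Retrieve the previously-used (incorrect) HMAC key material via plaintext
# export so it can be imported as a new key version for verification.
vault read transit/export/hmac-key/my-key/1
```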
+``` diff --git a/changelog/20879.txt b/changelog/20879.txt new file mode 100644 index 000000000000..12bb1e4a4448 --- /dev/null +++ b/changelog/20879.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs +``` diff --git a/changelog/20881.txt b/changelog/20881.txt new file mode 100644 index 000000000000..fd3e6d5fa44f --- /dev/null +++ b/changelog/20881.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec +``` diff --git a/changelog/20882.txt b/changelog/20882.txt new file mode 100644 index 000000000000..3694468641da --- /dev/null +++ b/changelog/20882.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 +``` +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication +``` \ No newline at end of file diff --git a/changelog/20891.txt b/changelog/20891.txt new file mode 100644 index 000000000000..3057ec56f40d --- /dev/null +++ b/changelog/20891.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/consul: Improve error message when ACL bootstrapping fails. +``` + diff --git a/changelog/20897.txt b/changelog/20897.txt new file mode 100644 index 000000000000..01be5ac718ca --- /dev/null +++ b/changelog/20897.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue unsealing cluster for seal types other than shamir +``` \ No newline at end of file diff --git a/changelog/20907.txt b/changelog/20907.txt new file mode 100644 index 000000000000..3f13a659de2b --- /dev/null +++ b/changelog/20907.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes key_bits and signature_bits reverting to default values when editing a pki role +``` \ No newline at end of file diff --git a/changelog/20933.txt b/changelog/20933.txt new file mode 100644 index 000000000000..580475e2b5d5 --- /dev/null +++ b/changelog/20933.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: remove unnecessary *BarrierView field from backendEntry struct +``` \ No newline at end of file diff --git a/changelog/20934.txt b/changelog/20934.txt new file mode 100644 index 000000000000..72c22574d615 --- /dev/null +++ b/changelog/20934.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix bug with 'cache' stanza validation +``` diff --git a/changelog/20943.txt b/changelog/20943.txt new file mode 100644 index 000000000000..7cf186d18420 --- /dev/null +++ b/changelog/20943.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. +``` diff --git a/changelog/20964.txt b/changelog/20964.txt new file mode 100644 index 000000000000..8bd9563c1623 --- /dev/null +++ b/changelog/20964.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fixes duplicate groups creation with the same name but unique IDs. +``` \ No newline at end of file diff --git a/changelog/20965.txt b/changelog/20965.txt new file mode 100644 index 000000000000..43c1d97cc803 --- /dev/null +++ b/changelog/20965.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. 
+``` \ No newline at end of file diff --git a/changelog/20966.txt b/changelog/20966.txt new file mode 100644 index 000000000000..f9a3b8b26f29 --- /dev/null +++ b/changelog/20966.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 +``` \ No newline at end of file diff --git a/changelog/20981.txt b/changelog/20981.txt new file mode 100644 index 000000000000..26a5304c5d3d --- /dev/null +++ b/changelog/20981.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days +``` diff --git a/changelog/20986.txt b/changelog/20986.txt new file mode 100644 index 000000000000..c0615f9a3933 --- /dev/null +++ b/changelog/20986.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. +``` \ No newline at end of file diff --git a/changelog/20995.txt b/changelog/20995.txt new file mode 100644 index 000000000000..76653d4d5433 --- /dev/null +++ b/changelog/20995.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add a new sys API method for replication status +``` diff --git a/changelog/21010.txt b/changelog/21010.txt new file mode 100644 index 000000000000..bcd218794df9 --- /dev/null +++ b/changelog/21010.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. +``` \ No newline at end of file diff --git a/changelog/21057.txt b/changelog/21057.txt new file mode 100644 index 000000000000..7ca81cd37632 --- /dev/null +++ b/changelog/21057.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Dashboard UI**: Dashboard is now available in the UI as the new landing page. +``` \ No newline at end of file diff --git a/changelog/21081.txt b/changelog/21081.txt new file mode 100644 index 000000000000..ecf1713eb67a --- /dev/null +++ b/changelog/21081.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates +``` diff --git a/changelog/21100.txt b/changelog/21100.txt new file mode 100644 index 000000000000..50024c9c2d3a --- /dev/null +++ b/changelog/21100.txt @@ -0,0 +1,4 @@ +```release-note:bug +replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. +``` diff --git a/changelog/21110.txt b/changelog/21110.txt new file mode 100644 index 000000000000..2471fac770de --- /dev/null +++ b/changelog/21110.txt @@ -0,0 +1,4 @@ +```release-note:bug +core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. +``` \ No newline at end of file diff --git a/changelog/21165.txt b/changelog/21165.txt new file mode 100644 index 000000000000..dd6b6d05de0d --- /dev/null +++ b/changelog/21165.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft/autopilot: Add dr-token flag for raft autopilot cli commands +``` diff --git a/changelog/21209.txt b/changelog/21209.txt new file mode 100644 index 000000000000..31ddf413c070 --- /dev/null +++ b/changelog/21209.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id.
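A sketch of the templated-AIA scenario from the 21209 entry above; `enable_templating` and the `{{cluster_aia_path}}`/`{{issuer_id}}` placeholders follow the PKI docs, but treat the exact field values as assumptions:

```
# With templating on, AIA URLs can reference an issuer that does not
# exist yet, so generating a root without concrete AIA data is allowed.
vault write pki/config/urls enable_templating=true \
  issuing_certificates="{{cluster_aia_path}}/issuer/{{issuer_id}}/der"
vault write pki/root/generate/internal common_name="Example Root" ttl=87600h
```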
+``` diff --git a/changelog/21215.txt b/changelog/21215.txt new file mode 100644 index 000000000000..ec4a63af9ebd --- /dev/null +++ b/changelog/21215.txt @@ -0,0 +1,4 @@ +```release-note:change +core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. +``` \ No newline at end of file diff --git a/changelog/21223.txt b/changelog/21223.txt new file mode 100644 index 000000000000..96605f0a4a3f --- /dev/null +++ b/changelog/21223.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. +``` diff --git a/changelog/21249.txt b/changelog/21249.txt new file mode 100644 index 000000000000..a088677ad8af --- /dev/null +++ b/changelog/21249.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic in sealed nodes using raft storage trying to emit raft metrics +``` diff --git a/changelog/21260.txt b/changelog/21260.txt new file mode 100644 index 000000000000..b291ec7b4bd5 --- /dev/null +++ b/changelog/21260.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. +``` diff --git a/changelog/21282.txt b/changelog/21282.txt new file mode 100644 index 000000000000..03f22e4856b9 --- /dev/null +++ b/changelog/21282.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/ldap: Normalize HTTP response codes when invalid credentials are provided +``` diff --git a/changelog/21297.txt b/changelog/21297.txt new file mode 100644 index 000000000000..9f98fd3e0d48 --- /dev/null +++ b/changelog/21297.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. +``` diff --git a/changelog/21316.txt b/changelog/21316.txt new file mode 100644 index 000000000000..5573c7e4d319 --- /dev/null +++ b/changelog/21316.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. +``` diff --git a/changelog/21342.txt b/changelog/21342.txt new file mode 100644 index 000000000000..c1d8cd018bb3 --- /dev/null +++ b/changelog/21342.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Don't exit just because we think there's a potential deadlock. +``` diff --git a/changelog/21357.txt b/changelog/21357.txt new file mode 100644 index 000000000000..3b3bffddfc29 --- /dev/null +++ b/changelog/21357.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with some durations not being properly parsed to include days.
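An illustration of the day-suffix duration fix above (21357); whether a given field accepts the `d` suffix depends on the endpoint, so treat this as an assumption:

```
# "30d" style durations now parse consistently instead of erroring out.
vault token create -policy=default -ttl=30d
```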
+``` \ No newline at end of file diff --git a/changelog/21375.txt b/changelog/21375.txt new file mode 100644 index 000000000000..fc427b0cd3a6 --- /dev/null +++ b/changelog/21375.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component +``` \ No newline at end of file diff --git a/changelog/21424.txt b/changelog/21424.txt new file mode 100644 index 000000000000..229e97e4d3fc --- /dev/null +++ b/changelog/21424.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: add support for cloning a Client's tls.Config. +``` diff --git a/changelog/21449.txt b/changelog/21449.txt new file mode 100644 index 000000000000..7711909a4c84 --- /dev/null +++ b/changelog/21449.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix response schema for PKI Issue requests +``` diff --git a/changelog/21458.txt b/changelog/21458.txt new file mode 100644 index 000000000000..352b8a04b1e0 --- /dev/null +++ b/changelog/21458.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix schema definitions for PKI EAB APIs +``` diff --git a/changelog/21460.txt b/changelog/21460.txt new file mode 100644 index 000000000000..79cd7bc2293b --- /dev/null +++ b/changelog/21460.txt @@ -0,0 +1,3 @@ +```release-note:feature +**raft-wal**: Add experimental support for raft-wal, a new backend engine for integrated storage. +``` diff --git a/changelog/21466.txt b/changelog/21466.txt new file mode 100644 index 000000000000..94d0af99a77c --- /dev/null +++ b/changelog/21466.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix "generate-config" command documentation URL +``` diff --git a/changelog/21470.txt b/changelog/21470.txt new file mode 100644 index 000000000000..9f047a9d6758 --- /dev/null +++ b/changelog/21470.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. +``` diff --git a/changelog/21495.txt b/changelog/21495.txt new file mode 100644 index 000000000000..645c947c61a6 --- /dev/null +++ b/changelog/21495.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Updating operator step-down docs to include info about possible failed requests during leader step down. +``` \ No newline at end of file diff --git a/changelog/21503.txt b/changelog/21503.txt new file mode 100644 index 000000000000..a61b22ba8a7d --- /dev/null +++ b/changelog/21503.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Surface DOMException error when browser settings prevent localStorage. +``` diff --git a/changelog/21520.txt b/changelog/21520.txt new file mode 100644 index 000000000000..38ab73523ba4 --- /dev/null +++ b/changelog/21520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Replace inline confirm alert inside a popup-menu dropdown with confirm alert modal +``` diff --git a/changelog/21531.txt b/changelog/21531.txt new file mode 100644 index 000000000000..dff421a83a97 --- /dev/null +++ b/changelog/21531.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes styling of private key input when configuring an SSH key +``` \ No newline at end of file diff --git a/changelog/21546.txt b/changelog/21546.txt new file mode 100644 index 000000000000..8eaf53ed3929 --- /dev/null +++ b/changelog/21546.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. 
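A sketch of why the `-field` fix above (21546) matters for scripting; the transit key name is a placeholder:

```
# With -field, only the requested value is printed (no "Success!" banner),
# so the output is safe to capture directly in a variable.
CIPHERTEXT=$(vault write -field=ciphertext transit/encrypt/my-key \
  plaintext="$(base64 <<< 'hello')")
```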
+``` diff --git a/changelog/21562.txt b/changelog/21562.txt new file mode 100644 index 000000000000..c41d727da36b --- /dev/null +++ b/changelog/21562.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces +``` \ No newline at end of file diff --git a/changelog/21563.txt b/changelog/21563.txt new file mode 100644 index 000000000000..7426ed24fa56 --- /dev/null +++ b/changelog/21563.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Better mount points for kv-v1 and kv-v2 in openapi.json +``` diff --git a/changelog/21578.txt b/changelog/21578.txt new file mode 100644 index 000000000000..30d4fac8cc08 --- /dev/null +++ b/changelog/21578.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add API Explorer link to Sidebar, under Tools. +``` diff --git a/changelog/21582.txt b/changelog/21582.txt new file mode 100644 index 000000000000..6a9d9a4276c1 --- /dev/null +++ b/changelog/21582.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes login screen display issue with Safari browser +``` \ No newline at end of file diff --git a/changelog/21583.txt b/changelog/21583.txt new file mode 100644 index 000000000000..f73feb96b7e2 --- /dev/null +++ b/changelog/21583.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add example modal to policy form +``` \ No newline at end of file diff --git a/changelog/21623.txt b/changelog/21623.txt new file mode 100644 index 000000000000..7fc272d13b5d --- /dev/null +++ b/changelog/21623.txt @@ -0,0 +1,3 @@ +```release-note:improvement +eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) +``` \ No newline at end of file diff --git a/changelog/21628.txt b/changelog/21628.txt new file mode 100644 index 000000000000..888108b27667 --- /dev/null +++ b/changelog/21628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add core audit events experiment +``` \ No newline at end of file diff --git a/changelog/21631.txt b/changelog/21631.txt new file mode 100644 index 000000000000..ffdb4bba4673 --- /dev/null +++ b/changelog/21631.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
+``` \ No newline at end of file diff --git a/changelog/21635.txt b/changelog/21635.txt new file mode 100644 index 000000000000..6d19e8da9688 --- /dev/null +++ b/changelog/21635.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Adds missing values to details view after generating PKI certificate +``` \ No newline at end of file diff --git a/changelog/21640.txt b/changelog/21640.txt new file mode 100644 index 000000000000..458561ae1aae --- /dev/null +++ b/changelog/21640.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix race when updating a mount's route entry tainted status and incoming requests +``` diff --git a/changelog/21641.txt b/changelog/21641.txt new file mode 100644 index 000000000000..e615445a3e6f --- /dev/null +++ b/changelog/21641.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth: added support for LDAP auto-auth +``` diff --git a/changelog/21642.txt b/changelog/21642.txt new file mode 100644 index 000000000000..84af5b694f10 --- /dev/null +++ b/changelog/21642.txt @@ -0,0 +1,3 @@ +```release-note:bug +serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary +``` \ No newline at end of file diff --git a/changelog/21681.txt b/changelog/21681.txt new file mode 100644 index 000000000000..8d684423a440 --- /dev/null +++ b/changelog/21681.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. +``` diff --git a/changelog/21702.txt b/changelog/21702.txt new file mode 100644 index 000000000000..5475a486df0e --- /dev/null +++ b/changelog/21702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. +``` diff --git a/changelog/21723.txt b/changelog/21723.txt new file mode 100644 index 000000000000..cefe5e1c5ad3 --- /dev/null +++ b/changelog/21723.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: List operations are now given first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path +``` diff --git a/changelog/21739.txt b/changelog/21739.txt new file mode 100644 index 000000000000..7b559d97cd4c --- /dev/null +++ b/changelog/21739.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. +``` diff --git a/changelog/21742.txt b/changelog/21742.txt new file mode 100644 index 000000000000..713ce3c885d4 --- /dev/null +++ b/changelog/21742.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. 
+``` diff --git a/changelog/21743.txt b/changelog/21743.txt new file mode 100644 index 000000000000..1bb8279543ba --- /dev/null +++ b/changelog/21743.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: Adds replication state helper for backends to check for read-only storage +``` diff --git a/changelog/21760.txt b/changelog/21760.txt new file mode 100644 index 000000000000..2285cda4464b --- /dev/null +++ b/changelog/21760.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions +``` diff --git a/changelog/21767.txt b/changelog/21767.txt new file mode 100644 index 000000000000..2092442e462f --- /dev/null +++ b/changelog/21767.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed secrets, leases, and policies filter dropping focus after a single character +``` diff --git a/changelog/21771.txt b/changelog/21771.txt new file mode 100644 index 000000000000..55252dcb3666 --- /dev/null +++ b/changelog/21771.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix styling for username input when editing a user +``` \ No newline at end of file diff --git a/changelog/21772.txt b/changelog/21772.txt new file mode 100644 index 000000000000..2ebdbf39565c --- /dev/null +++ b/changelog/21772.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths +``` diff --git a/changelog/21800.txt b/changelog/21800.txt new file mode 100644 index 000000000000..bfe8f6721d9c --- /dev/null +++ b/changelog/21800.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. +``` \ No newline at end of file diff --git a/changelog/21830.txt b/changelog/21830.txt new file mode 100644 index 000000000000..6e1972d447be --- /dev/null +++ b/changelog/21830.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values +``` \ No newline at end of file diff --git a/changelog/21854.txt b/changelog/21854.txt new file mode 100644 index 000000000000..2ab5acde88e9 --- /dev/null +++ b/changelog/21854.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: use Go stdlib functionalities instead of explicit byte/string conversions +``` diff --git a/changelog/21870.txt b/changelog/21870.txt new file mode 100644 index 000000000000..3cb9856ffca9 --- /dev/null +++ b/changelog/21870.txt @@ -0,0 +1,6 @@ +```release-note:bug +secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. +``` +```release-note:bug +secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. 
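A sketch touching the `acme_account_safety_buffer` fix above (21870); the field names mirror the tidy API, but treat the exact combination as an assumption:

```
# Persist ACME tidy settings; per the fix, the safety buffer is now
# preserved when written to config/auto-tidy.
vault write pki/config/auto-tidy enabled=true tidy_acme=true \
  acme_account_safety_buffer=720h
```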
+``` diff --git a/changelog/21871.txt b/changelog/21871.txt new file mode 100644 index 000000000000..8333603efc53 --- /dev/null +++ b/changelog/21871.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update unseal and DR operation token flow components +``` \ No newline at end of file diff --git a/changelog/21925.txt b/changelog/21925.txt new file mode 100644 index 000000000000..ca89ff75a76e --- /dev/null +++ b/changelog/21925.txt @@ -0,0 +1,3 @@ +```release-note:improvement +kmip (enterprise): Add namespace lock and unlock support +``` diff --git a/changelog/21926.txt b/changelog/21926.txt new file mode 100644 index 000000000000..a6020204b043 --- /dev/null +++ b/changelog/21926.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. ed25519) +``` \ No newline at end of file diff --git a/changelog/21934.txt b/changelog/21934.txt new file mode 100644 index 000000000000..d4ce0b08a630 --- /dev/null +++ b/changelog/21934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix response definitions for list operations +``` diff --git a/changelog/21942.txt b/changelog/21942.txt new file mode 100644 index 000000000000..4e2828efb43c --- /dev/null +++ b/changelog/21942.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Fix generation of correct fields in some rarer cases +``` diff --git a/changelog/21951.txt b/changelog/21951.txt new file mode 100644 index 000000000000..d53c0f18ea45 --- /dev/null +++ b/changelog/21951.txt @@ -0,0 +1,4 @@ +```release-note:bug +awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. +``` diff --git a/changelog/21960.txt b/changelog/21960.txt new file mode 100644 index 000000000000..cab19fab96f3 --- /dev/null +++ b/changelog/21960.txt @@ -0,0 +1,3 @@ +```release-note:improvement +aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional sts endpoints based on Authorization header when using IAM-based authentication. +``` diff --git a/changelog/21968.txt b/changelog/21968.txt new file mode 100644 index 000000000000..3ba650d585c4 --- /dev/null +++ b/changelog/21968.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix styling for viewing certificate in kubernetes configuration +``` \ No newline at end of file diff --git a/changelog/22039.txt b/changelog/22039.txt new file mode 100644 index 000000000000..09c3e6ad8039 --- /dev/null +++ b/changelog/22039.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Display minus icon for empty MaskedInput value. Show MaskedInput for KV secrets without values +``` \ No newline at end of file diff --git a/changelog/22040.txt b/changelog/22040.txt new file mode 100644 index 000000000000..e96a428b95af --- /dev/null +++ b/changelog/22040.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
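For the raft autopilot change above (22040), a sketch of setting the now-floored threshold; per the note, values below one minute are capped:

```
# 1m is the new minimum for this threshold.
vault operator raft autopilot set-config \
  -dead-server-last-contact-threshold=1m
```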
+``` diff --git a/changelog/22122.txt b/changelog/22122.txt new file mode 100644 index 000000000000..a7e723090caf --- /dev/null +++ b/changelog/22122.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: upgrade Ember to 4.12 +``` diff --git a/changelog/22126.txt b/changelog/22126.txt new file mode 100644 index 000000000000..e6633ec3a050 --- /dev/null +++ b/changelog/22126.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns +``` \ No newline at end of file diff --git a/changelog/22137.txt b/changelog/22137.txt new file mode 100644 index 000000000000..6f5a3bee945a --- /dev/null +++ b/changelog/22137.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have an expiration manager. +``` \ No newline at end of file diff --git a/changelog/22153.txt b/changelog/22153.txt new file mode 100644 index 000000000000..4c51718dc783 --- /dev/null +++ b/changelog/22153.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: correct doctype for index.html +``` diff --git a/changelog/22160.txt b/changelog/22160.txt new file mode 100644 index 000000000000..19f590bfc539 --- /dev/null +++ b/changelog/22160.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: display CertificateCard instead of MaskedInput for certificates in PKI +``` \ No newline at end of file diff --git a/changelog/22185.txt b/changelog/22185.txt new file mode 100644 index 000000000000..1fae58d96ede --- /dev/null +++ b/changelog/22185.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/ldap: introduce cap/ldap.Client for LDAP authentication +auth/ldap: deprecates `connection_timeout` in favor of `request_timeout` for timeouts +sdk/ldaputil: deprecates Client in favor of cap/ldap.Client +``` diff --git a/changelog/22191.txt b/changelog/22191.txt new file mode 100644 index 000000000000..9fa7c85d5910 --- /dev/null +++ b/changelog/22191.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki +``` \ No newline at end of file diff --git a/changelog/22233.txt b/changelog/22233.txt new file mode 100644 index 000000000000..f6b1a5c33a70 --- /dev/null +++ b/changelog/22233.txt @@ -0,0 +1,3 @@ +```release-note:improvement +docs: Clarify when an entity is created +``` diff --git a/changelog/22235.txt b/changelog/22235.txt new file mode 100644 index 000000000000..3d62e70cb11b --- /dev/null +++ b/changelog/22235.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. +``` diff --git a/changelog/22249.txt b/changelog/22249.txt new file mode 100644 index 000000000000..d470b9743ff5 --- /dev/null +++ b/changelog/22249.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap +``` \ No newline at end of file diff --git a/changelog/22253.txt b/changelog/22253.txt new file mode 100644 index 000000000000..c3a9ab039c4e --- /dev/null +++ b/changelog/22253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Improves error logging for static role rotations by including the database and role names.
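A sketch of the `request_timeout` deprecation noted in the 22185 entry above; the unit (seconds) and the server address are assumptions:

```
# Prefer request_timeout over the deprecated connection_timeout.
vault write auth/ldap/config url="ldaps://ldap.example.com" request_timeout=90
```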
+``` \ No newline at end of file diff --git a/changelog/22264.txt b/changelog/22264.txt new file mode 100644 index 000000000000..5ee53785d3a6 --- /dev/null +++ b/changelog/22264.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). +``` diff --git a/changelog/22277.txt b/changelog/22277.txt new file mode 100644 index 000000000000..0d0dbf2dcf1d --- /dev/null +++ b/changelog/22277.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.0 +``` diff --git a/changelog/22304.txt b/changelog/22304.txt new file mode 100644 index 000000000000..eeec038ae9ed --- /dev/null +++ b/changelog/22304.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy +``` \ No newline at end of file diff --git a/changelog/22322.txt b/changelog/22322.txt new file mode 100644 index 000000000000..8df620c385dc --- /dev/null +++ b/changelog/22322.txt @@ -0,0 +1,4 @@ +```release-note:bug +agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. +``` + diff --git a/changelog/22330.txt b/changelog/22330.txt new file mode 100644 index 000000000000..427fe398a707 --- /dev/null +++ b/changelog/22330.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. +``` \ No newline at end of file diff --git a/changelog/22333.txt b/changelog/22333.txt new file mode 100644 index 000000000000..67debb7421f8 --- /dev/null +++ b/changelog/22333.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System copy button component making copy buttons accessible +``` \ No newline at end of file diff --git a/changelog/22355.txt b/changelog/22355.txt new file mode 100644 index 000000000000..d748796c1d92 --- /dev/null +++ b/changelog/22355.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix bug where background thread to update locked user entries runs on DR secondaries. +``` \ No newline at end of file diff --git a/changelog/22362.txt b/changelog/22362.txt new file mode 100644 index 000000000000..0de5440efe39 --- /dev/null +++ b/changelog/22362.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix readonly errors that could occur while loading mounts/auths during unseal +``` diff --git a/changelog/22363.txt b/changelog/22363.txt new file mode 100644 index 000000000000..faa5a24462a0 --- /dev/null +++ b/changelog/22363.txt @@ -0,0 +1,3 @@ +```release-note:bug +license: Add autoloaded license path to the cache exempt list. This is to ensure license changes on the active node are observed on the perf standby node. +``` diff --git a/changelog/22374.txt b/changelog/22374.txt new file mode 100644 index 000000000000..2f744c5c3386 --- /dev/null +++ b/changelog/22374.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup.
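A sketch of the `chroot_namespace` listener option from the 22304 entry above, with the namespace name and port as placeholders:

```
# Requests arriving on this listener are confined to the team-a/ hierarchy.
cat >> vault.hcl <<'EOF'
listener "tcp" {
  address          = "0.0.0.0:8300"
  chroot_namespace = "team-a/"
}
EOF
```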
+``` diff --git a/changelog/22390.txt b/changelog/22390.txt new file mode 100644 index 000000000000..449a8a2d2278 --- /dev/null +++ b/changelog/22390.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes text readability issue in revoke token confirmation dialog +``` \ No newline at end of file diff --git a/changelog/22394.txt b/changelog/22394.txt new file mode 100644 index 000000000000..4f5a2b9c89ff --- /dev/null +++ b/changelog/22394.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults +``` \ No newline at end of file diff --git a/changelog/22396.txt b/changelog/22396.txt new file mode 100644 index 000000000000..d05cbb7acaa4 --- /dev/null +++ b/changelog/22396.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Fix link formatting in Vault lambda extension docs +``` diff --git a/changelog/22400.txt b/changelog/22400.txt new file mode 100644 index 000000000000..54e8cd299c9e --- /dev/null +++ b/changelog/22400.txt @@ -0,0 +1,3 @@ +```release-note:change +telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback` metrics by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes Vault to emit the metrics with mount points in their names. +``` diff --git a/changelog/22410.txt b/changelog/22410.txt new file mode 100644 index 000000000000..25fcc3335f5d --- /dev/null +++ b/changelog/22410.txt @@ -0,0 +1,3 @@ +```release-note:bug +api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. +``` \ No newline at end of file diff --git a/changelog/22445.txt b/changelog/22445.txt new file mode 100644 index 000000000000..11b310d6898c --- /dev/null +++ b/changelog/22445.txt @@ -0,0 +1,3 @@ +```release-note:feature +**GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. +``` \ No newline at end of file diff --git a/changelog/22452.txt b/changelog/22452.txt new file mode 100644 index 000000000000..88657b284d3d --- /dev/null +++ b/changelog/22452.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add a field that allows rate-limit namespace quotas to be inherited by child namespaces. +``` diff --git a/changelog/22458.txt b/changelog/22458.txt new file mode 100644 index 000000000000..6ce09295099a --- /dev/null +++ b/changelog/22458.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes model defaults overwriting input value when user tries to clear form input +``` \ No newline at end of file diff --git a/changelog/22468.txt b/changelog/22468.txt new file mode 100644 index 000000000000..538da1482497 --- /dev/null +++ b/changelog/22468.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: vault.raft_storage.bolt.write.time should be a counter, not a summary +``` diff --git a/changelog/22471.txt b/changelog/22471.txt new file mode 100644 index 000000000000..67b110cd67d8 --- /dev/null +++ b/changelog/22471.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: enables create and update KV secret workflow when control group present +``` \ No newline at end of file diff --git a/changelog/22474.txt b/changelog/22474.txt new file mode 100644 index 000000000000..9f18050a418d --- /dev/null +++ b/changelog/22474.txt @@ -0,0 +1,3 @@ +```release-note:feature +Add subscribe capability and subscribe_event_types to policies for events.
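A sketch of the new events policy feature above (22474); the policy layout is an assumption pieced together from the entry's own field names:

```
# Grant subscription to KV v2 write events via the subscribe capability.
vault policy write event-subscriber - <<'EOF'
path "sys/events/subscribe/kv-v2/data-write" {
  capabilities          = ["subscribe"]
  subscribe_event_types = ["kv-v2/data-write"]
}
EOF
```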
+``` diff --git a/changelog/22484.txt b/changelog/22484.txt new file mode 100644 index 000000000000..6992e7c2fa56 --- /dev/null +++ b/changelog/22484.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Database Static Role Advanced TTL Management**: Adds the ability to rotate +static roles on a defined schedule. +``` diff --git a/changelog/22487.txt b/changelog/22487.txt new file mode 100644 index 000000000000..bc555f05d525 --- /dev/null +++ b/changelog/22487.txt @@ -0,0 +1,6 @@ +```release-note:change +events: `data_path` will include full data path of secret, including name. +``` +```release-note:change +sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. +``` diff --git a/changelog/22502.txt b/changelog/22502.txt new file mode 100644 index 000000000000..b9d21c2ce277 --- /dev/null +++ b/changelog/22502.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: KV View Secret card will link to list view if input ends in "/" +``` \ No newline at end of file diff --git a/changelog/22516.txt b/changelog/22516.txt new file mode 100644 index 000000000000..661b77d25687 --- /dev/null +++ b/changelog/22516.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.9.0 +``` diff --git a/changelog/22519.txt b/changelog/22519.txt new file mode 100644 index 000000000000..5882cfb25fb9 --- /dev/null +++ b/changelog/22519.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable +``` \ No newline at end of file diff --git a/changelog/22521.txt b/changelog/22521.txt new file mode 100644 index 000000000000..9310b64c1406 --- /dev/null +++ b/changelog/22521.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: do not check TLS validity on ACME requests redirected to https +``` diff --git a/changelog/22523.txt b/changelog/22523.txt new file mode 100644 index 000000000000..e53ab652b2e5 --- /dev/null +++ b/changelog/22523.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
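For the static role advanced TTL feature above (22484), a sketch of a scheduled rotation; the field names follow the database docs, and the connection and role names are placeholders:

```
# Rotate every Saturday at midnight instead of on a fixed period.
vault write database/static-roles/app-role db_name=postgres \
  username=app rotation_schedule="0 0 * * 6"
```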
+``` diff --git a/changelog/22533.txt b/changelog/22533.txt new file mode 100644 index 000000000000..8c9fb6dbc321 --- /dev/null +++ b/changelog/22533.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.15.1 +``` diff --git a/changelog/22540.txt b/changelog/22540.txt new file mode 100644 index 000000000000..191342bd2913 --- /dev/null +++ b/changelog/22540.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: Allow subscriptions to multiple namespaces +``` diff --git a/changelog/22541.txt b/changelog/22541.txt new file mode 100644 index 000000000000..918af3eac1ee --- /dev/null +++ b/changelog/22541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix blank page or ghost secret when canceling KV secret create +``` diff --git a/changelog/22551.txt b/changelog/22551.txt new file mode 100644 index 000000000000..fa3c9483ae50 --- /dev/null +++ b/changelog/22551.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls +``` \ No newline at end of file diff --git a/changelog/22559.txt b/changelog/22559.txt new file mode 100644 index 000000000000..162e6afe0d7e --- /dev/null +++ b/changelog/22559.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) +``` \ No newline at end of file diff --git a/changelog/22567.txt b/changelog/22567.txt new file mode 100644 index 000000000000..d9e5570139bc --- /dev/null +++ b/changelog/22567.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. +``` \ No newline at end of file diff --git a/changelog/22583.txt b/changelog/22583.txt new file mode 100644 index 000000000000..0bc29d60fea8 --- /dev/null +++ b/changelog/22583.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Reduce overhead for role calculation when using cloud auth methods. +``` \ No newline at end of file diff --git a/changelog/22584.txt b/changelog/22584.txt new file mode 100644 index 000000000000..4820498fa69a --- /dev/null +++ b/changelog/22584.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.2.2 +``` diff --git a/changelog/22593.txt b/changelog/22593.txt new file mode 100644 index 000000000000..8f5ee5f76de5 --- /dev/null +++ b/changelog/22593.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: JSON diff view available in "Create New Version" form for KV v2 +``` diff --git a/changelog/22597.txt b/changelog/22597.txt new file mode 100644 index 000000000000..0c37e561be28 --- /dev/null +++ b/changelog/22597.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. 
+``` diff --git a/changelog/22598.txt b/changelog/22598.txt new file mode 100644 index 000000000000..1c36e9960a2f --- /dev/null +++ b/changelog/22598.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.2.3 +``` diff --git a/changelog/22612.txt b/changelog/22612.txt new file mode 100644 index 000000000000..d7852d1e9c91 --- /dev/null +++ b/changelog/22612.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.16.1 +``` diff --git a/changelog/22646.txt b/changelog/22646.txt new file mode 100644 index 000000000000..08673e713b04 --- /dev/null +++ b/changelog/22646.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.16.0 +``` diff --git a/changelog/22651.txt b/changelog/22651.txt new file mode 100644 index 000000000000..5ca281983767 --- /dev/null +++ b/changelog/22651.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/quotas: Add configuration to allow skipping of expensive role calculations +``` \ No newline at end of file diff --git a/changelog/22654.txt b/changelog/22654.txt new file mode 100644 index 000000000000..97c81d7ef8d9 --- /dev/null +++ b/changelog/22654.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.2.2 +``` diff --git a/changelog/22655.txt b/changelog/22655.txt new file mode 100644 index 000000000000..e9cc88a974b7 --- /dev/null +++ b/changelog/22655.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.10.1 +``` diff --git a/changelog/22657.txt b/changelog/22657.txt new file mode 100644 index 000000000000..89a8ab440936 --- /dev/null +++ b/changelog/22657.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. +``` diff --git a/changelog/22659.txt b/changelog/22659.txt new file mode 100644 index 000000000000..501fb4ecc866 --- /dev/null +++ b/changelog/22659.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: improved login speed by adding concurrency to LDAP token group searches +``` diff --git a/changelog/22678.txt b/changelog/22678.txt new file mode 100644 index 000000000000..b711e406921c --- /dev/null +++ b/changelog/22678.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.17.0 +``` diff --git a/changelog/22694.txt b/changelog/22694.txt new file mode 100644 index 000000000000..26f61b866066 --- /dev/null +++ b/changelog/22694.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Prevent panic due to nil pointer receiver for audit header formatting. +``` diff --git a/changelog/22696.txt b/changelog/22696.txt new file mode 100644 index 000000000000..3bdeacc8a140 --- /dev/null +++ b/changelog/22696.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.13.3 +``` diff --git a/changelog/22709.txt b/changelog/22709.txt new file mode 100644 index 000000000000..68684f80d17b --- /dev/null +++ b/changelog/22709.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.17.0 +``` diff --git a/changelog/22712.txt b/changelog/22712.txt new file mode 100644 index 000000000000..ece09c9f7153 --- /dev/null +++ b/changelog/22712.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. 
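A sketch of registering a containerized plugin per the Plugin Containers feature above (22712); the `-oci_image` flag spelling and the image/SHA values are assumptions:

```
# Linux only: run the plugin from an OCI image rather than a local binary.
vault plugin register -sha256="$PLUGIN_SHA256" \
  -oci_image=example/vault-plugin-secrets-demo secret demo-plugin
```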
+``` \ No newline at end of file diff --git a/changelog/22716.txt b/changelog/22716.txt new file mode 100644 index 000000000000..1f6664759377 --- /dev/null +++ b/changelog/22716.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.16.1 +``` diff --git a/changelog/22734.txt b/changelog/22734.txt new file mode 100644 index 000000000000..82067fefbf91 --- /dev/null +++ b/changelog/22734.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.11.2 +``` diff --git a/changelog/22746.txt b/changelog/22746.txt new file mode 100644 index 000000000000..09879609074a --- /dev/null +++ b/changelog/22746.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.17.0 +``` diff --git a/changelog/22748.txt b/changelog/22748.txt new file mode 100644 index 000000000000..d466eaa19896 --- /dev/null +++ b/changelog/22748.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.10.1 +``` diff --git a/changelog/22753.txt b/changelog/22753.txt new file mode 100644 index 000000000000..a297337f92b7 --- /dev/null +++ b/changelog/22753.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: fix panic when providing non-PEM formatted public key for import +``` diff --git a/changelog/22757.txt b/changelog/22757.txt new file mode 100644 index 000000000000..5917de17aefb --- /dev/null +++ b/changelog/22757.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.15.1 +``` diff --git a/changelog/22758.txt b/changelog/22758.txt new file mode 100644 index 000000000000..2ce3a1516849 --- /dev/null +++ b/changelog/22758.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.15.1 +``` diff --git a/changelog/22774.txt b/changelog/22774.txt new file mode 100644 index 000000000000..7ef69177a03d --- /dev/null +++ b/changelog/22774.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.14.1 +``` diff --git a/changelog/22790.txt b/changelog/22790.txt new file mode 100644 index 000000000000..1ac145f6dffd --- /dev/null +++ b/changelog/22790.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.16.2 +``` diff --git a/changelog/22795.txt b/changelog/22795.txt new file mode 100644 index 000000000000..372e2d696920 --- /dev/null +++ b/changelog/22795.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.1 +``` diff --git a/changelog/22797.txt b/changelog/22797.txt new file mode 100644 index 000000000000..373a572c87d1 --- /dev/null +++ b/changelog/22797.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.10.1 +``` diff --git a/changelog/22799.txt b/changelog/22799.txt new file mode 100644 index 000000000000..2242fbf33735 --- /dev/null +++ b/changelog/22799.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.16.2 +``` diff --git a/changelog/22805.txt b/changelog/22805.txt new file mode 100644 index 000000000000..62ef17aa75c4 --- /dev/null +++ b/changelog/22805.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.14.2 +``` diff --git a/changelog/22812.txt b/changelog/22812.txt new file mode 100644 index 000000000000..a0161af8068f --- /dev/null +++ b/changelog/22812.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: allow users to specify files for child process stdout/stderr +``` diff --git a/changelog/22815.txt b/changelog/22815.txt new file mode 100644 index 000000000000..478fa4c8cb0b --- /dev/null +++ b/changelog/22815.txt @@ -0,0 +1,3 @@ 
+```release-note:improvement +events: Enabled by default +``` diff --git a/changelog/22818.txt b/changelog/22818.txt new file mode 100644 index 000000000000..1ef9b6440b99 --- /dev/null +++ b/changelog/22818.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. +``` diff --git a/changelog/22823.txt b/changelog/22823.txt new file mode 100644 index 000000000000..fa98bf501598 --- /dev/null +++ b/changelog/22823.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.6.0 +``` diff --git a/changelog/22824.txt b/changelog/22824.txt new file mode 100644 index 000000000000..5ab3deb632ef --- /dev/null +++ b/changelog/22824.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.16.3 +``` diff --git a/changelog/22832.txt b/changelog/22832.txt new file mode 100644 index 000000000000..7153e7694bf2 --- /dev/null +++ b/changelog/22832.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes form field label tooltip alignment +``` \ No newline at end of file diff --git a/changelog/22835.txt b/changelog/22835.txt new file mode 100644 index 000000000000..c8e3d46cea36 --- /dev/null +++ b/changelog/22835.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: WebSocket subscriptions add support for boolean filter expressions +``` diff --git a/changelog/22852.txt b/changelog/22852.txt new file mode 100644 index 000000000000..3a667eb23bb0 --- /dev/null +++ b/changelog/22852.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. +``` diff --git a/changelog/22854.txt b/changelog/22854.txt new file mode 100644 index 000000000000..71db25095da7 --- /dev/null +++ b/changelog/22854.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.9.3 +``` diff --git a/changelog/22855.txt b/changelog/22855.txt new file mode 100644 index 000000000000..a911e2112387 --- /dev/null +++ b/changelog/22855.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: don't exclude features present on license +``` \ No newline at end of file diff --git a/changelog/22856.txt b/changelog/22856.txt new file mode 100644 index 000000000000..a4596e3a18b2 --- /dev/null +++ b/changelog/22856.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.16.1 +``` diff --git a/changelog/22871.txt b/changelog/22871.txt new file mode 100644 index 000000000000..0b7048f49ca5 --- /dev/null +++ b/changelog/22871.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.9.4 +``` diff --git a/changelog/22879.txt b/changelog/22879.txt new file mode 100644 index 000000000000..335b099ce46a --- /dev/null +++ b/changelog/22879.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.17.1 +``` diff --git a/changelog/22907.txt b/changelog/22907.txt new file mode 100644 index 000000000000..dfaa4e1b0431 --- /dev/null +++ b/changelog/22907.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.7.3 +``` diff --git a/changelog/22914.txt b/changelog/22914.txt new file mode 100644 index 000000000000..2764d4856938 --- /dev/null +++ b/changelog/22914.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. +``` +```release-note:bug +plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. 
+``` diff --git a/changelog/22926.txt b/changelog/22926.txt new file mode 100644 index 000000000000..69da688a10d5 --- /dev/null +++ b/changelog/22926.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds mount configuration details to Kubernetes secrets engine configuration view +``` \ No newline at end of file diff --git a/changelog/22994.txt b/changelog/22994.txt new file mode 100644 index 000000000000..f84bc5d74d6d --- /dev/null +++ b/changelog/22994.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/azure: Add support for Azure workload identity authentication (see issue +#18257). Update go-kms-wrapping dependency to include [PR +#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) +``` \ No newline at end of file diff --git a/changelog/22996.txt b/changelog/22996.txt new file mode 100644 index 000000000000..7b67605864d7 --- /dev/null +++ b/changelog/22996.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. +``` diff --git a/changelog/22997.txt b/changelog/22997.txt new file mode 100644 index 000000000000..41a162eb623e --- /dev/null +++ b/changelog/22997.txt @@ -0,0 +1,4 @@ +```release-note:change +events: Log level for processing an event dropped from info to debug. +``` + diff --git a/changelog/23007.txt b/changelog/23007.txt new file mode 100644 index 000000000000..02fee8c150b5 --- /dev/null +++ b/changelog/23007.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. +``` diff --git a/changelog/23010.txt b/changelog/23010.txt new file mode 100644 index 000000000000..f6a72ecf9eeb --- /dev/null +++ b/changelog/23010.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies +``` \ No newline at end of file diff --git a/changelog/23013.txt b/changelog/23013.txt new file mode 100644 index 000000000000..78987e659ea1 --- /dev/null +++ b/changelog/23013.txt @@ -0,0 +1,7 @@ +```release-note:bug +storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected, +potentially causing data loss or corruption for keys with many concurrent +writers. For Enterprise clusters this could cause corruption of the Merkle trees, +leading to failure to complete Merkle sync without a full re-index. +``` diff --git a/changelog/23022.txt b/changelog/23022.txt new file mode 100644 index 000000000000..9d58a95d3ed0 --- /dev/null +++ b/changelog/23022.txt @@ -0,0 +1,5 @@ +```release-note:improvement +core: update sys/seal-status (and CLI vault status) to report the type of +the seal when unsealed, as well as the type of the recovery seal if an +auto-seal. +``` \ No newline at end of file diff --git a/changelog/23025.txt b/changelog/23025.txt new file mode 100644 index 000000000000..5392c75f730c --- /dev/null +++ b/changelog/23025.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui (enterprise): Fix error message when generating SSH credential with control group +``` \ No newline at end of file diff --git a/changelog/23042.txt b/changelog/23042.txt new file mode 100644 index 000000000000..da73a307539e --- /dev/null +++ b/changelog/23042.txt @@ -0,0 +1,4 @@ +```release-note:bug +events: Ensure subscription resources are cleaned up on close.
+``` + diff --git a/changelog/23047.txt b/changelog/23047.txt new file mode 100644 index 000000000000..7c8c4471c69f --- /dev/null +++ b/changelog/23047.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: added new API field to Vault responses, `mount_type`, returning mount information (e.g. `kv` for KVV1/KVV2) for the mount in question, when appropriate. +``` \ No newline at end of file diff --git a/changelog/23050.txt b/changelog/23050.txt new file mode 100644 index 000000000000..391d6c6c4057 --- /dev/null +++ b/changelog/23050.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 +``` diff --git a/changelog/23059.txt b/changelog/23059.txt new file mode 100644 index 000000000000..96a2dc461208 --- /dev/null +++ b/changelog/23059.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/azure: Added Azure API configurable retry options +``` diff --git a/changelog/23060.txt b/changelog/23060.txt new file mode 100644 index 000000000000..0df1086057e6 --- /dev/null +++ b/changelog/23060.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.2 +``` diff --git a/changelog/23066.txt b/changelog/23066.txt new file mode 100644 index 000000000000..f4636b97dbb0 --- /dev/null +++ b/changelog/23066.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the issue where the confirm delete dropdown is cut off +``` diff --git a/changelog/23103.txt b/changelog/23103.txt new file mode 100644 index 000000000000..a66533281dfc --- /dev/null +++ b/changelog/23103.txt @@ -0,0 +1,3 @@ +```release-note:bug +cap/ldap: Downgrade go-ldap client from v3.4.5 to v3.4.4 due to a race condition +``` diff --git a/changelog/23118.txt b/changelog/23118.txt new file mode 100644 index 000000000000..f93138652919 --- /dev/null +++ b/changelog/23118.txt @@ -0,0 +1,3 @@ +```release-note:bug +ldaputil: Disable tests for ARM64 +``` diff --git a/changelog/23119.txt b/changelog/23119.txt new file mode 100644 index 000000000000..fd5f694db3a0 --- /dev/null +++ b/changelog/23119.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added allowed_domains_template field for CA type role in SSH engine +``` diff --git a/changelog/23123.txt b/changelog/23123.txt new file mode 100644 index 000000000000..4bfc0c0e7935 --- /dev/null +++ b/changelog/23123.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes filter and search bug in secrets engines +``` diff --git a/changelog/23143.txt b/changelog/23143.txt new file mode 100644 index 000000000000..5db4d66d159e --- /dev/null +++ b/changelog/23143.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Surface warning banner if UI has stopped auto-refreshing token +``` diff --git a/changelog/23155.txt b/changelog/23155.txt new file mode 100644 index 000000000000..0c6914a7820f --- /dev/null +++ b/changelog/23155.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixes listing password policies to include those with names containing / characters.
+``` \ No newline at end of file diff --git a/changelog/23160.txt b/changelog/23160.txt new file mode 100644 index 000000000000..66e97bfaf653 --- /dev/null +++ b/changelog/23160.txt @@ -0,0 +1,3 @@ +```release-note:improvement +replication: Add re-index status metric to telemetry +``` diff --git a/changelog/23166.txt b/changelog/23166.txt new file mode 100644 index 000000000000..c3377679242d --- /dev/null +++ b/changelog/23166.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update pki mount configuration details to match the new mount configuration details pattern +``` diff --git a/changelog/23169.txt b/changelog/23169.txt new file mode 100644 index 000000000000..4f7d266dfb6f --- /dev/null +++ b/changelog/23169.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System pagination component +``` \ No newline at end of file diff --git a/changelog/23171.txt b/changelog/23171.txt new file mode 100644 index 000000000000..75bd32d4cd24 --- /dev/null +++ b/changelog/23171.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist +``` +```release-note:bug +plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET +``` diff --git a/changelog/23193.txt b/changelog/23193.txt new file mode 100644 index 000000000000..b895907ec6e4 --- /dev/null +++ b/changelog/23193.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add pagination to PKI roles, keys, issuers, and certificates list pages +``` diff --git a/changelog/23200.txt b/changelog/23200.txt new file mode 100644 index 000000000000..245cc694afbb --- /dev/null +++ b/changelog/23200.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Move access to KV V2 version diff view to toolbar in Version History +``` \ No newline at end of file diff --git a/changelog/23215.txt b/changelog/23215.txt new file mode 100644 index 000000000000..8c0ee8ccaf92 --- /dev/null +++ b/changelog/23215.txt @@ -0,0 +1,6 @@ +```release-note:bug +plugins: Containerized plugins can be run with mlock enabled. +``` +```release-note:improvement +plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. 
+``` \ No newline at end of file diff --git a/changelog/23225.txt b/changelog/23225.txt new file mode 100644 index 000000000000..31d5b6490a01 --- /dev/null +++ b/changelog/23225.txt @@ -0,0 +1,3 @@ +```release-note:bug +docs: fix wrong api path for ldap secrets cli-commands +``` diff --git a/changelog/23232.txt b/changelog/23232.txt new file mode 100644 index 000000000000..8084391cc799 --- /dev/null +++ b/changelog/23232.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds tidy_revoked_certs to PKI tidy status page +``` \ No newline at end of file diff --git a/changelog/23240.txt b/changelog/23240.txt new file mode 100644 index 000000000000..da202c7a9111 --- /dev/null +++ b/changelog/23240.txt @@ -0,0 +1,3 @@ +```release-note:bug +mongo-db: allow non-admin database for root credential rotation +``` diff --git a/changelog/23256.txt b/changelog/23256.txt new file mode 100644 index 000000000000..ef21e0bbd36a --- /dev/null +++ b/changelog/23256.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/db: Remove the `service_account_json` parameter when reading DB connection details +``` \ No newline at end of file diff --git a/changelog/23260.txt b/changelog/23260.txt new file mode 100644 index 000000000000..52de9b805275 --- /dev/null +++ b/changelog/23260.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds warning before downloading KV v2 secret values +``` \ No newline at end of file diff --git a/changelog/23272.txt b/changelog/23272.txt new file mode 100644 index 000000000000..39eddda18674 --- /dev/null +++ b/changelog/23272.txt @@ -0,0 +1,3 @@ +```release-note:improvement +.release/linux: add LimitCORE=0 to vault.service +``` \ No newline at end of file diff --git a/changelog/23277.txt b/changelog/23277.txt new file mode 100644 index 000000000000..329b3ebc6fb1 --- /dev/null +++ b/changelog/23277.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add warning message to the namespace picker warning users about the behavior when logging in with a root token. +``` \ No newline at end of file diff --git a/changelog/23278.txt b/changelog/23278.txt new file mode 100644 index 000000000000..cd02679e7687 --- /dev/null +++ b/changelog/23278.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Stop processing in-flight ACME verifications when an active node steps down +``` diff --git a/changelog/23282.txt b/changelog/23282.txt new file mode 100644 index 000000000000..1026ccf41913 --- /dev/null +++ b/changelog/23282.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. +``` \ No newline at end of file diff --git a/changelog/23287.txt b/changelog/23287.txt new file mode 100644 index 000000000000..6d3229fb1b07 --- /dev/null +++ b/changelog/23287.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: fix inaccuracies with unauthenticated_in_flight_requests_access parameter +``` \ No newline at end of file diff --git a/changelog/23297.txt b/changelog/23297.txt new file mode 100644 index 000000000000..64bf55fb2075 --- /dev/null +++ b/changelog/23297.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes policy input toolbar scrolling by default +``` \ No newline at end of file diff --git a/changelog/23331.txt b/changelog/23331.txt new file mode 100644 index 000000000000..f2734a5cbb32 --- /dev/null +++ b/changelog/23331.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. 
+``` diff --git a/changelog/23382.txt b/changelog/23382.txt new file mode 100644 index 000000000000..50a7d4773bc3 --- /dev/null +++ b/changelog/23382.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Makes modals accessible by implementing Helios Design System modal component +``` diff --git a/changelog/23446.txt b/changelog/23446.txt new file mode 100644 index 000000000000..e290eb2951f7 --- /dev/null +++ b/changelog/23446.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where a change on OpenAPI added a double forward slash on some LIST endpoints. +``` diff --git a/changelog/23457.txt b/changelog/23457.txt new file mode 100644 index 000000000000..a41ec10b5029 --- /dev/null +++ b/changelog/23457.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Add Snapshot Inspector Tool**: Add CLI tool to inspect Vault snapshots +``` \ No newline at end of file diff --git a/changelog/23470.txt b/changelog/23470.txt new file mode 100644 index 000000000000..744fa76c7d45 --- /dev/null +++ b/changelog/23470.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix AWS secret engine to allow empty policy_document field. +``` \ No newline at end of file diff --git a/changelog/23500.txt b/changelog/23500.txt new file mode 100644 index 000000000000..52f95c9c4c47 --- /dev/null +++ b/changelog/23500.txt @@ -0,0 +1,3 @@ +```release-note:bug +events: Ignore sending context to give more time for events to send +``` diff --git a/changelog/23503.txt b/changelog/23503.txt new file mode 100644 index 000000000000..962693d1afcf --- /dev/null +++ b/changelog/23503.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: show banner when resultant-acl check fails due to permissions or wrong namespace. +``` \ No newline at end of file diff --git a/changelog/23516.txt b/changelog/23516.txt new file mode 100644 index 000000000000..f87ab2092710 --- /dev/null +++ b/changelog/23516.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized +``` \ No newline at end of file diff --git a/changelog/23528.txt b/changelog/23528.txt new file mode 100644 index 000000000000..ad9ec4f4b7bd --- /dev/null +++ b/changelog/23528.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: update credential rotation deadline when static role rotation period is updated +``` diff --git a/changelog/23534.txt b/changelog/23534.txt new file mode 100644 index 000000000000..5f101cb18a75 --- /dev/null +++ b/changelog/23534.txt @@ -0,0 +1,3 @@ +```release-note:feature +config/listener: allow per-listener configuration settings to redact sensitive parts of response to unauthenticated endpoints. +``` \ No newline at end of file diff --git a/changelog/23547.txt b/changelog/23547.txt new file mode 100644 index 000000000000..f5ddb19938b6 --- /dev/null +++ b/changelog/23547.txt @@ -0,0 +1,3 @@ +```release-note:feature +config/listener: allow per-listener configuration setting to disable replication status endpoints. 
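The two `config/listener` feature entries above (changelog/23534.txt and changelog/23547.txt) both land in the TCP listener stanza. A sketch of a listener using them, assuming the parameter names `redact_addresses`, `redact_cluster_name`, `redact_version`, and `disable_request_limiter`-style placement for `disable_replication_status_endpoints`:

```hcl
listener "tcp" {
  address       = "0.0.0.0:8200"
  tls_cert_file = "/etc/vault/tls/vault.crt"
  tls_key_file  = "/etc/vault/tls/vault.key"

  # Redact sensitive fields from responses to unauthenticated endpoints
  # (assumed parameter names).
  redact_addresses    = true
  redact_cluster_name = true
  redact_version      = true

  # Turn off the replication status endpoints on this listener only.
  disable_replication_status_endpoints = true
}
```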
+``` \ No newline at end of file diff --git a/changelog/23549.txt b/changelog/23549.txt new file mode 100644 index 000000000000..078cc232d0ed --- /dev/null +++ b/changelog/23549.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api/plugins: add `tls-server-name` arg for plugin registration +``` diff --git a/changelog/23555.txt b/changelog/23555.txt new file mode 100644 index 000000000000..32405057f54e --- /dev/null +++ b/changelog/23555.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. +``` \ No newline at end of file diff --git a/changelog/23565.txt b/changelog/23565.txt new file mode 100644 index 000000000000..5447d34c7db6 --- /dev/null +++ b/changelog/23565.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix regression that broke the oktaNumberChallenge on the ui. +``` diff --git a/changelog/23571.txt b/changelog/23571.txt new file mode 100644 index 000000000000..62185d25b4d5 --- /dev/null +++ b/changelog/23571.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Reload seal configuration on SIGHUP**: Seal configuration is reloaded on SIGHUP so that seal configuration can +be changed without shutting down vault +``` diff --git a/changelog/23573.txt b/changelog/23573.txt new file mode 100644 index 000000000000..6bb0562b971f --- /dev/null +++ b/changelog/23573.txt @@ -0,0 +1,5 @@ +```release-note:bug +Seal HA (enterprise/beta): Fix rejection of a seal configuration change +from two to one auto seal due to persistence of the previous seal type being +"multiseal". +``` diff --git a/changelog/23580.txt b/changelog/23580.txt new file mode 100644 index 000000000000..f0f9129ba0b4 --- /dev/null +++ b/changelog/23580.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Upgrade HDS version to fix sidebar navigation issues when it collapses in smaller viewports. +``` diff --git a/changelog/23585.txt b/changelog/23585.txt new file mode 100644 index 000000000000..42c9a498c67b --- /dev/null +++ b/changelog/23585.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Assumes version 1 for kv engines when options are null because no version is specified +``` \ No newline at end of file diff --git a/changelog/23598.txt b/changelog/23598.txt new file mode 100644 index 000000000000..9f260d83efdb --- /dev/null +++ b/changelog/23598.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Fix bug reopening 'file' audit devices on SIGHUP. +``` diff --git a/changelog/23620.txt b/changelog/23620.txt new file mode 100644 index 000000000000..60667c28062d --- /dev/null +++ b/changelog/23620.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. +``` \ No newline at end of file diff --git a/changelog/23621.txt b/changelog/23621.txt new file mode 100644 index 000000000000..2af5f337e635 --- /dev/null +++ b/changelog/23621.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault Proxy Static Secret Caching (enterprise)**: Adds support for static secret (KVv1 and KVv2) caching to Vault Proxy. 
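The Vault Proxy static secret caching feature above (changelog/23621.txt) is enabled through Proxy's `cache` stanza. A minimal sketch, assuming the `cache_static_secrets` parameter name, which the entry itself does not confirm:

```hcl
# Vault Proxy sketch: cache KVv1/KVv2 reads in addition to the existing
# lease caching. cache_static_secrets is an assumed parameter name.
vault {
  address = "https://vault.example.com:8200"
}

cache {
  cache_static_secrets = true
}

listener "tcp" {
  address     = "127.0.0.1:8100"
  tls_disable = true
}
```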
+``` diff --git a/changelog/23636.txt b/changelog/23636.txt new file mode 100644 index 000000000000..26255607251b --- /dev/null +++ b/changelog/23636.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix bug with sigusr2 where pprof files were not closed correctly +``` diff --git a/changelog/23667.txt b/changelog/23667.txt new file mode 100644 index 000000000000..63cd2cf2c305 --- /dev/null +++ b/changelog/23667.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Secrets Sync UI (enterprise)**: Adds secret syncing for KV v2 secrets to external destinations using the UI. +``` \ No newline at end of file diff --git a/changelog/23673.txt b/changelog/23673.txt new file mode 100644 index 000000000000..33bd6de01903 --- /dev/null +++ b/changelog/23673.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: fix requeueing of rotation entry in cases where rotation fails +``` diff --git a/changelog/23690.txt b/changelog/23690.txt new file mode 100644 index 000000000000..8e0708f28827 --- /dev/null +++ b/changelog/23690.txt @@ -0,0 +1,3 @@ +```release-note:feature +**secrets/aws**: Support issuing an STS Session Token directly from the root credential. +``` \ No newline at end of file diff --git a/changelog/23695.txt b/changelog/23695.txt new file mode 100644 index 000000000000..104670645fb8 --- /dev/null +++ b/changelog/23695.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Decode the connection url for display on the connection details page +``` diff --git a/changelog/23700.txt b/changelog/23700.txt new file mode 100644 index 000000000000..ca7e7c839aa9 --- /dev/null +++ b/changelog/23700.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. +``` diff --git a/changelog/23702.txt b/changelog/23702.txt new file mode 100644 index 000000000000..3fee98a1e341 --- /dev/null +++ b/changelog/23702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds a warning when whitespace is detected in a key of a KV secret +``` \ No newline at end of file diff --git a/changelog/23703.txt b/changelog/23703.txt new file mode 100644 index 000000000000..57d1fb3c4fa2 --- /dev/null +++ b/changelog/23703.txt @@ -0,0 +1,6 @@ +```release-note:change +Upgrade grpc to v1.58.3 +``` +```release-note:change +Upgrade x/net to v0.17.0 +``` diff --git a/changelog/23723.txt b/changelog/23723.txt new file mode 100644 index 000000000000..25828f99655d --- /dev/null +++ b/changelog/23723.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Do not allow auto rotation on managed_key key types +``` diff --git a/changelog/23726.txt b/changelog/23726.txt new file mode 100644 index 000000000000..f4e21989fcd6 --- /dev/null +++ b/changelog/23726.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issues displaying accurate TLS state in dashboard configuration details +``` \ No newline at end of file diff --git a/changelog/23747.txt b/changelog/23747.txt new file mode 100644 index 000000000000..bf611ed142fc --- /dev/null +++ b/changelog/23747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file +``` \ No newline at end of file diff --git a/changelog/23771.txt b/changelog/23771.txt new file mode 100644 index 000000000000..9b8bd8ad4457 --- /dev/null +++ b/changelog/23771.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/plugin: Fix an issue where external plugins were not reporting logs below INFO level +``` diff --git a/changelog/23781.txt 
b/changelog/23781.txt new file mode 100644 index 000000000000..32d3b51e95e3 --- /dev/null +++ b/changelog/23781.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: Fixes segment fragment loss due to exceeding the entry record size limit +``` \ No newline at end of file diff --git a/changelog/23786.txt b/changelog/23786.txt new file mode 100644 index 000000000000..b6e73142eb6c --- /dev/null +++ b/changelog/23786.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/approle: Normalized error response messages when invalid credentials are provided +``` diff --git a/changelog/23797.txt b/changelog/23797.txt new file mode 100644 index 000000000000..32369b0fbd5e --- /dev/null +++ b/changelog/23797.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allow users in userpass auth mount to update their own password +``` \ No newline at end of file diff --git a/changelog/23802.txt b/changelog/23802.txt new file mode 100644 index 000000000000..49caebc4fce0 --- /dev/null +++ b/changelog/23802.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" returning a 500 error code from the secondary when filter paths are enforced +``` diff --git a/changelog/23837.txt b/changelog/23837.txt new file mode 100644 index 000000000000..b3e17a00c928 --- /dev/null +++ b/changelog/23837.txt @@ -0,0 +1,3 @@ +```release-note:change +telemetry: Seal wrap encrypt/decrypt metrics now differentiate between seals using a metrics label of seal name rather than separate metric names. +``` \ No newline at end of file diff --git a/changelog/23849.txt b/changelog/23849.txt new file mode 100644 index 000000000000..e5d89a3030c4 --- /dev/null +++ b/changelog/23849.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.10.2 +``` diff --git a/changelog/23861.txt b/changelog/23861.txt new file mode 100644 index 000000000000..8c4ac70380eb --- /dev/null +++ b/changelog/23861.txt @@ -0,0 +1,4 @@ +```release-note:bug +api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. +``` diff --git a/changelog/23872.txt b/changelog/23872.txt new file mode 100644 index 000000000000..b486fd258a80 --- /dev/null +++ b/changelog/23872.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/etcd: etcd should only return keys when calling List() +``` diff --git a/changelog/23874.txt b/changelog/23874.txt new file mode 100644 index 000000000000..34ac61d56795 --- /dev/null +++ b/changelog/23874.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash +``` \ No newline at end of file diff --git a/changelog/23894.txt b/changelog/23894.txt new file mode 100644 index 000000000000..a94e1428eadd --- /dev/null +++ b/changelog/23894.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Skip unnecessary deriving of policies during Login MFA Check. +``` \ No newline at end of file diff --git a/changelog/23897.txt b/changelog/23897.txt new file mode 100644 index 000000000000..28f2f75c5b03 --- /dev/null +++ b/changelog/23897.txt @@ -0,0 +1,4 @@ +```release-note:feature +cli: introduce new command group hcp, which groups subcommands for authenticating users or machines to HCP using +either provided arguments or an HCP token retrieved through browser login.
+``` \ No newline at end of file diff --git a/changelog/23902.txt b/changelog/23902.txt new file mode 100644 index 000000000000..cbfec65096aa --- /dev/null +++ b/changelog/23902.txt @@ -0,0 +1,5 @@ +```release-note:bug +core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. +``` + diff --git a/changelog/23906.txt b/changelog/23906.txt new file mode 100644 index 000000000000..ed3671dbf421 --- /dev/null +++ b/changelog/23906.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix rare panic due to a race condition with metrics collection during seal +``` diff --git a/changelog/23908.txt b/changelog/23908.txt new file mode 100644 index 000000000000..6bd39bb1a0a8 --- /dev/null +++ b/changelog/23908.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Updates OIDC/JWT login error handling to surface all role-related errors +``` \ No newline at end of file diff --git a/changelog/23913.txt b/changelog/23913.txt new file mode 100644 index 000000000000..b01525edf205 --- /dev/null +++ b/changelog/23913.txt @@ -0,0 +1,5 @@ +```release-note:change +sdk: Upgrade the sdk's dependent packages. +This includes github.com/docker/docker to v24.0.7+incompatible, +google.golang.org/grpc to v1.57.2, and golang.org/x/net to v0.17.0. +``` diff --git a/changelog/23921.txt b/changelog/23921.txt new file mode 100644 index 000000000000..cd03142227d0 --- /dev/null +++ b/changelog/23921.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: show error from API when seal fails +``` diff --git a/changelog/23942.txt b/changelog/23942.txt new file mode 100644 index 000000000000..a4d43d48f091 --- /dev/null +++ b/changelog/23942.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix broken GUI when accessing from listener with chroot_namespace defined +``` diff --git a/changelog/23964.txt b/changelog/23964.txt new file mode 100644 index 000000000000..7dcdf884dc90 --- /dev/null +++ b/changelog/23964.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update sidebar Secrets engine to title case. +``` diff --git a/changelog/23994.txt b/changelog/23994.txt new file mode 100644 index 000000000000..6eff0ae1d72b --- /dev/null +++ b/changelog/23994.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Increase base font-size from 14px to 16px and update use of rem vs pixels for size variables +``` diff --git a/changelog/24010.txt b/changelog/24010.txt new file mode 100644 index 000000000000..aa72bc977912 --- /dev/null +++ b/changelog/24010.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. +``` diff --git a/changelog/24027.txt b/changelog/24027.txt new file mode 100644 index 000000000000..d276928f93fb --- /dev/null +++ b/changelog/24027.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. +``` diff --git a/changelog/24054.txt b/changelog/24054.txt new file mode 100644 index 000000000000..2680d114ce45 --- /dev/null +++ b/changelog/24054.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Fix a panic when attempting to export a public RSA key +``` diff --git a/changelog/24056.txt b/changelog/24056.txt new file mode 100644 index 000000000000..baa7fa98bb8e --- /dev/null +++ b/changelog/24056.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/config: Use correct HCL config value when configuring `log_requests_level`.
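Two entries above touch server-level HCL options: `detect_deadlocks` (changelog/23902.txt), which can now enable deadlock detection per subsystem, and `log_requests_level` (changelog/24056.txt), whose HCL value is now read correctly. A sketch of both in a server config; the `"statelock,expiration,quotas"` value list is an assumption about the accepted subsystem names:

```hcl
# Server config sketch for the two options discussed above.
# Opt in to deadlock detection for individual subsystems (assumed names).
detect_deadlocks = "statelock,expiration,quotas"

# Log incoming requests at this level; the fix makes Vault honor the HCL value.
log_requests_level = "trace"
```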
+``` \ No newline at end of file diff --git a/changelog/24099.txt b/changelog/24099.txt new file mode 100644 index 000000000000..bc33a184f988 --- /dev/null +++ b/changelog/24099.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Rotate Root for LDAP auth**: Rotate root operations are now supported for the LDAP auth engine. +``` diff --git a/changelog/24103.txt b/changelog/24103.txt new file mode 100644 index 000000000000..f86bfd996949 --- /dev/null +++ b/changelog/24103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Sort list view of entities and aliases alphabetically using the item name +``` diff --git a/changelog/24108.txt b/changelog/24108.txt new file mode 100644 index 000000000000..0fcb8ac2e51a --- /dev/null +++ b/changelog/24108.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Close rate-limit blocked client purge goroutines when sealing +``` \ No newline at end of file diff --git a/changelog/24136.txt b/changelog/24136.txt new file mode 100644 index 000000000000..eaf2e2521681 --- /dev/null +++ b/changelog/24136.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Return error when failure occurs setting up node 0 in NewDockerCluster, instead of ignoring it. +``` \ No newline at end of file diff --git a/changelog/24147.txt b/changelog/24147.txt new file mode 100644 index 000000000000..960ae2250ca4 --- /dev/null +++ b/changelog/24147.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix error when tuning token auth configuration within namespace +``` \ No newline at end of file diff --git a/changelog/24165.txt b/changelog/24165.txt new file mode 100644 index 000000000000..04c0b9223431 --- /dev/null +++ b/changelog/24165.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while +Vault is in seal migration mode. 
+``` diff --git a/changelog/24168.txt b/changelog/24168.txt new file mode 100644 index 000000000000..09f34ce8621c --- /dev/null +++ b/changelog/24168.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: capabilities-self is always called in the user's root namespace +``` \ No newline at end of file diff --git a/changelog/24191.txt b/changelog/24191.txt new file mode 100644 index 000000000000..2fe98e926d05 --- /dev/null +++ b/changelog/24191.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System footer component +``` diff --git a/changelog/24192.txt b/changelog/24192.txt new file mode 100644 index 000000000000..97a26746bd0f --- /dev/null +++ b/changelog/24192.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 +``` diff --git a/changelog/24193.txt b/changelog/24193.txt new file mode 100644 index 000000000000..67ea1d0ae974 --- /dev/null +++ b/changelog/24193.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Handle errors related to expired OCSP server responses +``` diff --git a/changelog/24201.txt b/changelog/24201.txt new file mode 100644 index 000000000000..9253e44ab8c0 --- /dev/null +++ b/changelog/24201.txt @@ -0,0 +1,3 @@ +```release-note:change +events: Source URL is now `vault://{vault node}` +``` diff --git a/changelog/24224.txt b/changelog/24224.txt new file mode 100644 index 000000000000..040b42d94da8 --- /dev/null +++ b/changelog/24224.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix JSON editor in KV V2 unable to handle pasted values +``` diff --git a/changelog/24236.txt b/changelog/24236.txt new file mode 100644 index 000000000000..215c7c6d8f11 --- /dev/null +++ b/changelog/24236.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Containerized plugins can be run fully rootless with the runsc runtime. +``` diff --git a/changelog/24238.txt b/changelog/24238.txt new file mode 100644 index 000000000000..207a61d60952 --- /dev/null +++ b/changelog/24238.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. +``` \ No newline at end of file diff --git a/changelog/24246.txt b/changelog/24246.txt new file mode 100644 index 000000000000..424a006f2da3 --- /dev/null +++ b/changelog/24246.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix a race whereby a new leader may present inconsistent node data to Autopilot. +``` \ No newline at end of file diff --git a/changelog/24250.txt b/changelog/24250.txt new file mode 100644 index 000000000000..e6aca7096ac3 --- /dev/null +++ b/changelog/24250.txt @@ -0,0 +1,6 @@ +```release-note:change +cli: `vault plugin info` and `vault plugin deregister` now require 2 positional arguments instead of accepting either 1 or 2. +``` +```release-note:improvement +cli: Improved error messages for `vault plugin` sub-commands. +``` diff --git a/changelog/24252.txt b/changelog/24252.txt new file mode 100644 index 000000000000..343811bfd050 --- /dev/null +++ b/changelog/24252.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. 
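The `agent/logging` fix above (changelog/24252.txt) makes consul-template output honor Agent's logging settings. Those settings can come from the `-log-format` and `-log-file` flags or their config-file equivalents; a minimal sketch using the documented config names:

```hcl
# Vault Agent logging sketch, equivalent to -log-format=json -log-file=...
log_level  = "info"
log_format = "json"
log_file   = "/var/log/vault/agent.log"
```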
+``` \ No newline at end of file diff --git a/changelog/24256.txt b/changelog/24256.txt new file mode 100644 index 000000000000..74124710b8a8 --- /dev/null +++ b/changelog/24256.txt @@ -0,0 +1,4 @@ +```release-note:bug +api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. +``` diff --git a/changelog/24270.txt b/changelog/24270.txt new file mode 100644 index 000000000000..eb8e4c04fb7c --- /dev/null +++ b/changelog/24270.txt @@ -0,0 +1,3 @@ +```release-note:change +api: add the `enterprise` parameter to the `/sys/health` endpoint +``` diff --git a/changelog/24280.txt b/changelog/24280.txt new file mode 100644 index 000000000000..dd3c42fe4c8d --- /dev/null +++ b/changelog/24280.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: display logs on startup immediately if disable-gated-logs flag is set +``` diff --git a/changelog/24281.txt b/changelog/24281.txt new file mode 100644 index 000000000000..7d24c296a1f2 --- /dev/null +++ b/changelog/24281.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Correctly handle directory redirects from pre 1.15.0 Kv v2 list view urls. +``` diff --git a/changelog/24283.txt b/changelog/24283.txt new file mode 100644 index 000000000000..f8f885f3e11e --- /dev/null +++ b/changelog/24283.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: add subnav for replication items +``` diff --git a/changelog/24290.txt b/changelog/24290.txt new file mode 100644 index 000000000000..3533146b7d23 --- /dev/null +++ b/changelog/24290.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: When Kv v2 secret is an object, fix so details view defaults to readOnly JSON editor. +``` \ No newline at end of file diff --git a/changelog/24292.txt b/changelog/24292.txt new file mode 100644 index 000000000000..784e2e38f4c7 --- /dev/null +++ b/changelog/24292.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix payload sent when disabling replication +``` diff --git a/changelog/24297.txt b/changelog/24297.txt new file mode 100644 index 000000000000..d1433cfcd1f9 --- /dev/null +++ b/changelog/24297.txt @@ -0,0 +1,2 @@ +```release-note:change +logging: Vault server, Agent and Proxy now honor log file value and only add a timestamp on rotation. \ No newline at end of file diff --git a/changelog/24299.txt b/changelog/24299.txt new file mode 100644 index 000000000000..1b295b985687 --- /dev/null +++ b/changelog/24299.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update AlertInline component to use Helios Design System Alert component +``` diff --git a/changelog/24305.txt b/changelog/24305.txt new file mode 100644 index 000000000000..5fcde924fae2 --- /dev/null +++ b/changelog/24305.txt @@ -0,0 +1,3 @@ +```release-note:bug +eventlogger: Update library to v0.2.7 to address race condition +``` diff --git a/changelog/24325.txt b/changelog/24325.txt new file mode 100644 index 000000000000..ab5ce613c404 --- /dev/null +++ b/changelog/24325.txt @@ -0,0 +1,4 @@ +```release-note:change +identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. +``` \ No newline at end of file diff --git a/changelog/24336.txt b/changelog/24336.txt new file mode 100644 index 000000000000..63594dc6cee0 --- /dev/null +++ b/changelog/24336.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. 
+``` diff --git a/changelog/24339.txt b/changelog/24339.txt new file mode 100644 index 000000000000..7c103de7d17c --- /dev/null +++ b/changelog/24339.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Correctly handle redirects from pre 1.15.0 Kv v2 edit, create, and show urls. +``` diff --git a/changelog/24343.txt b/changelog/24343.txt new file mode 100644 index 000000000000..b77b3afc1938 --- /dev/null +++ b/changelog/24343.txt @@ -0,0 +1,5 @@ +```release-note:improvement +api: sys/health and sys/ha-status now expose information about how long +the last heartbeat took, and the estimated clock skew between standby and +active node based on that heartbeat duration. +``` \ No newline at end of file diff --git a/changelog/24352.txt b/changelog/24352.txt new file mode 100644 index 000000000000..c6cf651daeb9 --- /dev/null +++ b/changelog/24352.txt @@ -0,0 +1,3 @@ +```release-note:improvement +events: Add support for event subscription plugins, including SQS +``` diff --git a/changelog/24373.txt b/changelog/24373.txt new file mode 100644 index 000000000000..ae77aee6cac0 --- /dev/null +++ b/changelog/24373.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: Include PATCH in the list of allowed CORS methods +``` \ No newline at end of file diff --git a/changelog/24382.txt b/changelog/24382.txt new file mode 100644 index 000000000000..4c76944e18a9 --- /dev/null +++ b/changelog/24382.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Default Lease Count Quota (enterprise)**: Apply a new global default lease count quota of 300k leases for all +new installs of Vault. +``` diff --git a/changelog/24387.txt b/changelog/24387.txt new file mode 100644 index 000000000000..3e7fe85c2581 --- /dev/null +++ b/changelog/24387.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Implement Helios Design System Breadcrumbs +``` \ No newline at end of file diff --git a/changelog/24404.txt b/changelog/24404.txt new file mode 100644 index 000000000000..6fab70d0bf12 --- /dev/null +++ b/changelog/24404.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue where kv v2 capabilities checks were not passing in the full secret path if secret was inside a directory. +``` diff --git a/changelog/24441.txt b/changelog/24441.txt new file mode 100644 index 000000000000..5a4d491c64ea --- /dev/null +++ b/changelog/24441.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/ha: fix panic that can occur when an HA cluster contains an active node with version >=1.12.0 and another node with version <1.10 +``` diff --git a/changelog/24472.txt b/changelog/24472.txt new file mode 100644 index 000000000000..538bb2b4b707 --- /dev/null +++ b/changelog/24472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Add new reload/:plugin_name API to reload database plugins by name for a specific mount. 
+``` diff --git a/changelog/24476.txt b/changelog/24476.txt new file mode 100644 index 000000000000..797ed9a48d47 --- /dev/null +++ b/changelog/24476.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: improve accessibility - color contrast, labels, and automatic testing +``` diff --git a/changelog/24479.txt b/changelog/24479.txt new file mode 100644 index 000000000000..e053e74d6793 --- /dev/null +++ b/changelog/24479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/token-capabilities: allow using accessor when listing token capabilities on a path +``` diff --git a/changelog/24492.txt b/changelog/24492.txt new file mode 100644 index 000000000000..d61c901a2c14 --- /dev/null +++ b/changelog/24492.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix navigation items shown to user when chroot_namespace configured +``` diff --git a/changelog/24512.txt b/changelog/24512.txt new file mode 100644 index 000000000000..efed04a22535 --- /dev/null +++ b/changelog/24512.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: Add a warning to the response from sys/plugins/reload/backend if no plugins were reloaded. +``` +```release-note:improvement +secrets/database: Support reloading named database plugins using the sys/plugins/reload/backend API endpoint. +``` diff --git a/changelog/24513.txt b/changelog/24513.txt new file mode 100644 index 000000000000..41b47f6be4e5 --- /dev/null +++ b/changelog/24513.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix KV v2 details view defaulting to JSON view when secret value includes `{` +``` diff --git a/changelog/24529.txt b/changelog/24529.txt new file mode 100644 index 000000000000..97d6904f1442 --- /dev/null +++ b/changelog/24529.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Update references to Key Value secrets engine from 'K/V' to 'KV' +``` diff --git a/changelog/24530.txt b/changelog/24530.txt new file mode 100644 index 000000000000..12525d48b87f --- /dev/null +++ b/changelog/24530.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: obscure JSON values when KV v2 secret has nested objects +``` diff --git a/changelog/24548.txt b/changelog/24548.txt new file mode 100644 index 000000000000..59882fb11b6e --- /dev/null +++ b/changelog/24548.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/template: Added max_connections_per_host to limit total number of connections per Vault host. +``` diff --git a/changelog/24549.txt b/changelog/24549.txt new file mode 100644 index 000000000000..6838b024c782 --- /dev/null +++ b/changelog/24549.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status. 
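The `agent/template` improvement above (changelog/24548.txt) caps connections per Vault host during template rendering. A sketch assuming the option belongs in the `template_config` stanza:

```hcl
# Vault Agent template sketch; placing max_connections_per_host inside
# template_config is an assumption based on the entry's agent/template scope.
template_config {
  exit_on_retry_failure    = true
  max_connections_per_host = 10
}

template {
  source      = "/etc/vault/templates/app.ctmpl"
  destination = "/etc/app/config.json"
}
```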
+``` diff --git a/changelog/24558.txt b/changelog/24558.txt new file mode 100644 index 000000000000..83d13d760863 --- /dev/null +++ b/changelog/24558.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Audit Filtering**: Audit devices support expression-based filter rules (powered by go-bexpr) to determine which entries are written to the audit log. +``` diff --git a/changelog/24616.txt b/changelog/24616.txt new file mode 100644 index 000000000000..54f0f1edfcd8 --- /dev/null +++ b/changelog/24616.txt @@ -0,0 +1,3 @@ +```release-note:bug +fairshare: fix a race condition in JobManager.GetWorkerCounts +``` \ No newline at end of file diff --git a/changelog/24649.txt b/changelog/24649.txt new file mode 100644 index 000000000000..2e0161e20d1d --- /dev/null +++ b/changelog/24649.txt @@ -0,0 +1,3 @@ +```release-note:bug +cassandra: Update Cassandra to set consistency prior to calling CreateSession, ensuring the consistency setting is correct when opening a connection. +``` diff --git a/changelog/24660.txt b/changelog/24660.txt new file mode 100644 index 000000000000..415944299e1a --- /dev/null +++ b/changelog/24660.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: The UI can now be used to create or update database roles by an operator without permission on the database connection. +``` diff --git a/changelog/24667.txt b/changelog/24667.txt new file mode 100644 index 000000000000..b3e83d71f49b --- /dev/null +++ b/changelog/24667.txt @@ -0,0 +1,6 @@ +```release-note:improvement +agent: Added new namespace top level configuration parameter, which can be used to make requests made by Agent go to that namespace. +``` +```release-note:improvement +proxy: Added new namespace top level configuration parameter, and prepend_configured_namespace API Proxy configuration parameter, which can be used to have requests made to Proxy proxied to that namespace. +``` diff --git a/changelog/24686.txt b/changelog/24686.txt new file mode 100644 index 000000000000..30ef696f491e --- /dev/null +++ b/changelog/24686.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix incorrectly calculated capabilities on PKI issuer endpoints +``` diff --git a/changelog/24697.txt b/changelog/24697.txt new file mode 100644 index 000000000000..49492d19b290 --- /dev/null +++ b/changelog/24697.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes input for jwks_ca_pem when configuring a JWT auth method +``` \ No newline at end of file diff --git a/changelog/24710.txt b/changelog/24710.txt new file mode 100644 index 000000000000..4985cda86580 --- /dev/null +++ b/changelog/24710.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Include secret_syncs in activity log responses +``` \ No newline at end of file diff --git a/changelog/24718.txt b/changelog/24718.txt new file mode 100644 index 000000000000..990de52941f9 --- /dev/null +++ b/changelog/24718.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Database Event Notifications**: The database plugin now emits event notifications.
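The Agent and Proxy `namespace` entries above (changelog/24667.txt) describe a top-level configuration parameter. A Proxy-side sketch; `prepend_configured_namespace` is named by the entry itself, and its placement under `api_proxy` follows the entry's wording:

```hcl
# Vault Proxy sketch: direct Proxy-originated requests to a namespace.
namespace = "admin/team-a"

api_proxy {
  use_auto_auth_token = true
  # Prepend the configured namespace to proxied client requests.
  prepend_configured_namespace = true
}
```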
+``` diff --git a/changelog/24752.txt b/changelog/24752.txt new file mode 100644 index 000000000000..736684af282a --- /dev/null +++ b/changelog/24752.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Separates out client counts dashboard to overview and entity/non-entity tabs +``` \ No newline at end of file diff --git a/changelog/24790.txt b/changelog/24790.txt new file mode 100644 index 000000000000..14bc913c84a0 --- /dev/null +++ b/changelog/24790.txt @@ -0,0 +1,6 @@ +```release-note:bug +agent: Fixed incorrect parsing of boolean environment variables for configuration. +``` +```release-note:bug +proxy: Fixed incorrect parsing of boolean environment variables for configuration. +``` \ No newline at end of file diff --git a/changelog/24823.txt b/changelog/24823.txt new file mode 100644 index 000000000000..a15c8c7f1513 --- /dev/null +++ b/changelog/24823.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Update icons to use Flight icons where available. +``` \ No newline at end of file diff --git a/changelog/24864.txt b/changelog/24864.txt new file mode 100644 index 000000000000..c29db4f54c05 --- /dev/null +++ b/changelog/24864.txt @@ -0,0 +1,3 @@ +```release-note:change +plugins: `/sys/plugins/runtimes/catalog` response will always include a list of "runtimes" in the response, even if empty. +``` diff --git a/changelog/24878.txt b/changelog/24878.txt new file mode 100644 index 000000000000..d7f03e4d0532 --- /dev/null +++ b/changelog/24878.txt @@ -0,0 +1,6 @@ +```release-note:improvement +plugins: New API `sys/plugins/reload/:type/:name` available in the root namespace for reloading a specific plugin across all namespaces. +``` +```release-note:change +cli: Using `vault plugin reload` with `-plugin` in the root namespace will now reload the plugin across all namespaces instead of just the root namespace. +``` diff --git a/changelog/24891.txt b/changelog/24891.txt new file mode 100644 index 000000000000..6f84e14290a5 --- /dev/null +++ b/changelog/24891.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/pkcs7: Fix slice out-of-bounds panic +``` diff --git a/changelog/24898.txt b/changelog/24898.txt new file mode 100644 index 000000000000..8180d72d8e25 --- /dev/null +++ b/changelog/24898.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/tokens: adds plugin issuer with openid-configuration and keys APIs +``` \ No newline at end of file diff --git a/changelog/24925.txt b/changelog/24925.txt new file mode 100644 index 000000000000..7bce8d0bdebc --- /dev/null +++ b/changelog/24925.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add identity token helpers to consistently apply new plugin WIF fields across integrations. 
+``` \ No newline at end of file diff --git a/changelog/24929.txt b/changelog/24929.txt new file mode 100644 index 000000000000..c6eac214f618 --- /dev/null +++ b/changelog/24929.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: adds new method to system view to allow plugins to request identity tokens +``` \ No newline at end of file diff --git a/changelog/24947.txt b/changelog/24947.txt new file mode 100644 index 000000000000..498158e2c530 --- /dev/null +++ b/changelog/24947.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed minor bugs with database secrets engine +``` \ No newline at end of file diff --git a/changelog/24954.txt b/changelog/24954.txt new file mode 100644 index 000000000000..8c023f1b57e5 --- /dev/null +++ b/changelog/24954.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: upgrade github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 to +support azure workload identities. +``` \ No newline at end of file diff --git a/changelog/24962.txt b/changelog/24962.txt new file mode 100644 index 000000000000..7a7cffb19d65 --- /dev/null +++ b/changelog/24962.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys: adds configuration of the key used to sign plugin identity tokens during mount enable and tune +``` \ No newline at end of file diff --git a/changelog/24968.txt b/changelog/24968.txt new file mode 100644 index 000000000000..47c71eaa82e5 --- /dev/null +++ b/changelog/24968.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Fix bug where use of 'log_raw' option could result in other devices logging raw audit data +``` diff --git a/changelog/24972.txt b/changelog/24972.txt new file mode 100644 index 000000000000..b2a7bda303cf --- /dev/null +++ b/changelog/24972.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.19.0 +``` diff --git a/changelog/24978.txt b/changelog/24978.txt new file mode 100644 index 000000000000..8bad557e316e --- /dev/null +++ b/changelog/24978.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Added new `plugin_tmpdir` config option for containerized plugins, in addition to the existing `VAULT_PLUGIN_TMPDIR` environment variable. +``` diff --git a/changelog/24979.txt b/changelog/24979.txt new file mode 100644 index 000000000000..bbbcedce077a --- /dev/null +++ b/changelog/24979.txt @@ -0,0 +1,3 @@ +```release-note:improvement +oidc/provider: Adds `code_challenge_methods_supported` to OpenID Connect Metadata +``` \ No newline at end of file diff --git a/changelog/24980.txt b/changelog/24980.txt new file mode 100644 index 000000000000..536bdb32c652 --- /dev/null +++ b/changelog/24980.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: adds plugin identity token to enable and tune commands for secret engines and auth methods +``` \ No newline at end of file diff --git a/changelog/24987.txt b/changelog/24987.txt new file mode 100644 index 000000000000..2eecf033f4d8 --- /dev/null +++ b/changelog/24987.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Identity Tokens**: Adds secret-less configuration of AWS secret engine using web identity federation. 
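The `plugin_tmpdir` entry above (changelog/24978.txt) gives the `VAULT_PLUGIN_TMPDIR` environment variable an HCL counterpart for containerized plugins. A short server config sketch:

```hcl
# Server config sketch: where containerized plugins place their
# communication files (equivalent to VAULT_PLUGIN_TMPDIR).
plugin_directory = "/opt/vault/plugins"
plugin_tmpdir    = "/var/run/vault/plugin-tmp"
```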
+``` diff --git a/changelog/24990.txt b/changelog/24990.txt new file mode 100644 index 000000000000..8079a4605c50 --- /dev/null +++ b/changelog/24990.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: fixes plugin register CLI failure to error when plugin image doesn't exist +``` \ No newline at end of file diff --git a/changelog/24991.txt b/changelog/24991.txt new file mode 100644 index 000000000000..28df55379bee --- /dev/null +++ b/changelog/24991.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Add support for larger transactions when using raft storage. +``` diff --git a/changelog/25001.txt b/changelog/25001.txt new file mode 100644 index 000000000000..de5f82d05562 --- /dev/null +++ b/changelog/25001.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Allows users to search within KV v2 directories from the Dashboard's quick action card. +``` \ No newline at end of file diff --git a/changelog/25004.txt b/changelog/25004.txt new file mode 100644 index 000000000000..9836a6f26aaf --- /dev/null +++ b/changelog/25004.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit/socket: Provide socket based audit backends with 'prefix' configuration option when supplied. +``` \ No newline at end of file diff --git a/changelog/25014.txt b/changelog/25014.txt new file mode 100644 index 000000000000..780da7a02b0f --- /dev/null +++ b/changelog/25014.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.16.1 +``` diff --git a/changelog/25020.txt b/changelog/25020.txt new file mode 100644 index 000000000000..de1bab99fe01 --- /dev/null +++ b/changelog/25020.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.9.1 +``` diff --git a/changelog/25040.txt b/changelog/25040.txt new file mode 100644 index 000000000000..cca7f2b7f9c2 --- /dev/null +++ b/changelog/25040.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.11.3 +``` diff --git a/changelog/25058.txt b/changelog/25058.txt new file mode 100644 index 000000000000..bd2a704cf0ce --- /dev/null +++ b/changelog/25058.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.16.2 +``` diff --git a/changelog/25093.txt b/changelog/25093.txt new file mode 100644 index 000000000000..a0691e03e14c --- /dev/null +++ b/changelog/25093.txt @@ -0,0 +1,5 @@ +```release-note:feature +**Request Limiter (enterprise)**: Add adaptive concurrency limits to +write-based HTTP methods and special-case `pki/issue` requests to prevent +overloading the Vault server. +``` diff --git a/changelog/25095.txt b/changelog/25095.txt new file mode 100644 index 000000000000..69251984a99a --- /dev/null +++ b/changelog/25095.txt @@ -0,0 +1,3 @@ +```release-note:improvement +limits: Introduce a reloadable opt-in configuration for the Request Limiter. +``` diff --git a/changelog/25098.txt b/changelog/25098.txt new file mode 100644 index 000000000000..ab487d63148b --- /dev/null +++ b/changelog/25098.txt @@ -0,0 +1,4 @@ +```release-note:improvement +limits: Add a listener configuration option `disable_request_limiter` to allow +disabling the request limiter per-listener. +``` diff --git a/changelog/25105.txt b/changelog/25105.txt new file mode 100644 index 000000000000..4a9ae100c3e3 --- /dev/null +++ b/changelog/25105.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins/database: Reading connection config at `database/config/:name` will now return a computed `running_plugin_version` field if a non-builtin version is running. 
+``` +```release-note:improvement +plugins: Add new pin version APIs to enforce all plugins of a specific type and name to run the same version. +``` diff --git a/changelog/25106.txt b/changelog/25106.txt new file mode 100644 index 000000000000..d861b1a98109 --- /dev/null +++ b/changelog/25106.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Allows users to dismiss the resultant-acl banner. +``` \ No newline at end of file diff --git a/changelog/25128.txt b/changelog/25128.txt new file mode 100644 index 000000000000..c9003ffea88d --- /dev/null +++ b/changelog/25128.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: By default, environment variables provided during plugin registration will now take precedence over system environment variables. +Use the environment variable `VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING=true` to opt out and keep higher preference for system environment +variables. When this flag is set, Vault will check during unseal for conflicts and print warnings for any plugins with environment +variables that conflict with system environment variables. +``` diff --git a/changelog/25143.txt b/changelog/25143.txt new file mode 100644 index 000000000000..17d9b713c1bd --- /dev/null +++ b/changelog/25143.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.10.0 +``` diff --git a/changelog/25152.txt b/changelog/25152.txt new file mode 100644 index 000000000000..7a89d5fc8289 --- /dev/null +++ b/changelog/25152.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Update the KV secret data when you change the version you're viewing of a nested secret. +``` \ No newline at end of file diff --git a/changelog/25171.txt b/changelog/25171.txt new file mode 100644 index 000000000000..b2c0424dbc82 --- /dev/null +++ b/changelog/25171.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): Improve seal unwrap performance when in degraded mode with one or more unhealthy seals. 
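The Request Limiter entries above (changelog/25095.txt and changelog/25098.txt) add a reloadable opt-in global configuration plus a per-listener opt-out. A sketch of the listener side, using the `disable_request_limiter` name given in the entry:

```hcl
# Listener sketch: exempt a trusted internal listener from the request limiter.
listener "tcp" {
  address                 = "10.0.0.5:8200"
  tls_disable             = true
  disable_request_limiter = true
}
```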
+``` \ No newline at end of file diff --git a/changelog/25173.txt b/changelog/25173.txt new file mode 100644 index 000000000000..4ca773c63dd7 --- /dev/null +++ b/changelog/25173.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Update plugin to v0.18.0 +``` diff --git a/changelog/25187.txt b/changelog/25187.txt new file mode 100644 index 000000000000..e90d97fdc406 --- /dev/null +++ b/changelog/25187.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Update plugin to v0.17.0 +``` diff --git a/changelog/25189.txt b/changelog/25189.txt new file mode 100644 index 000000000000..b537437ec161 --- /dev/null +++ b/changelog/25189.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.17.0 +``` diff --git a/changelog/25196.txt b/changelog/25196.txt new file mode 100644 index 000000000000..7ed634d7bc25 --- /dev/null +++ b/changelog/25196.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/cf: Update plugin to v0.16.0 +``` diff --git a/changelog/25204.txt b/changelog/25204.txt new file mode 100644 index 000000000000..0e09b6ea830e --- /dev/null +++ b/changelog/25204.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.7.0 +``` diff --git a/changelog/25207.txt b/changelog/25207.txt new file mode 100644 index 000000000000..f67b9fa843de --- /dev/null +++ b/changelog/25207.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kubernetes: Update plugin to v0.18.0 +``` \ No newline at end of file diff --git a/changelog/25209.txt b/changelog/25209.txt new file mode 100644 index 000000000000..178a09cbc8d3 --- /dev/null +++ b/changelog/25209.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix inconsistent empty state action link styles +``` \ No newline at end of file diff --git a/changelog/25217.txt b/changelog/25217.txt new file mode 100644 index 000000000000..fb60850c0598 --- /dev/null +++ b/changelog/25217.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Update plugin to v0.17.0 +``` diff --git a/changelog/25231.txt b/changelog/25231.txt new file mode 100644 index 000000000000..315c0e80828a --- /dev/null +++ b/changelog/25231.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Update plugin to v0.16.0 +``` diff --git a/changelog/25232.txt b/changelog/25232.txt new file mode 100644 index 000000000000..9c76439b729b --- /dev/null +++ b/changelog/25232.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/kerberos: Update plugin to v0.11.0 +``` diff --git a/changelog/25233.txt b/changelog/25233.txt new file mode 100644 index 000000000000..dab2a92c96bc --- /dev/null +++ b/changelog/25233.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Update plugin to v0.16.2 +``` diff --git a/changelog/25235.txt b/changelog/25235.txt new file mode 100644 index 000000000000..d4df23ebf49b --- /dev/null +++ b/changelog/25235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not disable JSON display toggle for KV version 2 secrets +``` \ No newline at end of file diff --git a/changelog/25245.txt b/changelog/25245.txt new file mode 100644 index 000000000000..5bf3cc3cae04 --- /dev/null +++ b/changelog/25245.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/oci: Update plugin to v0.15.1 +``` diff --git a/changelog/25251.txt b/changelog/25251.txt new file mode 100644 index 000000000000..e737505e0903 --- /dev/null +++ b/changelog/25251.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.12.0 +``` diff --git a/changelog/25253.txt b/changelog/25253.txt new file mode 100644 index 000000000000..7b989009b9c3 --- /dev/null +++ 
b/changelog/25253.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.11.0 +``` diff --git a/changelog/25256.txt b/changelog/25256.txt new file mode 100644 index 000000000000..f616e7a487cf --- /dev/null +++ b/changelog/25256.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not show resultant-acl banner on namespaces a user has access to +``` \ No newline at end of file diff --git a/changelog/25257.txt b/changelog/25257.txt new file mode 100644 index 000000000000..ae200b764bcd --- /dev/null +++ b/changelog/25257.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Update plugin to v0.16.0 +``` diff --git a/changelog/25258.txt b/changelog/25258.txt new file mode 100644 index 000000000000..8bf8fb7411fd --- /dev/null +++ b/changelog/25258.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.17.0 +``` diff --git a/changelog/25263.txt b/changelog/25263.txt new file mode 100644 index 000000000000..aea6faf033ee --- /dev/null +++ b/changelog/25263.txt @@ -0,0 +1,3 @@ +```release-note:change +database/elasticsearch: Update plugin to v0.14.0 +``` diff --git a/changelog/25264.txt b/changelog/25264.txt new file mode 100644 index 000000000000..827c0ede41d1 --- /dev/null +++ b/changelog/25264.txt @@ -0,0 +1,3 @@ +```release-note:change +database/mongodbatlas: Update plugin to v0.11.0 +``` diff --git a/changelog/25269.txt b/changelog/25269.txt new file mode 100644 index 000000000000..706c5abe4c67 --- /dev/null +++ b/changelog/25269.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix copy button not working on masked input when value is not a string +``` \ No newline at end of file diff --git a/changelog/25275.txt b/changelog/25275.txt new file mode 100644 index 000000000000..34b04ec1a93c --- /dev/null +++ b/changelog/25275.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Update plugin to v0.10.1 +``` diff --git a/changelog/25277.txt b/changelog/25277.txt new file mode 100644 index 000000000000..200c136b17fe --- /dev/null +++ b/changelog/25277.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kv: Update plugin to v0.17.0 +``` diff --git a/changelog/25288.txt b/changelog/25288.txt new file mode 100644 index 000000000000..0b13a0b22073 --- /dev/null +++ b/changelog/25288.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/terraform: Update plugin to v0.7.5 +``` diff --git a/changelog/25289.txt b/changelog/25289.txt new file mode 100644 index 000000000000..a230871df50e --- /dev/null +++ b/changelog/25289.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis: Update plugin to v0.2.3 +``` diff --git a/changelog/25296.txt b/changelog/25296.txt new file mode 100644 index 000000000000..914080b26d11 --- /dev/null +++ b/changelog/25296.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Update plugin to v0.3.0 +``` diff --git a/changelog/25321.txt b/changelog/25321.txt new file mode 100644 index 000000000000..247861c69caa --- /dev/null +++ b/changelog/25321.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Use Hds::Dropdown component to replace list view popup menus +``` \ No newline at end of file diff --git a/changelog/25326.txt b/changelog/25326.txt new file mode 100644 index 000000000000..587636b055a0 --- /dev/null +++ b/changelog/25326.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.0 +``` diff --git a/changelog/25335.txt b/changelog/25335.txt new file mode 100644 index 000000000000..b931d47f4a7a --- /dev/null +++ b/changelog/25335.txt @@ -0,0 +1,3 @@ 
+```release-note:improvement +ui: redirect back to current route after reauthentication when token expires +``` diff --git a/changelog/25336.txt b/changelog/25336.txt new file mode 100644 index 000000000000..a1f32a444a4c --- /dev/null +++ b/changelog/25336.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: When provided an invalid input with hash_algorithm=none, a lock was not released properly before reporting an error, leading to deadlocks on a subsequent key configuration update. +``` diff --git a/changelog/25364.txt b/changelog/25364.txt new file mode 100644 index 000000000000..b56fe9fbae02 --- /dev/null +++ b/changelog/25364.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove unnecessary OpenAPI calls for unmanaged auth methods +``` diff --git a/changelog/25387.txt b/changelog/25387.txt new file mode 100644 index 000000000000..46f91f4879ef --- /dev/null +++ b/changelog/25387.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix issue where Vault Agent was unable to render KVv2 secrets with delete_version_after set. +``` diff --git a/changelog/25395.txt b/changelog/25395.txt new file mode 100644 index 000000000000..cd2ca5137e0f --- /dev/null +++ b/changelog/25395.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/file: Fix spurious deletion of storage keys ending with .temp +``` diff --git a/changelog/25399.txt b/changelog/25399.txt new file mode 100644 index 000000000000..d0b6405cc5fc --- /dev/null +++ b/changelog/25399.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix PKI ca_chain display so value can be copied to clipboard +``` diff --git a/changelog/25421.txt b/changelog/25421.txt new file mode 100644 index 000000000000..06adb1403e30 --- /dev/null +++ b/changelog/25421.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Cache trusted certs to reduce memory usage and improve performance of logins. +``` \ No newline at end of file diff --git a/changelog/25436.txt b/changelog/25436.txt new file mode 100644 index 000000000000..132af39f1466 --- /dev/null +++ b/changelog/25436.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add `deletion_allowed` param to transformations and include `tokenization` as a type option +``` diff --git a/changelog/25439.txt b/changelog/25439.txt new file mode 100644 index 000000000000..7b1775c9b996 --- /dev/null +++ b/changelog/25439.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Deleting a namespace that contains a rate limit quota no longer breaks replication +``` diff --git a/changelog/25443.txt b/changelog/25443.txt new file mode 100644 index 000000000000..301824d8105b --- /dev/null +++ b/changelog/25443.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Resolve potential race condition when auditing entries which use SSCT.
+``` \ No newline at end of file diff --git a/changelog/25448.txt b/changelog/25448.txt new file mode 100644 index 000000000000..537cc8cdd1f8 --- /dev/null +++ b/changelog/25448.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix a deadlock that can occur on performance secondary clusters when there are many mounts and a mount is deleted or filtered +``` diff --git a/changelog/25479.txt b/changelog/25479.txt new file mode 100644 index 000000000000..5c23d2bcb7ae --- /dev/null +++ b/changelog/25479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add error message when copy action fails +``` \ No newline at end of file diff --git a/changelog/25499.txt b/changelog/25499.txt new file mode 100644 index 000000000000..f2ef3e54aafd --- /dev/null +++ b/changelog/25499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add wrapper functions for GET /sys/mounts/:path and GET /sys/auth/:path +``` diff --git a/changelog/25500.txt b/changelog/25500.txt new file mode 100644 index 000000000000..22711e8f4478 --- /dev/null +++ b/changelog/25500.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add granularity param to sync destinations +``` diff --git a/changelog/25509.txt b/changelog/25509.txt new file mode 100644 index 000000000000..e668d1903dc0 --- /dev/null +++ b/changelog/25509.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fixing response fields for rekey operations +``` diff --git a/changelog/25524.txt b/changelog/25524.txt new file mode 100644 index 000000000000..0a46aa91d615 --- /dev/null +++ b/changelog/25524.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/openldap: Update plugin to v0.12.1 +``` diff --git a/changelog/25588.txt b/changelog/25588.txt new file mode 100644 index 000000000000..95a45130cd25 --- /dev/null +++ b/changelog/25588.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: replace popup menu on list items (namespaces, auth items, KMIP, K8S, LDAP) +``` \ No newline at end of file diff --git a/changelog/25605.txt b/changelog/25605.txt new file mode 100644 index 000000000000..a152ce45c221 --- /dev/null +++ b/changelog/25605.txt @@ -0,0 +1,3 @@ +```release-note:bug +audit: Handle a potential panic while formatting audit entries for an audit log +``` diff --git a/changelog/25614.txt b/changelog/25614.txt new file mode 100644 index 000000000000..852db412f929 --- /dev/null +++ b/changelog/25614.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with no active tab when viewing transit encryption key +``` \ No newline at end of file diff --git a/changelog/25636.txt b/changelog/25636.txt new file mode 100644 index 000000000000..d5528fb5df33 --- /dev/null +++ b/changelog/25636.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: make the best effort timeout for encryption count tracking persistence configurable via an environment variable. 
+``` \ No newline at end of file diff --git a/changelog/25640.txt b/changelog/25640.txt new file mode 100644 index 000000000000..8a213a1d8716 --- /dev/null +++ b/changelog/25640.txt @@ -0,0 +1,3 @@ +```release-note:change +events: Remove event notifications websocket endpoint in non-Enterprise +``` diff --git a/changelog/25646.txt b/changelog/25646.txt new file mode 100644 index 000000000000..d8c659a1dd82 --- /dev/null +++ b/changelog/25646.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds allowed_response_headers, plugin_version and user_lockout_config params to auth method configuration +``` \ No newline at end of file diff --git a/changelog/25649.txt b/changelog/25649.txt new file mode 100644 index 000000000000..2ce669886201 --- /dev/null +++ b/changelog/25649.txt @@ -0,0 +1,5 @@ +```release-note:security +auth/cert: compare public keys of trusted non-CA certificates with incoming +client certificates to prevent trusting certs with the same serial number +but not the same public/private key. +``` \ No newline at end of file diff --git a/changelog/25697.txt b/changelog/25697.txt new file mode 100644 index 000000000000..ecc2ac186766 --- /dev/null +++ b/changelog/25697.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/login: Fixed a potential deadlock when a login fails and user lockout is enabled. +``` diff --git a/changelog/25713.txt b/changelog/25713.txt new file mode 100644 index 000000000000..250045059ec5 --- /dev/null +++ b/changelog/25713.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/metrics: add metrics for secret sync client count +``` diff --git a/changelog/25751.txt b/changelog/25751.txt new file mode 100644 index 000000000000..cfde6d9de06e --- /dev/null +++ b/changelog/25751.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: include secret sync counts in the `vault operator usage` command output +``` \ No newline at end of file diff --git a/changelog/25766.txt b/changelog/25766.txt new file mode 100644 index 000000000000..7166fc3a3559 --- /dev/null +++ b/changelog/25766.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: call resultant-acl without namespace header when the user is mounted at the root namespace +``` diff --git a/changelog/25867.txt b/changelog/25867.txt new file mode 100644 index 000000000000..c7611aaa86c7 --- /dev/null +++ b/changelog/25867.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove user_lockout_config settings for unsupported methods +``` diff --git a/changelog/25874.txt b/changelog/25874.txt new file mode 100644 index 000000000000..bf9ae37f02ba --- /dev/null +++ b/changelog/25874.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove leading slash from KV version 2 secret paths +``` diff --git a/changelog/25912.txt b/changelog/25912.txt new file mode 100644 index 000000000000..fdb419c8f463 --- /dev/null +++ b/changelog/25912.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Allow validation with OCSP responses with no NextUpdate time +``` diff --git a/changelog/25937.txt b/changelog/25937.txt new file mode 100644 index 000000000000..d2ff2e8057cc --- /dev/null +++ b/changelog/25937.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.1 +``` diff --git a/changelog/25968.txt b/changelog/25968.txt new file mode 100644 index 000000000000..e048b706dea7 --- /dev/null +++ b/changelog/25968.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Do not require sudo for API wrapper functions GetAuth and GetAuthWithContext +``` diff --git a/changelog/25982.txt b/changelog/25982.txt new file mode 100644 index
000000000000..59a7d0512b77 --- /dev/null +++ b/changelog/25982.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Allow cert auth login attempts if ocsp_fail_open is true and OCSP servers are unreachable +``` diff --git a/changelog/25986.txt b/changelog/25986.txt new file mode 100644 index 000000000000..3f64fe3c871a --- /dev/null +++ b/changelog/25986.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Address an issue in which OCSP query responses were not cached +``` diff --git a/changelog/25999.txt b/changelog/25999.txt new file mode 100644 index 000000000000..5999f7976acf --- /dev/null +++ b/changelog/25999.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix kubernetes auth method roles tab +``` diff --git a/changelog/26088.txt b/changelog/26088.txt new file mode 100644 index 000000000000..1bce05fa8124 --- /dev/null +++ b/changelog/26088.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: timestamps across multiple audit devices for an audit entry will now match. +``` \ No newline at end of file diff --git a/changelog/26091.txt b/changelog/26091.txt new file mode 100644 index 000000000000..31a219247ae3 --- /dev/null +++ b/changelog/26091.txt @@ -0,0 +1,3 @@ +```release-note:security +auth/cert: validate OCSP response was signed by the expected issuer and serial number matched request +``` diff --git a/changelog/26147.txt b/changelog/26147.txt new file mode 100644 index 000000000000..efc179ee70fa --- /dev/null +++ b/changelog/26147.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/database: Fixed race condition where database mounts may leak connections +``` diff --git a/changelog/26166.txt b/changelog/26166.txt new file mode 100644 index 000000000000..430a5a5fec6f --- /dev/null +++ b/changelog/26166.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Only reload seal configuration when enable_multiseal is set to true. +``` diff --git a/changelog/26200.txt b/changelog/26200.txt new file mode 100644 index 000000000000..3d1e03a257fc --- /dev/null +++ b/changelog/26200.txt @@ -0,0 +1,6 @@ +```release-note:bug +auth/ldap: Fix login error missing entity alias attribute value. +``` +```release-note:bug +auth/ldap: Fix login error for group search anonymous bind. +``` diff --git a/changelog/26243.txt b/changelog/26243.txt new file mode 100644 index 000000000000..9a2dc3963491 --- /dev/null +++ b/changelog/26243.txt @@ -0,0 +1,4 @@ +```release-note:bug +cli: fixed a bug where the Vault CLI would error out if +HOME was not set. 
+``` diff --git a/changelog/26263.txt b/changelog/26263.txt new file mode 100644 index 000000000000..4d5eb1239357 --- /dev/null +++ b/changelog/26263.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: fixes cases where inputs did not have associated labels +``` \ No newline at end of file diff --git a/changelog/26291.txt b/changelog/26291.txt new file mode 100644 index 000000000000..f5fa6cca1e08 --- /dev/null +++ b/changelog/26291.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.2 +``` diff --git a/changelog/26325.txt b/changelog/26325.txt new file mode 100644 index 000000000000..cbfc6c1f9c64 --- /dev/null +++ b/changelog/26325.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixed a bug where the replication pages did not update display when navigating between DR and performance +``` diff --git a/changelog/26346.txt b/changelog/26346.txt new file mode 100644 index 000000000000..1f6a8a486a99 --- /dev/null +++ b/changelog/26346.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Update dependencies including D3 libraries +``` diff --git a/changelog/26381.txt b/changelog/26381.txt new file mode 100644 index 000000000000..c1d55e4781b5 --- /dev/null +++ b/changelog/26381.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/seal: During a seal reload through SIGHUP, only write updated seal barrier on an active node +``` diff --git a/changelog/26383.txt b/changelog/26383.txt new file mode 100644 index 000000000000..8b675a9ef416 --- /dev/null +++ b/changelog/26383.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: fixed a bug where LifetimeWatcher routines weren't respecting exponential backoff in the presence of unexpected errors +``` diff --git a/changelog/26396.txt b/changelog/26396.txt new file mode 100644 index 000000000000..7f66e5cd58cf --- /dev/null +++ b/changelog/26396.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: show banner instead of permission denied error when batch token is expired +``` diff --git a/changelog/26427.txt b/changelog/26427.txt new file mode 100644 index 000000000000..615e9e15f06a --- /dev/null +++ b/changelog/26427.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: `vault.namespace` no longer gets incorrectly overridden by `auto_auth.namespace`, if set +``` diff --git a/changelog/26464.txt b/changelog/26464.txt new file mode 100644 index 000000000000..c033d8cf4558 --- /dev/null +++ b/changelog/26464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/decompression: DecompressWithCanary will now chunk the decompression in memory to prevent loading it all at once. +``` diff --git a/changelog/26477.txt b/changelog/26477.txt new file mode 100644 index 000000000000..f24fca805cd2 --- /dev/null +++ b/changelog/26477.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: fixed validation bug which rejected ldap schemed URLs in crl_distribution_points. +``` \ No newline at end of file diff --git a/changelog/26485.txt b/changelog/26485.txt new file mode 100644 index 000000000000..6cc54cfb9984 --- /dev/null +++ b/changelog/26485.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes undefined start time in filename for downloaded client count attribution csv +``` diff --git a/changelog/26523.txt b/changelog/26523.txt new file mode 100644 index 000000000000..3b3ef6427d4b --- /dev/null +++ b/changelog/26523.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): fix bug where raft followers disagree with the seal type after returning to one seal from two. 
+``` diff --git a/changelog/26528.txt b/changelog/26528.txt new file mode 100644 index 000000000000..ce43d4c86dac --- /dev/null +++ b/changelog/26528.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Update plugin to v0.17.1 +``` \ No newline at end of file diff --git a/changelog/26607.txt b/changelog/26607.txt new file mode 100644 index 000000000000..b28c3d405977 --- /dev/null +++ b/changelog/26607.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix `redact_version` listener parameter being ignored for some OpenAPI related endpoints. +``` diff --git a/changelog/26616.txt b/changelog/26616.txt new file mode 100644 index 000000000000..af8e85e91940 --- /dev/null +++ b/changelog/26616.txt @@ -0,0 +1,5 @@ +```release-note:bug +core/audit: Audit logging a Vault request/response will now use a minimum 5 second context timeout. +If the existing context deadline occurs later than 5s in the future, it will be used, otherwise a +new context, separate from the original will be used. +``` \ No newline at end of file diff --git a/changelog/26790.txt b/changelog/26790.txt new file mode 100644 index 000000000000..593d2de67d97 --- /dev/null +++ b/changelog/26790.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Refresh model within a namespace on the Secrets Sync overview page. +``` \ No newline at end of file diff --git a/changelog/26844.txt b/changelog/26844.txt new file mode 100644 index 000000000000..49f7bf2f1611 --- /dev/null +++ b/changelog/26844.txt @@ -0,0 +1,3 @@ +```release-note:bug +auto-auth: Addressed issue where having no permissions to renew a renewable token caused auto-auth to attempt to renew constantly with no backoff +``` diff --git a/changelog/26858.txt b/changelog/26858.txt new file mode 100644 index 000000000000..911fd20c174d --- /dev/null +++ b/changelog/26858.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix broken help link in console for the web command. 
+``` diff --git a/changelog/26876.txt b/changelog/26876.txt new file mode 100644 index 000000000000..6522b0ecd9a6 --- /dev/null +++ b/changelog/26876.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Add missing field delegated_auth_accessors to GET /sys/mounts/:path API response +``` diff --git a/changelog/26890.txt b/changelog/26890.txt new file mode 100644 index 000000000000..74d06a9cf781 --- /dev/null +++ b/changelog/26890.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Update plugin to v0.20.3 +``` diff --git a/changelog/26896.txt b/changelog/26896.txt new file mode 100644 index 000000000000..6147953d0b27 --- /dev/null +++ b/changelog/26896.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Update vault-plugin-secrets-azure to 0.17.2 to include a bug fix for azure role creation +``` diff --git a/changelog/26985.txt b/changelog/26985.txt new file mode 100644 index 000000000000..7894bd3d407d --- /dev/null +++ b/changelog/26985.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Prevent perpetual loading screen when Vault needs initialization +``` diff --git a/changelog/26993.txt b/changelog/26993.txt new file mode 100644 index 000000000000..35acaa79a8ad --- /dev/null +++ b/changelog/26993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update PGP display and show error for Generate Operation Token flow with PGP +``` \ No newline at end of file diff --git a/changelog/27014.txt b/changelog/27014.txt new file mode 100644 index 000000000000..94f6ebbe075a --- /dev/null +++ b/changelog/27014.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Address a data race updating a seal's last seen healthy time attribute +``` diff --git a/changelog/27019.txt b/changelog/27019.txt new file mode 100644 index 000000000000..722e0d46c9ec --- /dev/null +++ b/changelog/27019.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix link to v2 generic secrets engine from secrets list page. +``` \ No newline at end of file diff --git a/changelog/27093.txt b/changelog/27093.txt new file mode 100644 index 000000000000..a24becec3eac --- /dev/null +++ b/changelog/27093.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix error in cross-signing using ed25519 keys +``` diff --git a/changelog/27094.txt b/changelog/27094.txt new file mode 100644 index 000000000000..9cd743f55f94 --- /dev/null +++ b/changelog/27094.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix KVv2 json editor to allow null values. +``` \ No newline at end of file diff --git a/changelog/27120.txt b/changelog/27120.txt new file mode 100644 index 000000000000..3a9630b986c5 --- /dev/null +++ b/changelog/27120.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix KVv2 cursor jumping inside json editor after initial input. +``` \ No newline at end of file diff --git a/changelog/27184.txt b/changelog/27184.txt new file mode 100644 index 000000000000..500045efb5af --- /dev/null +++ b/changelog/27184.txt @@ -0,0 +1,3 @@ +```release-note:change +core/identity: improve performance for secondary nodes receiving identity related updates through replication +``` diff --git a/changelog/27211.txt b/changelog/27211.txt new file mode 100644 index 000000000000..26bf725ebff3 --- /dev/null +++ b/changelog/27211.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Use 'hash_algorithm' parameter if present in HMAC verify requests. Otherwise fall back to deprecated 'algorithm' parameter. 
+``` diff --git a/changelog/6483.txt b/changelog/6483.txt new file mode 100644 index 000000000000..2f0dbed1fdc8 --- /dev/null +++ b/changelog/6483.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/consul: Consul service registration tags are now case-sensitive. +``` \ No newline at end of file diff --git a/changelog/README.md b/changelog/README.md index cbf841f6c77b..6c249c687fd1 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -24,7 +24,7 @@ Release notes are text files with three lines: might be warranted. - `deprecation` - Announcement of a planned future removal of a feature. Only use this if a deprecation notice also exists [in the - docs](https://www.vaultproject.io/docs/deprecation). + docs](https://developer.hashicorp.com/vault/docs/deprecation). - `feature` - Large topical additions for a major release. These are rarely in minor releases. Formatting for `feature` entries differs from normal changelog formatting - see the [new features @@ -36,6 +36,8 @@ Release notes are text files with three lines: 3. An ending code block. +If more than one area is impacted, use separate code blocks for each entry. + This should be in a file named after the pull request number (e.g., `12345.txt`). There are many examples in this folder; check one out if you're stuck! diff --git a/changelog/_22733.txt b/changelog/_22733.txt new file mode 100644 index 000000000000..039e423596fe --- /dev/null +++ b/changelog/_22733.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes long namespace names overflow in the sidebar +``` diff --git a/changelog/_23945.txt b/changelog/_23945.txt new file mode 100644 index 000000000000..030b677aec70 --- /dev/null +++ b/changelog/_23945.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Custom messages (enterprise)**: Introduces custom messages settings, allowing users to view, and operators to configure system-wide messages. +``` diff --git a/changelog/_go-ver-1130.txt b/changelog/_go-ver-1130.txt index 588b90c052da..c63e249c4588 100644 --- a/changelog/_go-ver-1130.txt +++ b/changelog/_go-ver-1130.txt @@ -1,3 +1,3 @@ ```release-note:change -core: Bump Go version to 1.19.4. +core: Bump Go version to 1.20. ``` diff --git a/changelog/_go-ver-1140.txt b/changelog/_go-ver-1140.txt new file mode 100644 index 000000000000..052a277ab431 --- /dev/null +++ b/changelog/_go-ver-1140.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.5. +``` diff --git a/changelog/_go-ver-1150.txt b/changelog/_go-ver-1150.txt new file mode 100644 index 000000000000..6df482655f34 --- /dev/null +++ b/changelog/_go-ver-1150.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.21.1. +``` diff --git a/changelog/_go-ver-1160.txt b/changelog/_go-ver-1160.txt new file mode 100644 index 000000000000..d86b533e7229 --- /dev/null +++ b/changelog/_go-ver-1160.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.21.8. +``` diff --git a/changelog/_go-ver-1162.txt b/changelog/_go-ver-1162.txt new file mode 100644 index 000000000000..7a2da6ded4c2 --- /dev/null +++ b/changelog/_go-ver-1162.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.21.9. +``` diff --git a/changelog/_go-ver-1163.txt b/changelog/_go-ver-1163.txt new file mode 100644 index 000000000000..e9b6aaf6acc5 --- /dev/null +++ b/changelog/_go-ver-1163.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.22.2. 
+``` diff --git a/changelog/changelog.tmpl b/changelog/changelog.tmpl index 4f2c9d2d09c7..648160cbf713 100644 --- a/changelog/changelog.tmpl +++ b/changelog/changelog.tmpl @@ -22,7 +22,7 @@ CHANGES: {{ end -}} {{- end -}} -{{- if .NotesByType.feature -}} +{{- if .NotesByType.feature }} FEATURES: {{range .NotesByType.feature -}} diff --git a/changelog/pki-ui-improvements.txt b/changelog/pki-ui-improvements.txt new file mode 100644 index 000000000000..d824033f2e3c --- /dev/null +++ b/changelog/pki-ui-improvements.txt @@ -0,0 +1,3 @@ +```release-note:feature +**NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience +``` \ No newline at end of file diff --git a/command/agent.go b/command/agent.go index 0edd96eb9f21..789e08d94f8f 100644 --- a/command/agent.go +++ b/command/agent.go @@ -1,48 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "context" "crypto/tls" + "errors" "flag" "fmt" "io" - "io/ioutil" "net" "net/http" "os" - "path/filepath" "sort" "strings" "sync" "time" - "github.com/hashicorp/vault/command/agent/sink/inmem" - systemd "github.com/coreos/go-systemd/daemon" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/cli" + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/command/agent/auth/alicloud" - "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/auth/aws" - "github.com/hashicorp/vault/command/agent/auth/azure" - "github.com/hashicorp/vault/command/agent/auth/cert" - "github.com/hashicorp/vault/command/agent/auth/cf" - "github.com/hashicorp/vault/command/agent/auth/gcp" - "github.com/hashicorp/vault/command/agent/auth/jwt" - "github.com/hashicorp/vault/command/agent/auth/kerberos" - "github.com/hashicorp/vault/command/agent/auth/kubernetes" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/cache/keymanager" agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agent/exec" "github.com/hashicorp/vault/command/agent/template" - "github.com/hashicorp/vault/command/agent/winsvc" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/useragent" @@ -52,9 +46,10 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/version" "github.com/kr/pretty" - "github.com/mitchellh/cli" 
"github.com/oklog/run" "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" "google.golang.org/grpc/test/bufconn" ) @@ -67,25 +62,32 @@ const ( // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate // that agent should exit after a single successful auth flagNameAgentExitAfterAuth = "exit-after-auth" + nameAgent = "agent" ) type AgentCommand struct { *BaseCommand logFlags logFlags + config *agentConfig.Config + ShutdownCh chan struct{} SighupCh chan struct{} + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + logWriter io.Writer logGate *gatedwriter.Writer - logger log.Logger + logger hclog.Logger // Telemetry object metricsHelper *metricsutil.MetricsHelper cleanupGuard sync.Once - startedCh chan (struct{}) // for tests + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests flagConfigs []string flagExitAfterAuth bool @@ -100,7 +102,7 @@ func (c *AgentCommand) Help() string { helpText := ` Usage: vault agent [options] - This command starts a Vault agent that can perform automatic authentication + This command starts a Vault Agent that can perform automatic authentication in certain environments. Start an agent with a configuration file: @@ -186,85 +188,45 @@ func (c *AgentCommand) Run(args []string) int { } // Validation - if len(c.flagConfigs) != 1 { - c.UI.Error("Must specify exactly one config path using -config") + if len(c.flagConfigs) < 1 { + c.UI.Error("Must specify exactly at least one config path using -config") return 1 } - // Load the configuration file - config, err := agentConfig.LoadConfig(c.flagConfigs[0]) + config, err := c.loadConfig(c.flagConfigs) if err != nil { - c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfigs[0], err)) + c.outputErrors(err) return 1 } - // Ensure at least one config was found. - if config == nil { - c.UI.Output(wrapAtLength( - "No configuration read. 
Please provide the configuration with the " + - "-config flag.")) - return 1 - } - - if config.AutoAuth == nil && config.Cache == nil { - c.UI.Error("No auto_auth or cache block found in config file") - return 1 - } if config.AutoAuth == nil { - c.UI.Info("No auto_auth block found in config file, not starting automatic authentication feature") + c.UI.Info("No auto_auth block found in config; the automatic authentication feature will not be started") } - c.updateConfig(f, config) - - // Parse all the log related config - logLevel, err := logging.ParseLogLevel(config.LogLevel) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - logFormat, err := logging.ParseLogFormat(config.LogFormat) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - logRotateBytes, err := parseutil.ParseInt(config.LogRotateBytes) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } + c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars + c.config = config - logRotateMaxFiles, err := parseutil.ParseInt(config.LogRotateMaxFiles) + l, err := c.newLogger() if err != nil { - c.UI.Error(err.Error()) + c.outputErrors(err) return 1 } - logCfg := &logging.LogConfig{ - Name: "vault-agent", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: int(logRotateBytes), - LogRotateMaxFiles: int(logRotateMaxFiles), - } + // Update the logger and then base the log writer on that logger. + // Log writer is supplied to consul-template runners for templates and execs. + // We want to ensure that consul-template will honor the settings, for example + // if the -log-format is JSON we want JSON, not a mix of JSON and non-JSON messages. + c.logger = l + c.logWriter = l.StandardWriter(&hclog.StandardLoggerOptions{ + InferLevels: true, + InferLevelsWithTimestamp: true, + }) - l, err := logging.Setup(logCfg, c.logWriter) - if err != nil { - c.UI.Error(err.Error()) - return 1 + // release log gate if the disable-gated-logs flag is set + if c.logFlags.flagDisableGatedLogs { + c.logGate.Flush() } - c.logger = l - infoKeys := make([]string, 0, 10) info := make(map[string]string) info["log level"] = config.LogLevel @@ -289,14 +251,14 @@ func (c *AgentCommand) Run(args []string) int { if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { c.UI.Output(fmt.Sprintf( "\nConfiguration:\n%s\n", - pretty.Sprint(*config))) + pretty.Sprint(*c.config))) } return 0 } - // Ignore any setting of agent's address. This client is used by the agent + // Ignore any setting of Agent's address. This client is used by the Agent // to reach out to Vault. This should never loop back to agent. - c.flagAgentAddress = "" + c.flagAgentProxyAddress = "" client, err := c.Client() if err != nil { c.UI.Error(fmt.Sprintf( @@ -305,7 +267,28 @@ func (c *AgentCommand) Run(args []string) int { return 1 } - // ctx and cancelFunc are passed to the AuthHandler, SinkServer, and + serverHealth, err := client.Sys().Health() + if err == nil { + // We don't exit on error here, as this is not worth stopping Agent over + serverVersion := serverHealth.Version + agentVersion := version.GetVersion().VersionNumber() + if serverVersion != agentVersion { + c.UI.Info("==> Note: Vault Agent version does not match Vault server version. 
" + + fmt.Sprintf("Vault Agent version: %s, Vault server version: %s", agentVersion, serverVersion)) + } + } + + if config.IsDefaultListerDefined() { + // Notably, we cannot know for sure if they are using the API proxy functionality unless + // we log on each API proxy call, which would be too noisy. + // A customer could have a listener defined but only be using e.g. the cache-clear API, + // even though the API proxy is something they have available. + c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + + "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + + "functionality, plan to move to Vault Proxy instead.") + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and // TemplateServer that periodically listen for ctx.Done() to fire and shut // down accordingly. ctx, cancelFunc := context.WithCancel(context.Background()) @@ -317,7 +300,7 @@ func (c *AgentCommand) Run(args []string) int { Ui: c.UI, ServiceName: "vault", DisplayName: "Vault", - UserAgent: useragent.String(), + UserAgent: useragent.AgentString(), ClusterName: config.ClusterName, }) if err != nil { @@ -326,14 +309,28 @@ func (c *AgentCommand) Run(args []string) int { } c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + var templateNamespace string + // This indicates whether the namespace for the client has been set by environment variable. + // If it has, we don't touch it + namespaceSetByEnvironmentVariable := client.Namespace() != "" + + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + client.SetNamespace(config.Vault.Namespace) + } + var method auth.AuthMethod var sinks []*sink.SinkConfig - var templateNamespace string if config.AutoAuth != nil { - if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { + // Note: This will only set namespace header to the value in config.AutoAuth.Method.Namespace + // only if it hasn't been set by config.Vault.Namespace above. In that case, the config value + // present at config.AutoAuth.Method.Namespace will still be used for auto-auth. 
+ if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { client.SetNamespace(config.AutoAuth.Method.Namespace) } - templateNamespace = client.Headers().Get(consts.NamespaceHeaderName) + templateNamespace = client.Namespace() + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + templateNamespace = config.Vault.Namespace + } sinkClient, err := client.CloneWithHeaders() if err != nil { @@ -364,7 +361,7 @@ func (c *AgentCommand) Run(args []string) int { } s, err := file.NewFileSink(config) if err != nil { - c.UI.Error(fmt.Errorf("Error creating file sink: %w", err).Error()) + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) return 1 } config.Sink = s @@ -380,35 +377,9 @@ func (c *AgentCommand) Run(args []string) int { MountPath: config.AutoAuth.Method.MountPath, Config: config.AutoAuth.Method.Config, } - switch config.AutoAuth.Method.Type { - case "alicloud": - method, err = alicloud.NewAliCloudAuthMethod(authConfig) - case "aws": - method, err = aws.NewAWSAuthMethod(authConfig) - case "azure": - method, err = azure.NewAzureAuthMethod(authConfig) - case "cert": - method, err = cert.NewCertAuthMethod(authConfig) - case "cf": - method, err = cf.NewCFAuthMethod(authConfig) - case "gcp": - method, err = gcp.NewGCPAuthMethod(authConfig) - case "jwt": - method, err = jwt.NewJWTAuthMethod(authConfig) - case "kerberos": - method, err = kerberos.NewKerberosAuthMethod(authConfig) - case "kubernetes": - method, err = kubernetes.NewKubernetesAuthMethod(authConfig) - case "approle": - method, err = approle.NewApproleAuthMethod(authConfig) - case "pcf": // Deprecated. - method, err = cf.NewCFAuthMethod(authConfig) - default: - c.UI.Error(fmt.Sprintf("Unknown auth method %q", config.AutoAuth.Method.Type)) - return 1 - } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) if err != nil { - c.UI.Error(fmt.Errorf("Error creating %s auth method: %w", config.AutoAuth.Method.Type, err).Error()) + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) return 1 } } @@ -417,7 +388,12 @@ func (c *AgentCommand) Run(args []string) int { // confuse the issue of retries for auth failures which have their own // config and are handled a bit differently. if os.Getenv(api.EnvVaultMaxRetries) == "" { - client.SetMaxRetries(config.Vault.Retry.NumRetries) + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } } enforceConsistency := cache.EnforceConsistencyNever @@ -499,7 +475,7 @@ func (c *AgentCommand) Run(args []string) int { // Output the header that the agent has started if !c.logFlags.flagCombineLogs { - c.UI.Output("==> Vault agent started! Log data will stream in below:\n") + c.UI.Output("==> Vault Agent started! 
Log data will stream in below:\n") } var leaseCache *cache.LeaseCache @@ -523,10 +499,12 @@ func (c *AgentCommand) Run(args []string) int { // The API proxy to be used, if listeners are configured apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: proxyClient, - Logger: apiProxyLogger, - EnforceConsistency: enforceConsistency, - WhenInconsistentAction: whenInconsistent, + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, + UserAgentString: useragent.AgentProxyString(), }) if err != nil { c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) @@ -540,10 +518,12 @@ func (c *AgentCommand) Run(args []string) int { // Create the lease cache proxier and set its underlying proxier to // the API proxier. leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: proxyClient, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: useragent.ProxyAPIProxyString(), }) if err != nil { c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) @@ -552,147 +532,14 @@ func (c *AgentCommand) Run(args []string) int { // Configure persistent storage and add to LeaseCache if config.Cache.Persist != nil { - if config.Cache.Persist.Path == "" { - c.UI.Error("must specify persistent cache path") - return 1 - } - - // Set AAD based on key protection type - var aad string - switch config.Cache.Persist.Type { - case "kubernetes": - aad, err = getServiceAccountJWT(config.Cache.Persist.ServiceAccountTokenFile) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to read service account token from %s: %s", config.Cache.Persist.ServiceAccountTokenFile, err)) - return 1 - } - default: - c.UI.Error(fmt.Sprintf("persistent key protection type %q not supported", config.Cache.Persist.Type)) - return 1 - } - - // Check if bolt file exists already - dbFileExists, err := cacheboltdb.DBFileExists(config.Cache.Persist.Path) + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) if err != nil { - c.UI.Error(fmt.Sprintf("failed to check if bolt file exists at path %s: %s", config.Cache.Persist.Path, err)) + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) return 1 } - if dbFileExists { - // Open the bolt file, but wait to setup Encryption - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error opening persistent cache: %v", err)) - return 1 - } - - // Get the token from bolt for retrieving the encryption key, - // then setup encryption so that restore is possible - token, err := ps.GetRetrievalToken() - if err != nil { - c.UI.Error(fmt.Sprintf("Error getting retrieval token from persistent cache: %v", err)) - } - - if err := ps.Close(); err != nil { - c.UI.Warn(fmt.Sprintf("Failed to close persistent cache file after getting retrieval token: %s", err)) - } - - km, err := keymanager.NewPassthroughKeyManager(ctx, token) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) - return 1 - } - - // Open the bolt file with the wrapper provided - ps, err = 
cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error opening persistent cache with wrapper: %v", err)) - return 1 - } - - // Restore anything in the persistent cache to the memory cache - if err := leaseCache.Restore(ctx, ps); err != nil { - c.UI.Error(fmt.Sprintf("Error restoring in-memory cache from persisted file: %v", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - cacheLogger.Info("loaded memcache from persistent storage") - - // Check for previous auto-auth token - oldTokenBytes, err := ps.GetAutoAuthToken(ctx) - if err != nil { - c.UI.Error(fmt.Sprintf("Error in fetching previous auto-auth token: %s", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - if len(oldTokenBytes) > 0 { - oldToken, err := cachememdb.Deserialize(oldTokenBytes) - if err != nil { - c.UI.Error(fmt.Sprintf("Error in deserializing previous auto-auth token cache entry: %s", err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - previousToken = oldToken.Token - } - - // If keep_after_import true, set persistent storage layer in - // leaseCache, else remove db file - if config.Cache.Persist.KeepAfterImport { - defer ps.Close() - leaseCache.SetPersistentStorage(ps) - } else { - if err := ps.Close(); err != nil { - c.UI.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err)) - } - dbFile := filepath.Join(config.Cache.Persist.Path, cacheboltdb.DatabaseFileName) - if err := os.Remove(dbFile); err != nil { - c.UI.Error(fmt.Sprintf("failed to remove persistent storage file %s: %s", dbFile, err)) - if config.Cache.Persist.ExitOnErr { - return 1 - } - } - } - } else { - km, err := keymanager.NewPassthroughKeyManager(ctx, nil) - if err != nil { - c.UI.Error(fmt.Sprintf("failed to configure persistence encryption for cache: %s", err)) - return 1 - } - ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: config.Cache.Persist.Path, - Logger: cacheLogger.Named("cacheboltdb"), - Wrapper: km.Wrapper(), - AAD: aad, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) - return 1 - } - cacheLogger.Info("configured persistent storage", "path", config.Cache.Persist.Path) - - // Stash the key material in bolt - token, err := km.RetrievalToken(ctx) - if err != nil { - c.UI.Error(fmt.Sprintf("Error getting persistent key: %s", err)) - return 1 - } - if err := ps.StoreRetrievalToken(token); err != nil { - c.UI.Error(fmt.Sprintf("Error setting key in persistent cache: %v", err)) - return 1 - } - - defer ps.Close() - leaseCache.SetPersistentStorage(ps) + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() } } } @@ -700,12 +547,16 @@ func (c *AgentCommand) Run(args []string) int { var listeners []net.Listener // If there are templates, add an in-process listener - if len(config.Templates) > 0 { + if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) } + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. 
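The in-process listener appended above for templates is backed by gRPC's bufconn package: a net.Listener implemented entirely with in-memory pipes, so template rendering can reach the agent's API proxy without binding a TCP port. The listener loop that follows registers the real listeners along with their TLS reload funcs; separate from that wiring, here is a minimal, self-contained sketch of the bufconn technique, assuming only the google.golang.org/grpc/test/bufconn package:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"

	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// An in-memory net.Listener; no TCP socket is ever opened.
	ln := bufconn.Listen(1024 * 1024)

	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, _ *http.Request) {
		io.WriteString(w, "pong")
	})
	go http.Serve(ln, mux)

	// Route every dial through the in-memory listener; host and port are ignored.
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return ln.DialContext(ctx)
		},
	}}

	resp, err := client.Get("http://bufconn/ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // pong
}
```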
+ c.tlsReloadFuncsLock.Lock() + for i, lnConfig := range config.Listeners { var ln net.Listener - var tlsConf *tls.Config + var tlsCfg *tls.Config if lnConfig.Type == listenerutil.BufConnType { inProcListener := bufconn.Listen(1024 * 1024) @@ -714,11 +565,18 @@ func (c *AgentCommand) Run(args []string) int { } ln = inProcListener } else { - ln, tlsConf, err = cache.StartListener(lnConfig) + lnBundle, err := cache.StartListener(lnConfig) if err != nil { c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + c.tlsReloadFuncsLock.Unlock() return 1 } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. + c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) } listeners = append(listeners, ln) @@ -733,6 +591,7 @@ func (c *AgentCommand) Run(args []string) int { }, leaseCache) if err != nil { c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() return 1 } sinks = append(sinks, &sink.SinkConfig{ @@ -743,7 +602,12 @@ func (c *AgentCommand) Run(args []string) int { proxyVaultToken = !config.APIProxy.ForceAutoAuthToken } - muxHandler := cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + } // Parse 'require_request_header' listener config option, and wrap // the request handler if necessary @@ -763,7 +627,7 @@ func (c *AgentCommand) Run(args []string) int { } scheme := "https://" - if tlsConf == nil { + if tlsCfg == nil { scheme = "http://" } if ln.Addr().Network() == "unix" { @@ -776,7 +640,7 @@ func (c *AgentCommand) Run(args []string) int { server := &http.Server{ Addr: ln.Addr().String(), - TLSConfig: tlsConf, + TLSConfig: tlsCfg, Handler: mux, ReadHeaderTimeout: 10 * time.Second, ReadTimeout: 30 * time.Second, @@ -787,6 +651,8 @@ func (c *AgentCommand) Run(args []string) int { go server.Serve(ln) } + c.tlsReloadFuncsLock.Unlock() + // Ensure that listeners are closed at all the exits listenerCloseFunc := func() { for _, ln := range listeners { @@ -800,28 +666,43 @@ func (c *AgentCommand) Run(args []string) int { close(c.startedCh) } - // Listen for signals - // TODO: implement support for SIGHUP reloading of configuration - // signal.Notify(c.signalCh) - var g run.Group + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Agent config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + // This run group watches for signal termination g.Add(func() error { for { select { case <-c.ShutdownCh: - c.UI.Output("==> Vault agent shutdown triggered") + c.UI.Output("==> Vault Agent shutdown triggered") // Notify systemd that the server is shutting down - c.notifySystemd(systemd.SdNotifyStopping) - // Let the lease cache know this is a shutdown; no need to evict - // everything + // Let the lease cache know this is a shutdown; no need to evict everything if leaseCache != nil { leaseCache.SetShuttingDown(true) } return nil case <-ctx.Done(): - c.notifySystemd(systemd.SdNotifyStopping) return nil case 
<-winsvc.ShutdownChannel(): return nil @@ -831,7 +712,8 @@ func (c *AgentCommand) Run(args []string) int { // Start auto-auth and sink servers if method != nil { - enableTokenCh := len(config.Templates) > 0 + enableTemplateTokenCh := len(config.Templates) > 0 + enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 // Auth Handler is going to set its own retry values, so we want to // work on a copy of the client to not affect other subsystems. @@ -841,6 +723,11 @@ func (c *AgentCommand) Run(args []string) int { return 1 } + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } + if config.DisableIdleConnsAutoAuth { ahClient.SetMaxIdleConnections(-1) } @@ -856,9 +743,12 @@ func (c *AgentCommand) Run(args []string) int { MinBackoff: config.AutoAuth.Method.MinBackoff, MaxBackoff: config.AutoAuth.Method.MaxBackoff, EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - EnableTemplateTokenCh: enableTokenCh, + EnableTemplateTokenCh: enableTemplateTokenCh, + EnableExecTokenCh: enableEnvTemplateTokenCh, Token: previousToken, ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.AgentAutoAuthString(), + MetricsSignifier: "agent", }) ss := sink.NewSinkServer(&sink.SinkServerConfig{ @@ -869,13 +759,25 @@ func (c *AgentCommand) Run(args []string) int { ts := template.NewServer(&template.ServerConfig{ Logger: c.logger.Named("template.server"), - LogLevel: logLevel, + LogLevel: c.logger.GetLevel(), LogWriter: c.logWriter, - AgentConfig: config, + AgentConfig: c.config, Namespace: templateNamespace, ExitAfterAuth: config.ExitAfterAuth, }) + es, err := exec.NewServer(&exec.ServerConfig{ + AgentConfig: c.config, + Namespace: templateNamespace, + Logger: c.logger.Named("exec.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + }) + if err != nil { + c.logger.Error("could not create exec server", "error", err) + return 1 + } + g.Add(func() error { return ah.Run(ctx, method) }, func(error) { @@ -930,17 +832,30 @@ func (c *AgentCommand) Run(args []string) int { ts.Stop() }) + g.Add(func() error { + return es.Run(ctx, ah.ExecTokenCh) + }, func(err error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + es.Close() + }) + } // Server configuration output padding := 24 sort.Strings(infoKeys) - c.UI.Output("==> Vault agent configuration:\n") + caser := cases.Title(language.English) + c.UI.Output("==> Vault Agent configuration:\n") for _, k := range infoKeys { c.UI.Output(fmt.Sprintf( "%s%s: %s", strings.Repeat(" ", padding-len(k)), - strings.Title(k), + caser.String(k), info[k])) } c.UI.Output("") @@ -963,21 +878,36 @@ func (c *AgentCommand) Run(args []string) int { } }() + var exitCode int if err := g.Run(); err != nil { - c.logger.Error("runtime error encountered", "error", err) - c.UI.Error("Error encountered during run, refer to logs for more details.") - return 1 + var processExitError *exec.ProcessExitError + if errors.As(err, &processExitError) { + exitCode = processExitError.ExitCode + } else { + exitCode = 1 + } + + if exitCode != 0 { + c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) + c.UI.Error("Error encountered during run, refer to logs for more details.") + } } - return 0 + c.notifySystemd(systemd.SdNotifyStopping) + + return exitCode } 
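The Run method above is organized around oklog/run actors: each g.Add call pairs a blocking execute function with an interrupt function, and g.Run blocks until the first actor returns, then invokes every interrupt function. That is what lets the SIGHUP reload watcher, the shutdown watcher, and the long-running servers coexist and stop together. A minimal sketch of the pattern under those assumptions, with a stand-in worker where the agent runs its sink, template, and exec servers:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/oklog/run"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var g run.Group

	// Actor 1: wait for an OS signal, mirroring the ShutdownCh watcher above.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	g.Add(func() error {
		select {
		case <-sigCh:
			fmt.Println("==> shutdown triggered")
			return nil
		case <-ctx.Done():
			return nil
		}
	}, func(error) {
		cancel()
	})

	// Actor 2: a stand-in long-running worker (a sink or template server in the agent).
	g.Add(func() error {
		for {
			select {
			case <-ctx.Done():
				return nil
			case <-time.After(time.Second):
				// periodic work would happen here
			}
		}
	}, func(error) {
		cancel()
	})

	// Blocks until one actor returns, then runs every interrupt func.
	if err := g.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```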
-// updateConfig ensures that the config object accurately reflects the desired +// applyConfigOverrides ensures that the config object accurately reflects the desired // settings as configured by the user. It applies the relevant config setting based // on the precedence (env var overrides file config, cli overrides env var). // It mutates the config object supplied. -func (c *AgentCommand) updateConfig(f *FlagSets, config *agentConfig.Config) { - f.updateLogConfig(config.SharedConfig) +func (c *AgentCommand) applyConfigOverrides(f *FlagSets, config *agentConfig.Config) { + if config.Vault == nil { + config.Vault = &agentConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) f.Visit(func(fl *flag.Flag) { if fl.Name == flagNameAgentExitAfterAuth { @@ -1102,7 +1032,12 @@ func (c *AgentCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { // Don't do anything as the flag is already set from the command line case flagEnvSet: // Use value from env var - *fVar.Target = flagEnvValue != "" + val, err := parseutil.ParseBool(flagEnvValue) + if err != nil { + c.logger.Error("error parsing bool from environment variable, using default instead", "environment variable", fVar.EnvVar, "provided value", flagEnvValue, "default", fVar.Default, "err", err) + val = fVar.Default + } + *fVar.Target = val case configVal: // Use value from config *fVar.Target = configVal @@ -1143,19 +1078,6 @@ func (c *AgentCommand) removePidFile(pidPath string) error { return os.Remove(pidPath) } -// GetServiceAccountJWT reads the service account jwt from `tokenFile`. Default is -// the default service account file path in kubernetes. -func getServiceAccountJWT(tokenFile string) (string, error) { - if len(tokenFile) == 0 { - tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" - } - token, err := ioutil.ReadFile(tokenFile) - if err != nil { - return "", err - } - return strings.TrimSpace(string(token)), nil -} - func (c *AgentCommand) handleMetrics() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { @@ -1181,7 +1103,7 @@ func (c *AgentCommand) handleMetrics() http.Handler { w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) switch v := resp.Data[logical.HTTPRawBody].(type) { case string: - w.WriteHeader((status)) + w.WriteHeader(status) w.Write([]byte(v)) case []byte: w.WriteHeader(status) @@ -1210,3 +1132,165 @@ func (c *AgentCommand) handleQuit(enabled bool) http.Handler { close(c.ShutdownCh) }) } + +// newLogger creates a logger based on parsed config field on the Agent Command struct. 
+
+// newLogger creates a logger based on the parsed config fields on the AgentCommand struct.
+func (c *AgentCommand) newLogger() (hclog.InterceptLogger, error) {
+	if c.config == nil {
+		return nil, fmt.Errorf("cannot create logger, no config")
+	}
+
+	var errs *multierror.Error
+
+	// Parse all the log related config
+	logLevel, err := logging.ParseLogLevel(c.config.LogLevel)
+	if err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	logFormat, err := logging.ParseLogFormat(c.config.LogFormat)
+	if err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration)
+	if err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	if errs != nil {
+		return nil, errs
+	}
+
+	logCfg, err := logging.NewLogConfig(nameAgent)
+	if err != nil {
+		return nil, err
+	}
+	logCfg.Name = nameAgent
+	logCfg.LogLevel = logLevel
+	logCfg.LogFormat = logFormat
+	logCfg.LogFilePath = c.config.LogFile
+	logCfg.LogRotateDuration = logRotateDuration
+	logCfg.LogRotateBytes = c.config.LogRotateBytes
+	logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles
+
+	l, err := logging.Setup(logCfg, c.logWriter)
+	if err != nil {
+		return nil, err
+	}
+
+	return l, nil
+}
+
+// loadConfig attempts to generate an Agent config from the file(s) specified.
+func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) {
+	var errs *multierror.Error
+	cfg := agentConfig.NewConfig()
+
+	for _, configPath := range paths {
+		configFromPath, err := agentConfig.LoadConfig(configPath)
+		if err != nil {
+			errs = multierror.Append(errs, fmt.Errorf("error loading configuration from %s: %w", configPath, err))
+		} else {
+			cfg = cfg.Merge(configFromPath)
+		}
+	}
+
+	if errs != nil {
+		return nil, errs
+	}
+
+	if err := cfg.ValidateConfig(); err != nil {
+		return nil, fmt.Errorf("error validating configuration: %w", err)
+	}
+
+	return cfg, nil
+}
+
+// reloadConfig will attempt to reload the config from file(s) and adjust certain
+// config values without requiring a restart of the Vault Agent.
+// If the config is retrieved without error, it is stored in the config field of the AgentCommand.
+// This operation is not atomic and may result in an updated config whose settings are only partially applied.
+// The error returned from this func may be a multierror.
+// This function will most likely be called when Vault Agent receives a SIGHUP signal.
+// Currently, only reloading the following is supported:
+// * log level
+// * TLS certs for listeners
+func (c *AgentCommand) reloadConfig(paths []string) error {
+	// Notify systemd that the server is reloading
+	c.notifySystemd(systemd.SdNotifyReloading)
+	defer c.notifySystemd(systemd.SdNotifyReady)
+
+	var errors error
+
+	// Reload the config
+	cfg, err := c.loadConfig(paths)
+	if err != nil {
+		// Return a single error, as we won't continue with a bad config and won't 'commit' it.
+		return err
+	}
+	c.config = cfg
+
+	// Update the log level
+	err = c.reloadLogLevel()
+	if err != nil {
+		errors = multierror.Append(errors, err)
+	}
+
+	// Update certs
+	err = c.reloadCerts()
+	if err != nil {
+		errors = multierror.Append(errors, err)
+	}
+
+	return errors
+}
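reloadConfig is driven by the agent's signal handling, which is not part of this hunk. As a rough illustration of that wiring, here is a hypothetical reload loop; the reload callback stands in for (*AgentCommand).reloadConfig and is not the agent's actual implementation:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// reloadOnHUP invokes reload each time the process receives SIGHUP,
// mirroring how the agent re-reads its config files on that signal.
func reloadOnHUP(reload func() error) {
	sighupCh := make(chan os.Signal, 4)
	signal.Notify(sighupCh, syscall.SIGHUP)
	go func() {
		for range sighupCh {
			if err := reload(); err != nil {
				fmt.Fprintf(os.Stderr, "config reload failed: %v\n", err)
			}
		}
	}()
}

func main() {
	reloadOnHUP(func() error {
		fmt.Println("reloading config")
		return nil
	})
	// Block forever; send SIGHUP to this process to trigger a reload.
	select {}
}
```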
+
+// reloadLogLevel will attempt to update the log level for the logger attached
+// to the AgentCommand struct using the value currently set in config.
+func (c *AgentCommand) reloadLogLevel() error {
+	logLevel, err := logging.ParseLogLevel(c.config.LogLevel)
+	if err != nil {
+		return err
+	}
+
+	c.logger.SetLevel(logLevel)
+
+	return nil
+}
+
+// reloadCerts will attempt to reload certificates using a reload func which
+// was provided when the listeners were configured; only funcs that were appended
+// to the AgentCommand slice will be invoked.
+// This function returns a multierror type so that every func can report an error
+// if it encounters one.
+func (c *AgentCommand) reloadCerts() error {
+	var errors error
+
+	c.tlsReloadFuncsLock.RLock()
+	defer c.tlsReloadFuncsLock.RUnlock()
+
+	for _, reloadFunc := range c.tlsReloadFuncs {
+		// Non-TLS listeners will have a nil reload func.
+		if reloadFunc != nil {
+			err := reloadFunc()
+			if err != nil {
+				errors = multierror.Append(errors, err)
+			}
+		}
+	}
+
+	return errors
+}
+
+// outputErrors will take an error or multierror and output each error to the UI
+func (c *AgentCommand) outputErrors(err error) {
+	if err != nil {
+		if me, ok := err.(*multierror.Error); ok {
+			for _, err := range me.Errors {
+				c.UI.Error(err.Error())
+			}
+		} else {
+			c.UI.Error(err.Error())
+		}
+	}
+}
diff --git a/command/agent/README.md b/command/agent/README.md
index 02ef02159f01..e46109810ef6 100644
--- a/command/agent/README.md
+++ b/command/agent/README.md
@@ -12,4 +12,4 @@ addressing the following challenges:
 
 See the usage documentation on the Vault website here:
 
-- https://www.vaultproject.io/docs/agent/
+- https://developer.hashicorp.com/vault/docs/agent-and-proxy/agent
diff --git a/command/agent/alicloud_end_to_end_test.go b/command/agent/alicloud_end_to_end_test.go
index 64610811292a..1337bfb8a86f 100644
--- a/command/agent/alicloud_end_to_end_test.go
+++ b/command/agent/alicloud_end_to_end_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -12,17 +15,15 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" - hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" vaultalicloud "github.com/hashicorp/vault-plugin-auth-alicloud" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentalicloud "github.com/hashicorp/vault/command/agent/auth/alicloud" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentalicloud "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -46,9 +47,7 @@ func TestAliCloudEndToEnd(t *testing.T) { } testhelpers.SkipUnlessEnvVarsSet(t, credNames) - logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "alicloud": vaultalicloud.Factory, }, @@ -88,7 +87,7 @@ func TestAliCloudEndToEnd(t *testing.T) { }() am, err := agentalicloud.NewAliCloudAuthMethod(&auth.AuthConfig{ - Logger: logger.Named("auth.alicloud"), + Logger: cluster.Logger.Named("auth.alicloud"), MountPath: "auth/alicloud", Config: map[string]interface{}{ "role": "test", @@ -101,7 +100,7 @@ func TestAliCloudEndToEnd(t *testing.T) { } ahConfig := &auth.AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), + Logger: cluster.Logger.Named("auth.handler"), Client: client, } @@ -130,7 +129,7 @@ func TestAliCloudEndToEnd(t *testing.T) { t.Logf("output: %s", tokenSinkFileName) config := &sink.SinkConfig{ - Logger: logger.Named("sink.file"), + Logger: cluster.Logger.Named("sink.file"), Config: map[string]interface{}{ "path": tokenSinkFileName, }, @@ -144,7 +143,7 @@ func TestAliCloudEndToEnd(t *testing.T) { config.Sink = fs ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: logger.Named("sink.server"), + Logger: cluster.Logger.Named("sink.server"), Client: client, }) go func() { diff --git a/command/agent/approle_end_to_end_test.go b/command/agent/approle_end_to_end_test.go index e3456b3b5c74..600049865342 100644 --- a/command/agent/approle_end_to_end_test.go +++ b/command/agent/approle_end_to_end_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -13,10 +16,10 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -69,7 +72,6 @@ func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID boo coreConfig := &vault.CoreConfig{ DisableMlock: true, DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -408,9 +410,6 @@ func TestAppRoleLongRoleName(t *testing.T) { approleName := strings.Repeat("a", 5000) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -474,9 +473,6 @@ func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, var err error logger := logging.NewVaultLogger(log.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, diff --git a/command/agent/auth/auth.go b/command/agent/auth/auth.go deleted file mode 100644 index 854052adc5cb..000000000000 --- a/command/agent/auth/auth.go +++ /dev/null @@ -1,445 +0,0 @@ -package auth - -import ( - "context" - "encoding/json" - "errors" - "math/rand" - "net/http" - "time" - - "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -const ( - defaultMinBackoff = 1 * time.Second - defaultMaxBackoff = 5 * time.Minute -) - -// AuthMethod is the interface that auto-auth methods implement for the agent -// to use. -type AuthMethod interface { - // Authenticate returns a mount path, header, request body, and error. - // The header may be nil if no special header is needed. - Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) - NewCreds() chan struct{} - CredSuccess() - Shutdown() -} - -// AuthMethodWithClient is an extended interface that can return an API client -// for use during the authentication call. 
-type AuthMethodWithClient interface { - AuthMethod - AuthClient(client *api.Client) (*api.Client, error) -} - -type AuthConfig struct { - Logger hclog.Logger - MountPath string - WrapTTL time.Duration - Config map[string]interface{} -} - -// AuthHandler is responsible for keeping a token alive and renewed and passing -// new tokens to the sink server -type AuthHandler struct { - OutputCh chan string - TemplateTokenCh chan string - token string - logger hclog.Logger - client *api.Client - random *rand.Rand - wrapTTL time.Duration - maxBackoff time.Duration - minBackoff time.Duration - enableReauthOnNewCredentials bool - enableTemplateTokenCh bool - exitOnError bool -} - -type AuthHandlerConfig struct { - Logger hclog.Logger - Client *api.Client - WrapTTL time.Duration - MaxBackoff time.Duration - MinBackoff time.Duration - Token string - EnableReauthOnNewCredentials bool - EnableTemplateTokenCh bool - ExitOnError bool -} - -func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { - ah := &AuthHandler{ - // This is buffered so that if we try to output after the sink server - // has been shut down, during agent shutdown, we won't block - OutputCh: make(chan string, 1), - TemplateTokenCh: make(chan string, 1), - token: conf.Token, - logger: conf.Logger, - client: conf.Client, - random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - wrapTTL: conf.WrapTTL, - minBackoff: conf.MinBackoff, - maxBackoff: conf.MaxBackoff, - enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, - enableTemplateTokenCh: conf.EnableTemplateTokenCh, - exitOnError: conf.ExitOnError, - } - - return ah -} - -func backoff(ctx context.Context, backoff *agentBackoff) bool { - if backoff.exitOnErr { - return false - } - - select { - case <-time.After(backoff.current): - case <-ctx.Done(): - } - - // Increase exponential backoff for the next time if we don't - // successfully auth/renew/etc. 
- backoff.next() - return true -} - -func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { - if am == nil { - return errors.New("auth handler: nil auth method") - } - - if ah.minBackoff <= 0 { - ah.minBackoff = defaultMinBackoff - } - - backoffCfg := newAgentBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) - - if backoffCfg.min >= backoffCfg.max { - return errors.New("auth handler: min_backoff cannot be greater than max_backoff") - } - - ah.logger.Info("starting auth handler") - defer func() { - am.Shutdown() - close(ah.OutputCh) - close(ah.TemplateTokenCh) - ah.logger.Info("auth handler stopped") - }() - - credCh := am.NewCreds() - if !ah.enableReauthOnNewCredentials { - realCredCh := credCh - credCh = nil - if realCredCh != nil { - go func() { - for { - select { - case <-ctx.Done(): - return - case <-realCredCh: - } - } - }() - } - } - if credCh == nil { - credCh = make(chan struct{}) - } - - var watcher *api.LifetimeWatcher - first := true - - for { - select { - case <-ctx.Done(): - return nil - - default: - } - - var clientToUse *api.Client - var err error - var path string - var data map[string]interface{} - var header http.Header - - switch am.(type) { - case AuthMethodWithClient: - clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) - if err != nil { - ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoff) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - - return err - } - default: - clientToUse = ah.client - } - - // Disable retry on the client to ensure our backoffOrQuit function is - // the only source of retry/backoff. - clientToUse.SetMaxRetries(0) - - var secret *api.Secret = new(api.Secret) - if first && ah.token != "" { - ah.logger.Debug("using preloaded token") - - first = false - ah.logger.Debug("lookup-self with preloaded token") - clientToUse.SetToken(ah.token) - - secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) - if err != nil { - ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - duration, _ := secret.Data["ttl"].(json.Number).Int64() - secret.Auth = &api.SecretAuth{ - ClientToken: secret.Data["id"].(string), - LeaseDuration: int(duration), - Renewable: secret.Data["renewable"].(bool), - } - } else { - ah.logger.Info("authenticating") - - path, header, data, err = am.Authenticate(ctx, ah.client) - if err != nil { - ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - if ah.wrapTTL > 0 { - wrapClient, err := clientToUse.Clone() - if err != nil { - ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrapClient.SetWrappingLookupFunc(func(string, string) string { - return ah.wrapTTL.String() - }) - clientToUse = wrapClient - } - for key, values := range header { - for _, value := range values { - clientToUse.AddHeader(key, value) - } - } - - // This should only happen if there's no preloaded token (regular auto-auth login) - // or if a preloaded token has expired and is now switching to auto-auth. 
- if secret.Auth == nil { - secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) - // Check errors/sanity - if err != nil { - ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - } - - switch { - case ah.wrapTTL > 0: - if secret.WrapInfo == nil { - ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.WrapInfo.Token == "" { - ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) - if err != nil { - ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") - ah.OutputCh <- string(wrappedResp) - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- string(wrappedResp) - } - - am.CredSuccess() - backoffCfg.reset() - - select { - case <-ctx.Done(): - ah.logger.Info("shutdown triggered") - continue - - case <-credCh: - ah.logger.Info("auth method found new credentials, re-authenticating") - continue - } - - default: - if secret == nil || secret.Auth == nil { - ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - if secret.Auth.ClientToken == "" { - ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - ah.logger.Info("authentication successful, sending token to sinks") - ah.OutputCh <- secret.Auth.ClientToken - if ah.enableTemplateTokenCh { - ah.TemplateTokenCh <- secret.Auth.ClientToken - } - - am.CredSuccess() - backoffCfg.reset() - } - - if watcher != nil { - watcher.Stop() - } - - watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - - if backoff(ctx, backoffCfg) { - continue - } - return err - } - - // Start the renewal process - ah.logger.Info("starting renewal process") - metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) - go watcher.Renew() - - LifetimeWatcherLoop: - for { - select { - case <-ctx.Done(): - ah.logger.Info("shutdown triggered, stopping lifetime watcher") - watcher.Stop() - break LifetimeWatcherLoop - - case err := <-watcher.DoneCh(): - ah.logger.Info("lifetime watcher done channel triggered") - if err != nil { - metrics.IncrCounter([]string{"agent", "auth", "failure"}, 1) - ah.logger.Error("error renewing token", "error", err) - } - break LifetimeWatcherLoop - - case <-watcher.RenewCh(): - metrics.IncrCounter([]string{"agent", "auth", "success"}, 1) - ah.logger.Info("renewed auth token") - - case <-credCh: - ah.logger.Info("auth method found 
new credentials, re-authenticating") - break LifetimeWatcherLoop - } - } - } -} - -// agentBackoff tracks exponential backoff state. -type agentBackoff struct { - min time.Duration - max time.Duration - current time.Duration - exitOnErr bool -} - -func newAgentBackoff(min, max time.Duration, exitErr bool) *agentBackoff { - if max <= 0 { - max = defaultMaxBackoff - } - - if min <= 0 { - min = defaultMinBackoff - } - - return &agentBackoff{ - current: min, - max: max, - min: min, - exitOnErr: exitErr, - } -} - -// next determines the next backoff duration that is roughly twice -// the current value, capped to a max value, with a measure of randomness. -func (b *agentBackoff) next() { - maxBackoff := 2 * b.current - - if maxBackoff > b.max { - maxBackoff = b.max - } - - // Trim a random amount (0-25%) off the doubled duration - trim := rand.Int63n(int64(maxBackoff) / 4) - b.current = maxBackoff - time.Duration(trim) -} - -func (b *agentBackoff) reset() { - b.current = b.min -} - -func (b agentBackoff) String() string { - return b.current.Truncate(10 * time.Millisecond).String() -} diff --git a/command/agent/auth/auth_test.go b/command/agent/auth/auth_test.go deleted file mode 100644 index 9501342749bb..000000000000 --- a/command/agent/auth/auth_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package auth - -import ( - "context" - "net/http" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -type userpassTestMethod struct{} - -func newUserpassTestMethod(t *testing.T, client *api.Client) AuthMethod { - err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ - Type: "userpass", - Config: api.AuthConfigInput{ - DefaultLeaseTTL: "1s", - MaxLeaseTTL: "3s", - }, - }) - if err != nil { - t.Fatal(err) - } - - return &userpassTestMethod{} -} - -func (u *userpassTestMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - _, err := client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ - "password": "bar", - }) - if err != nil { - return "", nil, nil, err - } - return "auth/userpass/login/foo", nil, map[string]interface{}{ - "password": "bar", - }, nil -} - -func (u *userpassTestMethod) NewCreds() chan struct{} { - return nil -} - -func (u *userpassTestMethod) CredSuccess() { -} - -func (u *userpassTestMethod) Shutdown() { -} - -func TestAuthHandler(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - ctx, cancelFunc := context.WithCancel(context.Background()) - - ah := NewAuthHandler(&AuthHandlerConfig{ - Logger: logger.Named("auth.handler"), - Client: client, - }) - - am := newUserpassTestMethod(t, client) - errCh := make(chan error) - go func() { - errCh <- ah.Run(ctx, am) - }() - - // Consume tokens so we don't block - stopTime := time.Now().Add(5 * time.Second) - closed := false -consumption: - for { - select { - case err := 
<-errCh: - if err != nil { - t.Fatal(err) - } - break consumption - case <-ah.OutputCh: - case <-ah.TemplateTokenCh: - // Nothing - case <-time.After(stopTime.Sub(time.Now())): - if !closed { - cancelFunc() - closed = true - } - } - } -} - -func TestAgentBackoff(t *testing.T) { - max := 1024 * time.Second - backoff := newAgentBackoff(defaultMinBackoff, max, false) - - // Test initial value - if backoff.current != defaultMinBackoff { - t.Fatalf("expected 1s initial backoff, got: %v", backoff.current) - } - - // Test that backoff values are in expected range (75-100% of 2*previous) - for i := 0; i < 9; i++ { - old := backoff.current - backoff.next() - - expMax := 2 * old - expMin := 3 * expMax / 4 - - if backoff.current < expMin || backoff.current > expMax { - t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff) - } - } - - // Test that backoff is capped - for i := 0; i < 100; i++ { - backoff.next() - if backoff.current > max { - t.Fatalf("backoff exceeded max of 100s: %v", backoff) - } - } - - // Test reset - backoff.reset() - if backoff.current != defaultMinBackoff { - t.Fatalf("expected 1s backoff after reset, got: %v", backoff.current) - } -} - -func TestAgentMinBackoffCustom(t *testing.T) { - type test struct { - minBackoff time.Duration - want time.Duration - } - - tests := []test{ - {minBackoff: 0 * time.Second, want: 1 * time.Second}, - {minBackoff: 1 * time.Second, want: 1 * time.Second}, - {minBackoff: 5 * time.Second, want: 5 * time.Second}, - {minBackoff: 10 * time.Second, want: 10 * time.Second}, - } - - for _, test := range tests { - max := 1024 * time.Second - backoff := newAgentBackoff(test.minBackoff, max, false) - - // Test initial value - if backoff.current != test.want { - t.Fatalf("expected %d initial backoff, got: %v", test.want, backoff.current) - } - - // Test that backoff values are in expected range (75-100% of 2*previous) - for i := 0; i < 5; i++ { - old := backoff.current - backoff.next() - - expMax := 2 * old - expMin := 3 * expMax / 4 - - if backoff.current < expMin || backoff.current > expMax { - t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff) - } - } - - // Test that backoff is capped - for i := 0; i < 100; i++ { - backoff.next() - if backoff.current > max { - t.Fatalf("backoff exceeded max of 100s: %v", backoff) - } - } - - // Test reset - backoff.reset() - if backoff.current != test.want { - t.Fatalf("expected %d backoff after reset, got: %v", test.want, backoff.current) - } - } -} diff --git a/command/agent/auth/azure/azure.go b/command/agent/auth/azure/azure.go deleted file mode 100644 index 5554e72c2de1..000000000000 --- a/command/agent/auth/azure/azure.go +++ /dev/null @@ -1,204 +0,0 @@ -package azure - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net/http" - - cleanhttp "github.com/hashicorp/go-cleanhttp" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/helper/useragent" - "github.com/hashicorp/vault/sdk/helper/jsonutil" -) - -const ( - instanceEndpoint = "http://169.254.169.254/metadata/instance" - identityEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // minimum version 2018-02-01 needed for identity metadata - // regional availability: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service - apiVersion = "2018-02-01" -) - -type azureMethod struct { - logger hclog.Logger - mountPath string - - role string - resource 
string - objectID string - clientID string -} - -func NewAzureAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - a := &azureMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - } - - roleRaw, ok := conf.Config["role"] - if !ok { - return nil, errors.New("missing 'role' value") - } - a.role, ok = roleRaw.(string) - if !ok { - return nil, errors.New("could not convert 'role' config value to string") - } - - resourceRaw, ok := conf.Config["resource"] - if !ok { - return nil, errors.New("missing 'resource' value") - } - a.resource, ok = resourceRaw.(string) - if !ok { - return nil, errors.New("could not convert 'resource' config value to string") - } - - objectIDRaw, ok := conf.Config["object_id"] - if ok { - a.objectID, ok = objectIDRaw.(string) - if !ok { - return nil, errors.New("could not convert 'object_id' config value to string") - } - } - - clientIDRaw, ok := conf.Config["client_id"] - if ok { - a.clientID, ok = clientIDRaw.(string) - if !ok { - return nil, errors.New("could not convert 'client_id' config value to string") - } - } - - switch { - case a.role == "": - return nil, errors.New("'role' value is empty") - case a.resource == "": - return nil, errors.New("'resource' value is empty") - case a.objectID != "" && a.clientID != "": - return nil, errors.New("only one of 'object_id' or 'client_id' may be provided") - } - - return a, nil -} - -func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) { - a.logger.Trace("beginning authentication") - - // Fetch instance data - var instance struct { - Compute struct { - Name string - ResourceGroupName string - SubscriptionID string - VMScaleSetName string - } - } - - body, err := getMetadataInfo(ctx, instanceEndpoint, "", "", "") - if err != nil { - retErr = err - return - } - - err = jsonutil.DecodeJSON(body, &instance) - if err != nil { - retErr = fmt.Errorf("error parsing instance metadata response: %w", err) - return - } - - // Fetch JWT - var identity struct { - AccessToken string `json:"access_token"` - } - - body, err = getMetadataInfo(ctx, identityEndpoint, a.resource, a.objectID, a.clientID) - if err != nil { - retErr = err - return - } - - err = jsonutil.DecodeJSON(body, &identity) - if err != nil { - retErr = fmt.Errorf("error parsing identity metadata response: %w", err) - return - } - - // Attempt login - data := map[string]interface{}{ - "role": a.role, - "vm_name": instance.Compute.Name, - "vmss_name": instance.Compute.VMScaleSetName, - "resource_group_name": instance.Compute.ResourceGroupName, - "subscription_id": instance.Compute.SubscriptionID, - "jwt": identity.AccessToken, - } - - return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil -} - -func (a *azureMethod) NewCreds() chan struct{} { - return nil -} - -func (a *azureMethod) CredSuccess() { -} - -func (a *azureMethod) Shutdown() { -} - -func getMetadataInfo(ctx context.Context, endpoint, resource, objectID, clientID string) ([]byte, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, err - } - - q := req.URL.Query() - q.Add("api-version", apiVersion) - if resource != "" { - q.Add("resource", resource) - } - if objectID != "" { - q.Add("object_id", objectID) - } - if clientID != "" { - q.Add("client_id", clientID) - } - req.URL.RawQuery = q.Encode() - 
req.Header.Set("Metadata", "true") - req.Header.Set("User-Agent", useragent.String()) - req = req.WithContext(ctx) - - client := cleanhttp.DefaultClient() - resp, err := client.Do(req) - if err != nil { - return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err) - } - - if resp == nil { - return nil, fmt.Errorf("empty response fetching metadata from %s", endpoint) - } - - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("error response in metadata from %s: %s", endpoint, body) - } - - return body, nil -} diff --git a/command/agent/auth/cert/cert.go b/command/agent/auth/cert/cert.go deleted file mode 100644 index 2703aa8ecd58..000000000000 --- a/command/agent/auth/cert/cert.go +++ /dev/null @@ -1,146 +0,0 @@ -package cert - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/sdk/helper/consts" -) - -type certMethod struct { - logger hclog.Logger - mountPath string - name string - - caCert string - clientCert string - clientKey string - - // Client is the cached client to use if cert info was provided. - client *api.Client -} - -var _ auth.AuthMethodWithClient = &certMethod{} - -func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - - // Not concerned if the conf.Config is empty as the 'name' - // parameter is optional when using TLS Auth - - c := &certMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - } - - if conf.Config != nil { - nameRaw, ok := conf.Config["name"] - if !ok { - nameRaw = "" - } - c.name, ok = nameRaw.(string) - if !ok { - return nil, errors.New("could not convert 'name' config value to string") - } - - caCertRaw, ok := conf.Config["ca_cert"] - if ok { - c.caCert, ok = caCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'ca_cert' config value to string") - } - } - - clientCertRaw, ok := conf.Config["client_cert"] - if ok { - c.clientCert, ok = clientCertRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_file' config value to string") - } - } - - clientKeyRaw, ok := conf.Config["client_key"] - if ok { - c.clientKey, ok = clientKeyRaw.(string) - if !ok { - return nil, errors.New("could not convert 'cert_key' config value to string") - } - } - } - - return c, nil -} - -func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { - c.logger.Trace("beginning authentication") - - authMap := map[string]interface{}{} - - if c.name != "" { - authMap["name"] = c.name - } - - return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil -} - -func (c *certMethod) NewCreds() chan struct{} { - return nil -} - -func (c *certMethod) CredSuccess() {} - -func (c *certMethod) Shutdown() {} - -// AuthClient uses the existing client's address and returns a new client with -// the auto-auth method's certificate information if that's provided in its -// config map. 
-func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { - c.logger.Trace("deriving auth client to use") - - clientToAuth := client - - if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { - // Return cached client if present - if c.client != nil { - return c.client, nil - } - - config := api.DefaultConfig() - if config.Error != nil { - return nil, config.Error - } - config.Address = client.Address() - - t := &api.TLSConfig{ - CACert: c.caCert, - ClientCert: c.clientCert, - ClientKey: c.clientKey, - } - - // Setup TLS config - if err := config.ConfigureTLS(t); err != nil { - return nil, err - } - - var err error - clientToAuth, err = api.NewClient(config) - if err != nil { - return nil, err - } - if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { - clientToAuth.SetNamespace(ns) - } - - // Cache the client for future use - c.client = clientToAuth - } - - return clientToAuth, nil -} diff --git a/command/agent/auth/cert/cert_test.go b/command/agent/auth/cert/cert_test.go deleted file mode 100644 index 15ff8f4327f3..000000000000 --- a/command/agent/auth/cert/cert_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package cert - -import ( - "context" - "os" - "path" - "reflect" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" -) - -func TestCertAuthMethod_Authenticate(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "foo", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - loginPath, _, authMap, err := method.Authenticate(context.Background(), client) - if err != nil { - t.Fatal(err) - } - - expectedLoginPath := path.Join(config.MountPath, "/login") - if loginPath != expectedLoginPath { - t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) - } - - expectedAuthMap := map[string]interface{}{ - "name": config.Config["name"], - } - if !reflect.DeepEqual(authMap, expectedAuthMap) { - t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) - } -} - -func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "without-certs", - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client != clientToUse { - t.Fatal("error: expected AuthClient to return back original client") - } -} - -func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { - clientCert, err := os.Open("./test-fixtures/keys/cert.pem") - if err != nil { - t.Fatal(err) - } - defer clientCert.Close() - - clientKey, err := os.Open("./test-fixtures/keys/key.pem") - if err != nil { - t.Fatal(err) - } - defer clientKey.Close() - - config := &auth.AuthConfig{ - Logger: hclog.NewNullLogger(), - MountPath: "cert-test", - Config: map[string]interface{}{ - "name": "with-certs", - "client_cert": clientCert.Name(), - "client_key": clientKey.Name(), - }, - } - - method, err := NewCertAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - client, err := 
api.NewClient(nil) - if err != nil { - t.Fatal(err) - } - - clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if client == clientToUse { - t.Fatal("expected client from AuthClient to be different from original client") - } - - // Call AuthClient again to get back the cached client - cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) - if err != nil { - t.Fatal(err) - } - - if cachedClient != clientToUse { - t.Fatal("expected client from AuthClient to return back a cached client") - } -} diff --git a/command/agent/auth/jwt/jwt.go b/command/agent/auth/jwt/jwt.go deleted file mode 100644 index 8f088eb199e5..000000000000 --- a/command/agent/auth/jwt/jwt.go +++ /dev/null @@ -1,214 +0,0 @@ -package jwt - -import ( - "context" - "errors" - "fmt" - "io/fs" - "net/http" - "os" - "sync" - "sync/atomic" - "time" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - "github.com/hashicorp/vault/sdk/helper/parseutil" -) - -type jwtMethod struct { - logger hclog.Logger - path string - mountPath string - role string - removeJWTAfterReading bool - credsFound chan struct{} - watchCh chan string - stopCh chan struct{} - doneCh chan struct{} - credSuccessGate chan struct{} - ticker *time.Ticker - once *sync.Once - latestToken *atomic.Value -} - -// NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod -// interface for JWT auth. -func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { - if conf == nil { - return nil, errors.New("empty config") - } - if conf.Config == nil { - return nil, errors.New("empty config data") - } - - j := &jwtMethod{ - logger: conf.Logger, - mountPath: conf.MountPath, - removeJWTAfterReading: true, - credsFound: make(chan struct{}), - watchCh: make(chan string), - stopCh: make(chan struct{}), - doneCh: make(chan struct{}), - credSuccessGate: make(chan struct{}), - once: new(sync.Once), - latestToken: new(atomic.Value), - } - j.latestToken.Store("") - - pathRaw, ok := conf.Config["path"] - if !ok { - return nil, errors.New("missing 'path' value") - } - j.path, ok = pathRaw.(string) - if !ok { - return nil, errors.New("could not convert 'path' config value to string") - } - - roleRaw, ok := conf.Config["role"] - if !ok { - return nil, errors.New("missing 'role' value") - } - j.role, ok = roleRaw.(string) - if !ok { - return nil, errors.New("could not convert 'role' config value to string") - } - - if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { - removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) - if err != nil { - return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) - } - j.removeJWTAfterReading = removeJWTAfterReading - } - - switch { - case j.path == "": - return nil, errors.New("'path' value is empty") - case j.role == "": - return nil, errors.New("'role' value is empty") - } - - // If we don't delete the JWT after reading, use a slower reload period, - // otherwise we would re-read the whole file every 500ms, instead of just - // doing a stat on the file every 500ms. 
- readPeriod := 1 * time.Minute - if j.removeJWTAfterReading { - readPeriod = 500 * time.Millisecond - } - j.ticker = time.NewTicker(readPeriod) - - go j.runWatcher() - - j.logger.Info("jwt auth method created", "path", j.path) - - return j, nil -} - -func (j *jwtMethod) Authenticate(_ context.Context, _ *api.Client) (string, http.Header, map[string]interface{}, error) { - j.logger.Trace("beginning authentication") - - j.ingressToken() - - latestToken := j.latestToken.Load().(string) - if latestToken == "" { - return "", nil, nil, errors.New("latest known jwt is empty, cannot authenticate") - } - - return fmt.Sprintf("%s/login", j.mountPath), nil, map[string]interface{}{ - "role": j.role, - "jwt": latestToken, - }, nil -} - -func (j *jwtMethod) NewCreds() chan struct{} { - return j.credsFound -} - -func (j *jwtMethod) CredSuccess() { - j.once.Do(func() { - close(j.credSuccessGate) - }) -} - -func (j *jwtMethod) Shutdown() { - j.ticker.Stop() - close(j.stopCh) - <-j.doneCh -} - -func (j *jwtMethod) runWatcher() { - defer close(j.doneCh) - - select { - case <-j.stopCh: - return - - case <-j.credSuccessGate: - // We only start the next loop once we're initially successful, - // since at startup Authenticate will be called and we don't want - // to end up immediately reauthenticating by having found a new - // value - } - - for { - select { - case <-j.stopCh: - return - - case <-j.ticker.C: - latestToken := j.latestToken.Load().(string) - j.ingressToken() - newToken := j.latestToken.Load().(string) - if newToken != latestToken { - j.logger.Debug("new jwt file found") - j.credsFound <- struct{}{} - } - } - } -} - -func (j *jwtMethod) ingressToken() { - fi, err := os.Lstat(j.path) - if err != nil { - if os.IsNotExist(err) { - return - } - j.logger.Error("error encountered stat'ing jwt file", "error", err) - return - } - - // Check that the path refers to a file. - // If it's a symlink, it could still be a symlink to a directory, - // but os.ReadFile below will return a descriptive error. 
- switch mode := fi.Mode(); { - case mode.IsRegular(): - // regular file - case mode&fs.ModeSymlink != 0: - // symlink - default: - j.logger.Error("jwt file is not a regular file or symlink") - return - } - - token, err := os.ReadFile(j.path) - if err != nil { - j.logger.Error("failed to read jwt file", "error", err) - return - } - - switch len(token) { - case 0: - j.logger.Warn("empty jwt file read") - - default: - j.latestToken.Store(string(token)) - } - - if j.removeJWTAfterReading { - if err := os.Remove(j.path); err != nil { - j.logger.Error("error removing jwt file", "error", err) - } - } -} diff --git a/command/agent/auth/jwt/jwt_test.go b/command/agent/auth/jwt/jwt_test.go deleted file mode 100644 index 8e9a2ae86c13..000000000000 --- a/command/agent/auth/jwt/jwt_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package jwt - -import ( - "bytes" - "os" - "path" - "strings" - "sync/atomic" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" -) - -func TestIngressToken(t *testing.T) { - const ( - dir = "dir" - file = "file" - empty = "empty" - missing = "missing" - symlinked = "symlinked" - ) - - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - - setupTestDir := func() string { - testDir, err := os.MkdirTemp(rootDir, "") - if err != nil { - t.Fatal(err) - } - err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - _, err = os.Create(path.Join(testDir, empty)) - if err != nil { - t.Fatal(err) - } - err = os.Mkdir(path.Join(testDir, dir), 0o755) - if err != nil { - t.Fatal(err) - } - err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked)) - if err != nil { - t.Fatal(err) - } - - return testDir - } - - for _, tc := range []struct { - name string - path string - errString string - }{ - { - "happy path", - file, - "", - }, - { - "path is directory", - dir, - "[ERROR] jwt file is not a regular file or symlink", - }, - { - "path is symlink", - symlinked, - "", - }, - { - "path is missing (implies nothing for ingressToken to do)", - missing, - "", - }, - { - "path is empty file", - empty, - "[WARN] empty jwt file read", - }, - } { - testDir := setupTestDir() - logBuffer := bytes.Buffer{} - jwtAuth := &jwtMethod{ - logger: hclog.New(&hclog.LoggerOptions{ - Output: &logBuffer, - }), - latestToken: new(atomic.Value), - path: path.Join(testDir, tc.path), - } - - jwtAuth.ingressToken() - - if tc.errString != "" { - if !strings.Contains(logBuffer.String(), tc.errString) { - t.Fatal("logs did no contain expected error", tc.errString, logBuffer.String()) - } - } else { - if strings.Contains(logBuffer.String(), "[ERROR]") || strings.Contains(logBuffer.String(), "[WARN]") { - t.Fatal("logs contained unexpected error", logBuffer.String()) - } - } - } -} - -func TestDeleteAfterReading(t *testing.T) { - for _, tc := range map[string]struct { - configValue string - shouldDelete bool - }{ - "default": { - "", - true, - }, - "explicit true": { - "true", - true, - }, - "false": { - "false", - false, - }, - } { - rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") - if err != nil { - t.Fatalf("failed to create temp dir: %s", err) - } - defer os.RemoveAll(rootDir) - tokenPath := path.Join(rootDir, "token") - err = os.WriteFile(tokenPath, []byte("test"), 0o644) - if err != nil { - t.Fatal(err) - } - - config := &auth.AuthConfig{ - Config: map[string]interface{}{ - "path": tokenPath, - 
"role": "unusedrole", - }, - Logger: hclog.Default(), - } - if tc.configValue != "" { - config.Config["remove_jwt_after_reading"] = tc.configValue - } - - jwtAuth, err := NewJWTAuthMethod(config) - if err != nil { - t.Fatal(err) - } - - jwtAuth.(*jwtMethod).ingressToken() - - if _, err := os.Lstat(tokenPath); tc.shouldDelete { - if err == nil || !os.IsNotExist(err) { - t.Fatal(err) - } - } else { - if err != nil { - t.Fatal(err) - } - } - } -} diff --git a/command/agent/auto_auth_preload_token_end_to_end_test.go b/command/agent/auto_auth_preload_token_end_to_end_test.go index 3f8d972a32cf..b566d7e7db2d 100644 --- a/command/agent/auto_auth_preload_token_end_to_end_test.go +++ b/command/agent/auto_auth_preload_token_end_to_end_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -10,10 +13,10 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentAppRole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentAppRole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -23,7 +26,6 @@ import ( func TestTokenPreload_UsingAutoAuth(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, LogicalBackends: map[string]logical.Factory{ "kv": vault.LeasedPassthroughBackendFactory, }, diff --git a/command/agent/aws_end_to_end_test.go b/command/agent/aws_end_to_end_test.go index e8ed3a508b9f..25d8cbd697b2 100644 --- a/command/agent/aws_end_to_end_test.go +++ b/command/agent/aws_end_to_end_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -15,10 +18,10 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" vaultaws "github.com/hashicorp/vault/builtin/credential/aws" - "github.com/hashicorp/vault/command/agent/auth" - agentaws "github.com/hashicorp/vault/command/agent/auth/aws" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentaws "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" @@ -58,7 +61,6 @@ func TestAWSEndToEnd(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "aws": vaultaws.Factory, }, diff --git a/command/agent/cache/api_proxy.go b/command/agent/cache/api_proxy.go deleted file mode 100644 index 1a754e064ec1..000000000000 --- a/command/agent/cache/api_proxy.go +++ /dev/null @@ -1,146 +0,0 @@ -package cache - -import ( - "context" - "fmt" - "sync" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-retryablehttp" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/http" -) - -type EnforceConsistency int - -const ( - EnforceConsistencyNever EnforceConsistency = iota - EnforceConsistencyAlways -) - -type WhenInconsistentAction int - -const ( - WhenInconsistentFail WhenInconsistentAction = iota - WhenInconsistentRetry - WhenInconsistentForward -) - -// APIProxy is an implementation of the proxier interface that is used to -// forward the request to Vault and get the response. -type APIProxy struct { - client *api.Client - logger hclog.Logger - enforceConsistency EnforceConsistency - whenInconsistentAction WhenInconsistentAction - l sync.RWMutex - lastIndexStates []string -} - -var _ Proxier = &APIProxy{} - -type APIProxyConfig struct { - Client *api.Client - Logger hclog.Logger - EnforceConsistency EnforceConsistency - WhenInconsistentAction WhenInconsistentAction -} - -func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { - if config.Client == nil { - return nil, fmt.Errorf("nil API client") - } - return &APIProxy{ - client: config.Client, - logger: config.Logger, - enforceConsistency: config.EnforceConsistency, - whenInconsistentAction: config.WhenInconsistentAction, - }, nil -} - -func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - client, err := ap.client.Clone() - if err != nil { - return nil, err - } - client.SetToken(req.Token) - - // Derive and set a logger for the client - clientLogger := ap.logger.Named("client") - client.SetLogger(clientLogger) - - // http.Transport will transparently request gzip and decompress the response, but only if - // the client doesn't manually set the header. Removing any Accept-Encoding header allows the - // transparent compression to occur. 
- req.Request.Header.Del("Accept-Encoding") - client.SetHeaders(req.Request.Header) - - fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) - fwReq.BodyBytes = req.RequestBody - - query := req.Request.URL.Query() - if len(query) != 0 { - fwReq.Params = query - } - - var newState string - manageState := ap.enforceConsistency == EnforceConsistencyAlways && - req.Request.Header.Get(http.VaultIndexHeaderName) == "" && - req.Request.Header.Get(http.VaultForwardHeaderName) == "" && - req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" - - if manageState { - client = client.WithResponseCallbacks(api.RecordState(&newState)) - ap.l.RLock() - lastStates := ap.lastIndexStates - ap.l.RUnlock() - if len(lastStates) != 0 { - client = client.WithRequestCallbacks(api.RequireState(lastStates...)) - switch ap.whenInconsistentAction { - case WhenInconsistentFail: - // In this mode we want to delegate handling of inconsistency - // failures to the external client talking to Agent. - client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) - case WhenInconsistentRetry: - // In this mode we want to handle retries due to inconsistency - // internally. This is the default api.Client behaviour so - // we needn't do anything. - case WhenInconsistentForward: - fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) - } - } - } - - // Make the request to Vault and get the response - ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) - - resp, err := client.RawRequestWithContext(ctx, fwReq) - if resp == nil && err != nil { - // We don't want to cache nil responses, so we simply return the error - return nil, err - } - - if newState != "" { - ap.l.Lock() - // We want to be using the "newest" states seen, but newer isn't well - // defined here. There can be two states S1 and S2 which aren't strictly ordered: - // S1 could have a newer localindex and S2 could have a newer replicatedindex. So - // we need to merge them. But we can't merge them because we wouldn't be able to - // "sign" the resulting header because we don't have access to the HMAC key that - // Vault uses to do so. So instead we compare any of the 0-2 saved states - // we have to the new header, keeping the newest 1-2 of these, and sending - // them to Vault to evaluate. - ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) - ap.l.Unlock() - } - - // Before error checking from the request call, we'd want to initialize a SendResponse to - // potentially return - sendResponse, newErr := NewSendResponse(resp, nil) - if newErr != nil { - return nil, newErr - } - - // Bubble back the api.Response as well for error checking/handling at the handler layer. 
- return sendResponse, err -} diff --git a/command/agent/cache/cache_test.go b/command/agent/cache/cache_test.go deleted file mode 100644 index de66f86cc78b..000000000000 --- a/command/agent/cache/cache_test.go +++ /dev/null @@ -1,1239 +0,0 @@ -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io" - "math/rand" - "net" - "net/http" - "sync" - "testing" - "time" - - "github.com/go-test/deep" - "github.com/hashicorp/go-hclog" - kv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/sink/mock" - "github.com/hashicorp/vault/helper/namespace" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" -) - -func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { - t.Helper() - for val, valType := range sampleSpace { - index, err := leaseCache.db.Get(valType, val) - if err != nil { - t.Fatal(err) - } - if expected[val] == "" && index != nil { - t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val) - } - if expected[val] != "" && index == nil { - t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val) - } - } -} - -func TestCache_AutoAuthTokenStripping(t *testing.T) { - response1 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup-self"}}` - response2 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup"}}` - response3 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` - response4 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, response1), - newTestSendResponse(http.StatusOK, response2), - newTestSendResponse(http.StatusOK, response3), - newTestSendResponse(http.StatusOK, response4), - } - - leaseCache := testNewLeaseCache(t, responses) - - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ctx := namespace.RootContext(nil) - - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - - mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) - server := &http.Server{ - Handler: mux, - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: cacheLogger.StandardLogger(nil), - } - go server.Serve(listener) - - testClient, err := client.Clone() - if err != nil { - t.Fatal(err) - } - - if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { - t.Fatal(err) - } - - // Empty the token in the client. Auto-auth token should be put to use. 
- testClient.SetToken("")
- secret, err := testClient.Auth().Token().LookupSelf()
- if err != nil {
- t.Fatal(err)
- }
- if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup-self" {
- t.Fatalf("failed to strip off auto-auth token on lookup-self")
- }
-
- secret, err = testClient.Auth().Token().Lookup("")
- if err != nil {
- t.Fatal(err)
- }
- if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup" {
- t.Fatalf("failed to strip off auto-auth token on lookup")
- }
-
- secret, err = testClient.Auth().Token().RenewSelf(1)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth == nil {
- secretJson, _ := json.Marshal(secret)
- t.Fatalf("Expected secret to have Auth but was %s", secretJson)
- }
- if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" {
- t.Fatalf("failed to strip off auto-auth token on renew-self")
- }
-
- secret, err = testClient.Auth().Token().Renew("testid", 1)
- if err != nil {
- t.Fatal(err)
- }
- if secret.Auth == nil {
- secretJson, _ := json.Marshal(secret)
- t.Fatalf("Expected secret to have Auth but was %s", secretJson)
- }
- if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" {
- t.Fatalf("failed to strip off auto-auth token on renew")
- }
-}
-
-func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) {
- leaseCache := &mockTokenVerifierProxier{}
- dummyToken := "DUMMY"
- realToken := "testid"
-
- cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- defer cluster.Cleanup()
-
- cores := cluster.Cores
- vault.TestWaitActive(t, cores[0].Core)
- client := cores[0].Client
-
- cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
-
- ctx := namespace.RootContext(nil)
-
- // Create a muxer and add paths relevant for the lease cache layer
- mux := http.NewServeMux()
- // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))
-
- mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false))
- server := &http.Server{
- Handler: mux,
- ReadHeaderTimeout: 10 * time.Second,
- ReadTimeout: 30 * time.Second,
- IdleTimeout: 5 * time.Minute,
- ErrorLog: cacheLogger.StandardLogger(nil),
- }
- go server.Serve(listener)
-
- testClient, err := client.Clone()
- if err != nil {
- t.Fatal(err)
- }
-
- if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
- t.Fatal(err)
- }
-
- // Set a dummy token in the client. The proxy should replace it with the real auto-auth token.
- testClient.SetToken(dummyToken) - _, err = testClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - if leaseCache.currentToken != realToken { - t.Fatalf("failed to use real token from auto-auth") - } -} - -func TestCache_ConcurrentRequests(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - wg := &sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - key := fmt.Sprintf("kv/foo/%d_%d", i, rand.Int()) - _, err := testClient.Logical().Write(key, map[string]interface{}{ - "key": key, - }) - if err != nil { - t.Fatal(err) - } - secret, err := testClient.Logical().Read(key) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Data["key"].(string) != key { - t.Fatal(fmt.Sprintf("failed to read value for key: %q", key)) - } - }(i) - - } - wg.Wait() -} - -func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke-orphan the intermediate token. This should result in its own - // eviction and evictions of the revoked token's leases. All other things - // including the child tokens and leases of the child tokens should be - // untouched. 
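Before the revocation tests resume, the distinction they exercise can be shown with the plain token API. A sketch, assuming an authenticated `*api.Client` and the token2/token3 naming used above (the token strings are illustrative placeholders):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// revokeIntermediate sketches the two revocation styles the tests below
// exercise: token2 is an intermediate token whose child is token3.
func revokeIntermediate(client *api.Client, token2 string) {
	// revoke-orphan revokes only token2 itself; its child token3 is
	// orphaned, not revoked. Agent therefore evicts token2 and token2's
	// leases while token3 and its leases stay cached.
	if err := client.Auth().Token().RevokeOrphan(token2); err != nil {
		log.Fatal(err)
	}

	// By contrast, a plain revoke-self on token2 cascades to token3, so
	// Agent evicts the whole subtree: both tokens and all their leases.
	// client.SetToken(token2)
	// _ = client.Auth().Token().RevokeSelf("")
}

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("parent-token-here") // illustrative
	revokeIntermediate(client, "intermediate-token-here")
}
```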
- testClient.SetToken(token2)
- err = testClient.Auth().Token().RevokeOrphan(token2)
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(1 * time.Second)
-
- expected = map[string]string{
- token1: "token",
- lease1: "lease",
- token3: "token",
- lease3: "lease",
- }
- tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
-}
-
-func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) {
- coreConfig := &vault.CoreConfig{
- DisableMlock: true,
- DisableCache: true,
- Logger: hclog.NewNullLogger(),
- LogicalBackends: map[string]logical.Factory{
- "kv": vault.LeasedPassthroughBackendFactory,
- },
- }
-
- sampleSpace := make(map[string]string)
-
- cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
- defer cleanup()
-
- token1 := testClient.Token()
- sampleSpace[token1] = "token"
-
- // Mount the kv backend
- err := testClient.Sys().Mount("kv", &api.MountInput{
- Type: "kv",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Create a secret in the backend
- _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
- "value": "bar",
- "ttl": "1h",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // Read the secret and create a lease
- leaseResp, err := testClient.Logical().Read("kv/foo")
- if err != nil {
- t.Fatal(err)
- }
- lease1 := leaseResp.LeaseID
- sampleSpace[lease1] = "lease"
-
- resp, err := testClient.Logical().Write("auth/token/create", nil)
- if err != nil {
- t.Fatal(err)
- }
- token2 := resp.Auth.ClientToken
- sampleSpace[token2] = "token"
-
- testClient.SetToken(token2)
-
- leaseResp, err = testClient.Logical().Read("kv/foo")
- if err != nil {
- t.Fatal(err)
- }
- lease2 := leaseResp.LeaseID
- sampleSpace[lease2] = "lease"
-
- resp, err = testClient.Logical().Write("auth/token/create", nil)
- if err != nil {
- t.Fatal(err)
- }
- token3 := resp.Auth.ClientToken
- sampleSpace[token3] = "token"
-
- testClient.SetToken(token3)
-
- leaseResp, err = testClient.Logical().Read("kv/foo")
- if err != nil {
- t.Fatal(err)
- }
- lease3 := leaseResp.LeaseID
- sampleSpace[lease3] = "lease"
-
- expected := make(map[string]string)
- for k, v := range sampleSpace {
- expected[k] = v
- }
- tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
-
- // Revoke the leaf token. This should evict the leaf token's own entry
- // and all the leases belonging to it; the parent tokens and their
- // leases should be untouched.
- testClient.SetToken(token3) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = map[string]string{ - token1: "token", - lease1: "lease", - token2: "token", - lease2: "lease", - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke the second level token. This should evict all the leases - // belonging to this token, evict entries for all the child tokens and - // their respective leases. 
- testClient.SetToken(token2) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = map[string]string{ - token1: "token", - lease1: "lease", - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_TopLevelToken(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Revoke the top level token. This should evict all the leases belonging - // to this token, evict entries for all the child tokens and their - // respective leases. 
- testClient.SetToken(token1) - err = testClient.Auth().Token().RevokeSelf("") - if err != nil { - t.Fatal(err) - } - time.Sleep(1 * time.Second) - - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_Shutdown(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil)) - cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - rootCancelFunc() - time.Sleep(1 * time.Second) - - // Ensure that all the entries are now gone - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - sampleSpace := make(map[string]string) - - cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - token1 := testClient.Token() - sampleSpace[token1] = "token" - - // Mount the kv backend - err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Create a secret in the backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Read the secret and create a lease - leaseResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease1 := leaseResp.LeaseID - sampleSpace[lease1] = "lease" - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - 
t.Fatal(err) - } - token2 := resp.Auth.ClientToken - sampleSpace[token2] = "token" - - testClient.SetToken(token2) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease2 := leaseResp.LeaseID - sampleSpace[lease2] = "lease" - - resp, err = testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token3 := resp.Auth.ClientToken - sampleSpace[token3] = "token" - - testClient.SetToken(token3) - - leaseResp, err = testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - lease3 := leaseResp.LeaseID - sampleSpace[lease3] = "lease" - - expected := make(map[string]string) - for k, v := range sampleSpace { - expected[k] = v - } - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) - - // Cancel the base context of the lease cache. This should trigger - // evictions of all the entries from the cache. - leaseCache.baseCtxInfo.CancelFunc() - time.Sleep(1 * time.Second) - - // Ensure that all the entries are now gone - expected = make(map[string]string) - tokenRevocationValidation(t, sampleSpace, expected, leaseCache) -} - -func TestCache_NonCacheable(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": kv.Factory, - }, - } - - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - // Query mounts first - origMounts, err := testClient.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - // Mount a kv backend - if err := testClient.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - Options: map[string]string{ - "version": "2", - }, - }); err != nil { - t.Fatal(err) - } - - // Query mounts again - newMounts, err := testClient.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(origMounts, newMounts); diff == nil { - t.Logf("response #1: %#v", origMounts) - t.Logf("response #2: %#v", newMounts) - t.Fatal("expected requests to be not cached") - } - - // Query a non-existing mount, expect an error from api.Response - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - r := testClient.NewRequest("GET", "/v1/kv-invalid") - - apiResp, err := testClient.RawRequestWithContext(ctx, r) - if apiResp != nil { - defer apiResp.Body.Close() - } - if apiResp.Error() == nil || (apiResp != nil && apiResp.StatusCode != 404) { - t.Fatalf("expected an error response and a 404 from requesting an invalid path, got: %#v", apiResp) - } - if err == nil { - t.Fatal("expected an error from requesting an invalid path") - } -} - -func TestCache_Caching_AuthResponse(t *testing.T) { - cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - resp, err := testClient.Logical().Write("auth/token/create", nil) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - testClient.SetToken(token) - - authTokeCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret { - resp, err := testClient.Logical().Write("auth/token/create", policies) - if err != nil { - t.Fatal(err) - } - if resp.Auth == nil || resp.Auth.ClientToken == "" { - t.Fatalf("expected a valid client token in the response, got = %#v", resp) - } - - return resp - } - - // Test on auth response by creating a child token - { - proxiedResp := authTokeCreateReq(t, map[string]interface{}{ - "policies": "default", - }) - - cachedResp := 
authTokeCreateReq(t, map[string]interface{}{ - "policies": "default", - }) - - if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { - t.Fatal(diff) - } - } - - // Test on *non-renewable* auth response by creating a child root token - { - proxiedResp := authTokeCreateReq(t, nil) - - cachedResp := authTokeCreateReq(t, nil) - - if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { - t.Fatal(diff) - } - } -} - -func TestCache_Caching_LeaseResponse(t *testing.T) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := client.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Test proxy by issuing two different requests - { - // Write data to the lease-kv backend - _, err := testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - _, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - firstResp, err := testClient.Logical().Read("kv/foo") - if err != nil { - t.Fatal(err) - } - - secondResp, err := testClient.Logical().Read("kv/foobar") - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(firstResp, secondResp); diff == nil { - t.Logf("response: %#v", firstResp) - t.Fatal("expected proxied responses, got cached response on second request") - } - } - - // Test caching behavior by issue the same request twice - { - _, err := testClient.Logical().Write("kv/baz", map[string]interface{}{ - "value": "foo", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - proxiedResp, err := testClient.Logical().Read("kv/baz") - if err != nil { - t.Fatal(err) - } - - cachedResp, err := testClient.Logical().Read("kv/baz") - if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal(proxiedResp, cachedResp); diff != nil { - t.Fatal(diff) - } - } -} - -func TestCache_Caching_CacheClear(t *testing.T) { - t.Run("request_path", func(t *testing.T) { - testCachingCacheClearCommon(t, "request_path") - }) - - t.Run("lease", func(t *testing.T) { - testCachingCacheClearCommon(t, "lease") - }) - - t.Run("token", func(t *testing.T) { - testCachingCacheClearCommon(t, "token") - }) - - t.Run("token_accessor", func(t *testing.T) { - testCachingCacheClearCommon(t, "token_accessor") - }) - - t.Run("all", func(t *testing.T) { - testCachingCacheClearCommon(t, "all") - }) -} - -func testCachingCacheClearCommon(t *testing.T, clearType string) { - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), - LogicalBackends: map[string]logical.Factory{ - "kv": vault.LeasedPassthroughBackendFactory, - }, - } - - cleanup, client, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) - defer cleanup() - - err := client.Sys().Mount("kv", &api.MountInput{ - Type: "kv", - }) - if err != nil { - t.Fatal(err) - } - - // Write data to the lease-kv backend - _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ - "value": "bar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - // Proxy this request, agent should cache the 
response
- resp, err := testClient.Logical().Read("kv/foo")
- if err != nil {
- t.Fatal(err)
- }
- gotLeaseID := resp.LeaseID
-
- // Verify the entry exists
- idx, err := leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID)
- if err != nil {
- t.Fatal(err)
- }
-
- if idx == nil {
- t.Fatalf("expected cached entry, got: %v", idx)
- }
-
- data := map[string]interface{}{
- "type": clearType,
- }
-
- // We need to set the value here depending on what we're trying to test.
- // Some values are static, but others are dynamically generated at runtime.
- switch clearType {
- case "request_path":
- data["value"] = "/v1/kv/foo"
- case "lease":
- data["value"] = resp.LeaseID
- case "token":
- data["value"] = testClient.Token()
- case "token_accessor":
- lookupResp, err := client.Auth().Token().Lookup(testClient.Token())
- if err != nil {
- t.Fatal(err)
- }
- data["value"] = lookupResp.Data["accessor"]
- case "all":
- default:
- t.Fatalf("invalid type provided: %v", clearType)
- }
-
- r := testClient.NewRequest("PUT", consts.AgentPathCacheClear)
- if err := r.SetJSONBody(data); err != nil {
- t.Fatal(err)
- }
-
- ctx, cancelFunc := context.WithCancel(context.Background())
- defer cancelFunc()
- apiResp, err := testClient.RawRequestWithContext(ctx, r)
- if apiResp != nil {
- defer apiResp.Body.Close()
- }
- if apiResp != nil && apiResp.StatusCode == 404 {
- _, parseErr := api.ParseSecret(apiResp.Body)
- switch parseErr {
- case nil:
- case io.EOF:
- default:
- t.Fatal(parseErr)
- }
- }
- if err != nil {
- t.Fatal(err)
- }
-
- time.Sleep(100 * time.Millisecond)
-
- // Verify the entry is cleared
- idx, err = leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID)
- if err != nil {
- t.Fatal(err)
- }
-
- if idx != nil {
- t.Fatalf("expected entry to be nil, got: %v", idx)
- }
-}
-
-func TestCache_AuthTokenCreateOrphan(t *testing.T) {
- t.Run("create", func(t *testing.T) {
- t.Run("managed", func(t *testing.T) {
- cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
- defer cleanup()
-
- reqOpts := &api.TokenCreateRequest{
- Policies: []string{"default"},
- NoParent: true,
- }
- resp, err := testClient.Auth().Token().Create(reqOpts)
- if err != nil {
- t.Fatal(err)
- }
- token := resp.Auth.ClientToken
-
- idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
- if err != nil {
- t.Fatal(err)
- }
- if idx == nil {
- t.Fatalf("expected entry to be non-nil, got: %#v", idx)
- }
- })
-
- t.Run("non-managed", func(t *testing.T) {
- cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
- defer cleanup()
-
- reqOpts := &api.TokenCreateRequest{
- Policies: []string{"default"},
- NoParent: true,
- }
-
- // Use the test client but set the token to one that's not managed by agent
- testClient.SetToken(clusterClient.Token())
-
- resp, err := testClient.Auth().Token().Create(reqOpts)
- if err != nil {
- t.Fatal(err)
- }
- token := resp.Auth.ClientToken
-
- idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
- if err != nil {
- t.Fatal(err)
- }
- if idx == nil {
- t.Fatalf("expected entry to be non-nil, got: %#v", idx)
- }
- })
- })
-
- t.Run("create-orphan", func(t *testing.T) {
- t.Run("managed", func(t *testing.T) {
- cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
- defer cleanup()
-
- reqOpts := &api.TokenCreateRequest{
- Policies: []string{"default"},
- }
- resp, err := testClient.Auth().Token().CreateOrphan(reqOpts)
- if err != nil {
- t.Fatal(err)
-
} - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - - t.Run("non-managed", func(t *testing.T) { - cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) - defer cleanup() - - reqOpts := &api.TokenCreateRequest{ - Policies: []string{"default"}, - } - - // Use the test client but set the token to one that's not managed by agent - testClient.SetToken(clusterClient.Token()) - - resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) - if err != nil { - t.Fatal(err) - } - token := resp.Auth.ClientToken - - idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - t.Fatal(err) - } - if idx == nil { - t.Fatalf("expected entry to be non-nil, got: %#v", idx) - } - }) - }) -} diff --git a/command/agent/cache/cachememdb/cache_memdb.go b/command/agent/cache/cachememdb/cache_memdb.go deleted file mode 100644 index 7fdad303bb56..000000000000 --- a/command/agent/cache/cachememdb/cache_memdb.go +++ /dev/null @@ -1,240 +0,0 @@ -package cachememdb - -import ( - "errors" - "fmt" - "sync/atomic" - - memdb "github.com/hashicorp/go-memdb" -) - -const ( - tableNameIndexer = "indexer" -) - -// CacheMemDB is the underlying cache database for storing indexes. -type CacheMemDB struct { - db *atomic.Value -} - -// New creates a new instance of CacheMemDB. -func New() (*CacheMemDB, error) { - db, err := newDB() - if err != nil { - return nil, err - } - - c := &CacheMemDB{ - db: new(atomic.Value), - } - c.db.Store(db) - - return c, nil -} - -func newDB() (*memdb.MemDB, error) { - cacheSchema := &memdb.DBSchema{ - Tables: map[string]*memdb.TableSchema{ - tableNameIndexer: { - Name: tableNameIndexer, - Indexes: map[string]*memdb.IndexSchema{ - // This index enables fetching the cached item based on the - // identifier of the index. - IndexNameID: { - Name: IndexNameID, - Unique: true, - Indexer: &memdb.StringFieldIndex{ - Field: "ID", - }, - }, - // This index enables fetching all the entries in cache for - // a given request path, in a given namespace. - IndexNameRequestPath: { - Name: IndexNameRequestPath, - Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "RequestPath", - }, - }, - }, - }, - // This index enables fetching all the entries in cache - // belonging to the leases of a given token. - IndexNameLeaseToken: { - Name: IndexNameLeaseToken, - Unique: false, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "LeaseToken", - }, - }, - // This index enables fetching all the entries in cache - // that are tied to the given token, regardless of the - // entries belonging to the token or belonging to the - // lease. - IndexNameToken: { - Name: IndexNameToken, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "Token", - }, - }, - // This index enables fetching all the entries in cache for - // the given parent token. - IndexNameTokenParent: { - Name: IndexNameTokenParent, - Unique: false, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "TokenParent", - }, - }, - // This index enables fetching all the entries in cache for - // the given accessor. 
- IndexNameTokenAccessor: { - Name: IndexNameTokenAccessor, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "TokenAccessor", - }, - }, - // This index enables fetching all the entries in cache for - // the given lease identifier. - IndexNameLease: { - Name: IndexNameLease, - Unique: true, - AllowMissing: true, - Indexer: &memdb.StringFieldIndex{ - Field: "Lease", - }, - }, - }, - }, - }, - } - - db, err := memdb.NewMemDB(cacheSchema) - if err != nil { - return nil, err - } - return db, nil -} - -// Get returns the index based on the indexer and the index values provided. -func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) { - if !validIndexName(indexName) { - return nil, fmt.Errorf("invalid index name %q", indexName) - } - - txn := c.db.Load().(*memdb.MemDB).Txn(false) - - raw, err := txn.First(tableNameIndexer, indexName, indexValues...) - if err != nil { - return nil, err - } - - if raw == nil { - return nil, nil - } - - index, ok := raw.(*Index) - if !ok { - return nil, errors.New("unable to parse index value from the cache") - } - - return index, nil -} - -// Set stores the index into the cache. -func (c *CacheMemDB) Set(index *Index) error { - if index == nil { - return errors.New("nil index provided") - } - - txn := c.db.Load().(*memdb.MemDB).Txn(true) - defer txn.Abort() - - if err := txn.Insert(tableNameIndexer, index); err != nil { - return fmt.Errorf("unable to insert index into cache: %v", err) - } - - txn.Commit() - - return nil -} - -// GetByPrefix returns all the cached indexes based on the index name and the -// value prefix. -func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) { - if !validIndexName(indexName) { - return nil, fmt.Errorf("invalid index name %q", indexName) - } - - indexName = indexName + "_prefix" - - // Get all the objects - txn := c.db.Load().(*memdb.MemDB).Txn(false) - - iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) - if err != nil { - return nil, err - } - - var indexes []*Index - for { - obj := iter.Next() - if obj == nil { - break - } - index, ok := obj.(*Index) - if !ok { - return nil, fmt.Errorf("failed to cast cached index") - } - - indexes = append(indexes, index) - } - - return indexes, nil -} - -// Evict removes an index from the cache based on index name and value. -func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { - index, err := c.Get(indexName, indexValues...) - if err != nil { - return fmt.Errorf("unable to fetch index on cache deletion: %v", err) - } - - if index == nil { - return nil - } - - txn := c.db.Load().(*memdb.MemDB).Txn(true) - defer txn.Abort() - - if err := txn.Delete(tableNameIndexer, index); err != nil { - return fmt.Errorf("unable to delete index from cache: %v", err) - } - - txn.Commit() - - return nil -} - -// Flush resets the underlying cache object. 
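As an aside before `Flush` below: for readers unfamiliar with go-memdb, the compound index and the `_prefix` convention that `GetByPrefix` relies on look roughly like this in isolation. This is a self-contained sketch with a made-up `entry` type and table, not Agent's actual schema:

```go
package main

import (
	"fmt"
	"log"

	memdb "github.com/hashicorp/go-memdb"
)

type entry struct {
	Namespace   string
	RequestPath string
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"indexer": {
				Name: "indexer",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:   "id",
						Unique: true,
						// Compound index: lookups supply the namespace
						// first, then the request path.
						Indexer: &memdb.CompoundIndex{
							Indexes: []memdb.Indexer{
								&memdb.StringFieldIndex{Field: "Namespace"},
								&memdb.StringFieldIndex{Field: "RequestPath"},
							},
						},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		log.Fatal(err)
	}

	txn := db.Txn(true)
	for _, e := range []*entry{
		{Namespace: "root/", RequestPath: "/v1/kv/foo"},
		{Namespace: "root/", RequestPath: "/v1/kv/bar"},
	} {
		if err := txn.Insert("indexer", e); err != nil {
			log.Fatal(err)
		}
	}
	txn.Commit()

	// Appending "_prefix" to the index name turns an exact-match lookup
	// into a prefix scan, which is what GetByPrefix above does.
	read := db.Txn(false)
	it, err := read.Get("indexer", "id_prefix", "root/", "/v1/kv")
	if err != nil {
		log.Fatal(err)
	}
	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Printf("%+v\n", obj.(*entry))
	}
}
```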
-func (c *CacheMemDB) Flush() error { - newDB, err := newDB() - if err != nil { - return err - } - - c.db.Store(newDB) - - return nil -} diff --git a/command/agent/cache/cachememdb/cache_memdb_test.go b/command/agent/cache/cachememdb/cache_memdb_test.go deleted file mode 100644 index 4162fed0daf7..000000000000 --- a/command/agent/cache/cachememdb/cache_memdb_test.go +++ /dev/null @@ -1,392 +0,0 @@ -package cachememdb - -import ( - "context" - "testing" - - "github.com/go-test/deep" -) - -func testContextInfo() *ContextInfo { - ctx, cancelFunc := context.WithCancel(context.Background()) - - return &ContextInfo{ - Ctx: ctx, - CancelFunc: cancelFunc, - } -} - -func TestNew(t *testing.T) { - _, err := New() - if err != nil { - t.Fatal(err) - } -} - -func TestCacheMemDB_Get(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test invalid index name - _, err = cache.Get("foo", "bar") - if err == nil { - t.Fatal("expected error") - } - - // Test on empty cache - index, err := cache.Get(IndexNameID, "foo") - if err != nil { - t.Fatal(err) - } - if index != nil { - t.Fatalf("expected nil index, got: %v", index) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_accessor", - Lease: "test_lease", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - }{ - { - "by_index_id", - "id", - []interface{}{in.ID}, - }, - { - "by_request_path", - "request_path", - []interface{}{in.Namespace, in.RequestPath}, - }, - { - "by_lease", - "lease", - []interface{}{in.Lease}, - }, - { - "by_token", - "token", - []interface{}{in.Token}, - }, - { - "by_token_accessor", - "token_accessor", - []interface{}{in.TokenAccessor}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - out, err := cache.Get(tc.indexName, tc.indexValues...) 
- if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(in, out); diff != nil { - t.Fatal(diff) - } - }) - } -} - -func TestCacheMemDB_GetByPrefix(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test invalid index name - _, err = cache.GetByPrefix("foo", "bar", "baz") - if err == nil { - t.Fatal("expected error") - } - - // Test on empty cache - index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar") - if err != nil { - t.Fatal(err) - } - if index != nil { - t.Fatalf("expected nil index, got: %v", index) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path/1", - Token: "test_token", - TokenParent: "test_token_parent", - TokenAccessor: "test_accessor", - Lease: "path/to/test_lease/1", - LeaseToken: "test_lease_token", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - // Populate cache - in2 := &Index{ - ID: "test_id_2", - Namespace: "test_ns/", - RequestPath: "/v1/request/path/2", - Token: "test_token2", - TokenParent: "test_token_parent", - TokenAccessor: "test_accessor2", - Lease: "path/to/test_lease/2", - LeaseToken: "test_lease_token", - Response: []byte("hello world"), - } - - if err := cache.Set(in2); err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - }{ - { - "by_request_path", - "request_path", - []interface{}{"test_ns/", "/v1/request/path"}, - }, - { - "by_lease", - "lease", - []interface{}{"path/to/test_lease"}, - }, - { - "by_token_parent", - "token_parent", - []interface{}{"test_token_parent"}, - }, - { - "by_lease_token", - "lease_token", - []interface{}{"test_lease_token"}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - out, err := cache.GetByPrefix(tc.indexName, tc.indexValues...) 
- if err != nil { - t.Fatal(err) - } - - if diff := deep.Equal([]*Index{in, in2}, out); diff != nil { - t.Fatal(diff) - } - }) - } -} - -func TestCacheMemDB_Set(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - testCases := []struct { - name string - index *Index - wantErr bool - }{ - { - "nil", - nil, - true, - }, - { - "empty_fields", - &Index{}, - true, - }, - { - "missing_required_fields", - &Index{ - Lease: "foo", - }, - true, - }, - { - "all_fields", - &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_accessor", - Lease: "test_lease", - RenewCtxInfo: testContextInfo(), - }, - false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if err := cache.Set(tc.index); (err != nil) != tc.wantErr { - t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr) - } - }) - } -} - -func TestCacheMemDB_Evict(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Test on empty cache - if err := cache.Evict(IndexNameID, "foo"); err != nil { - t.Fatal(err) - } - - testIndex := &Index{ - ID: "test_id", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Token: "test_token", - TokenAccessor: "test_token_accessor", - Lease: "test_lease", - RenewCtxInfo: testContextInfo(), - } - - testCases := []struct { - name string - indexName string - indexValues []interface{} - insertIndex *Index - wantErr bool - }{ - { - "empty_params", - "", - []interface{}{""}, - nil, - true, - }, - { - "invalid_params", - "foo", - []interface{}{"bar"}, - nil, - true, - }, - { - "by_id", - "id", - []interface{}{"test_id"}, - testIndex, - false, - }, - { - "by_request_path", - "request_path", - []interface{}{"test_ns/", "/v1/request/path"}, - testIndex, - false, - }, - { - "by_token", - "token", - []interface{}{"test_token"}, - testIndex, - false, - }, - { - "by_token_accessor", - "token_accessor", - []interface{}{"test_accessor"}, - testIndex, - false, - }, - { - "by_lease", - "lease", - []interface{}{"test_lease"}, - testIndex, - false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if tc.insertIndex != nil { - if err := cache.Set(tc.insertIndex); err != nil { - t.Fatal(err) - } - } - - if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr { - t.Fatal(err) - } - - // Verify that the cache doesn't contain the entry any more - index, err := cache.Get(tc.indexName, tc.indexValues...) 
- if (err != nil) != tc.wantErr { - t.Fatal(err) - } - - if index != nil { - t.Fatalf("expected nil entry, got = %#v", index) - } - }) - } -} - -func TestCacheMemDB_Flush(t *testing.T) { - cache, err := New() - if err != nil { - t.Fatal(err) - } - - // Populate cache - in := &Index{ - ID: "test_id", - Token: "test_token", - Lease: "test_lease", - Namespace: "test_ns/", - RequestPath: "/v1/request/path", - Response: []byte("hello world"), - } - - if err := cache.Set(in); err != nil { - t.Fatal(err) - } - - // Reset the cache - if err := cache.Flush(); err != nil { - t.Fatal(err) - } - - // Check the cache doesn't contain inserted index - out, err := cache.Get(IndexNameID, "test_id") - if err != nil { - t.Fatal(err) - } - if out != nil { - t.Fatalf("expected cache to be empty, got = %v", out) - } -} diff --git a/command/agent/cache/cachememdb/index.go b/command/agent/cache/cachememdb/index.go deleted file mode 100644 index 546a528cb2e4..000000000000 --- a/command/agent/cache/cachememdb/index.go +++ /dev/null @@ -1,150 +0,0 @@ -package cachememdb - -import ( - "context" - "encoding/json" - "net/http" - "time" -) - -// Index holds the response to be cached along with multiple other values that -// serve as pointers to refer back to this index. -type Index struct { - // ID is a value that uniquely represents the request held by this - // index. This is computed by serializing and hashing the response object. - // Required: true, Unique: true - ID string - - // Token is the token that fetched the response held by this index - // Required: true, Unique: true - Token string - - // TokenParent is the parent token of the token held by this index - // Required: false, Unique: false - TokenParent string - - // TokenAccessor is the accessor of the token being cached in this index - // Required: true, Unique: true - TokenAccessor string - - // Namespace is the namespace that was provided in the request path as the - // Vault namespace to query - Namespace string - - // RequestPath is the path of the request that resulted in the response - // held by this index. - // Required: true, Unique: false - RequestPath string - - // Lease is the identifier of the lease in Vault, that belongs to the - // response held by this index. - // Required: false, Unique: true - Lease string - - // LeaseToken is the identifier of the token that created the lease held by - // this index. - // Required: false, Unique: false - LeaseToken string - - // Response is the serialized response object that the agent is caching. - Response []byte - - // RenewCtxInfo holds the context and the corresponding cancel func for the - // goroutine that manages the renewal of the secret belonging to the - // response in this index. - RenewCtxInfo *ContextInfo - - // RequestMethod is the HTTP method of the request - RequestMethod string - - // RequestToken is the token used in the request - RequestToken string - - // RequestHeader is the header used in the request - RequestHeader http.Header - - // LastRenewed is the timestamp of last renewal - LastRenewed time.Time - - // Type is the index type (token, auth-lease, secret-lease) - Type string -} - -type IndexName uint32 - -const ( - // IndexNameID is the ID of the index constructed from the serialized request. - IndexNameID = "id" - - // IndexNameLease is the lease of the index. - IndexNameLease = "lease" - - // IndexNameRequestPath is the request path of the index. - IndexNameRequestPath = "request_path" - - // IndexNameToken is the token of the index. 
- IndexNameToken = "token" - - // IndexNameTokenAccessor is the token accessor of the index. - IndexNameTokenAccessor = "token_accessor" - - // IndexNameTokenParent is the token parent of the index. - IndexNameTokenParent = "token_parent" - - // IndexNameLeaseToken is the token that created the lease. - IndexNameLeaseToken = "lease_token" -) - -func validIndexName(indexName string) bool { - switch indexName { - case "id": - case "lease": - case "request_path": - case "token": - case "token_accessor": - case "token_parent": - case "lease_token": - default: - return false - } - return true -} - -type ContextInfo struct { - Ctx context.Context - CancelFunc context.CancelFunc - DoneCh chan struct{} -} - -func NewContextInfo(ctx context.Context) *ContextInfo { - if ctx == nil { - return nil - } - - ctxInfo := new(ContextInfo) - ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx) - ctxInfo.DoneCh = make(chan struct{}) - return ctxInfo -} - -// Serialize returns a json marshal'ed Index object, without the RenewCtxInfo -func (i Index) Serialize() ([]byte, error) { - i.RenewCtxInfo = nil - - indexBytes, err := json.Marshal(i) - if err != nil { - return nil, err - } - - return indexBytes, nil -} - -// Deserialize converts json bytes to an Index object -// Note: RenewCtxInfo will need to be reconstructed elsewhere. -func Deserialize(indexBytes []byte) (*Index, error) { - index := new(Index) - if err := json.Unmarshal(indexBytes, index); err != nil { - return nil, err - } - return index, nil -} diff --git a/command/agent/cache/keymanager/manager.go b/command/agent/cache/keymanager/manager.go deleted file mode 100644 index ff4d0f2c00fa..000000000000 --- a/command/agent/cache/keymanager/manager.go +++ /dev/null @@ -1,20 +0,0 @@ -package keymanager - -import ( - "context" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" -) - -const ( - KeyID = "root" -) - -type KeyManager interface { - // Returns a wrapping.Wrapper which can be used to perform key-related operations. - Wrapper() wrapping.Wrapper - // RetrievalToken is the material returned which can be used to source back the - // encryption key. Depending on the implementation, the token can be the - // encryption key itself or a token/identifier used to exchange the token. 
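Stepping back to the `Index` helpers above: a persisted cache entry round-trips through `Serialize` and `Deserialize`. A minimal sketch, assuming the `cachememdb` import path shown in this diff; note that the renewal context is deliberately dropped on `Serialize` and must be rebuilt by whoever restores the entry:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/vault/command/agent/cache/cachememdb"
)

func main() {
	in := &cachememdb.Index{
		ID:           "test_id",
		Token:        "test_token",
		Namespace:    "root/",
		RequestPath:  "/v1/kv/foo",
		Response:     []byte("raw HTTP response bytes"),
		RenewCtxInfo: cachememdb.NewContextInfo(context.Background()),
	}

	// Serialize nils out RenewCtxInfo and JSON-encodes the rest.
	b, err := in.Serialize()
	if err != nil {
		log.Fatal(err)
	}

	out, err := cachememdb.Deserialize(b)
	if err != nil {
		log.Fatal(err)
	}

	// The renewal context does not survive the round trip; rebuild it
	// from whatever base context the restoring caller owns.
	out.RenewCtxInfo = cachememdb.NewContextInfo(context.Background())
	fmt.Println(out.ID, out.RequestPath)
}
```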
- RetrievalToken(ctx context.Context) ([]byte, error) -} diff --git a/command/agent/cache/lease_cache.go b/command/agent/cache/lease_cache.go deleted file mode 100644 index 87bfacd97ec1..000000000000 --- a/command/agent/cache/lease_cache.go +++ /dev/null @@ -1,1308 +0,0 @@ -package cache - -import ( - "bufio" - "bytes" - "context" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/base62" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/helper/namespace" - nshelper "github.com/hashicorp/vault/helper/namespace" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/cryptoutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/locksutil" - "github.com/hashicorp/vault/sdk/logical" - gocache "github.com/patrickmn/go-cache" - "go.uber.org/atomic" -) - -const ( - vaultPathTokenCreate = "/v1/auth/token/create" - vaultPathTokenRevoke = "/v1/auth/token/revoke" - vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self" - vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor" - vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan" - vaultPathTokenLookup = "/v1/auth/token/lookup" - vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self" - vaultPathTokenRenew = "/v1/auth/token/renew" - vaultPathTokenRenewSelf = "/v1/auth/token/renew-self" - vaultPathLeaseRevoke = "/v1/sys/leases/revoke" - vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force" - vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix" -) - -var ( - contextIndexID = contextIndex{} - errInvalidType = errors.New("invalid type provided") - revocationPaths = []string{ - strings.TrimPrefix(vaultPathTokenRevoke, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"), - } -) - -type contextIndex struct{} - -type cacheClearRequest struct { - Type string `json:"type"` - Value string `json:"value"` - Namespace string `json:"namespace"` -} - -// LeaseCache is an implementation of Proxier that handles -// the caching of responses. It passes the incoming request -// to an underlying Proxier implementation. -type LeaseCache struct { - client *api.Client - proxier Proxier - logger hclog.Logger - db *cachememdb.CacheMemDB - baseCtxInfo *cachememdb.ContextInfo - l *sync.RWMutex - - // idLocks is used during cache lookup to ensure that identical requests made - // in parallel won't trigger multiple renewal goroutines. - idLocks []*locksutil.LockEntry - - // inflightCache keeps track of inflight requests - inflightCache *gocache.Cache - - // ps is the persistent storage for tokens and leases - ps *cacheboltdb.BoltStorage - - // shuttingDown is used to determine if cache needs to be evicted or not - // when the context is cancelled - shuttingDown atomic.Bool -} - -// LeaseCacheConfig is the configuration for initializing a new -// Lease. 
-type LeaseCacheConfig struct { - Client *api.Client - BaseContext context.Context - Proxier Proxier - Logger hclog.Logger - Storage *cacheboltdb.BoltStorage -} - -type inflightRequest struct { - // ch is closed by the request that ends up processing the set of - // parallel request - ch chan struct{} - - // remaining is the number of remaining inflight request that needs to - // be processed before this object can be cleaned up - remaining *atomic.Uint64 -} - -func newInflightRequest() *inflightRequest { - return &inflightRequest{ - ch: make(chan struct{}), - remaining: atomic.NewUint64(0), - } -} - -// NewLeaseCache creates a new instance of a LeaseCache. -func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) { - if conf == nil { - return nil, errors.New("nil configuration provided") - } - - if conf.Proxier == nil || conf.Logger == nil { - return nil, fmt.Errorf("missing configuration required params: %v", conf) - } - - if conf.Client == nil { - return nil, fmt.Errorf("nil API client") - } - - db, err := cachememdb.New() - if err != nil { - return nil, err - } - - // Create a base context for the lease cache layer - baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext) - - return &LeaseCache{ - client: conf.Client, - proxier: conf.Proxier, - logger: conf.Logger, - db: db, - baseCtxInfo: baseCtxInfo, - l: &sync.RWMutex{}, - idLocks: locksutil.CreateLocks(), - inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration), - ps: conf.Storage, - }, nil -} - -// SetShuttingDown is a setter for the shuttingDown field -func (c *LeaseCache) SetShuttingDown(in bool) { - c.shuttingDown.Store(in) -} - -// SetPersistentStorage is a setter for the persistent storage field in -// LeaseCache -func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { - c.ps = storageIn -} - -// checkCacheForRequest checks the cache for a particular request based on its -// computed ID. It returns a non-nil *SendResponse if an entry is found. -func (c *LeaseCache) checkCacheForRequest(id string) (*SendResponse, error) { - index, err := c.db.Get(cachememdb.IndexNameID, id) - if err != nil { - return nil, err - } - - if index == nil { - return nil, nil - } - - // Cached request is found, deserialize the response - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return nil, err - } - - sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) - if err != nil { - c.logger.Error("failed to create new send response", "error", err) - return nil, err - } - sendResp.CacheMeta.Hit = true - - respTime, err := http.ParseTime(resp.Header.Get("Date")) - if err != nil { - c.logger.Error("failed to parse cached response date", "error", err) - return nil, err - } - sendResp.CacheMeta.Age = time.Now().Sub(respTime) - - return sendResp, nil -} - -// Send performs a cache lookup on the incoming request. If it's a cache hit, -// it will return the cached response, otherwise it will delegate to the -// underlying Proxier and cache the received response. -func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - // Compute the index ID - id, err := computeIndexID(req) - if err != nil { - c.logger.Error("failed to compute cache key", "error", err) - return nil, err - } - - // Check the inflight cache to see if there are other inflight requests - // of the same kind, based on the computed ID. 
If so, we increment a counter - - var inflight *inflightRequest - - defer func() { - // Cleanup on the cache if there are no remaining inflight requests. - // This is the last step, so we defer the call first - if inflight != nil && inflight.remaining.Load() == 0 { - c.inflightCache.Delete(id) - } - }() - - idLock := locksutil.LockForKey(c.idLocks, id) - - // Briefly grab an ID-based lock in here to emulate a load-or-store behavior - // and prevent concurrent cacheable requests from being proxied twice if - // they both miss the cache due to it being clean when peeking the cache - // entry. - idLock.Lock() - inflightRaw, found := c.inflightCache.Get(id) - if found { - idLock.Unlock() - inflight = inflightRaw.(*inflightRequest) - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - // If found it means that there's an inflight request being processed. - // We wait until that's finished before proceeding further. - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-inflight.ch: - } - } else { - inflight = newInflightRequest() - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - c.inflightCache.Set(id, inflight, gocache.NoExpiration) - idLock.Unlock() - - // Signal that the processing request is done - defer close(inflight.ch) - } - - // Check if the response for this request is already in the cache - cachedResp, err := c.checkCacheForRequest(id) - if err != nil { - return nil, err - } - if cachedResp != nil { - c.logger.Debug("returning cached response", "path", req.Request.URL.Path) - return cachedResp, nil - } - - c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Pass the request down and get a response - resp, err := c.proxier.Send(ctx, req) - if err != nil { - return resp, err - } - - // If this is a non-2xx or if the returned response does not contain JSON payload, - // we skip caching - if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { - return resp, err - } - - // Get the namespace from the request header - namespace := req.Request.Header.Get(consts.NamespaceHeaderName) - // We need to populate an empty value since go-memdb will skip over indexes - // that contain empty values. - if namespace == "" { - namespace = "root/" - } - - // Build the index to cache based on the response received - index := &cachememdb.Index{ - ID: id, - Namespace: namespace, - RequestPath: req.Request.URL.Path, - LastRenewed: time.Now().UTC(), - } - - secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return nil, err - } - - isRevocation, err := c.handleRevocationRequest(ctx, req, resp) - if err != nil { - c.logger.Error("failed to process the response", "error", err) - return nil, err - } - - // If this is a revocation request, do not go through cache logic. 
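The inflight bookkeeping earlier in `Send` (a brief ID-keyed lock to emulate load-or-store, a channel closed by the owning request, and a remaining-counter for cleanup) is hand-rolled request coalescing. A stripped-down, self-contained sketch of the same pattern; the `coalescer` type and its names are illustrative, not Agent's:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type inflight struct {
	ch        chan struct{} // closed by the request doing the work
	remaining int           // guarded by coalescer.mu
	result    string
}

type coalescer struct {
	mu sync.Mutex
	m  map[string]*inflight
}

// do runs fn once per key per burst: the first caller executes it while
// concurrent callers with the same key block on ch and reuse the result.
func (c *coalescer) do(key string, fn func() string) string {
	c.mu.Lock()
	if f, ok := c.m[key]; ok {
		f.remaining++
		c.mu.Unlock()
		<-f.ch // wait for the owner to finish
		c.mu.Lock()
		f.remaining--
		if f.remaining == 0 {
			delete(c.m, key) // last participant cleans up
		}
		c.mu.Unlock()
		return f.result
	}
	f := &inflight{ch: make(chan struct{}), remaining: 1}
	c.m[key] = f
	c.mu.Unlock()

	f.result = fn()
	close(f.ch) // wake the waiters

	c.mu.Lock()
	f.remaining--
	if f.remaining == 0 {
		delete(c.m, key)
	}
	c.mu.Unlock()
	return f.result
}

func main() {
	c := &coalescer{m: map[string]*inflight{}}
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(c.do("GET /v1/kv/foo", func() string {
				time.Sleep(10 * time.Millisecond) // simulate the proxied call
				return "response"
			}))
		}()
	}
	wg.Wait()
}
```

The golang.org/x/sync/singleflight package offers a packaged version of the same idea.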
- if isRevocation { - return resp, nil - } - - // Fast path for responses with no secrets - if secret == nil { - c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Short-circuit if the secret is not renewable - tokenRenewable, err := secret.TokenIsRenewable() - if err != nil { - c.logger.Error("failed to parse renewable param", "error", err) - return nil, err - } - if !secret.Renewable && !tokenRenewable { - c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if err != nil { - return nil, err - } - // If the lease belongs to a token that is not managed by the agent, - // return the response without caching it. - if entry == nil { - c.logger.Debug("pass-through lease response; token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - index.Lease = secret.LeaseID - index.LeaseToken = req.Token - - index.Type = cacheboltdb.LeaseType - - case secret.Auth != nil: - c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Check if this token creation request resulted in a non-orphan token, and if so - // correctly set the parentCtx to the request's token context. - var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if err != nil { - return nil, err - } - // If parent token is not managed by the agent, child shouldn't be - // either. - if entry == nil { - c.logger.Debug("pass-through auth response; parent token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) - parentCtx = entry.RenewCtxInfo.Ctx - - index.TokenParent = req.Token - } - - renewCtxInfo = c.createCtxInfo(parentCtx) - index.Token = secret.Auth.ClientToken - index.TokenAccessor = secret.Auth.Accessor - - index.Type = cacheboltdb.LeaseType - - default: - // We shouldn't be hitting this, but will err on the side of caution and - // simply proxy. 
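The lease and auth branches above derive each entry's renewal context from its parent token's context, and that chaining is what makes eviction cascade. Stripped of Agent's types, the mechanism is plain `context.WithCancel` composition; a sketch with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Base context: Agent's lifetime.
	base, cancelBase := context.WithCancel(context.Background())
	defer cancelBase()

	// token2's renewal context derives from the base; token3 was created
	// by token2, so its context derives from token2's.
	token2Ctx, cancelToken2 := context.WithCancel(base)
	token3Ctx, cancelToken3 := context.WithCancel(token2Ctx)
	defer cancelToken3()

	// Each cache entry runs a watcher goroutine that exits (and evicts
	// the entry) when its context is cancelled.
	watch := func(name string, ctx context.Context) {
		go func() {
			<-ctx.Done()
			fmt.Println(name, "cancelled -> evict from cache")
		}()
	}
	watch("token2", token2Ctx)
	watch("token3", token3Ctx)

	// Revoking token2 cancels its context; token3's context is derived
	// from it, so token3 is evicted too. The base context is unaffected.
	cancelToken2()
	time.Sleep(50 * time.Millisecond)
}
```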
- c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Serialize the response to store it in the cached index - var respBytes bytes.Buffer - err = resp.Response.Write(&respBytes) - if err != nil { - c.logger.Error("failed to serialize response", "error", err) - return nil, err - } - - // Reset the response body for upper layers to read - if resp.Response.Body != nil { - resp.Response.Body.Close() - } - resp.Response.Body = ioutil.NopCloser(bytes.NewReader(resp.ResponseBody)) - - // Set the index's Response - index.Response = respBytes.Bytes() - - // Store the index ID in the lifetimewatcher context - renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) - - // Store the lifetime watcher context in the index - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: renewCtx, - CancelFunc: renewCtxInfo.CancelFunc, - DoneCh: renewCtxInfo.DoneCh, - } - - // Add extra information necessary for restoring from persisted cache - index.RequestMethod = req.Request.Method - index.RequestToken = req.Token - index.RequestHeader = req.Request.Header - - // Store the index in the cache - c.logger.Debug("storing response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path) - err = c.Set(ctx, index) - if err != nil { - c.logger.Error("failed to cache the proxied response", "error", err) - return nil, err - } - - // Start renewing the secret in the response - go c.startRenewing(renewCtx, index, req, secret) - - return resp, nil -} - -func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { - if ctx == nil { - c.l.RLock() - ctx = c.baseCtxInfo.Ctx - c.l.RUnlock() - } - return cachememdb.NewContextInfo(ctx) -} - -func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { - defer func() { - id := ctx.Value(contextIndexID).(string) - if c.shuttingDown.Load() { - c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - return - } - c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - err := c.Evict(index) - if err != nil { - c.logger.Error("failed to evict index", "id", id, "error", err) - return - } - }() - - client, err := c.client.Clone() - if err != nil { - c.logger.Error("failed to create API client in the lifetime watcher", "error", err) - return - } - client.SetToken(req.Token) - client.SetHeaders(req.Request.Header) - - watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - c.logger.Error("failed to create secret lifetime watcher", "error", err) - return - } - - c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) - go watcher.Start() - defer watcher.Stop() - - for { - select { - case <-ctx.Done(): - // This is the case which captures context cancellations from token - // and leases. Since all the contexts are derived from the agent's - // context, this will also cover the shutdown scenario. 
- c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path) - return - case err := <-watcher.DoneCh(): - // This case covers renewal completion and renewal errors - if err != nil { - c.logger.Error("failed to renew secret", "error", err) - return - } - c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path) - return - case <-watcher.RenewCh(): - c.logger.Debug("secret renewed", "path", req.Request.URL.Path) - if c.ps != nil { - if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil { - c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID) - } - } - case <-index.RenewCtxInfo.DoneCh: - // This case indicates the renewal process to shutdown and evict - // the cache entry. This is triggered when a specific secret - // renewal needs to be killed without affecting any of the derived - // context renewals. - c.logger.Debug("done channel closed") - return - } - } -} - -func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error { - idLock := locksutil.LockForKey(c.idLocks, index.ID) - idLock.Lock() - defer idLock.Unlock() - - getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID) - if err != nil { - return err - } - index.LastRenewed = t - if err := c.Set(ctx, getIndex); err != nil { - return err - } - return nil -} - -// computeIndexID results in a value that uniquely identifies a request -// received by the agent. It does so by SHA256 hashing the serialized request -// object containing the request path, query parameters and body parameters. -func computeIndexID(req *SendRequest) (string, error) { - var b bytes.Buffer - - cloned := req.Request.Clone(context.Background()) - cloned.Header.Del(vaulthttp.VaultIndexHeaderName) - cloned.Header.Del(vaulthttp.VaultForwardHeaderName) - cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) - // Serialize the request - if err := cloned.Write(&b); err != nil { - return "", fmt.Errorf("failed to serialize request: %v", err) - } - - // Reset the request body after it has been closed by Write - req.Request.Body = ioutil.NopCloser(bytes.NewReader(req.RequestBody)) - - // Append req.Token into the byte slice. This is needed since auto-auth'ed - // requests sets the token directly into SendRequest.Token - if _, err := b.Write([]byte(req.Token)); err != nil { - return "", fmt.Errorf("failed to write token to hash input: %w", err) - } - - return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil -} - -// HandleCacheClear returns a handlerFunc that can perform cache clearing operations. 
-func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the cache is not enabled, return a 200 - if c == nil { - return - } - - // Only handle POST/PUT requests - switch r.Method { - case http.MethodPost: - case http.MethodPut: - default: - return - } - - req := new(cacheClearRequest) - if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil { - if err == io.EOF { - err = errors.New("empty JSON provided") - } - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err)) - return - } - - c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value) - - in, err := parseCacheClearInput(req) - if err != nil { - c.logger.Error("unable to parse clear input", "error", err) - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err)) - return - } - - if err := c.handleCacheClear(ctx, in); err != nil { - // Default to 500 on error, unless the user provided an invalid type, - // which would then be a 400. - httpStatus := http.StatusInternalServerError - if err == errInvalidType { - httpStatus = http.StatusBadRequest - } - logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) - return - } - - return - }) -} - -func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error { - if in == nil { - return errors.New("no value(s) provided to clear corresponding cache entries") - } - - switch in.Type { - case "request_path": - // For this particular case, we need to ensure that there are 2 provided - // indexers for the proper lookup. - if in.RequestPath == "" { - return errors.New("request path not provided") - } - - // The first value provided for this case will be the namespace, but if it's - // an empty value we need to overwrite it with "root/" to ensure proper - // cache lookup. 
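The handler above accepts POST or PUT with a JSON body of the form `{"type": ..., "value": ..., "namespace": ...}`. A client-side sketch of invoking it; the route is whatever the agent mounts the handler under (assumed here to be the documented `/agent/v1/cache-clear` default), and the address and token value are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Clear every cached entry associated with a token (placeholder value).
	body := strings.NewReader(`{"type": "token", "value": "<token-to-clear>"}`)
	resp, err := http.Post("http://127.0.0.1:8200/agent/v1/cache-clear", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // 200 on success, 400 on invalid input
}
```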
- if in.Namespace == "" { - in.Namespace = "root/" - } - - // Find all the cached entries which has the given request path and - // cancel the contexts of all the respective lifetime watchers - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath) - if err != nil { - return err - } - for _, index := range indexes { - index.RenewCtxInfo.CancelFunc() - } - - case "token": - if in.Token == "" { - return errors.New("token not provided") - } - - // Get the context for the given token and cancel its context - index, err := c.db.Get(cachememdb.IndexNameToken, in.Token) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to token") - - index.RenewCtxInfo.CancelFunc() - - case "token_accessor": - if in.TokenAccessor == "" { - return errors.New("token accessor not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "lease": - if in.Lease == "" { - return errors.New("lease not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease) - if err != nil { - return err - } - if index == nil { - return nil - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "all": - // Cancel the base context which triggers all the goroutines to - // stop and evict entries from cache. - c.logger.Debug("canceling base context") - c.l.Lock() - c.baseCtxInfo.CancelFunc() - // Reset the base context - baseCtx, baseCancel := context.WithCancel(ctx) - c.baseCtxInfo = &cachememdb.ContextInfo{ - Ctx: baseCtx, - CancelFunc: baseCancel, - } - c.l.Unlock() - - // Reset the memdb instance (and persistent storage if enabled) - if err := c.Flush(); err != nil { - return err - } - - default: - return errInvalidType - } - - c.logger.Debug("successfully cleared matching cache entries") - - return nil -} - -// handleRevocationRequest checks whether the originating request is a -// revocation request, and if so perform applicable cache cleanups. -// Returns true is this is a revocation request. -func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) { - // Lease and token revocations return 204's on success. Fast-path if that's - // not the case. - if resp.Response.StatusCode != http.StatusNoContent { - return false, nil - } - - _, path := deriveNamespaceAndRevocationPath(req) - - switch { - case path == vaultPathTokenRevoke: - // Get the token from the request body - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. 
- in := &cacheClearInput{ - Type: "token", - Token: token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeSelf: - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. - in := &cacheClearInput{ - Type: "token", - Token: req.Token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeAccessor: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - accessorRaw, ok := jsonBody["accessor"] - if !ok { - return false, fmt.Errorf("failed to get accessor from request body") - } - accessor, ok := accessorRaw.(string) - if !ok { - return false, fmt.Errorf("expected accessor in the request body to be string") - } - - in := &cacheClearInput{ - Type: "token_accessor", - TokenAccessor: accessor, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeOrphan: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Kill the lifetime watchers of all the leases attached to the revoked - // token - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.RenewCtxInfo.CancelFunc() - } - - // Kill the lifetime watchers of the revoked token - index, err := c.db.Get(cachememdb.IndexNameToken, token) - if err != nil { - return false, err - } - if index == nil { - return true, nil - } - - // Indicate the lifetime watcher goroutine for this index to return. - // This will not affect the child tokens because the context is not - // getting cancelled. - close(index.RenewCtxInfo.DoneCh) - - // Clear the parent references of the revoked token in the entries - // belonging to the child tokens of the revoked token. - indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.TokenParent = "" - err = c.db.Set(index) - if err != nil { - c.logger.Error("failed to persist index", "error", err) - return false, err - } - } - - case path == vaultPathLeaseRevoke: - // TODO: Should lease present in the URL itself be considered here? 
- // Get the lease from the request body - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - leaseIDRaw, ok := jsonBody["lease_id"] - if !ok { - return false, fmt.Errorf("failed to get lease_id from request body") - } - leaseID, ok := leaseIDRaw.(string) - if !ok { - return false, fmt.Errorf("expected lease_id the request body to be string") - } - in := &cacheClearInput{ - Type: "lease", - Lease: leaseID, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case strings.HasPrefix(path, vaultPathLeaseRevokeForce): - // Trim the URL path to get the request path prefix - prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce) - // Get all the cache indexes that use the request path containing the - // prefix and cancel the lifetime watcher context of each. - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) - if err != nil { - return false, err - } - - _, tokenNSID := namespace.SplitIDFromString(req.Token) - for _, index := range indexes { - _, leaseNSID := namespace.SplitIDFromString(index.Lease) - // Only evict leases that match the token's namespace - if tokenNSID == leaseNSID { - index.RenewCtxInfo.CancelFunc() - } - } - - case strings.HasPrefix(path, vaultPathLeaseRevokePrefix): - // Trim the URL path to get the request path prefix - prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix) - // Get all the cache indexes that use the request path containing the - // prefix and cancel the lifetime watcher context of each. - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) - if err != nil { - return false, err - } - - _, tokenNSID := namespace.SplitIDFromString(req.Token) - for _, index := range indexes { - _, leaseNSID := namespace.SplitIDFromString(index.Lease) - // Only evict leases that match the token's namespace - if tokenNSID == leaseNSID { - index.RenewCtxInfo.CancelFunc() - } - } - - default: - return false, nil - } - - c.logger.Debug("triggered caching eviction from revocation request") - - return true, nil -} - -// Set stores the index in the cachememdb, and also stores it in the persistent -// cache (if enabled) -func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error { - if err := c.db.Set(index); err != nil { - return err - } - - if c.ps != nil { - plaintext, err := index.Serialize() - if err != nil { - return err - } - - if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil { - return err - } - c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID) - } - - return nil -} - -// Evict removes an Index from the cachememdb, and also removes it from the -// persistent cache (if enabled) -func (c *LeaseCache) Evict(index *cachememdb.Index) error { - if err := c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil { - return err - } - - if c.ps != nil { - if err := c.ps.Delete(index.ID, index.Type); err != nil { - return err - } - c.logger.Trace("deleted item from persistent storage", "id", index.ID) - } - - return nil -} - -// Flush the cachememdb and persistent cache (if enabled) -func (c *LeaseCache) Flush() error { - if err := c.db.Flush(); err != nil { - return err - } - - if c.ps != nil { - c.logger.Trace("clearing persistent storage") - return c.ps.Clear() - } - - return nil -} - -// Restore loads the cachememdb from the persistent storage passed in. 
Loads -// tokens first, since restoring a lease's renewal context and watcher requires -// looking up the token in the cachememdb. -func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { - var errs *multierror.Error - - // Process tokens first - tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - if err := c.restoreTokens(tokens); err != nil { - errs = multierror.Append(errs, err) - } - } - - // Then process leases - leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - for _, lease := range leases { - newIndex, err := cachememdb.Deserialize(lease) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) - - // Check if this lease has already expired - expired, err := c.hasExpired(time.Now().UTC(), newIndex) - if err != nil { - c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) - } - if expired { - continue - } - - if err := c.restoreLeaseRenewCtx(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - if err := c.db.Set(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) - } - } - - return errs.ErrorOrNil() -} - -func (c *LeaseCache) restoreTokens(tokens [][]byte) error { - var errors *multierror.Error - - for _, token := range tokens { - newIndex, err := cachememdb.Deserialize(token) - if err != nil { - errors = multierror.Append(errors, err) - continue - } - newIndex.RenewCtxInfo = c.createCtxInfo(nil) - if err := c.db.Set(newIndex); err != nil { - errors = multierror.Append(errors, err) - continue - } - c.logger.Trace("restored token", "id", newIndex.ID) - } - - return errors.ErrorOrNil() -} - -// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts -// the watcher go routine -func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error { - if index.Response == nil { - return fmt.Errorf("cached response was nil for %s", index.ID) - } - - // Parse the secret to determine which type it is - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return err - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return err - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if err != nil { - return err - } - - if entry == nil { - return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - case secret.Auth != nil: - var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if err != nil { - return err - } - // If parent token is not managed by the agent, child shouldn't be - // either. 
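restoreLeaseRenewCtx rebuilds a live *http.Response from the bytes stored in index.Response via bufio plus http.ReadResponse, the inverse of the resp.Response.Write call used when the entry was cached. The round trip in isolation, as a self-contained sketch rather than agent code:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	payload := `{"lease_id": "foo"}`
	orig := &http.Response{
		StatusCode:    200,
		ProtoMajor:    1,
		ProtoMinor:    1,
		Header:        http.Header{"Content-Type": []string{"application/json"}},
		Body:          io.NopCloser(strings.NewReader(payload)),
		ContentLength: int64(len(payload)),
	}

	// Serialize, as done before storing index.Response.
	var buf bytes.Buffer
	if err := orig.Write(&buf); err != nil {
		panic(err)
	}

	// Deserialize, as done on restore.
	restored, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(buf.Bytes())), nil)
	if err != nil {
		panic(err)
	}
	defer restored.Body.Close()
	body, _ := io.ReadAll(restored.Body)
	fmt.Println(restored.StatusCode, string(body))
}
```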
- if entry == nil {
- return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
- }
-
- c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath)
- parentCtx = entry.RenewCtxInfo.Ctx
- }
- renewCtxInfo = c.createCtxInfo(parentCtx)
- default:
- return fmt.Errorf("unknown cached index item: %s", index.ID)
- }
-
- renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
- index.RenewCtxInfo = &cachememdb.ContextInfo{
- Ctx: renewCtx,
- CancelFunc: renewCtxInfo.CancelFunc,
- DoneCh: renewCtxInfo.DoneCh,
- }
-
- sendReq := &SendRequest{
- Token: index.RequestToken,
- Request: &http.Request{
- Header: index.RequestHeader,
- Method: index.RequestMethod,
- URL: &url.URL{
- Path: index.RequestPath,
- },
- },
- }
- go c.startRenewing(renewCtx, index, sendReq, secret)
-
- return nil
-}
-
-// deriveNamespaceAndRevocationPath returns the namespace and relative path for
-// revocation paths.
-//
-// If the path contains a namespace, but it's not a revocation path, it will be
-// returned as-is, since there's no way to tell where the namespace ends and
-// where the request path begins purely from the string.
-//
-// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke
-// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke
-// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar
-// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar
-func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
- namespace := "root/"
- nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
- if nsHeader != "" {
- namespace = nsHeader
- }
-
- fullPath := req.Request.URL.Path
- nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
-
- for _, pathToCheck := range revocationPaths {
- // We use strings.Index here for paths that can contain
- // vars in the path, e.g. /v1/lease/revoke-prefix/:prefix
- i := strings.Index(nonVersionedPath, pathToCheck)
- // If there's no match, move on to the next check
- if i == -1 {
- continue
- }
-
- // If the index is 0, this is a relative path with no namespace prepended,
- // so we can break early
- if i == 0 {
- break
- }
-
- // We need to turn /ns1 into ns1/; this makes it easy to join
- namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
-
- // If it's root, we replace, otherwise we join
- if namespace == "root/" {
- namespace = namespaceInPath
- } else {
- namespace = namespace + namespaceInPath
- }
-
- return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
- }
-
- return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
-}
-
-// RegisterAutoAuthToken adds the provided auto-auth token into the cache. This
-// is primarily used to register the auto-auth token and should only be called
-// within a sink's WriteToken func.
-func (c *LeaseCache) RegisterAutoAuthToken(token string) error {
- // Get the token from the cache
- oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token)
- if err != nil {
- return err
- }
-
- // If the index is found, just keep it in the cache and ignore the incoming
- // token (since they're the same)
- if oldIndex != nil {
- c.logger.Trace("auto-auth token already exists in cache; no need to store it again")
- return nil
- }
-
- // The following randomly generated values are required for the index stored
- // by the cache, but are not actually used. We use random values to prevent
- // accidental access.
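To make the case table in deriveNamespaceAndRevocationPath's doc comment concrete, here is a test-style usage sketch. It is hypothetical, assuming it sits in this package's test file, whose imports already include fmt and net/http/httptest:

```go
func ExampleDeriveNamespaceAndRevocationPath() {
	// Case 1 from the doc comment: namespace embedded in the URL path.
	req := &SendRequest{
		Request: httptest.NewRequest("PUT", "http://example.com/v1/ns1/sys/leases/revoke", nil),
	}
	ns, path := deriveNamespaceAndRevocationPath(req)
	fmt.Println(ns, path)
	// Output: ns1/ /v1/sys/leases/revoke
}
```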
- id, err := base62.Random(5) - if err != nil { - return err - } - namespace, err := base62.Random(5) - if err != nil { - return err - } - requestPath, err := base62.Random(5) - if err != nil { - return err - } - - index := &cachememdb.Index{ - ID: id, - Token: token, - Namespace: namespace, - RequestPath: requestPath, - Type: cacheboltdb.TokenType, - } - - // Derive a context off of the lease cache's base context - ctxInfo := c.createCtxInfo(nil) - - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: ctxInfo.Ctx, - CancelFunc: ctxInfo.CancelFunc, - DoneCh: ctxInfo.DoneCh, - } - - // Store the index in the cache - c.logger.Debug("storing auto-auth token into the cache") - err = c.Set(c.baseCtxInfo.Ctx, index) - if err != nil { - c.logger.Error("failed to cache the auto-auth token", "error", err) - return err - } - - return nil -} - -type cacheClearInput struct { - Type string - - RequestPath string - Namespace string - Token string - TokenAccessor string - Lease string -} - -func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { - if req == nil { - return nil, errors.New("nil request options provided") - } - - if req.Type == "" { - return nil, errors.New("no type provided") - } - - in := &cacheClearInput{ - Type: req.Type, - Namespace: req.Namespace, - } - - switch req.Type { - case "request_path": - in.RequestPath = req.Value - case "token": - in.Token = req.Value - case "token_accessor": - in.TokenAccessor = req.Value - case "lease": - in.Lease = req.Value - } - - return in, nil -} - -func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - return false, fmt.Errorf("failed to deserialize response: %w", err) - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - return false, fmt.Errorf("failed to parse response as secret: %w", err) - } - - elapsed := currentTime.Sub(index.LastRenewed) - var leaseDuration int - switch { - case secret.LeaseID != "": - leaseDuration = secret.LeaseDuration - case secret.Auth != nil: - leaseDuration = secret.Auth.LeaseDuration - default: - return false, errors.New("secret without lease encountered in expiration check") - } - - if int(elapsed.Seconds()) > leaseDuration { - c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) - return true, nil - } - return false, nil -} diff --git a/command/agent/cache/lease_cache_test.go b/command/agent/cache/lease_cache_test.go deleted file mode 100644 index 1501fcfe56db..000000000000 --- a/command/agent/cache/lease_cache_test.go +++ /dev/null @@ -1,1219 +0,0 @@ -package cache - -import ( - "context" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/go-test/deep" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agent/cache/cachememdb" - "github.com/hashicorp/vault/command/agent/cache/keymanager" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" -) - -func testNewLeaseCache(t *testing.T, responses []*SendResponse) 
*LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: newMockProxier(responses), - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - }) - if err != nil { - t.Fatal(err) - } - return lc -} - -func testNewLeaseCacheWithDelay(t *testing.T, cacheable bool, delay int) *LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: &mockDelayProxier{cacheable, delay}, - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - }) - if err != nil { - t.Fatal(err) - } - - return lc -} - -func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, storage *cacheboltdb.BoltStorage) *LeaseCache { - t.Helper() - - client, err := api.NewClient(api.DefaultConfig()) - require.NoError(t, err) - - lc, err := NewLeaseCache(&LeaseCacheConfig{ - Client: client, - BaseContext: context.Background(), - Proxier: newMockProxier(responses), - Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), - Storage: storage, - }) - require.NoError(t, err) - - return lc -} - -func TestCache_ComputeIndexID(t *testing.T) { - type args struct { - req *http.Request - } - tests := []struct { - name string - req *SendRequest - want string - wantErr bool - }{ - { - "basic", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "test", - }, - }, - }, - "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", - false, - }, - { - "ignore consistency headers", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "test", - }, - Header: http.Header{ - vaulthttp.VaultIndexHeaderName: []string{"foo"}, - vaulthttp.VaultInconsistentHeaderName: []string{"foo"}, - vaulthttp.VaultForwardHeaderName: []string{"foo"}, - }, - }, - }, - "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := computeIndexID(tt.req) - if (err != nil) != tt.wantErr { - t.Errorf("actual_error: %v, expected_error: %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, string(tt.want)) { - t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want)) - } - }) - } -} - -func TestLeaseCache_EmptyToken(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), - } - lc := testNewLeaseCache(t, responses) - - // Even if the send request doesn't have a token on it, a successful - // cacheable response should result in the index properly getting populated - // with a token and memdb shouldn't complain while inserting the index. - urlPath := "http://example.com/v1/sample/api" - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatalf("expected a non empty response") - } -} - -func TestLeaseCache_SendCacheable(t *testing.T) { - // Emulate 2 responses from the api proxy. One returns a new token and the - // other returns a lease. 
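Before the cacheable-path tests, note that the hasExpired helper shown just before the test file reduces to simple lease arithmetic: an entry is expired once the time since LastRenewed exceeds the lease duration. In sketch form (illustrative only, assuming a `time` import):

```go
// expired reports whether a cached entry is past its lease, per the rule
// in hasExpired: elapsed time since last renewal exceeds the lease duration.
func expired(now, lastRenewed time.Time, leaseDurationSeconds int) bool {
	elapsed := now.Sub(lastRenewed)
	return int(elapsed.Seconds()) > leaseDurationSeconds
}

// e.g. expired(now, now.Add(-90*time.Second), 60) == true
```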
- responses := []*SendResponse{
- newTestSendResponse(http.StatusCreated, `{"auth": {"client_token": "testtoken", "renewable": true}}`),
- newTestSendResponse(http.StatusOK, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`),
- }
-
- lc := testNewLeaseCache(t, responses)
- // Register a token so that the token and lease requests are cached
- require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
-
- // Make a request. A response with a new token is returned to the lease
- // cache and that will be cached.
- urlPath := "http://example.com/v1/sample/api"
- sendReq := &SendRequest{
- Token: "autoauthtoken",
- Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
- }
- resp, err := lc.Send(context.Background(), sendReq)
- if err != nil {
- t.Fatal(err)
- }
- if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
- t.Fatalf("expected getting proxied response: got %v", diff)
- }
-
- // Send the same request again to get the cached response
- sendReq = &SendRequest{
- Token: "autoauthtoken",
- Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
- }
- resp, err = lc.Send(context.Background(), sendReq)
- if err != nil {
- t.Fatal(err)
- }
- if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
- t.Fatalf("expected getting proxied response: got %v", diff)
- }
-
- // Check TokenParent
- cachedItem, err := lc.db.Get(cachememdb.IndexNameToken, "testtoken")
- if err != nil {
- t.Fatal(err)
- }
- if cachedItem == nil {
- t.Fatalf("expected token entry from cache")
- }
- if cachedItem.TokenParent != "autoauthtoken" {
- t.Fatalf("unexpected value for TokenParent: %s", cachedItem.TokenParent)
- }
-
- // Modify the request a little bit to ensure the second response is
- // returned to the lease cache.
- sendReq = &SendRequest{
- Token: "autoauthtoken",
- Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)),
- }
- resp, err = lc.Send(context.Background(), sendReq)
- if err != nil {
- t.Fatal(err)
- }
- if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil {
- t.Fatalf("expected getting proxied response: got %v", diff)
- }
-
- // Make the same request again and ensure that the same response is returned
- // again.
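The body change above yields a different index ID because computeIndexID hashes the serialized request, body included. A hypothetical in-package test making that explicit (it relies only on identifiers shown in this diff and the test file's existing imports):

```go
// TestIndexIDVariesWithBody is an illustrative sketch, not part of the
// original file: two requests that differ only in body hash differently.
func TestIndexIDVariesWithBody(t *testing.T) {
	mk := func(body string) *SendRequest {
		return &SendRequest{
			Token:       "autoauthtoken",
			Request:     httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(body)),
			RequestBody: []byte(body),
		}
	}
	id1, err := computeIndexID(mk(`{"value": "input"}`))
	require.NoError(t, err)
	id2, err := computeIndexID(mk(`{"value": "input_changed"}`))
	require.NoError(t, err)
	require.NotEqual(t, id1, id2)
}
```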
- sendReq = &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } -} - -func TestLeaseCache_SendNonCacheable(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, `{"value": "output"}`), - newTestSendResponse(http.StatusNotFound, `{"value": "invalid"}`), - newTestSendResponse(http.StatusOK, `Hello`), - newTestSendResponse(http.StatusTemporaryRedirect, ""), - } - - lc := testNewLeaseCache(t, responses) - - // Send a request through the lease cache which is not cacheable (there is - // no lease information or auth information in the response) - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the second response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the third response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[2].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - // Since the response is non-cacheable, the fourth response will be - // returned. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[3].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } -} - -func TestLeaseCache_SendNonCacheableNonTokenLease(t *testing.T) { - // Create the cache - responses := []*SendResponse{ - newTestSendResponse(http.StatusOK, `{"value": "output", "lease_id": "foo"}`), - newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), - } - lc := testNewLeaseCache(t, responses) - - // Send a request through lease cache which returns a response containing - // lease_id. Response will not be cached because it doesn't belong to a - // token that is managed by the lease cache. 
- urlPath := "http://example.com/v1/sample/api" - sendReq := &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - idx, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) - if err != nil { - t.Fatal(err) - } - if idx != nil { - t.Fatalf("expected nil entry, got: %#v", idx) - } - - // Verify that the response is not cached by sending the same request and - // by expecting a different response. - sendReq = &SendRequest{ - Token: "foo", - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err = lc.Send(context.Background(), sendReq) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { - t.Fatalf("expected getting proxied response: got %v", diff) - } - - idx, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) - if err != nil { - t.Fatal(err) - } - if idx != nil { - t.Fatalf("expected nil entry, got: %#v", idx) - } -} - -func TestLeaseCache_HandleCacheClear(t *testing.T) { - lc := testNewLeaseCache(t, nil) - - handler := lc.HandleCacheClear(context.Background()) - ts := httptest.NewServer(handler) - defer ts.Close() - - // Test missing body, should return 400 - resp, err := http.Post(ts.URL, "application/json", nil) - if err != nil { - t.Fatal() - } - if resp.StatusCode != http.StatusBadRequest { - t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode) - } - - testCases := []struct { - name string - reqType string - reqValue string - expectedStatusCode int - }{ - { - "invalid_type", - "foo", - "", - http.StatusBadRequest, - }, - { - "invalid_value", - "", - "bar", - http.StatusBadRequest, - }, - { - "all", - "all", - "", - http.StatusOK, - }, - { - "by_request_path", - "request_path", - "foo", - http.StatusOK, - }, - { - "by_token", - "token", - "foo", - http.StatusOK, - }, - { - "by_lease", - "lease", - "foo", - http.StatusOK, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue) - resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody)) - if err != nil { - t.Fatal(err) - } - if tc.expectedStatusCode != resp.StatusCode { - t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode, resp.StatusCode) - } - }) - } -} - -func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) { - tests := []struct { - name string - req *SendRequest - wantNamespace string - wantRelativePath string - }{ - { - "non_revocation_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/mounts", - }, - }, - }, - "root/", - "/v1/ns1/sys/mounts", - }, - { - "non_revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/mounts", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/mounts", - }, - { - "non_revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/mounts", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/ns2/sys/mounts", 
- }, - { - "revocation_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/leases/revoke", - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke", - }, - { - "revocation_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/leases/revoke", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke", - }, - { - "revocation_relative_partial_ns", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/leases/revoke", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/ns2/", - "/v1/sys/leases/revoke", - }, - { - "revocation_prefix_full_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns1/sys/leases/revoke-prefix/foo", - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke-prefix/foo", - }, - { - "revocation_prefix_relative_path", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/sys/leases/revoke-prefix/foo", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/", - "/v1/sys/leases/revoke-prefix/foo", - }, - { - "revocation_prefix_partial_ns", - &SendRequest{ - Request: &http.Request{ - URL: &url.URL{ - Path: "/v1/ns2/sys/leases/revoke-prefix/foo", - }, - Header: http.Header{ - consts.NamespaceHeaderName: []string{"ns1/"}, - }, - }, - }, - "ns1/ns2/", - "/v1/sys/leases/revoke-prefix/foo", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req) - if gotNamespace != tt.wantNamespace { - t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace) - } - if gotRelativePath != tt.wantRelativePath { - t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath) - } - }) - } -} - -func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) { - lc := testNewLeaseCacheWithDelay(t, false, 50) - - // We are going to send 100 requests, each taking 50ms to process. If these - // requests are processed serially, it will take ~5seconds to finish. we - // use a ContextWithTimeout to tell us if this is the case by giving ample - // time for it process them concurrently but time out if they get processed - // serially. - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - wgDoneCh := make(chan struct{}) - errCh := make(chan error) - - go func() { - var wg sync.WaitGroup - // 100 concurrent requests - for i := 0; i < 100; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - // Send a request through the lease cache which is not cacheable (there is - // no lease information or auth information in the response) - sendReq := &SendRequest{ - Request: httptest.NewRequest("GET", "http://example.com", nil), - } - - _, err := lc.Send(ctx, sendReq) - if err != nil { - errCh <- err - } - }() - } - - wg.Wait() - close(wgDoneCh) - }() - - select { - case <-ctx.Done(): - t.Fatalf("request timed out: %s", ctx.Err()) - case <-wgDoneCh: - case err := <-errCh: - t.Fatal(err) - } -} - -func TestLeaseCache_Concurrent_Cacheable(t *testing.T) { - lc := testNewLeaseCacheWithDelay(t, true, 50) - - if err := lc.RegisterAutoAuthToken("autoauthtoken"); err != nil { - t.Fatal(err) - } - - // We are going to send 100 requests, each taking 50ms to process. 
If these - // requests are processed serially, it will take ~5seconds to finish, so we - // use a ContextWithTimeout to tell us if this is the case. - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() - - var cacheCount atomic.Uint32 - wgDoneCh := make(chan struct{}) - errCh := make(chan error) - - go func() { - var wg sync.WaitGroup - // Start 100 concurrent requests - for i := 0; i < 100; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - sendReq := &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", nil), - } - - resp, err := lc.Send(ctx, sendReq) - if err != nil { - errCh <- err - } - - if resp.CacheMeta != nil && resp.CacheMeta.Hit { - cacheCount.Inc() - } - }() - } - - wg.Wait() - close(wgDoneCh) - }() - - select { - case <-ctx.Done(): - t.Fatalf("request timed out: %s", ctx.Err()) - case <-wgDoneCh: - case err := <-errCh: - t.Fatal(err) - } - - // Ensure that all but one request got proxied. The other 99 should be - // returned from the cache. - if cacheCount.Load() != 99 { - t.Fatalf("Should have returned a cached response 99 times, got %d", cacheCount.Load()) - } -} - -func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) { - t.Helper() - - km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) - require.NoError(t, err) - - tempCacheDir, err = ioutil.TempDir("", "agent-cache-test") - require.NoError(t, err) - boltStorage, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ - Path: tempCacheDir, - Logger: hclog.Default(), - Wrapper: km.Wrapper(), - }) - require.NoError(t, err) - require.NotNil(t, boltStorage) - // The calling function should `defer boltStorage.Close()` and `defer os.RemoveAll(tempCacheDir)` - return tempCacheDir, boltStorage -} - -func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, afterLen int) { - beforeDB, err := before.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, beforeDB, beforeLen) - afterDB, err := after.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, afterDB, afterLen) - for _, cachedItem := range beforeDB { - if strings.Contains(cachedItem.RequestPath, "expect-missing") { - continue - } - restoredItem, err := after.db.Get(cachememdb.IndexNameID, cachedItem.ID) - require.NoError(t, err) - - assert.NoError(t, err) - assert.Equal(t, cachedItem.ID, restoredItem.ID) - assert.Equal(t, cachedItem.Lease, restoredItem.Lease) - assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken) - assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace) - assert.Equal(t, cachedItem.RequestHeader, restoredItem.RequestHeader) - assert.Equal(t, cachedItem.RequestMethod, restoredItem.RequestMethod) - assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath) - assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken) - assert.Equal(t, cachedItem.Response, restoredItem.Response) - assert.Equal(t, cachedItem.Token, restoredItem.Token) - assert.Equal(t, cachedItem.TokenAccessor, restoredItem.TokenAccessor) - assert.Equal(t, cachedItem.TokenParent, restoredItem.TokenParent) - - // check what we can in the renewal context - assert.NotEmpty(t, restoredItem.RenewCtxInfo.CancelFunc) - assert.NotZero(t, restoredItem.RenewCtxInfo.DoneCh) - require.NotEmpty(t, restoredItem.RenewCtxInfo.Ctx) - assert.Equal(t, - cachedItem.RenewCtxInfo.Ctx.Value(contextIndexID), - 
restoredItem.RenewCtxInfo.Ctx.Value(contextIndexID), - ) - } -} - -func TestLeaseCache_PersistAndRestore(t *testing.T) { - // Emulate responses from the api proxy. The first two use the auto-auth - // token, and the others use another token. - // The test re-sends each request to ensure that the response is cached - // so the number of responses and cacheTests specified should always be equal. - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 600}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 600}`), - // The auth token will get manually deleted from the bolt DB storage, causing both of the following two responses - // to be missing from the cache after a restore, because the lease is a child of the auth token. - newTestSendResponse(202, `{"auth": {"client_token": "testtoken2", "renewable": true, "orphan": true, "lease_duration": 600}}`), - newTestSendResponse(203, `{"lease_id": "secret2-lease", "renewable": true, "data": {"number": "two"}, "lease_duration": 600}`), - // 204 No content gets special handling - avoid. - newTestSendResponse(250, `{"auth": {"client_token": "testtoken3", "renewable": true, "orphan": true, "lease_duration": 600}}`), - newTestSendResponse(251, `{"lease_id": "secret3-lease", "renewable": true, "data": {"number": "three"}, "lease_duration": 600}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached - err := lc.RegisterAutoAuthToken("autoauthtoken") - require.NoError(t, err) - - cacheTests := []struct { - token string - method string - urlPath string - body string - deleteFromPersistentStore bool // If true, will be deleted from bolt DB to induce an error on restore - expectMissingAfterRestore bool // If true, the response is not expected to be present in the restored cache - }{ - { - // Make a request. A response with a new token is returned to the - // lease cache and that will be cached. - token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input"}`, - }, - { - // Modify the request a little bit to ensure the second response is - // returned to the lease cache. 
- token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input_changed"}`, - }, - { - // Simulate an approle login to get another token - method: "PUT", - urlPath: "http://example.com/v1/auth/approle-expect-missing/login", - body: `{"role_id": "my role", "secret_id": "my secret"}`, - deleteFromPersistentStore: true, - expectMissingAfterRestore: true, - }, - { - // Test caching with the token acquired from the approle login - token: "testtoken2", - method: "GET", - urlPath: "http://example.com/v1/sample-expect-missing/api", - body: `{"second": "input"}`, - // This will be missing from the restored cache because its parent token was deleted - expectMissingAfterRestore: true, - }, - { - // Simulate another approle login to get another token - method: "PUT", - urlPath: "http://example.com/v1/auth/approle/login", - body: `{"role_id": "my role", "secret_id": "my secret"}`, - }, - { - // Test caching with the token acquired from the latest approle login - token: "testtoken3", - method: "GET", - urlPath: "http://example.com/v1/sample3/api", - body: `{"third": "input"}`, - }, - } - - var deleteIDs []string - for i, ct := range cacheTests { - // Send once to cache - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - if ct.deleteFromPersistentStore { - deleteID, err := computeIndexID(sendReq) - require.NoError(t, err) - deleteIDs = append(deleteIDs, deleteID) - // Now reset the body after calculating the index - sendReq.Request = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, responses[i].Response.StatusCode, resp.Response.StatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // Send again to test cache. If this isn't cached, the response returned - // will be the next in the list and the status code will not match. - sendCacheReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - respCached, err := lc.Send(context.Background(), sendCacheReq) - require.NoError(t, err, "failed to send request %+v", ct) - assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") - require.NotNil(t, respCached.CacheMeta) - assert.True(t, respCached.CacheMeta.Hit) - } - - require.NotEmpty(t, deleteIDs) - for _, deleteID := range deleteIDs { - err = boltStorage.Delete(deleteID, cacheboltdb.LeaseType) - require.NoError(t, err) - } - - // Now we know the cache is working, so try restoring from the persisted - // cache's storage. Responses 3 and 4 have been cleared from the cache, so - // re-send those. - restoredCache := testNewLeaseCache(t, responses[2:4]) - - err = restoredCache.Restore(context.Background(), boltStorage) - errors, ok := err.(*multierror.Error) - require.True(t, ok) - assert.Len(t, errors.Errors, 1) - assert.Contains(t, errors.Error(), "could not find parent Token testtoken2") - - // Now compare the cache contents before and after - compareBeforeAndAfter(t, lc, restoredCache, 7, 5) - - // And finally send the cache requests once to make sure they're all being - // served from the restoredCache unless they were intended to be missing after restore. 
- for i, ct := range cacheTests { - sendCacheReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - respCached, err := restoredCache.Send(context.Background(), sendCacheReq) - require.NoError(t, err, "failed to send request %+v", ct) - assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") - if ct.expectMissingAfterRestore { - require.Nil(t, respCached.CacheMeta) - } else { - require.NotNil(t, respCached.CacheMeta) - assert.True(t, respCached.CacheMeta.Hit) - } - } -} - -func TestLeaseCache_PersistAndRestore_WithManyDependencies(t *testing.T) { - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - - var requests []*SendRequest - var responses []*SendResponse - var orderedRequestPaths []string - - // helper func to generate new auth leases with a child secret lease attached - authAndSecretLease := func(id int, parentToken, newToken string) { - t.Helper() - path := fmt.Sprintf("/v1/auth/approle-%d/login", id) - orderedRequestPaths = append(orderedRequestPaths, path) - requests = append(requests, &SendRequest{ - Token: parentToken, - Request: httptest.NewRequest("PUT", "http://example.com"+path, strings.NewReader("")), - }) - responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"auth": {"client_token": "%s", "renewable": true, "lease_duration": 600}}`, newToken))) - - // Fetch a leased secret using the new token - path = fmt.Sprintf("/v1/kv/%d", id) - orderedRequestPaths = append(orderedRequestPaths, path) - requests = append(requests, &SendRequest{ - Token: newToken, - Request: httptest.NewRequest("GET", "http://example.com"+path, strings.NewReader("")), - }) - responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"lease_id": "secret-%d-lease", "renewable": true, "data": {"number": %d}, "lease_duration": 600}`, id, id))) - } - - // Pathological case: a long chain of child tokens - authAndSecretLease(0, "autoauthtoken", "many-ancestors-token;0") - for i := 1; i <= 50; i++ { - // Create a new generation of child token - authAndSecretLease(i, fmt.Sprintf("many-ancestors-token;%d", i-1), fmt.Sprintf("many-ancestors-token;%d", i)) - } - - // Lots of sibling tokens with auto auth token as their parent - for i := 51; i <= 100; i++ { - authAndSecretLease(i, "autoauthtoken", fmt.Sprintf("many-siblings-token;%d", i)) - } - - // Also create some extra siblings for an auth token further down the chain - for i := 101; i <= 110; i++ { - authAndSecretLease(i, "many-ancestors-token;25", fmt.Sprintf("many-siblings-for-ancestor-token;%d", i)) - } - - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached - err := lc.RegisterAutoAuthToken("autoauthtoken") - require.NoError(t, err) - - for _, req := range requests { - // Send once to cache - resp, err := lc.Send(context.Background(), req) - require.NoError(t, err) - assert.Equal(t, 200, resp.Response.StatusCode, "expected success") - assert.Nil(t, resp.CacheMeta) - } - - // Ensure leases are retrieved in the correct order - var processed int - - leases, err := boltStorage.GetByType(context.Background(), cacheboltdb.LeaseType) - require.NoError(t, err) - for _, lease := range leases { - index, err := cachememdb.Deserialize(lease) - require.NoError(t, err) - require.Equal(t, orderedRequestPaths[processed], index.RequestPath) - processed++ - } - - assert.Equal(t, 
len(orderedRequestPaths), processed) - - restoredCache := testNewLeaseCache(t, nil) - err = restoredCache.Restore(context.Background(), boltStorage) - require.NoError(t, err) - - // Now compare the cache contents before and after - compareBeforeAndAfter(t, lc, restoredCache, 223, 223) -} - -func TestEvictPersistent(t *testing.T) { - ctx := context.Background() - - responses := []*SendResponse{ - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - // populate cache by sending request through - sendReq := &SendRequest{ - Token: "autoauthtoken", - Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(`{"value": "some_input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, 201, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // Check bolt for the cached lease - secrets, err := lc.ps.GetByType(ctx, cacheboltdb.LeaseType) - require.NoError(t, err) - assert.Len(t, secrets, 1) - - // Call clear for the request path - err = lc.handleCacheClear(context.Background(), &cacheClearInput{ - Type: "request_path", - RequestPath: "/v1/sample/api", - }) - require.NoError(t, err) - - time.Sleep(2 * time.Second) - - // Check that cached item is gone - secrets, err = lc.ps.GetByType(ctx, cacheboltdb.LeaseType) - require.NoError(t, err) - assert.Len(t, secrets, 0) -} - -func TestRegisterAutoAuth_sameToken(t *testing.T) { - // If the auto-auth token already exists in the cache, it should not be - // stored again in a new index. - lc := testNewLeaseCache(t, nil) - err := lc.RegisterAutoAuthToken("autoauthtoken") - assert.NoError(t, err) - - oldTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - oldTokenID := oldTokenIndex.ID - - // register the same token again - err = lc.RegisterAutoAuthToken("autoauthtoken") - assert.NoError(t, err) - - // check that there's only one index for autoauthtoken - entries, err := lc.db.GetByPrefix(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - assert.Len(t, entries, 1) - - newTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") - assert.NoError(t, err) - - // compare the ID's since those are randomly generated when an index for a - // token is added to the cache, so if a new token was added, the id's will - // not match. 
- assert.Equal(t, oldTokenID, newTokenIndex.ID) -} - -func Test_hasExpired(t *testing.T) { - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`), - } - lc := testNewLeaseCache(t, responses) - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - cacheTests := []struct { - token string - urlPath string - leaseType string - wantStatusCode int - }{ - { - // auth lease - token: "autoauthtoken", - urlPath: "/v1/sample/auth", - leaseType: cacheboltdb.LeaseType, - wantStatusCode: responses[0].Response.StatusCode, - }, - { - // secret lease - token: "autoauthtoken", - urlPath: "/v1/sample/secret", - leaseType: cacheboltdb.LeaseType, - wantStatusCode: responses[1].Response.StatusCode, - }, - } - - for _, ct := range cacheTests { - // Send once to cache - urlPath := "http://example.com" + ct.urlPath - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - - // get the Index out of the mem cache - index, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", ct.urlPath) - require.NoError(t, err) - assert.Equal(t, ct.leaseType, index.Type) - - // The lease duration is 60 seconds, so time.Now() should be within that - notExpired, err := lc.hasExpired(time.Now().UTC(), index) - require.NoError(t, err) - assert.False(t, notExpired) - - // In 90 seconds the index should be "expired" - futureTime := time.Now().UTC().Add(time.Second * 90) - expired, err := lc.hasExpired(futureTime, index) - require.NoError(t, err) - assert.True(t, expired) - } -} - -func TestLeaseCache_hasExpired_wrong_type(t *testing.T) { - index := &cachememdb.Index{ - Type: cacheboltdb.TokenType, - Response: []byte(`HTTP/0.0 200 OK -Content-Type: application/json -Date: Tue, 02 Mar 2021 17:54:16 GMT - -{}`), - } - - lc := testNewLeaseCache(t, nil) - expired, err := lc.hasExpired(time.Now().UTC(), index) - assert.False(t, expired) - assert.EqualError(t, err, `secret without lease encountered in expiration check`) -} - -func TestLeaseCacheRestore_expired(t *testing.T) { - // Emulate 2 responses from the api proxy, both expired - responses := []*SendResponse{ - newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": -600}}`), - newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": -600}`), - } - - tempDir, boltStorage := setupBoltStorage(t) - defer os.RemoveAll(tempDir) - defer boltStorage.Close() - lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) - - // Register an auto-auth token so that the token and lease requests are cached in mem - require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) - - cacheTests := []struct { - token string - method string - urlPath string - body string - wantStatusCode int - }{ - { - // Make a request. A response with a new token is returned to the - // lease cache and that will be cached. 
- token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input"}`, - wantStatusCode: responses[0].Response.StatusCode, - }, - { - // Modify the request a little bit to ensure the second response is - // returned to the lease cache. - token: "autoauthtoken", - method: "GET", - urlPath: "http://example.com/v1/sample/api", - body: `{"value": "input_changed"}`, - wantStatusCode: responses[1].Response.StatusCode, - }, - } - - for _, ct := range cacheTests { - // Send once to cache - sendReq := &SendRequest{ - Token: ct.token, - Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), - } - resp, err := lc.Send(context.Background(), sendReq) - require.NoError(t, err) - assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") - assert.Nil(t, resp.CacheMeta) - } - - // Restore from the persisted cache's storage - restoredCache := testNewLeaseCache(t, nil) - - err := restoredCache.Restore(context.Background(), boltStorage) - assert.NoError(t, err) - - // The original mem cache should have all three items - beforeDB, err := lc.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, beforeDB, 3) - - // There should only be one item in the restored cache: the autoauth token - afterDB, err := restoredCache.db.GetByPrefix(cachememdb.IndexNameID) - require.NoError(t, err) - assert.Len(t, afterDB, 1) - - // Just verify that the one item in the restored mem cache matches one in the original mem cache, and that it's the auto-auth token - beforeItem, err := lc.db.Get(cachememdb.IndexNameID, afterDB[0].ID) - require.NoError(t, err) - assert.NotNil(t, beforeItem) - - assert.Equal(t, "autoauthtoken", afterDB[0].Token) - assert.Equal(t, cacheboltdb.TokenType, afterDB[0].Type) -} diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go deleted file mode 100644 index c11867ac13a5..000000000000 --- a/command/agent/cache/listener.go +++ /dev/null @@ -1,68 +0,0 @@ -package cache - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/internalshared/listenerutil" -) - -func StartListener(lnConfig *configutil.Listener) (net.Listener, *tls.Config, error) { - addr := lnConfig.Address - - var ln net.Listener - var err error - switch lnConfig.Type { - case "tcp": - if addr == "" { - addr = "127.0.0.1:8200" - } - - bindProto := "tcp" - // If they've passed 0.0.0.0, we only want to bind on IPv4 - // rather than golang's dual stack default - if strings.HasPrefix(addr, "0.0.0.0:") { - bindProto = "tcp4" - } - - ln, err = net.Listen(bindProto, addr) - if err != nil { - return nil, nil, err - } - ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} - - case "unix": - var uConfig *listenerutil.UnixSocketsConfig - if lnConfig.SocketMode != "" && - lnConfig.SocketUser != "" && - lnConfig.SocketGroup != "" { - uConfig = &listenerutil.UnixSocketsConfig{ - Mode: lnConfig.SocketMode, - User: lnConfig.SocketUser, - Group: lnConfig.SocketGroup, - } - } - ln, err = listenerutil.UnixSocketListener(addr, uConfig) - if err != nil { - return nil, nil, err - } - - default: - return nil, nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) - } - - props := map[string]string{"addr": ln.Addr().String()} - tlsConf, _, err := listenerutil.TLSConfig(lnConfig, props, nil) - if err != nil { - return nil, nil, err - } - if tlsConf != nil { - ln = 
tls.NewListener(ln, tlsConf) - } - - return ln, tlsConf, nil -} diff --git a/command/agent/cache/proxy.go b/command/agent/cache/proxy.go deleted file mode 100644 index af9267ba01cc..000000000000 --- a/command/agent/cache/proxy.go +++ /dev/null @@ -1,76 +0,0 @@ -package cache - -import ( - "bytes" - "context" - "io" - "net/http" - "time" - - "github.com/hashicorp/vault/api" -) - -// SendRequest is the input for Proxier.Send. -type SendRequest struct { - Token string - Request *http.Request - - // RequestBody is the stored body bytes from Request.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - RequestBody []byte -} - -// SendResponse is the output from Proxier.Send. -type SendResponse struct { - Response *api.Response - - // ResponseBody is the stored body bytes from Response.Body. It is set here to - // avoid reading and re-setting the stream multiple times. - ResponseBody []byte - CacheMeta *CacheMeta -} - -// CacheMeta contains metadata information about the response, -// such as whether it was a cache hit or miss, and the age of the -// cached entry. -type CacheMeta struct { - Hit bool - Age time.Duration -} - -// Proxier is the interface implemented by different components that are -// responsible for performing specific tasks, such as caching and proxying. All -// these tasks combined together would serve the request received by the agent. -type Proxier interface { - Send(ctx context.Context, req *SendRequest) (*SendResponse, error) -} - -// NewSendResponse creates a new SendResponse and takes care of initializing its -// fields properly. -func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) { - resp := &SendResponse{ - Response: apiResponse, - CacheMeta: &CacheMeta{}, - } - - // If a response body is separately provided we set that as the SendResponse.ResponseBody, - // otherwise we will do an ioutil.ReadAll to extract the response body from apiResponse. - switch { - case len(responseBody) > 0: - resp.ResponseBody = responseBody - case apiResponse.Body != nil: - respBody, err := io.ReadAll(apiResponse.Body) - if err != nil { - return nil, err - } - // Close the old body - apiResponse.Body.Close() - - // Re-set the response body after reading from the Reader - apiResponse.Body = io.NopCloser(bytes.NewReader(respBody)) - - resp.ResponseBody = respBody - } - - return resp, nil -} diff --git a/command/agent/cache/testing.go b/command/agent/cache/testing.go deleted file mode 100644 index 9ec637be4e0e..000000000000 --- a/command/agent/cache/testing.go +++ /dev/null @@ -1,107 +0,0 @@ -package cache - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "math/rand" - "net/http" - "strings" - "time" - - "github.com/hashicorp/vault/api" -) - -// mockProxier is a mock implementation of the Proxier interface, used for testing purposes. -// The mock will return the provided responses every time it reaches its Send method, up to -// the last provided response. This lets tests control what the next/underlying Proxier layer -// might expect to return. 
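
As a quick orientation before the definitions, here is a minimal sketch of how this mock is driven; all names come from this (removed) file, and the snippet is illustrative rather than part of the change:

canned := []*SendResponse{
	newTestSendResponse(200, `{"value": "first"}`),
	newTestSendResponse(200, `{"value": "second"}`),
}
p := newMockProxier(canned)

// Each Send call consumes the next canned response in order.
resp, err := p.Send(context.Background(), &SendRequest{Token: "test"})
_, _ = resp, err // resp is canned[0] here

// Once the slice is exhausted, Send returns an "index out of bounds"
// error instead of wrapping around, which surfaces test misconfiguration.
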
-type mockProxier struct { - proxiedResponses []*SendResponse - responseIndex int -} - -func newMockProxier(responses []*SendResponse) *mockProxier { - return &mockProxier{ - proxiedResponses: responses, - } -} - -func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.responseIndex >= len(p.proxiedResponses) { - return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) - } - resp := p.proxiedResponses[p.responseIndex] - - p.responseIndex++ - - return resp, nil -} - -func (p *mockProxier) ResponseIndex() int { - return p.responseIndex -} - -func newTestSendResponse(status int, body string) *SendResponse { - resp := &SendResponse{ - Response: &api.Response{ - Response: &http.Response{ - StatusCode: status, - Header: http.Header{}, - }, - }, - } - resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) - - if body != "" { - resp.Response.Body = ioutil.NopCloser(strings.NewReader(body)) - resp.ResponseBody = []byte(body) - } - - if json.Valid([]byte(body)) { - resp.Response.Header.Set("content-type", "application/json") - } - - return resp -} - -type mockTokenVerifierProxier struct { - currentToken string -} - -func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - p.currentToken = req.Token - resp := newTestSendResponse(http.StatusOK, - `{"data": {"id": "`+p.currentToken+`"}}`) - - return resp, nil -} - -func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { - return p.currentToken -} - -type mockDelayProxier struct { - cacheableResp bool - delay int -} - -func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - if p.delay > 0 { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(time.Duration(p.delay) * time.Millisecond): - } - } - - // If this is a cacheable response, we return a unique response every time - if p.cacheableResp { - rand.Seed(time.Now().Unix()) - s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) - return newTestSendResponse(http.StatusOK, s), nil - } - - return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil -} diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go index 6337c918a698..555d3f2879b2 100644 --- a/command/agent/cache_end_to_end_test.go +++ b/command/agent/cache_end_to_end_test.go @@ -1,9 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package agent import ( "context" "fmt" - "io/ioutil" "net" "net/http" "os" @@ -14,12 +16,13 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent/auth" - agentapprole "github.com/hashicorp/vault/command/agent/auth/approle" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" - "github.com/hashicorp/vault/command/agent/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + cache "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" @@ -41,9 +44,6 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { var err error logger := logging.NewVaultLogger(log.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), LogicalBackends: map[string]logical.Factory{ "kv": vault.LeasedPassthroughBackendFactory, }, @@ -122,7 +122,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { } roleID1 := resp.Data["role_id"].(string) - rolef, err := ioutil.TempFile("", "auth.role-id.test.") + rolef, err := os.CreateTemp("", "auth.role-id.test.") if err != nil { t.Fatal(err) } @@ -131,7 +131,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { defer os.Remove(role) t.Logf("input role_id_file_path: %s", role) - secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + secretf, err := os.CreateTemp("", "auth.secret-id.test.") if err != nil { t.Fatal(err) } @@ -142,7 +142,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // We close these right away because we're just basically testing // permissions and finding a usable file name - ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + ouf, err := os.CreateTemp("", "auth.tokensink.test.") if err != nil { t.Fatal(err) } @@ -163,8 +163,10 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // Create the API proxier apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: client, - Logger: cacheLogger.Named("apiproxy"), + Client: client, + Logger: cacheLogger.Named("apiproxy"), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -173,10 +175,12 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { // Create the lease cache proxier and set its underlying proxier to // the API proxier. 
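
The chain being assembled here is the core proxying pattern: the lease cache sits in front and only falls through to the API proxy (and from there to Vault) on a cache miss. A trimmed sketch of the wiring, with error handling and most config fields elided:

// Sketch only; not the full test setup.
apiProxy, _ := cache.NewAPIProxy(&cache.APIProxyConfig{ /* client, logger, ... */ })
leaseCache, _ := cache.NewLeaseCache(&cache.LeaseCacheConfig{
	Proxier: apiProxy, // the layer consulted on a cache miss
	/* client, base context, logger, ... */
})
_ = leaseCache // serves hits from memory, proxies misses onward
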
leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: client, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: client, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", }) if err != nil { t.Fatal(err) @@ -265,13 +269,13 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { t.Fatal("expected notexist err") } - if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { + if err := os.WriteFile(role, []byte(roleID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test role 1", "path", role) } - if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { + if err := os.WriteFile(secret, []byte(secretID1), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test secret 1", "path", secret) @@ -283,7 +287,7 @@ func TestCache_UsingAutoAuthToken(t *testing.T) { if time.Now().After(timeout) { t.Fatal("did not find a written token after timeout") } - val, err := ioutil.ReadFile(out) + val, err := os.ReadFile(out) if err == nil { os.Remove(out) if len(val) == 0 { diff --git a/command/agent/cert_end_to_end_test.go b/command/agent/cert_end_to_end_test.go index bacb188021cd..c67b5e408cd7 100644 --- a/command/agent/cert_end_to_end_test.go +++ b/command/agent/cert_end_to_end_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -9,16 +12,14 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/logical/pki" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/api" vaultcert "github.com/hashicorp/vault/builtin/credential/cert" - "github.com/hashicorp/vault/command/agent/auth" - agentcert "github.com/hashicorp/vault/command/agent/auth/cert" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentcert "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -65,7 +66,6 @@ func TestCertEndToEnd(t *testing.T) { func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "cert": vaultcert.Factory, }, @@ -305,7 +305,6 @@ func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) { func TestCertEndToEnd_CertsInConfig(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "cert": vaultcert.Factory, }, @@ -425,7 +424,7 @@ func TestCertEndToEnd_CertsInConfig(t *testing.T) { t.Fatal(err) } defer os.Remove(leafCertFile.Name()) - if _, err := leafCertFile.Write([]byte(leafCertPEM)); err != nil { + if _, err := leafCertFile.WriteString(leafCertPEM); err != nil { t.Fatal(err) } if err := leafCertFile.Close(); err != nil { @@ -437,7 +436,7 @@ func TestCertEndToEnd_CertsInConfig(t *testing.T) { t.Fatal(err) } defer os.Remove(leafCertKeyFile.Name()) - if _, err := leafCertKeyFile.Write([]byte(leafCertKeyPEM)); err != nil 
{ + if _, err := leafCertKeyFile.WriteString(leafCertKeyPEM); err != nil { t.Fatal(err) } if err := leafCertKeyFile.Close(); err != nil { diff --git a/command/agent/cf_end_to_end_test.go b/command/agent/cf_end_to_end_test.go index 6bc1fa8b6a07..a20cdcc76c6e 100644 --- a/command/agent/cf_end_to_end_test.go +++ b/command/agent/cf_end_to_end_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package agent import ( @@ -12,10 +15,10 @@ import ( "github.com/hashicorp/vault-plugin-auth-cf/testing/certificates" cfAPI "github.com/hashicorp/vault-plugin-auth-cf/testing/cf" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentcf "github.com/hashicorp/vault/command/agent/auth/cf" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentcf "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -26,9 +29,6 @@ func TestCFEndToEnd(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: hclog.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "cf": credCF.Factory, }, diff --git a/command/agent/config/config.go b/command/agent/config/config.go index 820a37c8b331..a03dfe39a4e7 100644 --- a/command/agent/config/config.go +++ b/command/agent/config/config.go @@ -1,22 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package config import ( "context" "errors" "fmt" + "io" "net" "os" + "path/filepath" "strings" + "syscall" "time" ctconfig "github.com/hashicorp/consul-template/config" + ctsignals "github.com/hashicorp/consul-template/signals" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" + "k8s.io/utils/strings/slices" + + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/mapstructure" + "github.com/hashicorp/vault/sdk/helper/pointerutil" ) // Config is the configuration for Vault Agent. 
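
The config.go changes below split loading and validation into two phases: LoadConfig now accepts either a file or a directory (merging per-file configs in the directory case), and the cross-section checks that used to run inside LoadConfig move to an explicit ValidateConfig call on the merged result. A caller-side sketch of the assumed flow:

// Sketch of the two-phase flow (error handling abbreviated).
cfg, err := config.LoadConfig(path) // path may be a file or a directory
if err != nil {
	return err
}
// Validation runs only after every file has been merged, so checks on
// combinations (cache + listener, auto_auth + sinks, ...) happen once.
if err := cfg.ValidateConfig(); err != nil {
	return err
}
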
@@ -26,7 +37,7 @@ type Config struct { AutoAuth *AutoAuth `hcl:"auto_auth"` ExitAfterAuth bool `hcl:"exit_after_auth"` Cache *Cache `hcl:"cache"` - APIProxy *APIProxy `hcl:"api_proxy""` + APIProxy *APIProxy `hcl:"api_proxy"` Vault *Vault `hcl:"vault"` TemplateConfig *TemplateConfig `hcl:"template_config"` Templates []*ctconfig.TemplateConfig `hcl:"templates"` @@ -38,11 +49,15 @@ type Config struct { DisableKeepAlivesAPIProxy bool `hcl:"-"` DisableKeepAlivesTemplating bool `hcl:"-"` DisableKeepAlivesAutoAuth bool `hcl:"-"` + Exec *ExecConfig `hcl:"exec,optional"` + EnvTemplates []*ctconfig.TemplateConfig `hcl:"env_template,optional"` } const ( DisableIdleConnsEnv = "VAULT_AGENT_DISABLE_IDLE_CONNECTIONS" DisableKeepAlivesEnv = "VAULT_AGENT_DISABLE_KEEP_ALIVES" + + DefaultTemplateConfigMaxConnsPerHost = 10 ) func (c *Config) Prune() { @@ -76,6 +91,7 @@ type Vault struct { ClientCert string `hcl:"client_cert"` ClientKey string `hcl:"client_key"` TLSServerName string `hcl:"tls_server_name"` + Namespace string `hcl:"namespace"` Retry *Retry `hcl:"retry"` } @@ -100,22 +116,13 @@ type APIProxy struct { // Cache contains any configuration needed for Cache mode type Cache struct { - UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` - UseAutoAuthToken bool `hcl:"-"` - ForceAutoAuthToken bool `hcl:"-"` - EnforceConsistency string `hcl:"enforce_consistency"` - WhenInconsistent string `hcl:"when_inconsistent"` - Persist *Persist `hcl:"persist"` - InProcDialer transportDialer `hcl:"-"` -} - -// Persist contains configuration needed for persistent caching -type Persist struct { - Type string - Path string `hcl:"path"` - KeepAfterImport bool `hcl:"keep_after_import"` - ExitOnErr bool `hcl:"exit_on_err"` - ServiceAccountTokenFile string `hcl:"service_account_token_file"` + UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` + UseAutoAuthToken bool `hcl:"-"` + ForceAutoAuthToken bool `hcl:"-"` + EnforceConsistency string `hcl:"enforce_consistency"` + WhenInconsistent string `hcl:"when_inconsistent"` + Persist *agentproxyshared.PersistConfig `hcl:"persist"` + InProcDialer transportDialer `hcl:"-"` } // AutoAuth is the configured authentication method and sinks @@ -161,6 +168,16 @@ type TemplateConfig struct { ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` StaticSecretRenderIntRaw interface{} `hcl:"static_secret_render_interval"` StaticSecretRenderInt time.Duration `hcl:"-"` + MaxConnectionsPerHostRaw interface{} `hcl:"max_connections_per_host"` + MaxConnectionsPerHost int `hcl:"-"` +} + +type ExecConfig struct { + Command []string `hcl:"command,attr" mapstructure:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes,optional" mapstructure:"restart_on_secret_changes"` + RestartStopSignal os.Signal `hcl:"-" mapstructure:"restart_stop_signal"` + ChildProcessStdout string `mapstructure:"child_process_stdout"` + ChildProcessStderr string `mapstructure:"child_process_stderr"` } func NewConfig() *Config { @@ -169,14 +186,398 @@ func NewConfig() *Config { } } +// Merge merges two Agent configurations. 
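
Its semantics, as implemented just below: block pointers prefer c2 when non-nil, booleans are OR'd together, and template lists concatenate in order. A small sketch of the observable behavior:

a := &Config{ExitAfterAuth: true, Vault: &Vault{Address: "http://a:8200"}}
b := &Config{Vault: &Vault{Address: "http://b:8200"}}

merged := a.Merge(b)
// merged.ExitAfterAuth == true             (true in a; b cannot unset it)
// merged.Vault.Address == "http://b:8200"  (b's non-nil block wins)
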
+func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.AutoAuth = c.AutoAuth + if c2.AutoAuth != nil { + result.AutoAuth = c2.AutoAuth + } + + result.Cache = c.Cache + if c2.Cache != nil { + result.Cache = c2.Cache + } + + result.APIProxy = c.APIProxy + if c2.APIProxy != nil { + result.APIProxy = c2.APIProxy + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + // For these, ignore the non-specific one and overwrite them all + result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth + if c2.DisableIdleConnsAutoAuth { + result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth + } + + result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy + if c2.DisableIdleConnsAPIProxy { + result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy + } + + result.DisableIdleConnsTemplating = c.DisableIdleConnsTemplating + if c2.DisableIdleConnsTemplating { + result.DisableIdleConnsTemplating = c2.DisableIdleConnsTemplating + } + + result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth + if c2.DisableKeepAlivesAutoAuth { + result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth + } + + result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy + if c2.DisableKeepAlivesAPIProxy { + result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy + } + + result.DisableKeepAlivesTemplating = c.DisableKeepAlivesTemplating + if c2.DisableKeepAlivesTemplating { + result.DisableKeepAlivesTemplating = c2.DisableKeepAlivesTemplating + } + + result.TemplateConfig = c.TemplateConfig + if c2.TemplateConfig != nil { + result.TemplateConfig = c2.TemplateConfig + } + + for _, l := range c.Templates { + result.Templates = append(result.Templates, l) + } + for _, l := range c2.Templates { + result.Templates = append(result.Templates, l) + } + + result.ExitAfterAuth = c.ExitAfterAuth + if c2.ExitAfterAuth { + result.ExitAfterAuth = c2.ExitAfterAuth + } + + result.Vault = c.Vault + if c2.Vault != nil { + result.Vault = c2.Vault + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + result.Exec = c.Exec + if c2.Exec != nil { + result.Exec = c2.Exec + } + + for _, envTmpl := range c.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + + for _, envTmpl := range c2.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + + return result +} + +// IsDefaultListerDefined returns true if a default listener has been defined +// in this config +func (c *Config) IsDefaultListerDefined() bool { + for _, l := range c.Listeners { + if l.Role != "metrics_only" { + return true + } + } + return false +} + +// ValidateConfig validates an Agent configuration after it has been fully merged together, to +// ensure that required combinations of configs are there +func (c *Config) ValidateConfig() error { + if c.APIProxy != nil && c.Cache != nil { + if c.Cache.UseAutoAuthTokenRaw != nil { + if c.APIProxy.UseAutoAuthTokenRaw != nil { + return fmt.Errorf("use_auto_auth_token defined in both api_proxy and cache config. 
Please remove this configuration from the cache block") + } else { + c.APIProxy.ForceAutoAuthToken = c.Cache.ForceAutoAuthToken + } + } + } + + if c.Cache != nil { + if len(c.Listeners) < 1 && len(c.Templates) < 1 && len(c.EnvTemplates) < 1 { + return fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") + } + + if c.Cache.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("cache.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return fmt.Errorf("cache.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.APIProxy != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") + } + + if c.APIProxy.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.AutoAuth != nil { + if len(c.AutoAuth.Sinks) == 0 && + (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && + len(c.Templates) == 0 && + len(c.EnvTemplates) == 0 { + return fmt.Errorf("auto_auth requires at least one sink or at least one template or api_proxy.use_auto_auth_token=true") + } + } + + if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { + return fmt.Errorf("no auto_auth, cache, or listener block found in config") + } + + return c.validateEnvTemplateConfig() +} + +func (c *Config) validateEnvTemplateConfig() error { + // if we are not in env-template mode, exit early + if c.Exec == nil && len(c.EnvTemplates) == 0 { + return nil + } + + if c.Exec == nil { + return fmt.Errorf("a top-level 'exec' element must be specified with 'env_template' entries") + } + + if len(c.EnvTemplates) == 0 { + return fmt.Errorf("must specify at least one 'env_template' element with a top-level 'exec' element") + } + + if c.APIProxy != nil { + return fmt.Errorf("'api_proxy' cannot be specified with 'env_template' entries") + } + + if len(c.Templates) > 0 { + return fmt.Errorf("'template' cannot be specified with 'env_template' entries") + } + + if len(c.Exec.Command) == 0 { + return fmt.Errorf("'exec' requires a non-empty 'command' field") + } + + if !slices.Contains([]string{"always", "never"}, c.Exec.RestartOnSecretChanges) { + return fmt.Errorf("'exec.restart_on_secret_changes' unexpected value: %q", c.Exec.RestartOnSecretChanges) + } + + uniqueKeys := make(map[string]struct{}) + + for _, template := range c.EnvTemplates { + // Required: + // - the key (environment variable name) + // - either "contents" or "source" + // Optional / permitted: + // - error_on_missing_key + // - error_fatal + // - left_delimiter + // - right_delimiter + // - ExtFuncMap + // - function_denylist / function_blacklist + + if template.MapToEnvironmentVariable == nil { + return fmt.Errorf("env_template: an environment variable name is required") + } + + key := *template.MapToEnvironmentVariable + + if _, exists := uniqueKeys[key]; exists { + return fmt.Errorf("env_template: duplicate environment variable name: %q", key) + } + + uniqueKeys[key] = struct{}{} + + if template.Contents == nil && template.Source == nil { + return fmt.Errorf("env_template[%s]: either 'contents' or 'source' must be specified", key) + } + + if template.Contents != nil && 
template.Source != nil { + return fmt.Errorf("env_template[%s]: 'contents' and 'source' cannot be specified together", key) + } + + if template.Backup != nil { + return fmt.Errorf("env_template[%s]: 'backup' is not allowed", key) + } + + if template.Command != nil { + return fmt.Errorf("env_template[%s]: 'command' is not allowed", key) + } + + if template.CommandTimeout != nil { + return fmt.Errorf("env_template[%s]: 'command_timeout' is not allowed", key) + } + + if template.CreateDestDirs != nil { + return fmt.Errorf("env_template[%s]: 'create_dest_dirs' is not allowed", key) + } + + if template.Destination != nil { + return fmt.Errorf("env_template[%s]: 'destination' is not allowed", key) + } + + if template.Exec != nil { + return fmt.Errorf("env_template[%s]: 'exec' is not allowed", key) + } + + if template.Perms != nil { + return fmt.Errorf("env_template[%s]: 'perms' is not allowed", key) + } + + if template.User != nil { + return fmt.Errorf("env_template[%s]: 'user' is not allowed", key) + } + + if template.Uid != nil { + return fmt.Errorf("env_template[%s]: 'uid' is not allowed", key) + } + + if template.Group != nil { + return fmt.Errorf("env_template[%s]: 'group' is not allowed", key) + } + + if template.Gid != nil { + return fmt.Errorf("env_template[%s]: 'gid' is not allowed", key) + } + + if template.Wait != nil { + return fmt.Errorf("env_template[%s]: 'wait' is not allowed", key) + } + + if template.SandboxPath != nil { + return fmt.Errorf("env_template[%s]: 'sandbox_path' is not allowed", key) + } + } + + return nil +} + // LoadConfig loads the configuration at the given path, regardless if -// its a file or directory. +// it's a file or directory. func LoadConfig(path string) (*Config, error) { fi, err := os.Stat(path) if err != nil { return nil, err } + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigDir loads the configuration at the given path if it's a directory +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. 
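
Concretely, the names this skips while scanning a config directory:

// Sketch of the editor artifacts ignored during directory loads.
isTemporaryFile("agent.hcl~")  // true: vim backup
isTemporaryFile(".#agent.hcl") // true: emacs lock file
isTemporaryFile("#agent.hcl#") // true: emacs auto-save
isTemporaryFile("agent.hcl")   // false: loaded normally
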
+func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +// LoadConfigFile loads the configuration at the given path if it's a file +func LoadConfigFile(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if fi.IsDir() { return nil, fmt.Errorf("location is a directory, not a file") } @@ -236,16 +637,6 @@ func LoadConfig(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'api_proxy':%w", err) } - if result.APIProxy != nil && result.Cache != nil { - if result.Cache.UseAutoAuthTokenRaw != nil { - if result.APIProxy.UseAutoAuthTokenRaw != nil { - return nil, fmt.Errorf("use_auto_auth_token defined in both api_proxy and cache config. Please remove this configuration from the cache block") - } else { - result.APIProxy.ForceAutoAuthToken = result.Cache.ForceAutoAuthToken - } - } - } - if err := parseTemplateConfig(result, list); err != nil { return nil, fmt.Errorf("error parsing 'template_config': %w", err) } @@ -254,48 +645,18 @@ func LoadConfig(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'template': %w", err) } - if result.Cache != nil && result.APIProxy == nil && len(result.Listeners) > 0 { - result.APIProxy = &APIProxy{ - UseAutoAuthToken: result.Cache.UseAutoAuthToken, - ForceAutoAuthToken: result.Cache.ForceAutoAuthToken, - } + if err := parseExec(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'exec': %w", err) } - if result.Cache != nil { - if len(result.Listeners) < 1 && len(result.Templates) < 1 { - return nil, fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") - } - - if result.Cache.UseAutoAuthToken { - if result.AutoAuth == nil { - return nil, fmt.Errorf("cache.use_auto_auth_token is true but auto_auth not configured") - } - if result.AutoAuth != nil && result.AutoAuth.Method != nil && result.AutoAuth.Method.WrapTTL > 0 { - return nil, fmt.Errorf("cache.use_auto_auth_token is true and auto_auth uses wrapping") - } - } + if err := parseEnvTemplates(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'env_template': %w", err) } - if result.APIProxy != nil { - if len(result.Listeners) < 1 { - return nil, fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") - } - - if result.APIProxy.UseAutoAuthToken { - if result.AutoAuth == nil { - return nil, fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") - } - if result.AutoAuth != nil && result.AutoAuth.Method != nil && result.AutoAuth.Method.WrapTTL > 0 { - return nil, fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") - } - } - } - - if result.AutoAuth != nil { - if len(result.AutoAuth.Sinks) == 0 && - (result.APIProxy == nil || !result.APIProxy.UseAutoAuthToken) && - len(result.Templates) == 0 { - return nil, fmt.Errorf("auto_auth requires at least one sink or at least one template or api_proxy.use_auto_auth_token=true") + if result.Cache != nil && result.APIProxy == nil && (result.Cache.UseAutoAuthToken || result.Cache.ForceAutoAuthToken) { + result.APIProxy = &APIProxy{ + UseAutoAuthToken: result.Cache.UseAutoAuthToken, + ForceAutoAuthToken: result.Cache.ForceAutoAuthToken, } } @@ -304,19 +665,17 @@ func LoadConfig(path string) (*Config, error) { return nil, fmt.Errorf("error parsing 'vault':%w", err) } - if result.Vault == nil { - 
result.Vault = &Vault{} - } - - // Set defaults - if result.Vault.Retry == nil { - result.Vault.Retry = &Retry{} - } - switch result.Vault.Retry.NumRetries { - case 0: - result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts - case -1: - result.Vault.Retry.NumRetries = 0 + if result.Vault != nil { + // Set defaults + if result.Vault.Retry == nil { + result.Vault.Retry = &Retry{} + } + switch result.Vault.Retry.NumRetries { + case 0: + result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts + case -1: + result.Vault.Retry.NumRetries = 0 + } } if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" { @@ -543,7 +902,7 @@ func parsePersist(result *Config, list *ast.ObjectList) error { item := persistList.Items[0] - var p Persist + var p agentproxyshared.PersistConfig err := hcl.DecodeObject(&p, item.Val) if err != nil { return err @@ -747,6 +1106,9 @@ func parseTemplateConfig(result *Config, list *ast.ObjectList) error { templateConfigList := list.Filter(name) if len(templateConfigList.Items) == 0 { + result.TemplateConfig = &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + } return nil } @@ -772,6 +1134,17 @@ func parseTemplateConfig(result *Config, list *ast.ObjectList) error { result.TemplateConfig.StaticSecretRenderIntRaw = nil } + if result.TemplateConfig.MaxConnectionsPerHostRaw != nil { + var err error + if result.TemplateConfig.MaxConnectionsPerHost, err = parseutil.SafeParseInt(result.TemplateConfig.MaxConnectionsPerHostRaw); err != nil { + return err + } + + result.TemplateConfig.MaxConnectionsPerHostRaw = nil + } else { + result.TemplateConfig.MaxConnectionsPerHost = DefaultTemplateConfigMaxConnsPerHost + } + return nil } @@ -841,3 +1214,121 @@ func parseTemplates(result *Config, list *ast.ObjectList) error { result.Templates = tcs return nil } + +func parseExec(result *Config, list *ast.ObjectList) error { + name := "exec" + + execList := list.Filter(name) + if len(execList.Items) == 0 { + return nil + } + + if len(execList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + item := execList.Items[0] + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return errors.New("error converting config") + } + + var execConfig ExecConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &execConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // if the user does not specify a restart signal, default to SIGTERM + if execConfig.RestartStopSignal == nil { + execConfig.RestartStopSignal = syscall.SIGTERM + } + + if execConfig.RestartOnSecretChanges == "" { + execConfig.RestartOnSecretChanges = "always" + } + + result.Exec = &execConfig + return nil +} + +func parseEnvTemplates(result *Config, list *ast.ObjectList) error { + name := "env_template" + + envTemplateList := list.Filter(name) + + if len(envTemplateList.Items) < 1 { + return nil + } + + envTemplates := 
make([]*ctconfig.TemplateConfig, 0, len(envTemplateList.Items)) + + for _, item := range envTemplateList.Items { + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]any) + if !ok { + return errors.New("error converting config") + } + + var templateConfig ctconfig.TemplateConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &templateConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // parse the keys in the item for the environment variable name + if numberOfKeys := len(item.Keys); numberOfKeys != 1 { + return fmt.Errorf("expected one and only one environment variable name, got %d", numberOfKeys) + } + + // hcl parses this with extra quotes if quoted in config file + environmentVariableName := strings.Trim(item.Keys[0].Token.Text, `"`) + + templateConfig.MapToEnvironmentVariable = pointerutil.StringPtr(environmentVariableName) + + envTemplates = append(envTemplates, &templateConfig) + } + + result.EnvTemplates = envTemplates + return nil +} diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index ba601e56b6a1..f729aec4be66 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -1,18 +1,25 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package config import ( "os" + "syscall" "testing" "time" "github.com/go-test/deep" ctconfig "github.com/hashicorp/consul-template/config" + "golang.org/x/exp/slices" + + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/pointerutil" ) func TestLoadConfigFile_AgentCache(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") if err != nil { t.Fatal(err) } @@ -77,7 +84,7 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -85,6 +92,9 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { ServiceAccountTokenFile: "/tmp/serviceaccount/token", }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", CACert: "config_ca_cert", @@ -104,7 +114,7 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { t.Fatal(diff) } - config, err = LoadConfig("./test-fixtures/config-cache-embedded-type.hcl") + config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") if err != nil { t.Fatal(err) } @@ -116,8 +126,245 @@ func TestLoadConfigFile_AgentCache(t *testing.T) { } } +func TestLoadConfigDir_AgentCache(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-cache/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: true, + ForceAutoAuthToken: false, + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = 
LoadConfigFile("./test-fixtures/config-dir-cache/config-cache1.hcl") + if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-cache/config-cache2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigDir_AutoAuthAndListener(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-auto-auth-and-listener/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config1.hcl") + if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigDir_VaultBlock(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-vault-block/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-dir-vault-block/config1.hcl") + if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-vault-block/config2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-no-listeners.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-no-listeners.hcl") if err != nil { t.Fatal(err) } @@ -146,11 +393,15 @@ func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { }, }, }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, 
Cache: &Cache{ UseAutoAuthToken: true, UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: true, @@ -158,6 +409,9 @@ func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { ServiceAccountTokenFile: "/tmp/serviceaccount/token", }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", CACert: "config_ca_cert", @@ -194,7 +448,7 @@ func TestLoadConfigFile(t *testing.T) { } }() - config, err := LoadConfig("./test-fixtures/config.hcl") + config, err := LoadConfigFile("./test-fixtures/config.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -237,10 +491,8 @@ func TestLoadConfigFile(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -249,7 +501,7 @@ func TestLoadConfigFile(t *testing.T) { t.Fatal(diff) } - config, err = LoadConfig("./test-fixtures/config-embedded-type.hcl") + config, err = LoadConfigFile("./test-fixtures/config-embedded-type.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -261,7 +513,7 @@ func TestLoadConfigFile(t *testing.T) { } func TestLoadConfigFile_Method_Wrapping(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-method-wrapping.hcl") + config, err := LoadConfigFile("./test-fixtures/config-method-wrapping.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -290,10 +542,8 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -304,7 +554,7 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) { } func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-method-initial-backoff.hcl") + config, err := LoadConfigFile("./test-fixtures/config-method-initial-backoff.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -334,10 +584,8 @@ func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -348,7 +596,7 @@ func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { } func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-method-exit-on-err.hcl") + config, err := LoadConfigFile("./test-fixtures/config-method-exit-on-err.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -378,10 +626,8 @@ func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -392,14 +638,13 @@ func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { } func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-no-auto_auth.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-no-auto_auth.hcl") if err != nil { t.Fatalf("err: %s", err) } expected := &Config{ - APIProxy: &APIProxy{}, - Cache: &Cache{}, + Cache: &Cache{}, SharedConfig: &configutil.SharedConfig{ PidFile: "./pidfile", Listeners: []*configutil.Listener{ @@ 
-410,10 +655,8 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -424,63 +667,98 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { } func TestLoadConfigFile_Bad_AgentCache_InconsisentAutoAuth(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() if err == nil { - t.Fatal("LoadConfig should return an error when use_auto_auth_token=true and no auto_auth section present") + t.Fatal("ValidateConfig should return an error when use_auto_auth_token=true and no auto_auth section present") } } func TestLoadConfigFile_Bad_AgentCache_ForceAutoAuthNoMethod(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-cache-force-auto_auth.hcl") + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-force-token-no-auth-method.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() if err == nil { - t.Fatal("LoadConfig should return an error when use_auto_auth_token=force and no auto_auth section present") + t.Fatal("ValidateConfig should return an error when use_auto_auth_token=force and no auto_auth section present") } } func TestLoadConfigFile_Bad_AgentCache_NoListeners(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-cache-no-listeners.hcl") - if err == nil { - t.Fatal("LoadConfig should return an error when cache section present and no listeners present and no templates defined") + _, err := LoadConfigFile("./test-fixtures/bad-config-cache-no-listeners.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should return an error for this config") } } func TestLoadConfigFile_Bad_AutoAuth_Wrapped_Multiple_Sinks(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl") + _, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl") if err == nil { - t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and multiple sinks defined") + t.Fatalf("LoadConfigFile should return an error for this config, err: %v", err) } } func TestLoadConfigFile_Bad_AutoAuth_Nosinks_Nocache_Notemplates(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl") + config, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() if err == nil { - t.Fatal("LoadConfig should return an error when auto_auth configured and there are no sinks, caches or templates") + t.Fatal("ValidateConfig should return an error when auto_auth configured and there are no sinks, caches or templates") } } func TestLoadConfigFile_Bad_AutoAuth_Both_Wrapping_Types(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl") + _, 
err := LoadConfigFile("./test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl") if err == nil { - t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and sinks.wrap_ttl nonzero") + t.Fatalf("LoadConfigFile should return an error for this config") } } func TestLoadConfigFile_Bad_AgentCache_AutoAuth_Method_wrapping(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl") + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() if err == nil { - t.Fatal("LoadConfig should return an error when auth_auth.method.wrap_ttl nonzero and cache.use_auto_auth_token=true") + t.Fatal("ValidateConfig should return an error when auth_auth.method.wrap_ttl nonzero and cache.use_auto_auth_token=true") } } func TestLoadConfigFile_Bad_APIProxy_And_Cache_Same_Config(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-api_proxy-cache.hcl") + config, err := LoadConfigFile("./test-fixtures/bad-config-api_proxy-cache.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() if err == nil { - t.Fatal("LoadConfig should return an error when cache and api_proxy try and configure the same value") + t.Fatal("ValidateConfig should return an error when cache and api_proxy try and configure the same value") } } func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-no-sink.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-no-sink.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -514,10 +792,8 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { UseAutoAuthTokenRaw: true, ForceAutoAuthToken: false, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -528,7 +804,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-force.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-force.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -562,10 +838,8 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { UseAutoAuthTokenRaw: "force", ForceAutoAuthToken: true, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -576,7 +850,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-true.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-true.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -610,10 +884,8 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { UseAutoAuthTokenRaw: "true", ForceAutoAuthToken: false, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: 
DefaultTemplateConfigMaxConnsPerHost, }, } @@ -624,7 +896,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { } func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl") + config, err := LoadConfigFile("./test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -656,10 +928,8 @@ func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { EnforceConsistency: "always", WhenInconsistent: "forward", }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -670,7 +940,7 @@ func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { } func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-auto_auth-false.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-false.hcl") if err != nil { t.Fatalf("err: %s", err) } @@ -706,19 +976,13 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { }, }, }, - APIProxy: &APIProxy{ - UseAutoAuthToken: false, - ForceAutoAuthToken: false, - }, Cache: &Cache{ UseAutoAuthToken: false, UseAutoAuthTokenRaw: "false", ForceAutoAuthToken: false, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -729,15 +993,14 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { } func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-cache-persist-false.hcl") + config, err := LoadConfigFile("./test-fixtures/config-cache-persist-false.hcl") if err != nil { t.Fatalf("err: %s", err) } expected := &Config{ - APIProxy: &APIProxy{}, Cache: &Cache{ - Persist: &Persist{ + Persist: &agentproxyshared.PersistConfig{ Type: "kubernetes", Path: "/vault/agent-cache/", KeepAfterImport: false, @@ -755,10 +1018,8 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -769,7 +1030,7 @@ func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { } func TestLoadConfigFile_AgentCache_PersistMissingType(t *testing.T) { - _, err := LoadConfig("./test-fixtures/config-cache-persist-empty-type.hcl") + _, err := LoadConfigFile("./test-fixtures/config-cache-persist-empty-type.hcl") if err == nil || os.IsNotExist(err) { t.Fatal("expected error or file is missing") } @@ -785,19 +1046,27 @@ func TestLoadConfigFile_TemplateConfig(t *testing.T) { TemplateConfig{ ExitOnRetryFailure: true, StaticSecretRenderInt: 1 * time.Minute, + MaxConnectionsPerHost: 100, }, }, "empty": { "./test-fixtures/config-template_config-empty.hcl", TemplateConfig{ - ExitOnRetryFailure: false, + ExitOnRetryFailure: false, + MaxConnectionsPerHost: 10, + }, + }, + "missing": { + "./test-fixtures/config-template_config-missing.hcl", + TemplateConfig{ + MaxConnectionsPerHost: 10, }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfig(tc.fixturePath) + config, err := LoadConfigFile(tc.fixturePath) if err != nil { t.Fatal(err) } @@ -896,7 +1165,7 @@ func TestLoadConfigFile_Template(t *testing.T) { for name, tc := range 
testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfig(tc.fixturePath) + config, err := LoadConfigFile(tc.fixturePath) if err != nil { t.Fatalf("err: %s", err) } @@ -926,10 +1195,8 @@ func TestLoadConfigFile_Template(t *testing.T) { }, }, }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, Templates: tc.expectedTemplates, } @@ -1007,7 +1274,7 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - config, err := LoadConfig(tc.fixturePath) + config, err := LoadConfigFile(tc.fixturePath) if err != nil { t.Fatalf("err: %s", err) } @@ -1027,12 +1294,10 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { }, Sinks: nil, }, - Templates: tc.expectedTemplates, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, + Templates: tc.expectedTemplates, } config.Prune() @@ -1043,8 +1308,48 @@ func TestLoadConfigFile_Template_NoSinks(t *testing.T) { } }
 +// TestLoadConfigFile_Template_WithCache ensures that the cache {} stanza is +// permitted in Vault Agent configuration with template(s) +func TestLoadConfigFile_Template_WithCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-template-with-cache.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + Cache: &Cache{}, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, + Templates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + func TestLoadConfigFile_Vault_Retry(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-vault-retry.hcl") + config, err := LoadConfigFile("./test-fixtures/config-vault-retry.hcl") if err != nil { t.Fatal(err) } @@ -1074,6 +1379,9 @@ func TestLoadConfigFile_Vault_Retry(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1089,7 +1397,7 @@ func TestLoadConfigFile_Vault_Retry(t *testing.T) { }
 func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-vault-retry-empty.hcl") + config, err := LoadConfigFile("./test-fixtures/config-vault-retry-empty.hcl") if err != nil { t.Fatal(err) } @@ -1119,6 +1427,9 @@ func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1134,7 +1445,7 @@ func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { }
 func TestLoadConfigFile_EnforceConsistency(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-consistency.hcl") + config, err := LoadConfigFile("./test-fixtures/config-consistency.hcl") if err != 
nil { t.Fatal(err) } @@ -1150,15 +1461,12 @@ func TestLoadConfigFile_EnforceConsistency(t *testing.T) { }, PidFile: "", }, - APIProxy: &APIProxy{}, Cache: &Cache{ EnforceConsistency: "always", WhenInconsistent: "retry", }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -1169,7 +1477,7 @@ func TestLoadConfigFile_EnforceConsistency(t *testing.T) { } func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-consistency-apiproxy.hcl") + config, err := LoadConfigFile("./test-fixtures/config-consistency-apiproxy.hcl") if err != nil { t.Fatal(err) } @@ -1189,10 +1497,8 @@ func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { EnforceConsistency: "always", WhenInconsistent: "retry", }, - Vault: &Vault{ - Retry: &Retry{ - NumRetries: 12, - }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, }, } @@ -1203,7 +1509,7 @@ func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-all.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-all.hcl") if err != nil { t.Fatal(err) } @@ -1237,6 +1543,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1252,7 +1561,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-auto-auth.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-auto-auth.hcl") if err != nil { t.Fatal(err) } @@ -1286,6 +1595,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1301,7 +1613,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-templating.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-templating.hcl") if err != nil { t.Fatal(err) } @@ -1335,6 +1647,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1350,7 +1665,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-caching.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-caching.hcl") if err != nil { t.Fatal(err) } @@ -1384,6 +1699,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ 
Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1399,7 +1717,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-proxying.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-proxying.hcl") if err != nil { t.Fatal(err) } @@ -1433,6 +1751,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1448,7 +1769,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { } func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-empty.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") if err != nil { t.Fatal(err) } @@ -1482,6 +1803,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1503,7 +1827,7 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { if err != nil { t.Fatal(err) } - config, err := LoadConfig("./test-fixtures/config-disable-idle-connections-empty.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") if err != nil { t.Fatal(err) } @@ -1537,6 +1861,9 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1552,14 +1879,14 @@ func TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { } func TestLoadConfigFile_Bad_Value_Disable_Idle_Conns(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-disable-idle-connections.hcl") + _, err := LoadConfigFile("./test-fixtures/bad-config-disable-idle-connections.hcl") if err == nil { t.Fatal("should have error, it didn't") } } func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-all.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-all.hcl") if err != nil { t.Fatal(err) } @@ -1593,6 +1920,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1608,7 +1938,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-auto-auth.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-auto-auth.hcl") if err != nil { t.Fatal(err) } @@ -1642,6 +1972,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1657,7 +1990,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) 
{ } func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-templating.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-templating.hcl") if err != nil { t.Fatal(err) } @@ -1691,6 +2024,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1706,7 +2042,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-caching.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-caching.hcl") if err != nil { t.Fatal(err) } @@ -1740,6 +2076,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1755,7 +2094,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-proxying.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-proxying.hcl") if err != nil { t.Fatal(err) } @@ -1789,6 +2128,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1804,7 +2146,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { } func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-empty.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") if err != nil { t.Fatal(err) } @@ -1838,6 +2180,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1859,7 +2204,7 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { if err != nil { t.Fatal(err) } - config, err := LoadConfig("./test-fixtures/config-disable-keep-alives-empty.hcl") + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") if err != nil { t.Fatal(err) } @@ -1893,6 +2238,9 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { }, }, }, + TemplateConfig: &TemplateConfig{ + MaxConnectionsPerHost: DefaultTemplateConfigMaxConnsPerHost, + }, Vault: &Vault{ Address: "http://127.0.0.1:1111", Retry: &Retry{ @@ -1908,8 +2256,194 @@ func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { } func TestLoadConfigFile_Bad_Value_Disable_Keep_Alives(t *testing.T) { - _, err := LoadConfig("./test-fixtures/bad-config-disable-keep-alives.hcl") + _, err := LoadConfigFile("./test-fixtures/bad-config-disable-keep-alives.hcl") if err == nil { t.Fatal("should have error, it didn't") } } + +// TestLoadConfigFile_EnvTemplates_Simple loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Simple(t 
*testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedKey := "MY_DATABASE_USER" + found := false + for _, envTemplate := range cfg.EnvTemplates { + if *envTemplate.MapToEnvironmentVariable == expectedKey { + found = true + } + } + if !found { + t.Fatalf("expected environment variable name to be populated") + } +} + +// TestLoadConfigFile_EnvTemplates_Complex loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Complex(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedKeys := []string{ + "FOO_PASSWORD", + "FOO_USER", + } + + envExists := func(key string) bool { + for _, envTmpl := range cfg.EnvTemplates { + if *envTmpl.MapToEnvironmentVariable == key { + return true + } + } + return false + } + + for _, expected := range expectedKeys { + if !envExists(expected) { + t.Fatalf("expected environment variable %s", expected) + } + } +} + +// TestLoadConfigFile_EnvTemplates_WithSource loads and validates an +// env_template config with "source" instead of "contents" +func TestLoadConfigFile_EnvTemplates_WithSource(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-with-source.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } +} + +// TestLoadConfigFile_EnvTemplates_NoName ensures that env_template with no name triggers an error +func TestLoadConfigFile_EnvTemplates_NoName(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-no-name.hcl") + if err == nil { + t.Fatalf("expected error") + } +} + +// TestLoadConfigFile_EnvTemplates_ExecInvalidSignal ensures that an invalid signal triggers an error +func TestLoadConfigFile_EnvTemplates_ExecInvalidSignal(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-invalid-signal.hcl") + if err == nil { + t.Fatalf("expected error") + } +} + +// TestLoadConfigFile_EnvTemplates_ExecSimple validates the exec section with default parameters +func TestLoadConfigFile_EnvTemplates_ExecSimple(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedCmd := []string{"/path/to/my/app", "arg1", "arg2"} + if !slices.Equal(cfg.Exec.Command, expectedCmd) { + t.Fatal("exec.command does not have expected value") + } + + // check defaults + if cfg.Exec.RestartOnSecretChanges != "always" { + t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'always', got '%s'", cfg.Exec.RestartOnSecretChanges) + } + + if cfg.Exec.RestartStopSignal != syscall.SIGTERM { + t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGTERM', got '%s'", cfg.Exec.RestartStopSignal) + } +} + +// TestLoadConfigFile_EnvTemplates_ExecComplex validates the exec section with non-default parameters +func TestLoadConfigFile_EnvTemplates_ExecComplex(t *testing.T) { + cfg, err := 
LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + if !slices.Equal(cfg.Exec.Command, []string{"env"}) { + t.Fatal("exec.command does not have expected value") + } + + if cfg.Exec.RestartOnSecretChanges != "never" { + t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'never', got %q", cfg.Exec.RestartOnSecretChanges) + } + + if cfg.Exec.RestartStopSignal != syscall.SIGINT { + t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGINT', got %q", cfg.Exec.RestartStopSignal) + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_MissingExec ensures that ValidateConfig +// errors when "env_template" stanza(s) are specified but "exec" is missing +func TestLoadConfigFile_Bad_EnvTemplates_MissingExec(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-missing-exec.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: exec section is missing") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_WithProxy ensures that ValidateConfig +// errors when both env_template and api_proxy stanzas are present +func TestLoadConfigFile_Bad_EnvTemplates_WithProxy(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-proxy.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: listener / api_proxy are not compatible with env_template") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates ensures that +// ValidateConfig errors when both env_template and template stanzas are present +func TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-file-templates.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: file template stanza is not compatible with env_template") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields ensure that +// ValidateConfig errors for disalowed env_template fields +func TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-disalowed-fields.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: disallowed fields specified in env_template") + } +} diff --git a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl index ae79293b483a..b35a06f2848b 100644 --- a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl +++ b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl index 93e31aad4d73..1d618befa974 100644 --- a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl index 9a491fa4efc2..eea90860cbe9 100644 --- a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl index 5821c1b59f83..e90ba98bb0aa 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl index e2c8b328eb02..39f2bc740266 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl index 5029b8d37641..f5d39af89eee 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl index 9d8110e2d67c..46f2802689ad 100644 --- a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl index c13c82520ee6..10b7e54a7a54 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
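+# Fixture note: "foo" below is not a recognized disable_idle_connections value,
+# so TestLoadConfigFile_Bad_Value_Disable_Idle_Conns expects LoadConfigFile to fail.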
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl index 3f1b9f0a198e..47f1eb2e52cd 100644 --- a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl +++ b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["foo","caching","templating"] diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl new file mode 100644 index 000000000000..4355fd078f9e --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + + # Error: destination and create_dest_dirs are not allowed in env_template + destination = "/path/on/disk/where/template/will/render.txt" + create_dest_dirs = true +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl new file mode 100644 index 000000000000..7cbbc09318bb --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "notasignal" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl new file mode 100644 index 000000000000..8fbbd83bae5c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
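+# Fixture note: env_template stanzas are present but the required exec block is
+# omitted; TestLoadConfigFile_Bad_EnvTemplates_MissingExec expects LoadConfigFile
+# to succeed and config.ValidateConfig() to fail.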
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +# Error: missing a required "exec" section! diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl new file mode 100644 index 000000000000..7c7363a46548 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl new file mode 100644 index 000000000000..ace9410bd0b8 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +# Error: template is incompatible with env_template! +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl new file mode 100644 index 000000000000..ac0824441af0 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
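+# Fixture note: env_template is combined with api_proxy and listener stanzas,
+# a combination TestLoadConfigFile_Bad_EnvTemplates_WithProxy expects
+# config.ValidateConfig() to reject.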
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} + +# Error: api_proxy is incompatible with env_template +api_proxy { + use_auto_auth_token = "force" + enforce_consistency = "always" + when_inconsistent = "forward" +} + +# Error: listener is incompatible with env_template +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl index 7a375737161a..89c766d5ceb7 100644 --- a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl +++ b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl index b486418eedef..79b2009630a6 100644 --- a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl +++ b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl index 1a2fd91d6c70..7fbaa7418e80 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl index 9aad89cdd2aa..5d280bd20355 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl index b3dc1383f676..e951427430eb 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl index 5a46d1b93c5c..bbc945ccac3d 100644 --- a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl index b09a978c66ae..6661966c2816 100644 --- a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl index 7a2a57f683ae..b654e202da14 100644 --- a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl index d7176e0aa539..0ad5203fd87d 100644 --- a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl +++ b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl index 55f1d6480161..f85799bb93c4 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache-persist-false.hcl b/command/agent/config/test-fixtures/config-cache-persist-false.hcl index 5ab7f0449911..f48dfd857638 100644 --- a/command/agent/config/test-fixtures/config-cache-persist-false.hcl +++ b/command/agent/config/test-fixtures/config-cache-persist-false.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" cache { diff --git a/command/agent/config/test-fixtures/config-cache.hcl b/command/agent/config/test-fixtures/config-cache.hcl index 05f321a95c4c..148ef6e7ccbf 100644 --- a/command/agent/config/test-fixtures/config-cache.hcl +++ b/command/agent/config/test-fixtures/config-cache.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl index d116964a1b50..318c36973743 100644 --- a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl +++ b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + api_proxy { enforce_consistency = "always" when_inconsistent = "retry" diff --git a/command/agent/config/test-fixtures/config-consistency.hcl b/command/agent/config/test-fixtures/config-consistency.hcl index d57e0557362e..7ed752297358 100644 --- a/command/agent/config/test-fixtures/config-consistency.hcl +++ b/command/agent/config/test-fixtures/config-consistency.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + cache { enforce_consistency = "always" when_inconsistent = "retry" diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl new file mode 100644 index 000000000000..1bd5e93c770f --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl new file mode 100644 index 000000000000..b5d7425ebe46 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl @@ -0,0 +1,9 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl new file mode 100644 index 000000000000..f65b4c6ddbcb --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
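+# Fixture note: one half of a two-file config directory; the auto_auth, sink,
+# and listener stanzas live here, while config-cache2.hcl supplies the cache and
+# vault stanzas. Presumably loaded as a directory by the config-dir tests
+# elsewhere in this change.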
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl new file mode 100644 index 000000000000..57929cd55c82 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +cache { + use_auto_auth_token = true + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl new file mode 100644 index 000000000000..b99ee93f907a --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl new file mode 100644 index 000000000000..1bd5e93c770f --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl index 94e8cc827f3c..b6869a2000ab 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["auto-auth","caching","templating","proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl index 1a63b20480d4..02bda0b0a6ed 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl index 30d0806c0337..624d1bd1c86e 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl index eb95310cedff..6b7ac26df736 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = [] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl index 8c2c6db67400..2219b84eb285 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl index 922377fc82a9..4f819c7a443e 100644 --- a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_idle_connections = ["templating"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl index 6e498f756d39..356c79ff5bb0 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["auto-auth","caching","templating","proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl index 11393bfb57a6..a7648c480ffe 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["auto-auth"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl index 5712296924ed..4f93218ee501 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["caching"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl index 8cddcebd8f10..b0969776fde3 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = [] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl index 8363cb58f132..138254652579 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["proxying"] diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl index d4731cbd90e2..9e154a9ce467 100644 --- a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" disable_keep_alives = ["templating"] diff --git a/command/agent/config/test-fixtures/config-embedded-type.hcl b/command/agent/config/test-fixtures/config-embedded-type.hcl index 4e6dc41f46a8..cf3c182a85f3 100644 --- a/command/agent/config/test-fixtures/config-embedded-type.hcl +++ b/command/agent/config/test-fixtures/config-embedded-type.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" log_file = "/var/log/vault/vault-agent.log" diff --git a/command/agent/config/test-fixtures/config-env-templates-complex.hcl b/command/agent/config/test-fixtures/config-env-templates-complex.hcl new file mode 100644 index 000000000000..adcd4b0dccc0 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-complex.hcl @@ -0,0 +1,39 @@ +# Copyright (c) HashiCorp, Inc. 
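+# Fixture note: two env_template stanzas plus a non-default exec stanza;
+# exercised by TestLoadConfigFile_EnvTemplates_Complex and
+# TestLoadConfigFile_EnvTemplates_ExecComplex.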
+# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +cache {} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGINT" +} diff --git a/command/agent/config/test-fixtures/config-env-templates-simple.hcl b/command/agent/config/test-fixtures/config-env-templates-simple.hcl new file mode 100644 index 000000000000..3ca1a190980e --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-simple.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +env_template "MY_DATABASE_USER" { + contents = "{{ with secret \"secret/db-secret\" }}{{ .Data.data.user }}{{ end }}" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-env-templates-with-source.hcl b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl new file mode 100644 index 000000000000..7643ff28d826 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +auto_auth { + method { + type = "token_file" + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +env_template "MY_PASSWORD" { + source = "/path/on/disk/to/template.ctmpl" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl index c52140102f1c..d6b32c70de5a 100644 --- a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl +++ b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl index 6b9343aa4ba6..a7fbccd4ba97 100644 --- a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl +++ b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-method-wrapping.hcl b/command/agent/config/test-fixtures/config-method-wrapping.hcl index cbafc5a24593..0012bb5708a0 100644 --- a/command/agent/config/test-fixtures/config-method-wrapping.hcl +++ b/command/agent/config/test-fixtures/config-method-wrapping.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-full-nosink.hcl b/command/agent/config/test-fixtures/config-template-full-nosink.hcl index 84edf6f11e3c..cda6d020c619 100644 --- a/command/agent/config/test-fixtures/config-template-full-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-full-nosink.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-full.hcl b/command/agent/config/test-fixtures/config-template-full.hcl index 5e5cbc62cd7b..649510d167c6 100644 --- a/command/agent/config/test-fixtures/config-template-full.hcl +++ b/command/agent/config/test-fixtures/config-template-full.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-many-nosink.hcl b/command/agent/config/test-fixtures/config-template-many-nosink.hcl index e04f77da263e..2882d76de0f2 100644 --- a/command/agent/config/test-fixtures/config-template-many-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-many-nosink.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-many.hcl b/command/agent/config/test-fixtures/config-template-many.hcl index 2f6fe7b70b6d..992381704990 100644 --- a/command/agent/config/test-fixtures/config-template-many.hcl +++ b/command/agent/config/test-fixtures/config-template-many.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-min-nosink.hcl b/command/agent/config/test-fixtures/config-template-min-nosink.hcl index 12c5a82c5385..395be10e367e 100644 --- a/command/agent/config/test-fixtures/config-template-min-nosink.hcl +++ b/command/agent/config/test-fixtures/config-template-min-nosink.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-min.hcl b/command/agent/config/test-fixtures/config-template-min.hcl index 5d37dbefbab1..523a81e46bfb 100644 --- a/command/agent/config/test-fixtures/config-template-min.hcl +++ b/command/agent/config/test-fixtures/config-template-min.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-template-with-cache.hcl b/command/agent/config/test-fixtures/config-template-with-cache.hcl new file mode 100644 index 000000000000..14e8ab119978 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-with-cache.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +cache {} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template_config-empty.hcl b/command/agent/config/test-fixtures/config-template_config-empty.hcl index a4f5b3a0938f..b497032a74ab 100644 --- a/command/agent/config/test-fixtures/config-template_config-empty.hcl +++ b/command/agent/config/test-fixtures/config-template_config-empty.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + vault { address = "http://127.0.0.1:1111" retry { diff --git a/command/agent/config/test-fixtures/config-template_config-missing.hcl b/command/agent/config/test-fixtures/config-template_config-missing.hcl new file mode 100644 index 000000000000..4673e0c1dfdc --- /dev/null +++ b/command/agent/config/test-fixtures/config-template_config-missing.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +vault { + address = "http://127.0.0.1:1111" + retry { + num_retries = 5 + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template_config.hcl b/command/agent/config/test-fixtures/config-template_config.hcl index 5da0e2b9127b..46c082a4228b 100644 --- a/command/agent/config/test-fixtures/config-template_config.hcl +++ b/command/agent/config/test-fixtures/config-template_config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + vault { address = "http://127.0.0.1:1111" retry { @@ -8,6 +11,7 @@ vault { template_config { exit_on_retry_failure = true static_secret_render_interval = 60 + max_connections_per_host = 100 } template { diff --git a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl index e7ab4aa0017f..b6bf1abe8fec 100644 --- a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config-vault-retry.hcl b/command/agent/config/test-fixtures/config-vault-retry.hcl index 0c1cfa19b064..aedbfdc52052 100644 --- a/command/agent/config/test-fixtures/config-vault-retry.hcl +++ b/command/agent/config/test-fixtures/config-vault-retry.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" auto_auth { diff --git a/command/agent/config/test-fixtures/config.hcl b/command/agent/config/test-fixtures/config.hcl index ecfb88ae05f9..f6ca0e684e03 100644 --- a/command/agent/config/test-fixtures/config.hcl +++ b/command/agent/config/test-fixtures/config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + pid_file = "./pidfile" log_file = "/var/log/vault/vault-agent.log" diff --git a/command/agent/doc.go b/command/agent/doc.go index 0786f5c1d394..785fe94060a5 100644 --- a/command/agent/doc.go +++ b/command/agent/doc.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + /* Package agent implements a daemon mode of Vault designed to provide helper features like auto-auth, caching, and templating. diff --git a/command/agent/exec/childprocessstate_enumer.go b/command/agent/exec/childprocessstate_enumer.go new file mode 100644 index 000000000000..154606ed62f6 --- /dev/null +++ b/command/agent/exec/childprocessstate_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=childProcessState -trimprefix=childProcessState"; DO NOT EDIT. + +package exec + +import ( + "fmt" +) + +const _childProcessStateName = "NotStartedRunningRestartingStopped" + +var _childProcessStateIndex = [...]uint8{0, 10, 17, 27, 34} + +func (i childProcessState) String() string { + if i >= childProcessState(len(_childProcessStateIndex)-1) { + return fmt.Sprintf("childProcessState(%d)", i) + } + return _childProcessStateName[_childProcessStateIndex[i]:_childProcessStateIndex[i+1]] +} + +var _childProcessStateValues = []childProcessState{0, 1, 2, 3} + +var _childProcessStateNameToValueMap = map[string]childProcessState{ + _childProcessStateName[0:10]: 0, + _childProcessStateName[10:17]: 1, + _childProcessStateName[17:27]: 2, + _childProcessStateName[27:34]: 3, +} + +// childProcessStateString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func childProcessStateString(s string) (childProcessState, error) { + if val, ok := _childProcessStateNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to childProcessState values", s) +} + +// childProcessStateValues returns all values of the enum +func childProcessStateValues() []childProcessState { + return _childProcessStateValues +} + +// IsAchildProcessState returns "true" if the value is listed in the enum definition. "false" otherwise +func (i childProcessState) IsAchildProcessState() bool { + for _, v := range _childProcessStateValues { + if i == v { + return true + } + } + return false +} diff --git a/command/agent/exec/exec.go b/command/agent/exec/exec.go new file mode 100644 index 000000000000..8baf51d69e6f --- /dev/null +++ b/command/agent/exec/exec.go @@ -0,0 +1,377 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package exec + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "sync" + "time" + + "github.com/hashicorp/consul-template/child" + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/consul-template/manager" + "github.com/hashicorp/go-hclog" + "golang.org/x/exp/slices" + + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/internal/ctmanager" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +//go:generate enumer -type=childProcessState -trimprefix=childProcessState +type childProcessState uint8 + +const ( + childProcessStateNotStarted childProcessState = iota + childProcessStateRunning + childProcessStateRestarting + childProcessStateStopped +) + +type ServerConfig struct { + Logger hclog.Logger + AgentConfig *config.Config + + Namespace string + + // LogLevel is needed to set the internal Consul Template Runner's log level + // to match the log level of Vault Agent. The internal Runner creates its own + // logger and can't be set externally or copied from the Template Server. + // + // LogWriter is needed to initialize Consul Template's internal logger to use + // the same io.Writer that Vault Agent itself is using.
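+ //
+ // A minimal construction sketch (illustrative only; assumes agentCfg is a
+ // fully parsed agent *config.Config supplied by the caller):
+ //
+ //	srv, err := NewServer(&ServerConfig{
+ //	    Logger:      logger.Named("exec.server"),
+ //	    AgentConfig: agentCfg,
+ //	    LogLevel:    hclog.Info,
+ //	    LogWriter:   os.Stderr,
+ //	})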
+ LogLevel hclog.Level + LogWriter io.Writer +} + +type Server struct { + // config holds the ServerConfig used to create it. It's passed along in other + // methods. + config *ServerConfig + + // runner is the consul-template runner + runner *manager.Runner + + // numberOfTemplates is the count of templates determined by consul-template; + // we keep the value to ensure all templates have been rendered before + // starting the child process + // NOTE: each template may have more than one TemplateConfig, so the numbers may not match up + numberOfTemplates int + + logger hclog.Logger + + childProcess *child.Child + childProcessState childProcessState + childProcessLock sync.Mutex + childProcessStdout io.WriteCloser + childProcessStderr io.WriteCloser + + // exit channel of the child process + childProcessExitCh chan int + + // lastRenderedEnvVars is the cached value of all environment variables + // rendered by the templating engine; it is used for detecting changes + lastRenderedEnvVars []string +} + +type ProcessExitError struct { + ExitCode int +} + +func (e *ProcessExitError) Error() string { + return fmt.Sprintf("process exited with %d", e.ExitCode) +} + +func NewServer(cfg *ServerConfig) (*Server, error) { + var err error + + childProcessStdout := os.Stdout + childProcessStderr := os.Stderr + + if cfg.AgentConfig.Exec != nil { + if cfg.AgentConfig.Exec.ChildProcessStdout != "" { + childProcessStdout, err = os.OpenFile(cfg.AgentConfig.Exec.ChildProcessStdout, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return nil, fmt.Errorf("could not open %q, %w", cfg.AgentConfig.Exec.ChildProcessStdout, err) + } + } + + if cfg.AgentConfig.Exec.ChildProcessStderr != "" { + childProcessStderr, err = os.OpenFile(cfg.AgentConfig.Exec.ChildProcessStderr, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return nil, fmt.Errorf("could not open %q, %w", cfg.AgentConfig.Exec.ChildProcessStderr, err) + } + } + } + + server := Server{ + logger: cfg.Logger, + config: cfg, + childProcessState: childProcessStateNotStarted, + childProcessExitCh: make(chan int), + childProcessStdout: childProcessStdout, + childProcessStderr: childProcessStderr, + } + + return &server, nil +} + +func (s *Server) Run(ctx context.Context, incomingVaultToken chan string) error { + latestToken := new(string) + s.logger.Info("starting exec server") + defer func() { + s.logger.Info("exec server stopped") + }() + + if len(s.config.AgentConfig.EnvTemplates) == 0 || s.config.AgentConfig.Exec == nil { + s.logger.Info("no env templates or exec config, exiting") + <-ctx.Done() + return nil + } + + managerConfig := ctmanager.ManagerConfig{ + AgentConfig: s.config.AgentConfig, + Namespace: s.config.Namespace, + LogLevel: s.config.LogLevel, + LogWriter: s.config.LogWriter, + } + + runnerConfig, err := ctmanager.NewConfig(managerConfig, s.config.AgentConfig.EnvTemplates) + if err != nil { + return fmt.Errorf("template server failed to generate runner config: %w", err) + } + + // We leave this in "dry" mode, as there are no files to render; + // we will get the environment variables' rendered contents from the incoming events + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + return fmt.Errorf("template server failed to create: %w", err) + } + + // prevent the templates from being rendered to stdout in "dry" mode + s.runner.SetOutStream(io.Discard) + + s.numberOfTemplates = len(s.runner.TemplateConfigMapping()) + + // We receive multiple events every staticSecretRenderInterval + // from
<-s.runner.TemplateRenderedCh(), one for each secret. Only the last + // event in a batch will contain the latest set of all secrets and the + // corresponding environment variables. This timer will fire after 2 seconds + // unless an event comes in, which resets the timer back to 2 seconds. + var debounceTimer *time.Timer + + // capture the errors related to restarting the child process + restartChildProcessErrCh := make(chan error) + + for { + select { + case <-ctx.Done(): + s.runner.Stop() + s.childProcessLock.Lock() + if s.childProcess != nil { + s.childProcess.Stop() + } + s.childProcessState = childProcessStateStopped + s.close() + s.childProcessLock.Unlock() + return nil + + case token := <-incomingVaultToken: + if token != *latestToken { + s.logger.Info("exec server received new token") + + s.runner.Stop() + *latestToken = token + newTokenConfig := ctconfig.Config{ + Vault: &ctconfig.VaultConfig{ + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), + }, + } + + // got a new auth token, merge it in with the existing config + runnerConfig = runnerConfig.Merge(&newTokenConfig) + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + s.logger.Error("template server failed with new Vault token", "error", err) + continue + } + + // prevent the templates from being rendered to stdout in "dry" mode + s.runner.SetOutStream(io.Discard) + + go s.runner.Start() + } + + case err := <-s.runner.ErrCh: + s.logger.Error("template server error", "error", err.Error()) + s.runner.StopImmediately() + + // Return after stopping the runner if exit on retry failure was specified + if s.config.AgentConfig.TemplateConfig != nil && s.config.AgentConfig.TemplateConfig.ExitOnRetryFailure { + return fmt.Errorf("template server: %w", err) + } + + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + return fmt.Errorf("template server failed to create: %w", err) + } + go s.runner.Start() + + case <-s.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + s.logger.Trace("template rendered") + events := s.runner.RenderEvents() + + // This checks if we've finished rendering the initial set of templates; + // for every consecutive re-render len(events) should equal s.numberOfTemplates + if len(events) < s.numberOfTemplates { + // Not all templates have been rendered yet + continue + } + + // assume the renders are finished, until we find otherwise + doneRendering := true + var renderedEnvVars []string + for _, event := range events { + // This template hasn't been rendered + if event.LastWouldRender.IsZero() { + doneRendering = false + break + } else { + for _, tcfg := range event.TemplateConfigs { + envVar := fmt.Sprintf("%s=%s", *tcfg.MapToEnvironmentVariable, event.Contents) + renderedEnvVars = append(renderedEnvVars, envVar) + } + } + } + if !doneRendering { + continue + } + + // sort the environment variables for a deterministic output and easy comparison + sort.Strings(renderedEnvVars) + + s.logger.Trace("done rendering templates") + + // don't restart the process unless a change is detected + if slices.Equal(s.lastRenderedEnvVars, renderedEnvVars) { + continue + } + + s.lastRenderedEnvVars = renderedEnvVars + + s.logger.Debug("detected a change in the environment variables: restarting the child process") + + // if a timer exists, stop it + if debounceTimer != nil { + debounceTimer.Stop() + } + debounceTimer = time.AfterFunc(2*time.Second, func() { + if err :=
s.restartChildProcess(renderedEnvVars); err != nil { + restartChildProcessErrCh <- fmt.Errorf("unable to restart the child process: %w", err) + } + }) + + case err := <-restartChildProcessErrCh: + // catch the error from restarting + return err + + case exitCode := <-s.childProcessExitCh: + // process exited on its own + return &ProcessExitError{ExitCode: exitCode} + } + } +} + +func (s *Server) restartChildProcess(newEnvVars []string) error { + s.childProcessLock.Lock() + defer s.childProcessLock.Unlock() + + switch s.config.AgentConfig.Exec.RestartOnSecretChanges { + case "always": + if s.childProcessState == childProcessStateRunning { + // process is running, need to kill it first + s.logger.Info("stopping process", "process_id", s.childProcess.Pid()) + s.childProcessState = childProcessStateRestarting + s.childProcess.Stop() + } + case "never": + if s.childProcessState == childProcessStateRunning { + s.logger.Info("detected update, but not restarting process", "process_id", s.childProcess.Pid()) + return nil + } + default: + return fmt.Errorf("invalid value for restart-on-secret-changes: %q", s.config.AgentConfig.Exec.RestartOnSecretChanges) + } + + args, subshell, err := child.CommandPrep(s.config.AgentConfig.Exec.Command) + if err != nil { + return fmt.Errorf("unable to parse command: %w", err) + } + + childInput := &child.NewInput{ + Stdin: os.Stdin, + Stdout: s.childProcessStdout, + Stderr: s.childProcessStderr, + Command: args[0], + Args: args[1:], + Timeout: 0, // let it run forever + Env: append(os.Environ(), newEnvVars...), + ReloadSignal: nil, // can't reload w/ new env vars + KillSignal: s.config.AgentConfig.Exec.RestartStopSignal, + KillTimeout: 30 * time.Second, + Splay: 0, + Setpgid: subshell, + Logger: s.logger.StandardLogger(nil), + } + + proc, err := child.New(childInput) + if err != nil { + return err + } + s.childProcess = proc + + if err := s.childProcess.Start(); err != nil { + return fmt.Errorf("error starting the child process: %w", err) + } + + s.childProcessState = childProcessStateRunning + + // Listen if the child process exits and bubble it up to the main loop. + // + // NOTE: this must be invoked after child.Start() to avoid a potential + // race condition with ExitCh not being initialized. + go func() { + select { + case exitCode, ok := <-proc.ExitCh(): + // ignore ExitCh channel closures caused by our restarts + if ok { + s.childProcessExitCh <- exitCode + } + } + }() + + return nil +} + +func (s *Server) Close() { + s.childProcessLock.Lock() + defer s.childProcessLock.Unlock() + s.close() +} + +func (s *Server) close() { + if s.childProcessStdout != os.Stdout { + _ = s.childProcessStdout.Close() + } + if s.childProcessStderr != os.Stderr { + _ = s.childProcessStderr.Close() + } +} diff --git a/command/agent/exec/exec_test.go b/command/agent/exec/exec_test.go new file mode 100644 index 000000000000..a825037faa46 --- /dev/null +++ b/command/agent/exec/exec_test.go @@ -0,0 +1,580 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package exec + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + "testing" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +func fakeVaultServer(t *testing.T) *httptest.Server { + t.Helper() + + firstRequest := true + + mux := http.NewServeMux() + mux.HandleFunc("/v1/kv/my-app/creds", func(w http.ResponseWriter, r *http.Request) { + // change the password on the second request to re-render the template + var password string + + if firstRequest { + password = "s3cr3t" + } else { + password = "s3cr3t-two" + } + + firstRequest = false + + fmt.Fprintf(w, `{ + "request_id": "8af096e9-518c-7351-eff5-5ba20554b21f", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "data": { + "password": "%s", + "user": "app-user" + }, + "metadata": { + "created_time": "2019-10-07T22:18:44.233247Z", + "deletion_time": "", + "destroyed": false, + "version": 3 + } + }, + "wrap_info": null, + "warnings": null, + "auth": null + }`, + password, + ) + }) + + return httptest.NewServer(mux) +} + +// TestExecServer_Run tests various scenarios of using vault agent as a process +// supervisor. At its core is a sample application referred to as 'test app', +// compiled from ./test-app/main.go. Each test case verifies that the test app +// is started and/or stopped correctly by exec.Server.Run(). There are 3 +// high-level scenarios we want to test for: +// +// 1. test app is started and is injected with environment variables +// 2. test app exits early (either with zero or non-zero exit code) +// 3.
test app needs to be stopped (and restarted) by exec.Server +func TestExecServer_Run(t *testing.T) { + // we must build a test-app binary since 'go run' does not propagate signals correctly + goBinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("could not find go binary on path: %s", err) + } + + testAppBinary := filepath.Join(os.TempDir(), "test-app") + + if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { + t.Fatalf("could not build the test application: %s", err) + } + defer func() { + if err := os.Remove(testAppBinary); err != nil { + t.Fatalf("could not remove %q test application: %s", testAppBinary, err) + } + }() + + testCases := map[string]struct { + // skip this test case + skip bool + skipReason string + + // inputs to the exec server + envTemplates []*ctconfig.TemplateConfig + staticSecretRenderInterval time.Duration + + // test app parameters + testAppArgs []string + testAppStopSignal os.Signal + + // simulate a shutdown of agent, which, in turn stops the test app + simulateShutdown bool + simulateShutdownWaitDuration time.Duration + + // expected results + expected map[string]string + expectedTestDuration time.Duration + expectedError error + }{ + "ensure_environment_variables_are_injected": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + testAppArgs: []string{"--stop-after", "10s"}, + testAppStopSignal: syscall.SIGTERM, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "password_changes_test_app_should_restart": { + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + staticSecretRenderInterval: 5 * time.Second, + testAppArgs: []string{"--stop-after", "15s", "--sleep-after-stop-signal", "0s"}, + testAppStopSignal: syscall.SIGTERM, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t-two", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_exits_early": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s"}, + testAppStopSignal: syscall.SIGTERM, + expectedTestDuration: 15 * time.Second, + expectedError: &ProcessExitError{0}, + }, + + "test_app_exits_early_non_zero": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s", "--exit-code", "5"}, + testAppStopSignal: syscall.SIGTERM, + expectedTestDuration: 15 * 
time.Second, + expectedError: &ProcessExitError{5}, + }, + + "send_sigterm_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s"}, + testAppStopSignal: syscall.SIGTERM, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "send_sigusr1_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s", "--use-sigusr1"}, + testAppStopSignal: syscall.SIGUSR1, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_ignores_stop_signal": { + skip: true, + skipReason: "This test currently fails with 'go test -race' (see hashicorp/consul-template/issues/1753).", + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "60s", "--sleep-after-stop-signal", "60s"}, + testAppStopSignal: syscall.SIGTERM, + simulateShutdown: true, + simulateShutdownWaitDuration: 32 * time.Second, // the test app should be stopped immediately after 30s + expectedTestDuration: 45 * time.Second, + expectedError: nil, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.skip { + t.Skip(testCase.skipReason) + } + + t.Logf("test case %s: begin", name) + defer t.Logf("test case %s: end", name) + + fakeVault := fakeVaultServer(t) + defer fakeVault.Close() + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), testCase.expectedTestDuration) + defer cancelContextFunc() + + port := findOpenPort(t) + + testAppCommand := []string{ + testAppBinary, + "--port", + strconv.Itoa(port), + } + + execServer, err := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: fakeVault.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + Exec: &config.ExecConfig{ + RestartOnSecretChanges: "always", + Command: append(testAppCommand, testCase.testAppArgs...), + RestartStopSignal: testCase.testAppStopSignal, + }, + EnvTemplates: testCase.envTemplates, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: testCase.staticSecretRenderInterval, + }, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + }) + if err != nil { + t.Fatalf("could not create exec server: %q", err) + } + + // start the exec server + var ( + execServerErrCh = make(chan error) + execServerTokenCh = make(chan string, 1) + ) + go func() { + execServerErrCh <- execServer.Run(ctx, execServerTokenCh) + }() + + // send a dummy token to kick off the server + execServerTokenCh <- "my-token" + + // ensure the test app is running after 3 seconds + var ( + testAppAddr = fmt.Sprintf("http://localhost:%d", port) + testAppStartedCh = make(chan error) + ) + if 
testCase.expectedError == nil { + time.AfterFunc(500*time.Millisecond, func() { + _, err := retryablehttp.Head(testAppAddr) + testAppStartedCh <- err + }) + } + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + + case err := <-execServerErrCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("exec server did not expect an error, got: %v", err) + } + + if testCase.expectedError != nil && !errors.Is(err, testCase.expectedError) { + t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) + } + + t.Log("exec server exited without an error") + + return + + case err := <-testAppStartedCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("test app could not be started: %s", err) + } + + t.Log("test app started successfully") + } + + // expect the test app to restart after staticSecretRenderInterval + debounce timer due to a password change + if testCase.staticSecretRenderInterval != 0 { + t.Logf("sleeping for %v to wait for application restart", testCase.staticSecretRenderInterval+5*time.Second) + time.Sleep(testCase.staticSecretRenderInterval + 5*time.Second) + } + + // simulate a shutdown of agent, which, in turn stops the test app + if testCase.simulateShutdown { + cancelContextFunc() + + time.Sleep(testCase.simulateShutdownWaitDuration) + + // check if the test app is still alive + if _, err := http.Head(testAppAddr); err == nil { + t.Fatalf("the test app is still alive %v after a simulated shutdown!", testCase.simulateShutdownWaitDuration) + } + + return + } + + // verify the environment variables + t.Logf("verifying test-app's environment variables") + + resp, err := retryablehttp.Get(testAppAddr) + if err != nil { + t.Fatalf("error making request to the test app: %s", err) + } + defer resp.Body.Close() + + decoder := json.NewDecoder(resp.Body) + var response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` + } + if err := decoder.Decode(&response); err != nil { + t.Fatalf("unable to parse response from test app: %s", err) + } + + for key, expectedValue := range testCase.expected { + actualValue, ok := response.EnvironmentVariables[key] + if !ok { + t.Fatalf("expected the test app to return %q environment variable", key) + } + if expectedValue != actualValue { + t.Fatalf("expected environment variable %s to have a value of %q but it has a value of %q", key, expectedValue, actualValue) + } + } + }) + } +} + +func TestExecServer_LogFiles(t *testing.T) { + goBinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("could not find go binary on path: %s", err) + } + + testAppBinary := filepath.Join(os.TempDir(), "test-app") + + if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { + t.Fatalf("could not build the test application: %s", err) + } + t.Cleanup(func() { + if err := os.Remove(testAppBinary); err != nil { + t.Fatalf("could not remove %q test application: %s", testAppBinary, err) + } + }) + + testCases := map[string]struct { + testAppArgs []string + stderrFile string + stdoutFile string + + expectedError error + }{ + "can_log_stderr_to_file": { + stderrFile: "vault-exec-test.stderr.log", + }, + "can_log_stdout_to_file": { + stdoutFile: "vault-exec-test.stdout.log", + testAppArgs: []string{"-log-to-stdout"}, + }, + "cant_open_file": { + stderrFile: "/file/does/not/exist", + expectedError: os.ErrNotExist, + }, + } + + for tcName, testCase := range testCases { + t.Run(tcName, func(t *testing.T) { + fakeVault =
fakeVaultServer(t) + defer fakeVault.Close() + + port := findOpenPort(t) + testAppCommand := []string{ + testAppBinary, + "--port", + strconv.Itoa(port), + "--stop-after", + "60s", + } + + execConfig := &config.ExecConfig{ + RestartOnSecretChanges: "always", + Command: append(testAppCommand, testCase.testAppArgs...), + } + + if testCase.stdoutFile != "" { + execConfig.ChildProcessStdout = filepath.Join(os.TempDir(), testCase.stdoutFile) + t.Cleanup(func() { + _ = os.Remove(execConfig.ChildProcessStdout) + }) + } + + if testCase.stderrFile != "" { + execConfig.ChildProcessStderr = filepath.Join(os.TempDir(), testCase.stderrFile) + t.Cleanup(func() { + _ = os.Remove(execConfig.ChildProcessStderr) + }) + } + + execServer, err := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: fakeVault.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + Exec: execConfig, + EnvTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: 5 * time.Second, + }, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + }) + if err != nil { + if testCase.expectedError != nil { + if errors.Is(err, testCase.expectedError) { + t.Log("test passes! caught expected err") + return + } else { + t.Fatalf("caught error %q did not match expected error %q", err, testCase.expectedError) + } + } + t.Fatalf("could not create exec server: %q", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // start the exec server + var ( + execServerErrCh = make(chan error) + execServerTokenCh = make(chan string, 1) + ) + go func() { + execServerErrCh <- execServer.Run(ctx, execServerTokenCh) + }() + + // send a dummy token to kick off the server + execServerTokenCh <- "my-token" + + // ensure the test app is running after 500ms + var ( + testAppAddr = fmt.Sprintf("http://localhost:%d", port) + testAppStartedCh = make(chan error) + ) + time.AfterFunc(500*time.Millisecond, func() { + _, err := retryablehttp.Head(testAppAddr) + testAppStartedCh <- err + }) + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + + case err := <-execServerErrCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("exec server did not expect an error, got: %v", err) + } + + if testCase.expectedError != nil && !errors.Is(err, testCase.expectedError) { + t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) + } + + t.Log("exec server exited without an error") + + return + + case <-testAppStartedCh: + t.Log("test app started successfully") + } + + // let the app run a bit + time.Sleep(5 * time.Second) + // stop the app + cancel() + // wait for app to stop + time.Sleep(5 * time.Second) + + // check if the files have content + if testCase.stdoutFile != "" { + stdoutInfo, err := os.Stat(execConfig.ChildProcessStdout) + if err != nil { + t.Fatalf("error calling stat on stdout file: %q", err) + } + if stdoutInfo.Size() == 0 { + t.Fatalf("stdout log file does not have any data!") + } + } + + if testCase.stderrFile != "" { + stderrInfo, err := os.Stat(execConfig.ChildProcessStderr) + if err != nil { + t.Fatalf("error calling stat on stderr file: %q", err) + } + if stderrInfo.Size() == 0 { + t.Fatalf("stderr log file does not have
any data!") + } + } + }) + } +} + +// findOpenPort generates a random open port, using Go's :0 to find a port, +// for us to then use as the test binary's port to use. +// This is a little race-y, as something else could open the port before +// we use it, but we're not the process that needs to open the port, +// and we still need to be able to access it. +// We should be fine so long as we don't make the tests parallel. +func findOpenPort(t *testing.T) int { + t.Helper() + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + port := ln.Addr().(*net.TCPAddr).Port + err = ln.Close() + if err != nil { + t.Fatal(err) + } + return port +} diff --git a/command/agent/exec/test-app/main.go b/command/agent/exec/test-app/main.go new file mode 100644 index 000000000000..db8845aa6055 --- /dev/null +++ b/command/agent/exec/test-app/main.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +// This is a test application that is used by TestExecServer_Run to verify +// the behavior of vault agent running as a process supervisor. +// +// The app will automatically exit after 1 minute or the --stop-after interval, +// whichever comes first. It also can serve its loaded environment variables on +// the given --port. This app will also return the given --exit-code and +// terminate on SIGTERM unless --use-sigusr1 is specified. + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +var ( + port uint + sleepAfterStopSignal time.Duration + useSigusr1StopSignal bool + stopAfter time.Duration + exitCode int + logToStdout bool +) + +func init() { + flag.UintVar(&port, "port", 34000, "port to run the test app on") + flag.DurationVar(&sleepAfterStopSignal, "sleep-after-stop-signal", 1*time.Second, "time to sleep after getting the signal before exiting") + flag.BoolVar(&useSigusr1StopSignal, "use-sigusr1", false, "use SIGUSR1 as the stop signal, instead of the default SIGTERM") + flag.DurationVar(&stopAfter, "stop-after", 0, "stop the process after duration (overrides all other flags if set)") + flag.IntVar(&exitCode, "exit-code", 0, "exit code to return when this script exits") + flag.BoolVar(&logToStdout, "log-to-stdout", false, "log to stdout instead of stderr") +} + +type Response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` +} + +func newResponse() Response { + respEnv := make(map[string]string, len(os.Environ())) + for _, envVar := range os.Environ() { + tokens := strings.Split(envVar, "=") + respEnv[tokens[0]] = tokens[1] + } + + return Response{ + EnvironmentVariables: respEnv, + ProcessID: os.Getpid(), + } +} + +func handler(w http.ResponseWriter, r *http.Request) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + if r.URL.Query().Get("pretty") == "1" { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(newResponse()); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(buf.Bytes()) +} + +func main() { + flag.Parse() + + logOut := os.Stderr + if logToStdout { + logOut = os.Stdout + } + logger := log.New(logOut, "test-app: ", log.LstdFlags) + + logger.Printf("running on port %d", port) + if err := run(logger); err != nil { + log.Fatalf("error: %v\n", err) + } + + logger.Printf("exit code: %d\n", 
exitCode) + + os.Exit(exitCode) +} + +func run(logger *log.Logger) error { + /* */ logger.Println("run: started") + defer logger.Println("run: done") + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 60*time.Second) + defer cancelContextFunc() + + server := http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: http.HandlerFunc(handler), + ReadTimeout: 20 * time.Second, + WriteTimeout: 20 * time.Second, + IdleTimeout: 20 * time.Second, + } + + doneCh := make(chan struct{}) + + go func() { + defer close(doneCh) + + stopSignal := make(chan os.Signal, 1) + if useSigusr1StopSignal { + signal.Notify(stopSignal, syscall.SIGUSR1) + } else { + signal.Notify(stopSignal, syscall.SIGTERM) + } + + select { + case <-ctx.Done(): + logger.Println("context done: exiting") + + case s := <-stopSignal: + logger.Printf("signal %q: received\n", s) + + if sleepAfterStopSignal > 0 { + logger.Printf("signal %q: sleeping for %v to simulate cleanup\n", s, sleepAfterStopSignal) + time.Sleep(sleepAfterStopSignal) + } + + case <-time.After(stopAfter): + logger.Printf("stopping after: %v\n", stopAfter) + } + + if err := server.Shutdown(context.Background()); err != nil { + log.Printf("server shutdown error: %v", err) + } + }() + + logger.Printf("server %s: started\n", server.Addr) + + if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("could not start the server: %v", err) + } + + logger.Printf("server %s: done\n", server.Addr) + + <-doneCh + + return nil +} diff --git a/command/agent/internal/ctmanager/runner_config.go b/command/agent/internal/ctmanager/runner_config.go new file mode 100644 index 000000000000..c19e2efef0a2 --- /dev/null +++ b/command/agent/internal/ctmanager/runner_config.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package ctmanager + +import ( + "fmt" + "io" + "strings" + + ctconfig "github.com/hashicorp/consul-template/config" + ctlogging "github.com/hashicorp/consul-template/logging" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +type ManagerConfig struct { + AgentConfig *config.Config + Namespace string + LogLevel hclog.Level + LogWriter io.Writer +} + +// NewConfig returns a consul-template runner configuration, setting the +// Vault and Consul configurations based on the client's configs.
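+//
+// A minimal usage sketch (illustrative only; assumes agentCfg is a parsed
+// agent *config.Config and templates is a ctconfig.TemplateConfigs value
+// built elsewhere):
+//
+//	runnerConf, err := ctmanager.NewConfig(ctmanager.ManagerConfig{
+//	    AgentConfig: agentCfg,
+//	    LogLevel:    hclog.Info,
+//	    LogWriter:   os.Stderr,
+//	}, templates)
+//	if err != nil {
+//	    return err
+//	}
+//	runner, err := manager.NewRunner(runnerConf, true) // true = "dry" mode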
+func NewConfig(mc ManagerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + conf := ctconfig.DefaultConfig() + conf.Templates = templates.Copy() + + // Set up the Vault config + // Always set these to ensure nothing is picked up from the environment + conf.Vault.RenewToken = pointerutil.BoolPtr(false) + conf.Vault.Token = pointerutil.StringPtr("") + conf.Vault.Address = &mc.AgentConfig.Vault.Address + + if mc.Namespace != "" { + conf.Vault.Namespace = &mc.Namespace + } + + if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { + conf.Vault.DefaultLeaseDuration = &mc.AgentConfig.TemplateConfig.StaticSecretRenderInt + } + + if mc.AgentConfig.DisableIdleConnsTemplating { + idleConns := -1 + conf.Vault.Transport.MaxIdleConns = &idleConns + } + + if mc.AgentConfig.DisableKeepAlivesTemplating { + conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) + } + + if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.MaxConnectionsPerHost != 0 { + conf.Vault.Transport.MaxConnsPerHost = &mc.AgentConfig.TemplateConfig.MaxConnectionsPerHost + } + + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(false), + Verify: pointerutil.BoolPtr(false), + Cert: pointerutil.StringPtr(""), + Key: pointerutil.StringPtr(""), + CaCert: pointerutil.StringPtr(""), + CaPath: pointerutil.StringPtr(""), + ServerName: pointerutil.StringPtr(""), + } + + // If Vault.Retry isn't specified, use the default of 12 retries. + // This retry value will be respected regardless of whether we use the cache. + attempts := ctconfig.DefaultRetryAttempts + if mc.AgentConfig.Vault != nil && mc.AgentConfig.Vault.Retry != nil { + attempts = mc.AgentConfig.Vault.Retry.NumRetries + } + + // Use the cache if available or fall back to the Vault server values. + if mc.AgentConfig.Cache != nil { + if mc.AgentConfig.Cache.InProcDialer == nil { + return nil, fmt.Errorf("missing in-process dialer configuration") + } + if conf.Vault.Transport == nil { + conf.Vault.Transport = &ctconfig.TransportConfig{} + } + conf.Vault.Transport.CustomDialer = mc.AgentConfig.Cache.InProcDialer + // The in-process dialer ignores the address passed in, but we're still + // setting it here to override the setting at the top of this function, + // and to prevent the vault/http client from defaulting to https. + conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") + } else if strings.HasPrefix(mc.AgentConfig.Vault.Address, "https") || mc.AgentConfig.Vault.CACert != "" { + skipVerify := mc.AgentConfig.Vault.TLSSkipVerify + verify := !skipVerify + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(true), + Verify: &verify, + Cert: &mc.AgentConfig.Vault.ClientCert, + Key: &mc.AgentConfig.Vault.ClientKey, + CaCert: &mc.AgentConfig.Vault.CACert, + CaPath: &mc.AgentConfig.Vault.CAPath, + ServerName: &mc.AgentConfig.Vault.TLSServerName, + } + } + enabled := attempts > 0 + conf.Vault.Retry = &ctconfig.RetryConfig{ + Attempts: &attempts, + Enabled: &enabled, + } + + // Sync Consul Template's retry with the user-set auto-auth initial backoff value. + // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch + // secrets.
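+ // For example, an auto_auth method block configured with min_backoff = "1s"
+ // and max_backoff = "5m" yields consul-template retries that start at one
+ // second and are capped at five minutes.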
+ if mc.AgentConfig.AutoAuth != nil && mc.AgentConfig.AutoAuth.Method != nil { + if mc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { + conf.Vault.Retry.Backoff = &mc.AgentConfig.AutoAuth.Method.MinBackoff + } + + if mc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { + conf.Vault.Retry.MaxBackoff = &mc.AgentConfig.AutoAuth.Method.MaxBackoff + } + } + + conf.Finalize() + + // set up the log level from the TemplateServer config + conf.LogLevel = logLevelToStringPtr(mc.LogLevel) + + if err := ctlogging.Setup(&ctlogging.Config{ + Level: *conf.LogLevel, + Writer: mc.LogWriter, + }); err != nil { + return nil, err + } + return conf, nil +} + +// logLevelToStringPtr converts a go-hclog level to a matching, uppercase string +// value. It's used to convert Vault Agent's hclog level to a string version +// suitable for use in Consul Template's runner configuration input. +func logLevelToStringPtr(level hclog.Level) *string { + // consul template's default level is WARN, but Vault Agent's default is INFO, + // so we use that for the Runner's default. + var levelStr string + + switch level { + case hclog.Trace: + levelStr = "TRACE" + case hclog.Debug: + levelStr = "DEBUG" + case hclog.Warn: + levelStr = "WARN" + case hclog.Error: + levelStr = "ERR" + default: + levelStr = "INFO" + } + return pointerutil.StringPtr(levelStr) +} diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go index c2d74d9f37dc..1c8d1c0d50df 100644 --- a/command/agent/jwt_end_to_end_test.go +++ b/command/agent/jwt_end_to_end_test.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package agent import ( "context" "encoding/json" - "io/ioutil" + "fmt" "os" "testing" "time" @@ -11,10 +14,10 @@ import ( hclog "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" - agentjwt "github.com/hashicorp/vault/command/agent/auth/jwt" - "github.com/hashicorp/vault/command/agent/sink" - "github.com/hashicorp/vault/command/agent/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentjwt "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" "github.com/hashicorp/vault/helper/dhutil" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" @@ -24,14 +27,34 @@ import ( ) func TestJWTEndToEnd(t *testing.T) { - testJWTEndToEnd(t, false) - testJWTEndToEnd(t, true) + t.Parallel() + testCases := []struct { + ahWrapping bool + useSymlink bool + removeJWTAfterReading bool + }{ + {false, false, false}, + {true, false, false}, + {false, true, false}, + {true, true, false}, + {false, false, true}, + {true, false, true}, + {false, true, true}, + {true, true, true}, + } + + for _, tc := range testCases { + tc := tc // capture range variable + t.Run(fmt.Sprintf("ahWrapping=%v, useSymlink=%v, removeJWTAfterReading=%v", tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading), func(t *testing.T) { + t.Parallel() + testJWTEndToEnd(t, tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading) + }) + } } -func testJWTEndToEnd(t *testing.T, ahWrapping bool) { +func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "jwt":
vaultjwt.Factory, }, @@ -83,16 +106,24 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { // We close these right away because we're just basically testing // permissions and finding a usable file name - inf, err := ioutil.TempFile("", "auth.jwt.test.") + inf, err := os.CreateTemp("", "auth.jwt.test.") if err != nil { t.Fatal(err) } in := inf.Name() inf.Close() os.Remove(in) + symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(in, symlinkName) t.Logf("input: %s", in) - ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + ouf, err := os.CreateTemp("", "auth.tokensink.test.") if err != nil { t.Fatal(err) } @@ -101,7 +132,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { os.Remove(out) t.Logf("output: %s", out) - dhpathf, err := ioutil.TempFile("", "auth.dhpath.test.") + dhpathf, err := os.CreateTemp("", "auth.dhpath.test.") if err != nil { t.Fatal(err) } @@ -116,7 +147,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(dhpath, mPubKey, 0o600); err != nil { + if err := os.WriteFile(dhpath, mPubKey, 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote dh param file", "path", dhpath) @@ -124,12 +155,21 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + var fileNameToUseAsPath string + if useSymlink { + fileNameToUseAsPath = symlinkName + } else { + fileNameToUseAsPath = in + } am, err := agentjwt.NewJWTAuthMethod(&auth.AuthConfig{ Logger: logger.Named("auth.jwt"), MountPath: "auth/jwt", Config: map[string]interface{}{ - "path": in, - "role": "test", + "path": fileNameToUseAsPath, + "role": "test", + "remove_jwt_after_reading": removeJWTAfterReading, + "remove_jwt_follows_symlinks": true, + "jwt_read_period": "0.5s", }, }) if err != nil { @@ -225,7 +265,8 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { // Get a token jwtToken, _ := GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -237,13 +278,29 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { if time.Now().After(timeout) { t.Fatal("did not find a written token after timeout") } - val, err := ioutil.ReadFile(out) + val, err := os.ReadFile(out) if err == nil { os.Remove(out) if len(val) == 0 { t.Fatal("written token was empty") } + // First, ensure JWT has been removed + if removeJWTAfterReading { + _, err = os.Stat(in) + if err == nil { + t.Fatal("no error returned from stat, indicating the jwt is still present") + } + if !os.IsNotExist(err) { + t.Fatalf("unexpected error: %v", err) + } + } else { + _, err := os.Stat(in) + if err != nil { + t.Fatal("JWT file removed despite removeJWTAfterReading being set to false") + } + } + // First decrypt it resp := new(dhutil.Envelope) if err := jsonutil.DecodeJSON(val, resp); err != nil { @@ -336,7 +393,7 @@ func testJWTEndToEnd(t *testing.T, ahWrapping bool) { // Get another token to test the backend pushing the need to authenticate // to the handler jwtToken, _ = GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } diff --git a/command/agent/oci_end_to_end_test.go 
b/command/agent/oci_end_to_end_test.go new file mode 100644 index 000000000000..eb5f215ed886 --- /dev/null +++ b/command/agent/oci_end_to_end_test.go @@ -0,0 +1,230 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + vaultoci "github.com/hashicorp/vault-plugin-auth-oci" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const ( + envVarOCITestTenancyOCID = "OCI_TEST_TENANCY_OCID" + envVarOCITestUserOCID = "OCI_TEST_USER_OCID" + envVarOCITestFingerprint = "OCI_TEST_FINGERPRINT" + envVarOCITestPrivateKeyPath = "OCI_TEST_PRIVATE_KEY_PATH" + envVAROCITestOCIDList = "OCI_TEST_OCID_LIST" + + // The OCI SDK doesn't export its standard env vars so they're captured here. + // These are used for the duration of the test to make sure the agent is able to + // pick up creds from the env. + // + // To run this test, do not set these. Only the above ones need to be set. + envVarOCITenancyOCID = "OCI_tenancy_ocid" + envVarOCIUserOCID = "OCI_user_ocid" + envVarOCIFingerprint = "OCI_fingerprint" + envVarOCIPrivateKeyPath = "OCI_private_key_path" +) + +func TestOCIEndToEnd(t *testing.T) { + if !runAcceptanceTests { + t.SkipNow() + } + + // Ensure each cred is populated. + credNames := []string{ + envVarOCITestTenancyOCID, + envVarOCITestUserOCID, + envVarOCITestFingerprint, + envVarOCITestPrivateKeyPath, + envVAROCITestOCIDList, + } + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "oci": vaultoci.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + if err := client.Sys().EnableAuthWithOptions("oci", &api.EnableAuthOptions{ + Type: "oci", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/oci/config", map[string]interface{}{ + "home_tenancy_id": os.Getenv(envVarOCITestTenancyOCID), + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/oci/role/test", map[string]interface{}{ + "ocid_list": os.Getenv(envVAROCITestOCIDList), + }); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // We're going to feed oci auth creds via env variables. 
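+	// setOCIEnvCreds copies the OCI_TEST_* values into the lowercase OCI_*
+	// variables captured above, and unsetOCIEnvCreds clears them again when the
+	// test finishes (see the helpers at the bottom of this file).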
+ if err := setOCIEnvCreds(); err != nil { + t.Fatal(err) + } + defer func() { + if err := unsetOCIEnvCreds(); err != nil { + t.Fatal(err) + } + }() + + vaultAddr := "http://" + cluster.Cores[0].Listeners[0].Addr().String() + + am, err := agentoci.NewOCIAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.oci"), + MountPath: "auth/oci", + Config: map[string]interface{}{ + "type": "apikey", + "role": "test", + }, + }, vaultAddr) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. 
+ time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } +} + +func setOCIEnvCreds() error { + if err := os.Setenv(envVarOCITenancyOCID, os.Getenv(envVarOCITestTenancyOCID)); err != nil { + return err + } + if err := os.Setenv(envVarOCIUserOCID, os.Getenv(envVarOCITestUserOCID)); err != nil { + return err + } + if err := os.Setenv(envVarOCIFingerprint, os.Getenv(envVarOCITestFingerprint)); err != nil { + return err + } + return os.Setenv(envVarOCIPrivateKeyPath, os.Getenv(envVarOCITestPrivateKeyPath)) +} + +func unsetOCIEnvCreds() error { + if err := os.Unsetenv(envVarOCITenancyOCID); err != nil { + return err + } + if err := os.Unsetenv(envVarOCIUserOCID); err != nil { + return err + } + if err := os.Unsetenv(envVarOCIFingerprint); err != nil { + return err + } + return os.Unsetenv(envVarOCIPrivateKeyPath) +} diff --git a/command/agent/sink/mock/mock_sink.go b/command/agent/sink/mock/mock_sink.go deleted file mode 100644 index fb4720cc17ab..000000000000 --- a/command/agent/sink/mock/mock_sink.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "github.com/hashicorp/vault/command/agent/sink" -) - -type mockSink struct { - token string -} - -func NewSink(token string) sink.Sink { - return &mockSink{ - token: token, - } -} - -func (m *mockSink) WriteToken(token string) error { - m.token = token - return nil -} - -func (m *mockSink) Token() string { - return m.token -} diff --git a/command/agent/template/template.go b/command/agent/template/template.go index 32e9022bb78b..b0fa9326ab0d 100644 --- a/command/agent/template/template.go +++ b/command/agent/template/template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Package template is responsible for rendering user supplied templates to // disk. The Server type accepts configuration to communicate to a Vault server // and a Vault token for authentication. 
Internally, the Server creates a Consul @@ -10,15 +13,16 @@ import ( "errors" "fmt" "io" - "strings" "go.uber.org/atomic" ctconfig "github.com/hashicorp/consul-template/config" - ctlogging "github.com/hashicorp/consul-template/logging" "github.com/hashicorp/consul-template/manager" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/internal/ctmanager" + "github.com/hashicorp/vault/helper/useragent" "github.com/hashicorp/vault/sdk/helper/pointerutil" ) @@ -107,8 +111,14 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct // configuration var runnerConfig *ctconfig.Config var runnerConfigErr error - - if runnerConfig, runnerConfigErr = newRunnerConfig(ts.config, templates); runnerConfigErr != nil { + managerConfig := ctmanager.ManagerConfig{ + AgentConfig: ts.config.AgentConfig, + Namespace: ts.config.Namespace, + LogLevel: ts.config.LogLevel, + LogWriter: ts.config.LogWriter, + } + runnerConfig, runnerConfigErr = ctmanager.NewConfig(managerConfig, templates) + if runnerConfigErr != nil { return fmt.Errorf("template server failed to generate runner config: %w", runnerConfigErr) } @@ -157,7 +167,8 @@ func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ct *latestToken = token ctv := ctconfig.Config{ Vault: &ctconfig.VaultConfig{ - Token: latestToken, + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), }, } @@ -224,131 +235,3 @@ func (ts *Server) Stop() { close(ts.DoneCh) } } - -// newRunnerConfig returns a consul-template runner configuration, setting the -// Vault and Consul configurations based on the clients configs. -func newRunnerConfig(sc *ServerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { - conf := ctconfig.DefaultConfig() - conf.Templates = templates.Copy() - - // Setup the Vault config - // Always set these to ensure nothing is picked up from the environment - conf.Vault.RenewToken = pointerutil.BoolPtr(false) - conf.Vault.Token = pointerutil.StringPtr("") - conf.Vault.Address = &sc.AgentConfig.Vault.Address - - if sc.Namespace != "" { - conf.Vault.Namespace = &sc.Namespace - } - - if sc.AgentConfig.TemplateConfig != nil && sc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { - conf.Vault.DefaultLeaseDuration = &sc.AgentConfig.TemplateConfig.StaticSecretRenderInt - } - - if sc.AgentConfig.DisableIdleConnsTemplating { - idleConns := -1 - conf.Vault.Transport.MaxIdleConns = &idleConns - } - - if sc.AgentConfig.DisableKeepAlivesTemplating { - conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) - } - - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(false), - Verify: pointerutil.BoolPtr(false), - Cert: pointerutil.StringPtr(""), - Key: pointerutil.StringPtr(""), - CaCert: pointerutil.StringPtr(""), - CaPath: pointerutil.StringPtr(""), - ServerName: pointerutil.StringPtr(""), - } - - // We need to assign something to Vault.Retry or it will use its default of 12 retries. - // This retry value will be respected regardless of if we use the cache. - var attempts int - if sc.AgentConfig.Vault != nil && sc.AgentConfig.Vault.Retry != nil { - attempts = sc.AgentConfig.Vault.Retry.NumRetries - } - - // Use the cache if available or fallback to the Vault server values.
- if sc.AgentConfig.Cache != nil { - if sc.AgentConfig.Cache.InProcDialer == nil { - return nil, fmt.Errorf("missing in-process dialer configuration") - } - if conf.Vault.Transport == nil { - conf.Vault.Transport = &ctconfig.TransportConfig{} - } - conf.Vault.Transport.CustomDialer = sc.AgentConfig.Cache.InProcDialer - // The in-process dialer ignores the address passed in, but we're still - // setting it here to override the setting at the top of this function, - // and to prevent the vault/http client from defaulting to https. - conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") - } else if strings.HasPrefix(sc.AgentConfig.Vault.Address, "https") || sc.AgentConfig.Vault.CACert != "" { - skipVerify := sc.AgentConfig.Vault.TLSSkipVerify - verify := !skipVerify - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(true), - Verify: &verify, - Cert: &sc.AgentConfig.Vault.ClientCert, - Key: &sc.AgentConfig.Vault.ClientKey, - CaCert: &sc.AgentConfig.Vault.CACert, - CaPath: &sc.AgentConfig.Vault.CAPath, - ServerName: &sc.AgentConfig.Vault.TLSServerName, - } - } - enabled := attempts > 0 - conf.Vault.Retry = &ctconfig.RetryConfig{ - Attempts: &attempts, - Enabled: &enabled, - } - - // Sync Consul Template's retry with user set auto-auth initial backoff value. - // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch - // secrets. - if sc.AgentConfig.AutoAuth != nil && sc.AgentConfig.AutoAuth.Method != nil { - if sc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { - conf.Vault.Retry.Backoff = &sc.AgentConfig.AutoAuth.Method.MinBackoff - } - - if sc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { - conf.Vault.Retry.MaxBackoff = &sc.AgentConfig.AutoAuth.Method.MaxBackoff - } - } - - conf.Finalize() - - // setup log level from TemplateServer config - conf.LogLevel = logLevelToStringPtr(sc.LogLevel) - - if err := ctlogging.Setup(&ctlogging.Config{ - Level: *conf.LogLevel, - Writer: sc.LogWriter, - }); err != nil { - return nil, err - } - return conf, nil -} - -// logLevelToString converts a go-hclog level to a matching, uppercase string -// value. It's used to convert Vault Agent's hclog level to a string version -// suitable for use in Consul Template's runner configuration input. -func logLevelToStringPtr(level hclog.Level) *string { - // consul template's default level is WARN, but Vault Agent's default is INFO, - // so we use that for the Runner's default. - var levelStr string - - switch level { - case hclog.Trace: - levelStr = "TRACE" - case hclog.Debug: - levelStr = "DEBUG" - case hclog.Warn: - levelStr = "WARN" - case hclog.Error: - levelStr = "ERR" - default: - levelStr = "INFO" - } - return pointerutil.StringPtr(levelStr) -} diff --git a/command/agent/template/template_test.go b/command/agent/template/template_test.go index 25999945f99d..e03460ec826e 100644 --- a/command/agent/template/template_test.go +++ b/command/agent/template/template_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package template import ( @@ -14,6 +17,8 @@ import ( ctconfig "github.com/hashicorp/consul-template/config" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/internal/ctmanager" + "github.com/hashicorp/vault/command/agentproxyshared" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" "github.com/hashicorp/vault/sdk/helper/logging" @@ -23,6 +28,14 @@ import ( "google.golang.org/grpc/test/bufconn" ) +func newRunnerConfig(s *ServerConfig, configs ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + managerCfg := ctmanager.ManagerConfig{ + AgentConfig: s.AgentConfig, + } + cfg, err := ctmanager.NewConfig(managerCfg, configs) + return cfg, err +} + // TestNewServer is a simple test to make sure NewServer returns a Server and // channel func TestNewServer(t *testing.T) { @@ -75,7 +88,7 @@ func newAgentConfig(listeners []*configutil.Listener, enableCache, enablePersise } if enablePersisentCache { - agentConfig.Cache.Persist = &config.Persist{Type: "kubernetes"} + agentConfig.Cache.Persist = &agentproxyshared.PersistConfig{Type: "kubernetes"} } return agentConfig diff --git a/command/agent/test-fixtures/reload/reload_bar.key b/command/agent/test-fixtures/reload/reload_bar.key new file mode 100644 index 000000000000..10849fbe1d7f --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 +XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_bar.pem b/command/agent/test-fixtures/reload/reload_bar.pem new file mode 100644 index 000000000000..a8217be5c7df --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- 
+MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_ca.pem b/command/agent/test-fixtures/reload/reload_ca.pem new file mode 100644 index 000000000000..72a74440c482 --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ +9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_foo.key b/command/agent/test-fixtures/reload/reload_foo.key new file mode 100644 index 000000000000..86e6cce63e64 --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey 
+szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO +FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_foo.pem b/command/agent/test-fixtures/reload/reload_foo.pem new file mode 100644 index 000000000000..c8b868bcd0f0 --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/agent/testing.go b/command/agent/testing.go index d4de988a9857..f3260c14833e 100644 --- a/command/agent/testing.go +++ b/command/agent/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package agent + import ( @@ -6,14 +9,14 @@ "crypto/x509" "encoding/json" "encoding/pem" - "io/ioutil" "os" "testing" "time" + "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/jwt" + "github.com/hashicorp/vault/sdk/logical" - jose "gopkg.in/square/go-jose.v2" - "gopkg.in/square/go-jose.v2/jwt" ) const envVarRunAccTests = "VAULT_ACC" @@ -61,7 +64,7 @@ func GetTestJWT(t *testing.T) (string, *ecdsa.PrivateKey) { } func readToken(fileName string) (*logical.HTTPWrapInfo, error) { - b, err := ioutil.ReadFile(fileName) + b, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/command/agent/token_file_end_to_end_test.go b/command/agent/token_file_end_to_end_test.go new file mode 100644 index 000000000000..7eb8c9a69fc8 --- /dev/null +++ b/command/agent/token_file_end_to_end_test.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package agent + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/vault" +) + +func TestTokenFileEndToEnd(t *testing.T) { + logger := logging.NewVaultLogger(log.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + secret, err := client.Auth().Token().Create(nil) + if err != nil || secret == nil { + t.Fatal(err) + } + + tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file")) + if err != nil { + t.Fatal(err) + } + tokenFileName := tokenFile.Name() + tokenFile.Close() // WriteFile doesn't need it open + if err := os.WriteFile(tokenFileName, []byte(secret.Auth.ClientToken), 0o666); err != nil { + t.Fatal(err) + } + defer os.Remove(tokenFileName) + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + am, err := token_file.NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": tokenFileName, + }, + }) + if err != nil { + t.Fatal(err) + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // We close this right away because we're just testing permissions + // and finding a usable file name + sinkFile, err := os.Create(filepath.Join(t.TempDir(), "auth.tokensink.test.")) + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := sinkFile.Name() + sinkFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := 
sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers, so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines, + // unblock any of the blocking defer calls by the runner's DoneCh that + // come before this, and avoid having successful tests take the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the token file to be read and an auth to be generated. + time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } + + _, err = os.Stat(tokenFileName) + if err != nil { + t.Fatal("token file was unexpectedly removed") + } +} diff --git a/command/agent_generate_config.go b/command/agent_generate_config.go new file mode 100644 index 000000000000..cc394490961d --- /dev/null +++ b/command/agent_generate_config.go @@ -0,0 +1,444 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "io" + "os" + paths "path" + "sort" + "strings" + "unicode" + + "github.com/hashicorp/cli" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/go-homedir" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AgentGenerateConfigCommand)(nil) + _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) +) + +type AgentGenerateConfigCommand struct { + *BaseCommand + + flagType string + flagPaths []string + flagExec string +} + +func (c *AgentGenerateConfigCommand) Synopsis() string { + return "Generate a Vault Agent configuration file." +} + +func (c *AgentGenerateConfigCommand) Help() string { + helpText := ` +Usage: vault agent generate-config [options] [path/to/config.hcl] + + Generates a simple Vault Agent configuration file from the given parameters. + + Currently, the only supported configuration type is 'env-template', which + helps you generate a configuration file with environment variable templates + for running Vault Agent in process supervisor mode. + + For every specified secret -path, the command will attempt to generate one or + more 'env_template' entries based on the JSON key(s) stored in the + specified secret. If the secret -path ends with '/*', the command will + attempt to recurse through the secrets tree rooted at the given path, + generating 'env_template' entries for each encountered secret. Currently, + only kv-v1 and kv-v2 paths are supported. + + The command specified in the '-exec' option will be used to generate an + 'exec' entry, which will tell Vault Agent which child process to run. + + In addition to env_template entries, the command generates an 'auto_auth' + section with the 'token_file' authentication method. While this method is very + convenient for local testing, it should NOT be used in production.
Please + see https://developer.hashicorp.com/vault/docs/agent-and-proxy/autoauth/methods + for a list of production-ready auto_auth methods that you can use instead. + + By default, the file will be generated in the current directory as 'agent.hcl' + unless a path is specified as an argument. + + Generate a simple environment variable template configuration: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" + + Generate an environment variable template configuration for multiple secrets: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" \ + -path="secret/bar" \ + -path="secret/my-app/*" + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AgentGenerateConfigCommand) Flags() *FlagSets { + // Include client-modifying flags (-address, -namespace, etc.) + set := c.flagSet(FlagSetHTTP) + + // Common Options + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", + Completion: complete.PredictSet( + "env-template", + ), + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "path", + Target: &c.flagPaths, + Usage: "Path to a kv-v1 or kv-v2 secret (e.g. secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", + Completion: c.PredictVaultFolders(), + }) + + f.StringVar(&StringVar{ + Name: "exec", + Target: &c.flagExec, + Default: "env", + Usage: "The command to execute in agent process supervisor mode.", + }) + + return set +} + +func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentGenerateConfigCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) + return 1 + } + + if c.flagType == "" { + c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) + return 1 + } + + if c.flagType != "env-template" { + c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %v", err)) + return 2 + } + + var configPath string + if len(args) == 1 { + configPath = args[0] + } else { + configPath = "agent.hcl" + } + + f, err := os.Create(configPath) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) + return 3 + } + defer func() { + if err := f.Close(); err != nil { + c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) + } + }() + + if _, err := config.WriteTo(f); err != nil { + c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", configPath, err)) + return 3 + } + + c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) + + c.UI.Warn("Warning: the generated file uses the 'token_file' authentication method, which is 
not suitable for production environments.") + + return 0 +} + +func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { + var execCommand []string + if flagExec != "" { + execCommand = strings.Split(flagExec, " ") + } else { + execCommand = []string{"env"} + } + + tokenPath, err := homedir.Expand("~/.vault-token") + if err != nil { + return nil, fmt.Errorf("could not expand home directory: %w", err) + } + + templates, err := constructTemplates(ctx, client, flagPaths) + if err != nil { + return nil, fmt.Errorf("could not generate templates: %w", err) + } + + config := generatedConfig{ + AutoAuth: generatedConfigAutoAuth{ + Method: generatedConfigAutoAuthMethod{ + Type: "token_file", + Config: generatedConfigAutoAuthMethodConfig{ + TokenFilePath: tokenPath, + }, + }, + }, + TemplateConfig: generatedConfigTemplateConfig{ + StaticSecretRenderInterval: "5m", + ExitOnRetryFailure: true, + MaxConnectionsPerHost: 10, + }, + Vault: generatedConfigVault{ + Address: client.Address(), + }, + Exec: generatedConfigExec{ + Command: execCommand, + RestartOnSecretChanges: "always", + RestartStopSignal: "SIGTERM", + }, + EnvTemplates: templates, + } + + contents := hclwrite.NewEmptyFile() + + gohcl.EncodeIntoBody(&config, contents.Body()) + + return contents, nil +} + +func constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + for _, path := range paths { + path = sanitizePath(path) + + mountPath, v2, err := isKVv2(path, client) + if err != nil { + return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) + } + + switch { + case strings.HasSuffix(path, "/*"): + // this path contains a tail wildcard, attempt to walk the tree + t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not traverse secret at %q: %w", path, err) + } + templates = append(templates, t...) + + case strings.Contains(path, "*"): + // don't allow any other wildcards + return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) + + default: + // regular secret path + t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not read secret at %q: %w", path, err) + } + templates = append(templates, t...) + } + } + + return templates, nil +} + +func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + metadataPath := strings.Replace( + path, + paths.Join(mountPath, "data"), + paths.Join(mountPath, "metadata"), + 1, + ) + if path != metadataPath { + path = metadataPath + } else { + path = addPrefixToKVPath(path, mountPath, "metadata", true) + } + } + + err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { + if directory { + return nil + } + + dataPath := strings.Replace( + child, + paths.Join(mountPath, "metadata"), + paths.Join(mountPath, "data"), + 1, + ) + + t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) + if err != nil { + return err + } + templates = append(templates, t...)
+ + return nil + }) + if err != nil { + return nil, err + } + + return templates, nil +} + +func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + path = addPrefixToKVPath(path, mountPath, "data", true) + } + + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil { + return nil, fmt.Errorf("error querying %q: %w", path, err) + } + if resp == nil { + return nil, fmt.Errorf("secret not found at %q", path) + } + + var data map[string]interface{} + if v2 { + internal, ok := resp.Data["data"] + if !ok { + return nil, fmt.Errorf("secret.Data not found") + } + data, ok = internal.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for secret.Data at %q", path) + } + } else { + data = resp.Data + } + + fields := make([]string, 0, len(data)) + + for field := range data { + fields = append(fields, field) + } + + // sort for a deterministic output + sort.Strings(fields) + + var dataContents string + if v2 { + dataContents = ".Data.data" + } else { + dataContents = ".Data" + } + + for _, field := range fields { + templates = append(templates, generatedConfigEnvTemplate{ + Name: constructDefaultEnvironmentKey(path, field), + Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), + ErrorOnMissingKey: true, + }) + } + + return templates, nil +} + +// constructDefaultEnvironmentKey derives an environment variable name from the +// last element of the secret path and the field name, uppercased and joined +// with underscores, e.g. ("kv-v2/data/foo", "password") becomes "FOO_PASSWORD". +func constructDefaultEnvironmentKey(path string, field string) string { + pathParts := strings.Split(path, "/") + pathPartsLast := pathParts[len(pathParts)-1] + + notLetterOrNumber := func(r rune) bool { + return !unicode.IsLetter(r) && !unicode.IsNumber(r) + } + + p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) + p2 := strings.FieldsFunc(field, notLetterOrNumber) + + keyParts := append(p1, p2...) + + return strings.ToUpper(strings.Join(keyParts, "_")) +} + +// Below, we are redefining a subset of the configuration-related structures +// defined under command/agent/config. Using these structures we can tailor the +// output of the generated config, while using the original structures would +// have produced an HCL document with many empty fields. The structures below +// should not be used for anything other than generation.
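+// +// For illustration, a configuration generated from these structures (with a +// single kv-v2 secret and the default "env" exec command) looks roughly like +// the following; the values here are placeholders, and TestGenerateConfiguration +// in agent_generate_config_test.go pins down the authoritative shape: +// +// auto_auth { +// method { +// type = "token_file" +// config { +// token_file_path = "/home/user/.vault-token" +// } +// } +// } +// +// template_config { +// static_secret_render_interval = "5m" +// exit_on_retry_failure = true +// max_connections_per_host = 10 +// } +// +// vault { +// address = "https://127.0.0.1:8200" +// } +// +// env_template "FOO_PASSWORD" { +// contents = "{{ with secret \"kv-v2/data/foo\" }}{{ .Data.data.password }}{{ end }}" +// error_on_missing_key = true +// } +// +// exec { +// command = ["env"] +// restart_on_secret_changes = "always" +// restart_stop_signal = "SIGTERM" +// }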
+ +type generatedConfig struct { + AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` + TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` + Vault generatedConfigVault `hcl:"vault,block"` + EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` + Exec generatedConfigExec `hcl:"exec,block"` +} + +type generatedConfigTemplateConfig struct { + StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` + ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` + MaxConnectionsPerHost int `hcl:"max_connections_per_host"` +} + +type generatedConfigExec struct { + Command []string `hcl:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` + RestartStopSignal string `hcl:"restart_stop_signal"` +} + +type generatedConfigEnvTemplate struct { + Name string `hcl:"name,label"` + Contents string `hcl:"contents,attr"` + ErrorOnMissingKey bool `hcl:"error_on_missing_key"` +} + +type generatedConfigVault struct { + Address string `hcl:"address"` +} + +type generatedConfigAutoAuth struct { + Method generatedConfigAutoAuthMethod `hcl:"method,block"` +} + +type generatedConfigAutoAuthMethod struct { + Type string `hcl:"type"` + Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` +} + +type generatedConfigAutoAuthMethodConfig struct { + TokenFilePath string `hcl:"token_file_path"` +} diff --git a/command/agent_generate_config_test.go b/command/agent_generate_config_test.go new file mode 100644 index 000000000000..cbe341f8f363 --- /dev/null +++ b/command/agent_generate_config_test.go @@ -0,0 +1,276 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bytes" + "context" + "reflect" + "regexp" + "testing" + "time" +) + +// TestConstructTemplates tests the constructTemplates helper function +func TestConstructTemplates(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + paths []string + expected []generatedConfigEnvTemplate + expectedError bool + }{ + "kv-v1-simple": { + paths: []string{"kv-v1/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + paths: []string{"kv-v2/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-data-in-path": { + paths: []string{"kv-v2/data/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + paths: []string{"kv-v1/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: 
"BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + paths: []string{"kv-v2/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v1-multi-path": { + paths: []string{"kv-v1/foo", "kv-v1/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v2-multi-path": { + paths: []string{"kv-v2/foo", "kv-v2/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v1-path-not-found": { + paths: []string{"kv-v1/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-path-not-found": { + paths: []string{"kv-v2/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v1-early-wildcard": { + paths: []string{"kv-v1/*/foo"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-early-wildcard": { + paths: []string{"kv-v2/*/foo"}, + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + templates, err := 
constructTemplates(ctx, client, tc.paths) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, templates) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, templates) + } + } + }) + } +} + +// TestGenerateConfiguration tests the generateConfiguration helper function +func TestGenerateConfiguration(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + flagExec string + flagPaths []string + expected *regexp.Regexp + expectedError bool + }{ + "kv-v1-simple": { + flagExec: "./my-app arg1 arg2", + flagPaths: []string{"kv-v1/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true + max_connections_per_host = 10 +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["./my-app", "arg1", "arg2"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + + "kv-v2-default-exec": { + flagExec: "", + flagPaths: []string{"kv-v2/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true + max_connections_per_host = 10 +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["env"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + var config bytes.Buffer + + c, err := generateConfiguration(ctx, client, tc.flagExec, tc.flagPaths) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + // Only render the config once generateConfiguration has succeeded; + // calling WriteTo on a nil WriterTo would panic in error cases. + if _, err := c.WriteTo(&config); err != nil { + t.Fatal(err) + } + + if !tc.expected.MatchString(config.String()) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected.String(), config.String()) + } + } + }) + } +} diff --git a/command/agent_test.go b/command/agent_test.go index 4c1ef04e812e..ddef97f5eac1 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -1,9 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "bufio" + "context" + "crypto/tls" + "crypto/x509" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "os" @@ -14,27 +22,34 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + auditFile "github.com/hashicorp/vault/builtin/audit/file" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" "github.com/hashicorp/vault/command/agent" agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/pointerutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( BasicHclConfig = ` -log_file = "/foo/bar/juan.log" +log_file = "TMPDIR/juan.log" +log_level="warn" +log_rotate_max_files=2 +log_rotate_bytes=1048576 vault { address = "http://127.0.0.1:8200" retry { @@ -44,7 +59,27 @@ vault { listener "tcp" { address = "127.0.0.1:8100" - tls_disable = true + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +}` + BasicHclConfig2 = ` +log_file = "TMPDIR/juan.log" +log_level="debug" +log_rotate_max_files=-1 +log_rotate_bytes=1048576 +vault { + address = "http://127.0.0.1:8200" + retry { + num_retries = 5 + } +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" }` ) @@ -57,195 +92,13 @@ func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCo UI: ui, }, ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), } } -/* -func TestAgent_Cache_UnixListener(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - Logger: logger.Named("core"), - CredentialBackends: map[string]logical.Factory{ - "jwt": vaultjwt.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, client.Address()) - - defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert)) - os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) - - // Setup Vault - err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ - Type: "jwt", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ - "bound_issuer": "https://team-vault.auth0.com/", - "jwt_validation_pubkeys": agent.TestECDSAPubKey, - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ - "role_type": "jwt", - "bound_subject": 
"r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", - "bound_audiences": "https://vault.plugin.auth.jwt.test", - "user_claim": "https://vault/user", - "groups_claim": "https://vault/groups", - "policies": "test", - "period": "3s", - }) - if err != nil { - t.Fatal(err) - } - - inf, err := ioutil.TempFile("", "auth.jwt.test.") - if err != nil { - t.Fatal(err) - } - in := inf.Name() - inf.Close() - os.Remove(in) - t.Logf("input: %s", in) - - sink1f, err := ioutil.TempFile("", "sink1.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink1 := sink1f.Name() - sink1f.Close() - os.Remove(sink1) - t.Logf("sink1: %s", sink1) - - sink2f, err := ioutil.TempFile("", "sink2.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink2 := sink2f.Name() - sink2f.Close() - os.Remove(sink2) - t.Logf("sink2: %s", sink2) - - conff, err := ioutil.TempFile("", "conf.jwt.test.") - if err != nil { - t.Fatal(err) - } - conf := conff.Name() - conff.Close() - os.Remove(conf) - t.Logf("config: %s", conf) - - jwtToken, _ := agent.GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test jwt", "path", in) - } - - socketff, err := ioutil.TempFile("", "cache.socket.") - if err != nil { - t.Fatal(err) - } - socketf := socketff.Name() - socketff.Close() - os.Remove(socketf) - t.Logf("socketf: %s", socketf) - - config := ` -auto_auth { - method { - type = "jwt" - config = { - role = "test" - path = "%s" - } - } - - sink { - type = "file" - config = { - path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -} - -cache { - use_auto_auth_token = true - - listener "unix" { - address = "%s" - tls_disable = true - } -} -` - - config = fmt.Sprintf(config, in, sink1, sink2, socketf) - if err := ioutil.WriteFile(conf, []byte(config), 0600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test config", "path", conf) - } - - _, cmd := testAgentCommand(t, logger) - cmd.client = client - - // Kill the command 5 seconds after it starts - go func() { - select { - case <-cmd.ShutdownCh: - case <-time.After(5 * time.Second): - cmd.ShutdownCh <- struct{}{} - } - }() - - originalVaultAgentAddress := os.Getenv(api.EnvVaultAgentAddr) - - // Create a client that talks to the agent - os.Setenv(api.EnvVaultAgentAddr, socketf) - testClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - os.Setenv(api.EnvVaultAgentAddr, originalVaultAgentAddress) - - // Start the agent - go cmd.Run([]string{"-config", conf}) - - // Give some time for the auto-auth to complete - time.Sleep(1 * time.Second) - - // Invoke lookup self through the agent - secret, err := testClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Data == nil || secret.Data["id"].(string) == "" { - t.Fatalf("failed to perform lookup self through agent") - } -} -*/ - func TestAgent_ExitAfterAuth(t *testing.T) { t.Run("via_config", func(t *testing.T) { testAgentExitAfterAuth(t, false) @@ -259,7 +112,6 @@ func TestAgent_ExitAfterAuth(t *testing.T) { func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { logger := logging.NewVaultLogger(hclog.Trace) coreConfig := &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "jwt": vaultjwt.Factory, }, @@ -303,7 +155,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { t.Fatal(err) } - inf, err := ioutil.TempFile("", "auth.jwt.test.") + inf, err := os.CreateTemp("", "auth.jwt.test.") if err != nil { t.Fatal(err) } @@ -312,7 +164,7 @@ 
func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(in) t.Logf("input: %s", in) - sink1f, err := ioutil.TempFile("", "sink1.jwt.test.") + sink1f, err := os.CreateTemp("", "sink1.jwt.test.") if err != nil { t.Fatal(err) } @@ -321,7 +173,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(sink1) t.Logf("sink1: %s", sink1) - sink2f, err := ioutil.TempFile("", "sink2.jwt.test.") + sink2f, err := os.CreateTemp("", "sink2.jwt.test.") if err != nil { t.Fatal(err) } @@ -330,7 +182,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { os.Remove(sink2) t.Logf("sink2: %s", sink2) - conff, err := ioutil.TempFile("", "conf.jwt.test.") + conff, err := os.CreateTemp("", "conf.jwt.test.") if err != nil { t.Fatal(err) } @@ -340,7 +192,7 @@ func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { t.Logf("config: %s", conf) jwtToken, _ := agent.GetTestJWT(t) - if err := ioutil.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test jwt", "path", in) @@ -379,7 +231,7 @@ auto_auth { ` config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) - if err := ioutil.WriteFile(conf, []byte(config), 0o600); err != nil { + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { t.Fatal(err) } else { logger.Trace("wrote test config", "path", conf) @@ -411,7 +263,7 @@ auto_auth { t.Fatal("timeout reached while waiting for agent to exit") } - sink1Bytes, err := ioutil.ReadFile(sink1) + sink1Bytes, err := os.ReadFile(sink1) if err != nil { t.Fatal(err) } @@ -419,7 +271,7 @@ auto_auth { t.Fatal("got no output from sink 1") } - sink2Bytes, err := ioutil.ReadFile(sink2) + sink2Bytes, err := os.ReadFile(sink2) if err != nil { t.Fatal(err) } @@ -463,7 +315,6 @@ func TestAgent_RequireRequestHeader(t *testing.T) { logger := logging.NewVaultLogger(hclog.Trace) cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -477,40 +328,7 @@ func TestAgent_RequireRequestHeader(t *testing.T) { serverClient := cluster.Cores[0].Client // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "secret_id_num_uses": "10", - "secret_id_ttl": "1m", - "token_max_ttl": "1m", - "token_num_uses": "10", - "token_ttl": "1m" - }`) - request(t, serverClient, req, 204) - - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) + roleIDPath, secretIDPath := setupAppRole(t, serverClient) // Create a config file config := ` @@ -536,11 
+354,13 @@ listener "tcp" { address = "%s" tls_disable = true require_request_header = false + disable_request_limiter = false } listener "tcp" { address = "%s" tls_disable = true require_request_header = true + disable_request_limiter = true } ` listenAddr1 := generateListenerAddress(t) @@ -562,14 +382,14 @@ listener "tcp" { cmd.client = serverClient cmd.startedCh = make(chan struct{}) + var output string + var code int wg := &sync.WaitGroup{} wg.Add(1) go func() { - code := cmd.Run([]string{"-config", configPath}) + code = cmd.Run([]string{"-config", configPath}) if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + output = ui.ErrorWriter.String() + ui.OutputWriter.String() } wg.Done() }() @@ -577,13 +397,16 @@ listener "tcp" { select { case <-cmd.startedCh: case <-time.After(5 * time.Second): - t.Errorf("timeout") + t.Fatalf("timeout") } // defer agent shutdown defer func() { cmd.ShutdownCh <- struct{}{} wg.Wait() + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } }() //---------------------------------------------------- @@ -593,7 +416,7 @@ listener "tcp" { // Test against a listener configuration that omits // 'require_request_header', with the header missing from the request. agentClient := newApiClient("http://"+listenAddr1, false) - req = agentClient.NewRequest("GET", "/v1/sys/health") + req := agentClient.NewRequest("GET", "/v1/sys/health") request(t, agentClient, req, 200) // Test against a listener configuration that sets 'require_request_header' @@ -668,15 +491,18 @@ listener "tcp" { } } -// TestAgent_Template tests rendering templates -func TestAgent_Template_Basic(t *testing.T) { +// TestAgent_Template_UserAgent Validates that the User-Agent sent to Vault +// as part of Templating requests is correct. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. 
+func TestAgent_Template_UserAgent(t *testing.T) { //---------------------------------------------------- // Start the server and agent //---------------------------------------------------- logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - Logger: logger, CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, @@ -685,7 +511,16 @@ func TestAgent_Template_Basic(t *testing.T) { }, }, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentTemplatingString() + h.pathToCheck = "/v1/secret/data" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), }) cluster.Start() defer cluster.Cleanup() @@ -698,78 +533,182 @@ func TestAgent_Template_Basic(t *testing.T) { defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) os.Setenv(api.EnvVaultAddress, serverClient.Address()) - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // give test-role permissions to read the kv secret - req = serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "token_ttl": "5m", - "token_policies":"default,myapp-read", - "policies":"default,myapp-read" - }`) - request(t, serverClient, req, 204) + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") + if err != nil { + t.Fatal(err) + } - // Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) + // make some template files + var templatePaths []string + fileName := filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} - // setup the kv secrets - req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") - req.BodyBytes = []byte(`{ - "options": {"version": "2"} - }`) - request(t, serverClient, req, 200) - - // populate a secret - req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "bar", - "password": "zap" +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } } - }`) - request(t, serverClient, req, 200) +} - // populate another secret - req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "barstuff", - "password": "zap", - "cert": "something" - } - }`) - request(t, serverClient, req, 200) +%s +` + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- 
struct{}{} + wg.Wait() + }() + + verify := func(suffix string) { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + fileName = filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + + verify("{}") +} + +// TestAgent_Template tests rendering templates +func TestAgent_Template_Basic(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) // make a temp directory to hold renders. 
Each test will create a temp dir // inside this one - tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") if err != nil { t.Fatal(err) } @@ -799,7 +738,7 @@ func TestAgent_Template_Basic(t *testing.T) { for tcname, tc := range testCases { t.Run(tcname, func(t *testing.T) { // create temp dir for this test run - tmpDir, err := ioutil.TempDir(tmpDirRoot, tcname) + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) if err != nil { t.Fatal(err) } @@ -808,7 +747,7 @@ func TestAgent_Template_Basic(t *testing.T) { var templatePaths []string for i := 0; i < tc.templateCount; i++ { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := ioutil.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { + if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { t.Fatal(err) } templatePaths = append(templatePaths, fileName) @@ -835,7 +774,7 @@ auto_auth { config = { role_id_file_path = "%s" secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false + remove_secret_id_file_after_reading = false } } } @@ -894,7 +833,7 @@ auto_auth { verify := func(suffix string) { t.Helper() // We need to poll for a bit to give Agent time to render the - // templates. Without this this, the test will attempt to read + // templates. Without this, the test will attempt to read // the temp dir before Agent has had time to render and will // likely fail the test tick := time.Tick(1 * time.Second) @@ -922,7 +861,7 @@ auto_auth { for i := range templatePaths { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) var c []byte - c, err = ioutil.ReadFile(fileName) + c, err = os.ReadFile(fileName) if err != nil { continue } @@ -939,7 +878,7 @@ auto_auth { for i := 0; i < tc.templateCount; i++ { fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := ioutil.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { + if err := os.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { t.Fatal(err) } } @@ -949,58 +888,8 @@ auto_auth { } } -func testListFiles(t *testing.T, dir, extension string) int { +func setupAppRole(t *testing.T, serverClient *api.Client) (string, string) { t.Helper() - - files, err := ioutil.ReadDir(dir) - if err != nil { - t.Fatal(err) - } - var count int - for _, f := range files { - if filepath.Ext(f.Name()) == extension { - count++ - } - } - - return count -} - -// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all -// templates before exiting when the configuration uses exit_after_auth. This is -// similar to TestAgent_Template_Basic, but differs by using a consistent number -// of secrets from multiple sources, where as the basic test could possibly -// generate a random number of secrets, but all using the same source. 
This test -// reproduces https://github.com/hashicorp/vault/issues/7883 -func TestAgent_Template_ExitCounter(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - Logger: logger, - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - // Enable the approle auth method req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") req.BodyBytes = []byte(`{ @@ -1008,13 +897,6 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 204) - // give test-role permissions to read the kv secret - req = serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) - // Create a named role req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") req.BodyBytes = []byte(`{ @@ -1039,8 +921,23 @@ func TestAgent_Template_ExitCounter(t *testing.T) { // Write the RoleID and SecretID to temp files roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - defer os.Remove(roleIDPath) - defer os.Remove(secretIDPath) + t.Cleanup(func() { + os.Remove(roleIDPath) + os.Remove(secretIDPath) + }) + + return roleIDPath, secretIDPath +} + +func setupAppRoleAndKVMounts(t *testing.T, serverClient *api.Client) (string, string) { + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + // give test-role permissions to read the kv secret + req := serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") + req.BodyBytes = []byte(`{ + "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" + }`) + request(t, serverClient, req, 204) // setup the kv secrets req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") @@ -1049,7 +946,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate a secret + // Secret: myapp req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") req.BodyBytes = []byte(`{ "data": { @@ -1059,7 +956,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate another secret + // Secret: myapp2 req = serverClient.NewRequest("POST", "/v1/secret/data/myapp2") req.BodyBytes = []byte(`{ "data": { @@ -1069,7 +966,7 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) - // populate another, another secret + // Secret: otherapp req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") req.BodyBytes = []byte(`{ "data": { @@ -1080,62 +977,275 @@ func TestAgent_Template_ExitCounter(t *testing.T) { }`) request(t, serverClient, req, 200) + return roleIDPath, secretIDPath +} + +// TestAgent_Template_VaultClientFromEnv tests that 
Vault Agent can read in its +// required `vault` client details from environment variables instead of config. +func TestAgent_Template_VaultClientFromEnv(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + // make a temp directory to hold renders. Each test will create a temp dir // inside this one - tmpDirRoot, err := ioutil.TempDir("", "agent-test-renders") + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDirRoot) - // create temp dir for this test run - tmpDir, err := ioutil.TempDir(tmpDirRoot, "agent-test") - if err != nil { - t.Fatal(err) + vaultAddr := "https://" + cluster.Cores[0].Listeners[0].Address.String() + testCases := map[string]struct { + env map[string]string + }{ + "VAULT_ADDR and VAULT_CACERT": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACert: cluster.CACertPEMFile, + }, + }, + "VAULT_ADDR and VAULT_CACERT_BYTES": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACertBytes: string(cluster.CACertPEM), + }, + }, } - // Create a config file - config := ` -vault { - address = "%s" - tls_skip_verify = true + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + for k, v := range tc.env { + t.Setenv(k, v) + } + tmpDir := t.TempDir() + + // Make a template. + templateFile := filepath.Join(tmpDir, "render.tmpl") + if err := os.WriteFile(templateFile, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + + // build up the template config to be added to the Agent config.hcl file + targetFile := filepath.Join(tmpDir, "render.json") + templateConfig := fmt.Sprintf(` +template { + source = "%s" + destination = "%s" } + `, templateFile, targetFile) + // Create a config file + config := ` auto_auth { method "approle" { mount_path = "auth/approle" config = { role_id_file_path = "%s" secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false + remove_secret_id_file_after_reading = false } } } -template { - contents = "{{ with secret \"secret/myapp\" }}{{ range $k, $v := .Data.data }}{{ $v }}{{ end }}{{ end }}" - destination = "%s/render-pass.txt" -} - -template { - contents = "{{ with secret \"secret/myapp2\" }}{{ .Data.data.username}}{{ end }}" - destination = "%s/render-user.txt" -} - -template { - contents = < 10 { + t.Fatalf("did too many renews -- Vault received %d renew-self requests", numberOfRenewSelves) } +} - // defer agent shutdown - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() +// TestAgent_Logging_ConsulTemplate attempts to ensure two things about Vault Agent logs: +// 1. When -log-format command line arg is set to JSON, it is honored as the output format +// for messages generated from within the consul-template library. +// 2. 
When -log-file command line arg is supplied, a file receives all log messages +// generated by the consul-template library (they don't just go to stdout/stderr). +// Should prevent a regression of: https://github.com/hashicorp/vault/issues/21109 +func TestAgent_Logging_ConsulTemplate(t *testing.T) { + const ( + runnerLogMessage = "(runner) creating new runner (dry: false, once: false)" + ) + + // Configure a Vault server so Agent can successfully communicate and render its templates + cluster := minimal.NewTestSoloCluster(t, nil) + apiClient := cluster.Cores[0].Client + t.Setenv(api.EnvVaultAddress, apiClient.Address()) + tempDir := t.TempDir() + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, apiClient) + + // Create relevant configs for Vault Agent (config, template config) + templateSrc := filepath.Join(tempDir, "render_1.tmpl") + err := os.WriteFile(templateSrc, []byte(templateContents(1)), 0o600) + require.NoError(t, err) + templateConfig := fmt.Sprintf(templateConfigString, templateSrc, tempDir, "render_1.json") + + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + config = fmt.Sprintf(config, apiClient.Address(), roleIDPath, secretIDPath, templateConfig) + configFileName := filepath.Join(tempDir, "config.hcl") + err = os.WriteFile(configFileName, []byte(config), 0o600) + require.NoError(t, err) + _, cmd := testAgentCommand(t, nil) + logFilePath := filepath.Join(tempDir, "agent") + + // Start Vault Agent + go func() { + code := cmd.Run([]string{"-config", configFileName, "-log-format", "json", "-log-file", logFilePath, "-log-level", "trace"}) + require.Equalf(t, 0, code, "Vault Agent returned a non-zero exit code") }() - conf := api.DefaultConfig() - conf.Address = "http://" + listenAddr - agentClient, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatal("timeout starting agent") } - req := agentClient.NewRequest("GET", "/agent/v1/metrics") - body := request(t, agentClient, req, 200) - keys := []string{} - for k := range body { - keys = append(keys, k) + // Give Vault Agent some time to render our template. + time.Sleep(3 * time.Second) + + // This flag will be used to capture whether we saw a consul-template log + // message in the log file (the presence of the log file is also part of the test) + found := false + + // Vault Agent file logs will match agent-{timestamp}.log based on the + // cmd line argument we supplied, e.g. 
agent-1701258869573205000.log
+	m, err := filepath.Glob(logFilePath + "*")
+	require.NoError(t, err)
+	require.Truef(t, len(m) > 0, "no files were found")
+
+	for _, p := range m {
+		f, err := os.Open(p)
+		require.NoError(t, err)
+
+		fs := bufio.NewScanner(f)
+		fs.Split(bufio.ScanLines)
+
+		for fs.Scan() {
+			s := fs.Text()
+			entry := make(map[string]string)
+			err := json.Unmarshal([]byte(s), &entry)
+			require.NoError(t, err)
+			v, ok := entry["@message"]
+			if !ok {
+				continue
+			}
+			if v == runnerLogMessage {
+				found = true
+				break
+			}
+		}
+	}
-	require.ElementsMatch(t, keys, []string{
-		"Counters",
-		"Samples",
-		"Timestamp",
-		"Gauges",
-		"Points",
-	})
+
+	require.Truef(t, found, "unable to find consul-template message %q in logs", runnerLogMessage)
 }

-func TestAgent_Quit(t *testing.T) {
-	//----------------------------------------------------
-	// Start the server and agent
-	//----------------------------------------------------
-	logger := logging.NewVaultLogger(hclog.Error)
+// TestAgent_DeleteAfterVersion_Rendering validates that Vault Agent
+// can correctly render a secret with delete_after_version set.
+func TestAgent_DeleteAfterVersion_Rendering(t *testing.T) {
+	logger := logging.NewVaultLogger(hclog.Trace)
 	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
 		Logger: logger,
-		CredentialBackends: map[string]logical.Factory{
-			"approle": credAppRole.Factory,
-		},
-		LogicalBackends: map[string]logical.Factory{
-			"kv": logicalKv.Factory,
-		},
 	}, &vault.TestClusterOptions{
-		NumCores: 1,
+		NumCores:    1,
+		HandlerFunc: vaulthttp.Handler,
 	})
 	cluster.Start()
 	defer cluster.Cleanup()
@@ -2160,49 +3369,90 @@ func TestAgent_Quit(t *testing.T) {
 	vault.TestWaitActive(t, cluster.Cores[0].Core)
 	serverClient := cluster.Cores[0].Client

-	// Unset the environment variable so that agent picks up the right test
+	// Set up KVv2
+	err := serverClient.Sys().Mount("kv-v2", &api.MountInput{
+		Type: "kv-v2",
+	})
+	require.NoError(t, err)
+
+	// Configure the mount to set delete_version_after on all of its secrets
+	_, err = serverClient.Logical().Write("kv-v2/config", map[string]interface{}{
+		"delete_version_after": "1h",
+	})
+	require.NoError(t, err)
+
+	// Set up the secret (which will have delete_version_after set to 1h)
+	data, err := serverClient.KVv2("kv-v2").Put(context.Background(), "foo", map[string]interface{}{
+		"bar": "baz",
+	})
+	require.NoError(t, err)
+
+	// Ensure Deletion Time was correctly set
+	require.NotZero(t, data.VersionMetadata.DeletionTime)
+	require.True(t, data.VersionMetadata.DeletionTime.After(time.Now()))
+	require.NotNil(t, data.VersionMetadata.CreatedTime)
+	require.True(t, data.VersionMetadata.DeletionTime.After(data.VersionMetadata.CreatedTime))
+
+	// Unset the environment variable so that Agent picks up the right test
 	// cluster address
 	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
-	err := os.Unsetenv(api.EnvVaultAddress)
-	if err != nil {
-		t.Fatal(err)
-	}
+	os.Setenv(api.EnvVaultAddress, serverClient.Address())

-	listenAddr := generateListenerAddress(t)
-	listenAddr2 := generateListenerAddress(t)
-	config := fmt.Sprintf(`
+	// create temp dir for this test run
+	tmpDir, err := os.MkdirTemp("", "TestAgent_DeleteAfterVersion_Rendering")
+	require.NoError(t, err)
+
+	tokenFileName := makeTempFile(t, "token-file", serverClient.Token())
+	defer os.Remove(tokenFileName)
+
+	autoAuthConfig := fmt.Sprintf(`
+auto_auth {
+	method {
+		type = "token_file"
+		config = {
+			token_file_path = "%s"
+		}
+	}
+}`, tokenFileName)
+
+	// Create a config
file + config := ` vault { address = "%s" - tls_skip_verify = true + tls_skip_verify = true } -listener "tcp" { - address = "%s" - tls_disable = true -} +%s -listener "tcp" { - address = "%s" - tls_disable = true - agent_api { - enable_quit = true - } -} +%s +` -cache {} -`, serverClient.Address(), listenAddr, listenAddr2) + fileName := "secret.txt" + templateConfig := fmt.Sprintf(` +template { + destination = "%s/%s" + contents = "{{ with secret \"kv-v2/foo\" }}{{ .Data.data.bar }}{{ end }}" +} +`, tmpDir, fileName) + config = fmt.Sprintf(config, serverClient.Address(), autoAuthConfig, templateConfig) configPath := makeTempFile(t, "config.hcl", config) defer os.Remove(configPath) // Start the agent - _, cmd := testAgentCommand(t, logger) + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient cmd.startedCh = make(chan struct{}) wg := &sync.WaitGroup{} wg.Add(1) go func() { - cmd.Run([]string{"-config", configPath}) + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } wg.Done() }() @@ -2211,96 +3461,47 @@ cache {} case <-time.After(5 * time.Second): t.Errorf("timeout") } - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - client.SetToken(serverClient.Token()) - client.SetMaxRetries(0) - err = client.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - // First try on listener 1 where the API should be disabled. - resp, err := client.RawRequest(client.NewRequest(http.MethodPost, "/agent/v1/quit")) - if err == nil { - t.Fatalf("expected error") - } - if resp != nil && resp.StatusCode != http.StatusNotFound { - t.Fatalf("expected %d but got: %d", http.StatusNotFound, resp.StatusCode) - } - - // Now try on listener 2 where the quit API should be enabled. - err = client.SetAddress("http://" + listenAddr2) - if err != nil { - t.Fatal(err) - } - - _, err = client.RawRequest(client.NewRequest(http.MethodPost, "/agent/v1/quit")) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - select { - case <-cmd.ShutdownCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - wg.Wait() -} - -func TestAgent_LogFile_CliOverridesConfig(t *testing.T) { - // Create basic config - configFile := populateTempFile(t, "agent-config.hcl", BasicHclConfig) - cfg, err := agentConfig.LoadConfig(configFile.Name()) - if err != nil { - t.Fatal("Cannot load config to test update/merge", err) - } - - // Sanity check that the config value is the current value - assert.Equal(t, "/foo/bar/juan.log", cfg.LogFile) - // Initialize the command and parse any flags - cmd := &AgentCommand{BaseCommand: &BaseCommand{}} - f := cmd.Flags() - // Simulate the flag being specified - err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) - if err != nil { - t.Fatal(err) - } + // We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() - // Update the config based on the inputs. - cmd.updateConfig(f, cfg) + filePath := fmt.Sprintf("%s/%s", tmpDir, fileName) - assert.NotEqual(t, "/foo/bar/juan.log", cfg.LogFile) - assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) - assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) -} + waitForFiles := func() error { + tick := time.Tick(100 * time.Millisecond) + timeout := time.After(10 * time.Second) + // We need to wait for the templates to render... 
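The wait loop that follows is another instance of the tick/timeout polling idiom these tests use throughout. A generic helper capturing the pattern might look like this sketch (name and signature are illustrative, not part of the diff; assumes the time and fmt imports):

```go
// pollUntil runs check on every tick until it succeeds or the deadline
// passes, returning the last failure on timeout. Illustrative only.
func pollUntil(timeout, tick time.Duration, check func() error) error {
	deadline := time.After(timeout)
	ticker := time.Tick(tick)
	var lastErr error
	for {
		select {
		case <-deadline:
			return fmt.Errorf("timed out after %s, last error: %v", timeout, lastErr)
		case <-ticker:
		}
		if lastErr = check(); lastErr == nil {
			return nil
		}
	}
}
```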
+	for {
+		select {
+		case <-timeout:
+			t.Fatalf("timed out waiting for templates to render, last error: %v", err)
+		case <-tick:
+		}

-func TestAgent_LogFile_Config(t *testing.T) {
-	configFile := populateTempFile(t, "agent-config.hcl", BasicHclConfig)
+		_, err := os.Stat(filePath)
+		if err != nil {
+			if errors.Is(err, os.ErrNotExist) {
+				continue
+			}
+			return err
+		}

-	cfg, err := agentConfig.LoadConfig(configFile.Name())
-	if err != nil {
-		t.Fatal("Cannot load config to test update/merge", err)
+		return nil
+	}
 }

-	// Sanity check that the config value is the current value
-	assert.Equal(t, "/foo/bar/juan.log", cfg.LogFile, "sanity check on log config failed")
+	err = waitForFiles()
+	require.NoError(t, err)

-	// Parse the cli flags (but we pass in an empty slice)
-	cmd := &AgentCommand{BaseCommand: &BaseCommand{}}
-	f := cmd.Flags()
-	err = f.Parse([]string{})
-	if err != nil {
-		t.Fatal(err)
+	// Ensure the file has the expected contents
+	fileData, err := os.ReadFile(filePath)
+	require.NoError(t, err)
+	if string(fileData) != "baz" {
+		t.Fatalf("Unexpected file contents. Expected 'baz', got %s", string(fileData))
 	}
-
-	cmd.updateConfig(f, cfg)
-
-	assert.Equal(t, "/foo/bar/juan.log", cfg.LogFile, "actual config check")
 }

 // Get a randomly assigned port and then free it again before returning it.
diff --git a/command/agent/auth/alicloud/alicloud.go b/command/agentproxyshared/auth/alicloud/alicloud.go
similarity index 96%
rename from command/agent/auth/alicloud/alicloud.go
rename to command/agentproxyshared/auth/alicloud/alicloud.go
index 6fc640c290e0..d700bc02fa2e 100644
--- a/command/agent/auth/alicloud/alicloud.go
+++ b/command/agentproxyshared/auth/alicloud/alicloud.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package alicloud

 import (
@@ -14,7 +17,7 @@ import (
 	hclog "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault-plugin-auth-alicloud/tools"
 	"github.com/hashicorp/vault/api"
-	"github.com/hashicorp/vault/command/agent/auth"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
 )

 /*
@@ -60,10 +63,10 @@ func NewAliCloudAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
 	// Check for an optional custom frequency at which we should poll for creds.
 	credCheckFreqSec := defaultCredCheckFreqSeconds
 	if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok {
-		if credFreq, ok := checkFreqRaw.(int); ok {
+		if credFreq, ok := checkFreqRaw.(int); ok && credFreq > 0 {
 			credCheckFreqSec = credFreq
 		} else {
-			return nil, errors.New("could not convert 'credential_poll_interval' config value to int")
+			return nil, errors.New("could not convert 'credential_poll_interval' config value to positive int")
 		}
 	}
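The alicloud hunk above (and the matching aws change later in this diff) tightens the optional `credential_poll_interval` override so that zero and negative values are rejected instead of silently accepted. The same validation as a standalone sketch (helper name and signature are illustrative; assumes the errors import):

```go
// credPollInterval returns the configured poll interval, falling back to the
// caller's default when the key is absent and rejecting non-positive values.
// Illustrative helper, not part of the diff.
func credPollInterval(config map[string]interface{}, def int) (int, error) {
	raw, ok := config["credential_poll_interval"]
	if !ok {
		return def, nil // not configured: keep the method default
	}
	v, ok := raw.(int)
	if !ok || v <= 0 {
		return 0, errors.New("'credential_poll_interval' must be a positive int")
	}
	return v, nil
}
```

diff --git a/command/agent/auth/approle/approle.go b/command/agentproxyshared/auth/approle/approle.go
similarity index 98%
rename from command/agent/auth/approle/approle.go
rename to command/agentproxyshared/auth/approle/approle.go
index e58299ad7b2e..ef32d493cd0a 100644
--- a/command/agent/auth/approle/approle.go
+++ b/command/agentproxyshared/auth/approle/approle.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.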
+// SPDX-License-Identifier: BUSL-1.1 + package approle import ( @@ -12,7 +15,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) type approleMethod struct { diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go new file mode 100644 index 000000000000..afc71d110da1 --- /dev/null +++ b/command/agentproxyshared/auth/auth.go @@ -0,0 +1,573 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "math/rand" + "net/http" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/backoff" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + defaultMinBackoff = 1 * time.Second + defaultMaxBackoff = 5 * time.Minute +) + +// AuthMethod is the interface that auto-auth methods implement for the agent/proxy +// to use. +type AuthMethod interface { + // Authenticate returns a mount path, header, request body, and error. + // The header may be nil if no special header is needed. + Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) + NewCreds() chan struct{} + CredSuccess() + Shutdown() +} + +// AuthMethodWithClient is an extended interface that can return an API client +// for use during the authentication call. +type AuthMethodWithClient interface { + AuthMethod + AuthClient(client *api.Client) (*api.Client, error) +} + +type AuthConfig struct { + Logger hclog.Logger + MountPath string + WrapTTL time.Duration + Config map[string]interface{} +} + +// AuthHandler is responsible for keeping a token alive and renewed and passing +// new tokens to the sink server +type AuthHandler struct { + OutputCh chan string + TemplateTokenCh chan string + ExecTokenCh chan string + token string + userAgent string + metricsSignifier string + logger hclog.Logger + client *api.Client + random *rand.Rand + wrapTTL time.Duration + maxBackoff time.Duration + minBackoff time.Duration + enableReauthOnNewCredentials bool + enableTemplateTokenCh bool + enableExecTokenCh bool + exitOnError bool +} + +type AuthHandlerConfig struct { + Logger hclog.Logger + Client *api.Client + WrapTTL time.Duration + MaxBackoff time.Duration + MinBackoff time.Duration + Token string + // UserAgent is the HTTP UserAgent header auto-auth will use when + // communicating with Vault. 
+ UserAgent string + // MetricsSignifier is the first argument we will give to + // metrics.IncrCounter, signifying what the name of the application is + MetricsSignifier string + EnableReauthOnNewCredentials bool + EnableTemplateTokenCh bool + EnableExecTokenCh bool + ExitOnError bool +} + +func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { + ah := &AuthHandler{ + // This is buffered so that if we try to output after the sink server + // has been shut down, during agent/proxy shutdown, we won't block + OutputCh: make(chan string, 1), + TemplateTokenCh: make(chan string, 1), + ExecTokenCh: make(chan string, 1), + token: conf.Token, + logger: conf.Logger, + client: conf.Client, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + wrapTTL: conf.WrapTTL, + minBackoff: conf.MinBackoff, + maxBackoff: conf.MaxBackoff, + enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, + enableTemplateTokenCh: conf.EnableTemplateTokenCh, + enableExecTokenCh: conf.EnableExecTokenCh, + exitOnError: conf.ExitOnError, + userAgent: conf.UserAgent, + metricsSignifier: conf.MetricsSignifier, + } + + return ah +} + +func backoffSleep(ctx context.Context, backoff *autoAuthBackoff) bool { + nextSleep, err := backoff.backoff.Next() + if err != nil { + return false + } + select { + case <-time.After(nextSleep): + case <-ctx.Done(): + } + return true +} + +func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { + if am == nil { + return errors.New("auth handler: nil auth method") + } + + if ah.minBackoff <= 0 { + ah.minBackoff = defaultMinBackoff + } + if ah.maxBackoff <= 0 { + ah.maxBackoff = defaultMaxBackoff + } + if ah.minBackoff > ah.maxBackoff { + return errors.New("auth handler: min_backoff cannot be greater than max_backoff") + } + backoffCfg := newAutoAuthBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError) + + ah.logger.Info("starting auth handler") + defer func() { + am.Shutdown() + close(ah.OutputCh) + close(ah.TemplateTokenCh) + close(ah.ExecTokenCh) + ah.logger.Info("auth handler stopped") + }() + + credCh := am.NewCreds() + if !ah.enableReauthOnNewCredentials { + realCredCh := credCh + credCh = nil + if realCredCh != nil { + go func() { + for { + select { + case <-ctx.Done(): + return + case <-realCredCh: + } + } + }() + } + } + if credCh == nil { + credCh = make(chan struct{}) + } + + if ah.client != nil { + headers := ah.client.Headers() + if headers == nil { + headers = make(http.Header) + } + headers.Set("User-Agent", ah.userAgent) + ah.client.SetHeaders(headers) + } + + var watcher *api.LifetimeWatcher + first := true + + for { + select { + case <-ctx.Done(): + return nil + + default: + } + + var clientToUse *api.Client + var err error + var path string + var data map[string]interface{} + var header http.Header + var isTokenFileMethod bool + + switch am.(type) { + case AuthMethodWithClient: + clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client) + if err != nil { + ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + + return err + } + default: + clientToUse = ah.client + } + + // Disable retry on the client to ensure our backoffOrQuit function is + // the only source of retry/backoff. 
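+	// (The "backoffOrQuit function" referred to here is the backoffSleep
+	// helper defined above.)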
+ clientToUse.SetMaxRetries(0) + + var secret *api.Secret = new(api.Secret) + if first && ah.token != "" { + ah.logger.Debug("using preloaded token") + + first = false + ah.logger.Debug("lookup-self with preloaded token") + clientToUse.SetToken(ah.token) + + secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) + if err != nil { + ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + secret.Auth = &api.SecretAuth{ + ClientToken: secret.Data["id"].(string), + LeaseDuration: int(duration), + Renewable: secret.Data["renewable"].(bool), + } + } else { + ah.logger.Info("authenticating") + + path, header, data, err = am.Authenticate(ctx, ah.client) + if err != nil { + ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + } + + if ah.wrapTTL > 0 { + wrapClient, err := clientToUse.CloneWithHeaders() + if err != nil { + ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + wrapClient.SetWrappingLookupFunc(func(string, string) string { + return ah.wrapTTL.String() + }) + clientToUse = wrapClient + } + for key, values := range header { + for _, value := range values { + clientToUse.AddHeader(key, value) + } + } + + // This should only happen if there's no preloaded token (regular auto-auth login) + // or if a preloaded token has expired and is now switching to auto-auth. 
+ if secret.Auth == nil { + isTokenFileMethod = path == "auth/token/lookup-self" + if isTokenFileMethod { + token, _ := data["token"].(string) + lookupSelfClient, err := clientToUse.CloneWithHeaders() + if err != nil { + ah.logger.Error("failed to clone client to perform token lookup") + return err + } + lookupSelfClient.SetToken(token) + secret, err = lookupSelfClient.Auth().Token().LookupSelf() + } else { + secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) + } + + // Check errors/sanity + if err != nil { + ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + } + + var leaseDuration int + + switch { + case ah.wrapTTL > 0: + if secret.WrapInfo == nil { + ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + if secret.WrapInfo.Token == "" { + ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) + if err != nil { + ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") + ah.OutputCh <- string(wrappedResp) + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- string(wrappedResp) + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- string(wrappedResp) + } + + am.CredSuccess() + backoffCfg.backoff.Reset() + + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered") + continue + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + continue + } + + default: + // We handle the token_file method specially, as it's the only + // auth method that isn't actually authenticating, i.e. the secret + // returned does not have an Auth struct attached + isTokenFileMethod := path == "auth/token/lookup-self" + if isTokenFileMethod { + // We still check the response of the request to ensure the token is valid + // i.e. 
if the token is invalid, we will fail in the authentication step + if secret == nil || secret.Data == nil { + ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + token, ok := secret.Data["id"].(string) + if !ok || token == "" { + ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + leaseDuration = int(duration) + renewable, _ := secret.Data["renewable"].(bool) + secret.Auth = &api.SecretAuth{ + ClientToken: token, + LeaseDuration: int(duration), + Renewable: renewable, + } + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- token + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- token + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- token + } + + tokenType := secret.Data["type"].(string) + if tokenType == "batch" { + ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration) + } + } else { + if secret == nil || secret.Auth == nil { + ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + if secret.Auth.ClientToken == "" { + ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + leaseDuration = secret.LeaseDuration + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- secret.Auth.ClientToken + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- secret.Auth.ClientToken + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- secret.Auth.ClientToken + } + } + + am.CredSuccess() + backoffCfg.backoff.Reset() + } + + if watcher != nil { + watcher.Stop() + } + + watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoffSleep(ctx, backoffCfg) { + continue + } + return err + } + + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) + // We don't want to trigger the renewal process for the root token + if isRootToken(leaseDuration, isTokenFileMethod, secret) { + ah.logger.Info("not starting token renewal process, as token is root token") + } else { + ah.logger.Info("starting renewal process") + go watcher.Renew() + } + + LifetimeWatcherLoop: + for { + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered, stopping lifetime watcher") + watcher.Stop() + break LifetimeWatcherLoop + + case err := <-watcher.DoneCh(): + ah.logger.Info("lifetime watcher done channel triggered, re-authenticating") + if err != nil { + ah.logger.Error("error renewing token", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + // Add some exponential backoff so that if auth is successful + // but the 
watcher errors, we won't go into an immediate
+					// aggressive retry loop.
+					// This might be quite a small sleep, since if we have a successful
+					// auth, we reset the backoff. Still, some backoff is important, and
+					// ensuring we follow the normal flow is important:
+					// auth -> try to renew
+					if !backoffSleep(ctx, backoffCfg) {
+						// We're at max retries. Return an error.
+						return fmt.Errorf("exceeded max retries failing to renew auth token")
+					}
+				}
+
+				// If the lease duration is 0, wait a second before re-authenticating
+				// so that we don't go into a loop, as the LifetimeWatcher will immediately
+				// return for tokens like this.
+				if leaseDuration == 0 {
+					time.Sleep(1 * time.Second)
+				}
+
+				break LifetimeWatcherLoop
+
+			case <-watcher.RenewCh():
+				metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1)
+				ah.logger.Info("renewed auth token")
+
+			case <-credCh:
+				ah.logger.Info("auth method found new credentials, re-authenticating")
+				break LifetimeWatcherLoop
+			}
+		}
+	}
+}
+
+// isRootToken checks if the secret in the argument is the root token.
+// This is determinable without leaseDuration and isTokenFileMethod,
+// but those make it easier to rule out other tokens cheaply.
+func isRootToken(leaseDuration int, isTokenFileMethod bool, secret *api.Secret) bool {
+	// These checks are cheaper than fetching the token policies, so we do
+	// them first; the nil check must come before secret.Renewable is read.
+	if secret != nil && leaseDuration == 0 && isTokenFileMethod && !secret.Renewable {
+		policies, err := secret.TokenPolicies()
+		if err == nil {
+			if len(policies) == 1 && policies[0] == "root" {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// autoAuthBackoff tracks exponential backoff state.
+type autoAuthBackoff struct {
+	backoff *backoff.Backoff
+}
+
+func newAutoAuthBackoff(min, max time.Duration, exitErr bool) *autoAuthBackoff {
+	if max <= 0 {
+		max = defaultMaxBackoff
+	}
+
+	if min <= 0 {
+		min = defaultMinBackoff
+	}
+
+	retries := math.MaxInt
+	if exitErr {
+		retries = 0
+	}
+
+	b := backoff.NewBackoff(retries, min, max)
+
+	return &autoAuthBackoff{
+		backoff: b,
+	}
+}
+
+func (b autoAuthBackoff) String() string {
+	return b.backoff.Current().Truncate(10 * time.Millisecond).String()
+}
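The autoAuthBackoff type above wraps sdk/helper/backoff: Next() yields the next sleep (a jittered, roughly doubling series capped at max) and returns an error once the retry budget is exhausted, which with exit_on_error configured means immediately. A minimal sketch of how the handler consumes it, using only names defined above (the driver function itself is illustrative):

```go
// retryLoop shows the intended pairing of backoffSleep and Reset: each
// failure sleeps for the next backoff step, and a success restarts the
// series from the minimum. Not part of the diff.
func retryLoop(ctx context.Context, exitOnError bool, attempt func() error) error {
	bo := newAutoAuthBackoff(defaultMinBackoff, defaultMaxBackoff, exitOnError)
	for {
		err := attempt()
		if err == nil {
			bo.backoff.Reset() // the next failure series starts at minBackoff
			return nil
		}
		// backoffSleep returns false once Next reports the budget is spent
		// (immediately when exitOnError set retries to zero).
		if !backoffSleep(ctx, bo) {
			return err
		}
	}
}
```

diff --git a/command/agentproxyshared/auth/auth_test.go b/command/agentproxyshared/auth/auth_test.go
new file mode 100644
index 000000000000..4ecfff03f535
--- /dev/null
+++ b/command/agentproxyshared/auth/auth_test.go
@@ -0,0 +1,199 @@
+// Copyright (c) HashiCorp, Inc.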
+// SPDX-License-Identifier: BUSL-1.1 + +package auth + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +type userpassTestMethod struct{} + +func newUserpassTestMethod(t *testing.T, client *api.Client) AuthMethod { + err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + Config: api.AuthConfigInput{ + DefaultLeaseTTL: "1s", + MaxLeaseTTL: "3s", + }, + }) + if err != nil { + t.Fatal(err) + } + + return &userpassTestMethod{} +} + +func (u *userpassTestMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + _, err := client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{ + "password": "bar", + }) + if err != nil { + return "", nil, nil, err + } + return "auth/userpass/login/foo", nil, map[string]interface{}{ + "password": "bar", + }, nil +} + +func (u *userpassTestMethod) NewCreds() chan struct{} { + return nil +} + +func (u *userpassTestMethod) CredSuccess() { +} + +func (u *userpassTestMethod) Shutdown() { +} + +func TestAuthHandler(t *testing.T) { + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + ctx, cancelFunc := context.WithCancel(context.Background()) + + ah := NewAuthHandler(&AuthHandlerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace).Named("auth.handler"), + Client: client, + }) + + am := newUserpassTestMethod(t, client) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + + // Consume tokens so we don't block + stopTime := time.Now().Add(5 * time.Second) + closed := false +consumption: + for { + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + break consumption + case <-ah.OutputCh: + case <-ah.TemplateTokenCh: + // Nothing + case <-time.After(stopTime.Sub(time.Now())): + if !closed { + cancelFunc() + closed = true + } + } + } +} + +func TestAgentBackoff(t *testing.T) { + max := 1024 * time.Second + backoff := newAutoAuthBackoff(defaultMinBackoff, max, false) + + // Test initial value + if backoff.backoff.Current() > defaultMinBackoff || backoff.backoff.Current() < defaultMinBackoff*3/4 { + t.Fatalf("expected 1s initial backoff, got: %v", backoff.backoff.Current()) + } + + // Test that backoffSleep values are in expected range (75-100% of 2*previous) + next, _ := backoff.backoff.Next() + for i := 0; i < 9; i++ { + old := next + next, _ = backoff.backoff.Next() + + expMax := 2 * old + expMin := 3 * expMax / 4 + + if next < expMin || next > expMax { + t.Fatalf("expected backoffSleep in range %v to %v, got: %v", expMin, expMax, backoff) + } + } + + // Test that backoffSleep is capped + for i := 0; i < 100; i++ { + _, _ = backoff.backoff.Next() + if backoff.backoff.Current() > max { + t.Fatalf("backoff exceeded max of 100s: %v", backoff) + } + } + + // Test reset + backoff.backoff.Reset() + if backoff.backoff.Current() > defaultMinBackoff || backoff.backoff.Current() < 
defaultMinBackoff*3/4 { + t.Fatalf("expected 1s backoff after reset, got: %v", backoff.backoff.Current()) + } +} + +func TestAgentMinBackoffCustom(t *testing.T) { + type test struct { + minBackoff time.Duration + want time.Duration + } + + tests := []test{ + {minBackoff: 0 * time.Second, want: 1 * time.Second}, + {minBackoff: 1 * time.Second, want: 1 * time.Second}, + {minBackoff: 5 * time.Second, want: 5 * time.Second}, + {minBackoff: 10 * time.Second, want: 10 * time.Second}, + } + + for _, test := range tests { + max := 1024 * time.Second + backoff := newAutoAuthBackoff(test.minBackoff, max, false) + + // Test initial value + if backoff.backoff.Current() > test.want || backoff.backoff.Current() < test.want*3/4 { + t.Fatalf("expected %d initial backoffSleep, got: %v", test.want, backoff.backoff.Current()) + } + + // Test that backoffSleep values are in expected range (75-100% of 2*previous) + next, _ := backoff.backoff.Next() + for i := 0; i < 5; i++ { + old := next + next, _ = backoff.backoff.Next() + + expMax := 2 * old + expMin := 3 * expMax / 4 + + if next < expMin || next > expMax { + t.Fatalf("expected backoffSleep in range %v to %v, got: %v", expMin, expMax, backoff) + } + } + + // Test that backoffSleep is capped + for i := 0; i < 100; i++ { + next, _ = backoff.backoff.Next() + if next > max { + t.Fatalf("backoffSleep exceeded max of 100s: %v", backoff) + } + } + + // Test reset + backoff.backoff.Reset() + if backoff.backoff.Current() > test.want || backoff.backoff.Current() < test.want*3/4 { + t.Fatalf("expected %d backoffSleep after reset, got: %v", test.want, backoff.backoff.Current()) + } + } +} diff --git a/command/agent/auth/aws/aws.go b/command/agentproxyshared/auth/aws/aws.go similarity index 97% rename from command/agent/auth/aws/aws.go rename to command/agentproxyshared/auth/aws/aws.go index b9dbdd5499c8..13ab7e483389 100644 --- a/command/agent/auth/aws/aws.go +++ b/command/agentproxyshared/auth/aws/aws.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aws import ( @@ -17,7 +20,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/awsutil" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) const ( @@ -155,10 +158,10 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { // Check for an optional custom frequency at which we should poll for creds. credentialPollIntervalSec := defaultCredentialPollInterval if credentialPollIntervalRaw, ok := conf.Config["credential_poll_interval"]; ok { - if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok { + if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok && credentialPollInterval > 0 { credentialPollIntervalSec = credentialPollInterval } else { - return nil, errors.New("could not convert 'credential_poll_interval' into int") + return nil, errors.New("could not convert 'credential_poll_interval' into positive int") } } diff --git a/command/agentproxyshared/auth/azure/azure.go b/command/agentproxyshared/auth/azure/azure.go new file mode 100644 index 000000000000..8db5ff5093de --- /dev/null +++ b/command/agentproxyshared/auth/azure/azure.go @@ -0,0 +1,280 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package azure + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + + policy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + az "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + cleanhttp "github.com/hashicorp/go-cleanhttp" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + instanceEndpoint = "http://169.254.169.254/metadata/instance" + identityEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // minimum version 2018-02-01 needed for identity metadata + // regional availability: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + apiVersion = "2018-02-01" +) + +type azureMethod struct { + logger hclog.Logger + mountPath string + + authenticateFromEnvironment bool + role string + scope string + resource string + objectID string + clientID string +} + +func NewAzureAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &azureMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + resourceRaw, ok := conf.Config["resource"] + if !ok { + return nil, errors.New("missing 'resource' value") + } + a.resource, ok = resourceRaw.(string) + if !ok { + return nil, errors.New("could not convert 'resource' config value to string") + } + + objectIDRaw, ok := conf.Config["object_id"] + if ok { + a.objectID, ok = objectIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'object_id' config value to string") + } + } + + clientIDRaw, ok := conf.Config["client_id"] + if ok { + a.clientID, ok = clientIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_id' config value to string") + } + } + + scopeRaw, ok := conf.Config["scope"] + if ok { + a.scope, ok = scopeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'scope' config value to string") + } + } + if a.scope == "" { + a.scope = fmt.Sprintf("%s/.default", a.resource) + } + + authenticateFromEnvironmentRaw, ok := conf.Config["authenticate_from_environment"] + if ok { + authenticateFromEnvironment, err := parseutil.ParseBool(authenticateFromEnvironmentRaw) + if err != nil { + return nil, fmt.Errorf("could not convert 'authenticate_from_environment' config value to bool: %w", err) + } + a.authenticateFromEnvironment = authenticateFromEnvironment + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case a.resource == "": + return nil, errors.New("'resource' value is empty") + case a.objectID != "" && a.clientID != "": + return nil, errors.New("only one of 'object_id' or 'client_id' may be provided") + } + + return a, nil +} + +func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) { + a.logger.Trace("beginning authentication") + + // Fetch instance data + var instance struct { + Compute struct { + Name string + 
ResourceGroupName string + SubscriptionID string + VMScaleSetName string + ResourceID string + } + } + + body, err := getInstanceMetadataInfo(ctx) + if err != nil { + retErr = err + return + } + + err = jsonutil.DecodeJSON(body, &instance) + if err != nil { + retErr = fmt.Errorf("error parsing instance metadata response: %w", err) + return + } + + token := "" + if a.authenticateFromEnvironment { + token, err = getAzureTokenFromEnvironment(ctx, a.scope) + if err != nil { + retErr = err + return + } + } else { + token, err = getTokenFromIdentityEndpoint(ctx, a.resource, a.objectID, a.clientID) + if err != nil { + retErr = err + return + } + } + + // Attempt login + data := map[string]interface{}{ + "role": a.role, + "vm_name": instance.Compute.Name, + "vmss_name": instance.Compute.VMScaleSetName, + "resource_group_name": instance.Compute.ResourceGroupName, + "subscription_id": instance.Compute.SubscriptionID, + "jwt": token, + } + + return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil +} + +func (a *azureMethod) NewCreds() chan struct{} { + return nil +} + +func (a *azureMethod) CredSuccess() { +} + +func (a *azureMethod) Shutdown() { +} + +// getAzureTokenFromEnvironment is Azure's preferred way to authenticate: it builds +// a credential from values taken from environment variables. +// It uses a DefaultAzureCredential: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-defaultazurecredential +// Environment variables are taken into account in the following order: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-environment-variables +func getAzureTokenFromEnvironment(ctx context.Context, scope string) (string, error) { + cred, err := az.NewDefaultAzureCredential(nil) + if err != nil { + return "", err + } + + tokenOpts := policy.TokenRequestOptions{Scopes: []string{scope}} + tk, err := cred.GetToken(ctx, tokenOpts) + if err != nil { + return "", err + } + return tk.Token, nil +} + +// getInstanceMetadataInfo calls the Azure Instance Metadata endpoint to get +// information about the Azure environment it's running in. +func getInstanceMetadataInfo(ctx context.Context) ([]byte, error) { + return getMetadataInfo(ctx, instanceEndpoint, "", "", "") +} + +// getTokenFromIdentityEndpoint is kept for backwards compatibility purposes. Using the +// newer APIs and the Azure SDK should be preferred over this mechanism. +func getTokenFromIdentityEndpoint(ctx context.Context, resource, objectID, clientID string) (string, error) { + var identity struct { + AccessToken string `json:"access_token"` + } + + body, err := getMetadataInfo(ctx, identityEndpoint, resource, objectID, clientID) + if err != nil { + return "", err + } + + err = jsonutil.DecodeJSON(body, &identity) + if err != nil { + return "", fmt.Errorf("error parsing identity metadata response: %w", err) + } + + return identity.AccessToken, nil +} + +// getMetadataInfo calls the Azure metadata endpoint with the given parameters. +// An empty resource, objectID and clientID will return metadata information.
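+// For illustration, a call with empty parameters issues roughly the following +// request, per the code below: +// +// GET http://169.254.169.254/metadata/instance?api-version=2018-02-01 +// Metadata: true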
+func getMetadataInfo(ctx context.Context, endpoint, resource, objectID, clientID string) ([]byte, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("api-version", apiVersion) + if resource != "" { + q.Add("resource", resource) + } + if objectID != "" { + q.Add("object_id", objectID) + } + if clientID != "" { + q.Add("client_id", clientID) + } + req.URL.RawQuery = q.Encode() + req.Header.Set("Metadata", "true") + req.Header.Set("User-Agent", useragent.String()) + req = req.WithContext(ctx) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err) + } + + if resp == nil { + return nil, fmt.Errorf("empty response fetching metadata from %s", endpoint) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response in metadata from %s: %s", endpoint, body) + } + + return body, nil +} diff --git a/command/agentproxyshared/auth/azure/azure_test.go b/command/agentproxyshared/auth/azure/azure_test.go new file mode 100644 index 000000000000..0c9b9985d02f --- /dev/null +++ b/command/agentproxyshared/auth/azure/azure_test.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package azure + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +// TestAzureAuthMethod tests that NewAzureAuthMethod succeeds +// with valid config. +func TestAzureAuthMethod(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": true, + }, + } + + _, err := NewAzureAuthMethod(config) + if err != nil { + t.Fatal(err) + } +} + +// TestAzureAuthMethod_StringAuthFromEnvironment tests that NewAzureAuthMethod succeeds +// with valid config, where authenticate_from_environment is a string literal. +func TestAzureAuthMethod_StringAuthFromEnvironment(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": "true", + }, + } + + _, err := NewAzureAuthMethod(config) + if err != nil { + t.Fatal(err) + } +} + +// TestAzureAuthMethod_BadConfig tests that NewAzureAuthMethod fails with +// an invalid config. +func TestAzureAuthMethod_BadConfig(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "bad_value": "abc", + }, + } + + _, err := NewAzureAuthMethod(config) + if err == nil { + t.Fatal("Expected error, got none.") + } +} + +// TestAzureAuthMethod_BadAuthFromEnvironment tests that NewAzureAuthMethod fails +// with otherwise valid config, but where authenticate_from_environment is +// an invalid string literal. 
+func TestAzureAuthMethod_BadAuthFromEnvironment(t *testing.T) { + t.Parallel() + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "auth-test", + Config: map[string]interface{}{ + "resource": "test", + "client_id": "test", + "role": "test", + "scope": "test", + "authenticate_from_environment": "bad_value", + }, + } + + _, err := NewAzureAuthMethod(config) + if err == nil { + t.Fatal("Expected error, got none.") + } +} diff --git a/command/agentproxyshared/auth/cert/cert.go b/command/agentproxyshared/auth/cert/cert.go new file mode 100644 index 000000000000..fabe9a6365fb --- /dev/null +++ b/command/agentproxyshared/auth/cert/cert.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cert + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type certMethod struct { + logger hclog.Logger + mountPath string + name string + + caCert string + clientCert string + clientKey string + reload bool + + // Client is the cached client to use if cert info was provided. + client *api.Client +} + +var _ auth.AuthMethodWithClient = &certMethod{} + +func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + + // We are not concerned if conf.Config is empty, as the 'name' + // parameter is optional when using TLS auth. + + c := &certMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + if conf.Config != nil { + nameRaw, ok := conf.Config["name"] + if !ok { + nameRaw = "" + } + c.name, ok = nameRaw.(string) + if !ok { + return nil, errors.New("could not convert 'name' config value to string") + } + + caCertRaw, ok := conf.Config["ca_cert"] + if ok { + c.caCert, ok = caCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'ca_cert' config value to string") + } + } + + clientCertRaw, ok := conf.Config["client_cert"] + if ok { + c.clientCert, ok = clientCertRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_cert' config value to string") + } + } + + clientKeyRaw, ok := conf.Config["client_key"] + if ok { + c.clientKey, ok = clientKeyRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_key' config value to string") + } + } + + reload, ok := conf.Config["reload"] + if ok { + c.reload, ok = reload.(bool) + if !ok { + return nil, errors.New("could not convert 'reload' config value to bool") + } + } + } + + return c, nil +} + +func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + c.logger.Trace("beginning authentication") + + authMap := map[string]interface{}{} + + if c.name != "" { + authMap["name"] = c.name + } + + return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil +} + +func (c *certMethod) NewCreds() chan struct{} { + return nil +} + +func (c *certMethod) CredSuccess() {} + +func (c *certMethod) Shutdown() {} + +// AuthClient uses the existing client's address and returns a new client with +// the auto-auth method's certificate information if that's provided in its +// config map.
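+// For illustration, an auto_auth method block that exercises this path could +// look like the following (paths and role name are hypothetical): +// +// method "cert" { +// mount_path = "auth/cert" +// config = { +// name = "web-servers" +// ca_cert = "/etc/vault/ca.pem" +// client_cert = "/etc/vault/agent.pem" +// client_key = "/etc/vault/agent-key.pem" +// reload = true +// } +// }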
+func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) { + c.logger.Trace("deriving auth client to use") + + clientToAuth := client + + if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") { + // Return cached client if present + if c.client != nil && !c.reload { + return c.client, nil + } + + config := api.DefaultConfig() + if config.Error != nil { + return nil, config.Error + } + config.Address = client.Address() + + t := &api.TLSConfig{ + CACert: c.caCert, + ClientCert: c.clientCert, + ClientKey: c.clientKey, + } + + // Setup TLS config + if err := config.ConfigureTLS(t); err != nil { + return nil, err + } + + var err error + clientToAuth, err = api.NewClient(config) + if err != nil { + return nil, err + } + if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" { + clientToAuth.SetNamespace(ns) + } + + // Cache the client for future use + c.client = clientToAuth + } + + return clientToAuth, nil +} diff --git a/command/agentproxyshared/auth/cert/cert_test.go b/command/agentproxyshared/auth/cert/cert_test.go new file mode 100644 index 000000000000..6a7e4f779e9c --- /dev/null +++ b/command/agentproxyshared/auth/cert/cert_test.go @@ -0,0 +1,191 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cert + +import ( + "context" + "os" + "path" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +func TestCertAuthMethod_Authenticate(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "foo", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + loginPath, _, authMap, err := method.Authenticate(context.Background(), client) + if err != nil { + t.Fatal(err) + } + + expectedLoginPath := path.Join(config.MountPath, "/login") + if loginPath != expectedLoginPath { + t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath) + } + + expectedAuthMap := map[string]interface{}{ + "name": config.Config["name"], + } + if !reflect.DeepEqual(authMap, expectedAuthMap) { + t.Fatalf("mismatch on login path:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap) + } +} + +func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) { + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "without-certs", + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client != clientToUse { + t.Fatal("error: expected AuthClient to return back original client") + } +} + +func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) { + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs", + "client_cert": clientCert.Name(), + "client_key": 
clientKey.Name(), + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back the cached client + cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if cachedClient != clientToUse { + t.Fatal("expected client from AuthClient to return back a cached client") + } +} + +func TestCertAuthMethod_AuthClient_withCertsReload(t *testing.T) { + clientCert, err := os.Open("./test-fixtures/keys/cert.pem") + if err != nil { + t.Fatal(err) + } + + defer clientCert.Close() + + clientKey, err := os.Open("./test-fixtures/keys/key.pem") + if err != nil { + t.Fatal(err) + } + + defer clientKey.Close() + + config := &auth.AuthConfig{ + Logger: hclog.NewNullLogger(), + MountPath: "cert-test", + Config: map[string]interface{}{ + "name": "with-certs-reloaded", + "client_cert": clientCert.Name(), + "client_key": clientKey.Name(), + "reload": true, + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back a new client with reloaded certificates + reloadedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if reloadedClient == clientToUse { + t.Fatal("expected client from AuthClient to return back a new client") + } +} diff --git a/command/agent/auth/cert/test-fixtures/keys/cert.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/cert.pem rename to command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem diff --git a/command/agent/auth/cert/test-fixtures/keys/key.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/key.pem rename to command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem diff --git a/command/agent/auth/cert/test-fixtures/keys/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput similarity index 100% rename from command/agent/auth/cert/test-fixtures/keys/pkioutput rename to command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput diff --git a/command/agent/auth/cert/test-fixtures/root/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/pkioutput rename to command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput diff --git a/command/agent/auth/cert/test-fixtures/root/root.crl b/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/root.crl rename to command/agentproxyshared/auth/cert/test-fixtures/root/root.crl diff --git a/command/agent/auth/cert/test-fixtures/root/rootcacert.pem 
b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/rootcacert.pem rename to command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem diff --git a/command/agent/auth/cert/test-fixtures/root/rootcakey.pem b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem similarity index 100% rename from command/agent/auth/cert/test-fixtures/root/rootcakey.pem rename to command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem diff --git a/command/agent/auth/cf/cf.go b/command/agentproxyshared/auth/cf/cf.go similarity index 94% rename from command/agent/auth/cf/cf.go rename to command/agentproxyshared/auth/cf/cf.go index 9508b7164f2b..27396be20af7 100644 --- a/command/agent/auth/cf/cf.go +++ b/command/agentproxyshared/auth/cf/cf.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cf import ( @@ -12,7 +15,7 @@ import ( cf "github.com/hashicorp/vault-plugin-auth-cf" "github.com/hashicorp/vault-plugin-auth-cf/signatures" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) type cfMethod struct { diff --git a/command/agent/auth/gcp/gcp.go b/command/agentproxyshared/auth/gcp/gcp.go similarity index 97% rename from command/agent/auth/gcp/gcp.go rename to command/agentproxyshared/auth/gcp/gcp.go index 45d9b74f9497..5d913581bca5 100644 --- a/command/agent/auth/gcp/gcp.go +++ b/command/agentproxyshared/auth/gcp/gcp.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package gcp import ( @@ -14,7 +17,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "golang.org/x/oauth2" "google.golang.org/api/iamcredentials/v1" ) diff --git a/command/agentproxyshared/auth/jwt/jwt.go b/command/agentproxyshared/auth/jwt/jwt.go new file mode 100644 index 000000000000..fce03ad7d76d --- /dev/null +++ b/command/agentproxyshared/auth/jwt/jwt.go @@ -0,0 +1,260 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package jwt + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +type jwtMethod struct { + logger hclog.Logger + path string + mountPath string + role string + removeJWTAfterReading bool + removeJWTFollowsSymlinks bool + credsFound chan struct{} + watchCh chan string + stopCh chan struct{} + doneCh chan struct{} + credSuccessGate chan struct{} + ticker *time.Ticker + once *sync.Once + latestToken *atomic.Value +} + +// NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod +// interface for JWT auth. 
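+// The config keys parsed below are "path", "role", "remove_jwt_after_reading", +// "remove_jwt_follows_symlinks" and "jwt_read_period"; for illustration +// (hypothetical values): +// +// config = { +// path = "/var/run/secrets/vault/token.jwt" +// role = "dev-role" +// jwt_read_period = "30s" +// }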
+func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + j := &jwtMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + removeJWTAfterReading: true, + credsFound: make(chan struct{}), + watchCh: make(chan string), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + credSuccessGate: make(chan struct{}), + once: new(sync.Once), + latestToken: new(atomic.Value), + } + j.latestToken.Store("") + + pathRaw, ok := conf.Config["path"] + if !ok { + return nil, errors.New("missing 'path' value") + } + j.path, ok = pathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'path' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + j.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { + removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) + } + j.removeJWTAfterReading = removeJWTAfterReading + } + + if removeJWTFollowsSymlinksRaw, ok := conf.Config["remove_jwt_follows_symlinks"]; ok { + removeJWTFollowsSymlinks, err := parseutil.ParseBool(removeJWTFollowsSymlinksRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_jwt_follows_symlinks' value: %w", err) + } + j.removeJWTFollowsSymlinks = removeJWTFollowsSymlinks + } + + switch { + case j.path == "": + return nil, errors.New("'path' value is empty") + case j.role == "": + return nil, errors.New("'role' value is empty") + } + + // Default readPeriod + readPeriod := 1 * time.Minute + + if jwtReadPeriodRaw, ok := conf.Config["jwt_read_period"]; ok { + jwtReadPeriod, err := parseutil.ParseDurationSecond(jwtReadPeriodRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'jwt_read_period' value: %w", err) + } + readPeriod = jwtReadPeriod + } else { + // If we don't delete the JWT after reading, use a slower reload period, + // otherwise we would re-read the whole file every 500ms, instead of just + // doing a stat on the file every 500ms. 
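+ // Net effect when jwt_read_period is unset: the file is polled every 500ms + // with the default remove_jwt_after_reading = true, and every minute + // otherwise.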
+ if j.removeJWTAfterReading { + readPeriod = 500 * time.Millisecond + } + } + + j.ticker = time.NewTicker(readPeriod) + + go j.runWatcher() + + j.logger.Info("jwt auth method created", "path", j.path) + + return j, nil +} + +func (j *jwtMethod) Authenticate(_ context.Context, _ *api.Client) (string, http.Header, map[string]interface{}, error) { + j.logger.Trace("beginning authentication") + + j.ingressToken() + + latestToken := j.latestToken.Load().(string) + if latestToken == "" { + return "", nil, nil, errors.New("latest known jwt is empty, cannot authenticate") + } + + return fmt.Sprintf("%s/login", j.mountPath), nil, map[string]interface{}{ + "role": j.role, + "jwt": latestToken, + }, nil +} + +func (j *jwtMethod) NewCreds() chan struct{} { + return j.credsFound +} + +func (j *jwtMethod) CredSuccess() { + j.once.Do(func() { + close(j.credSuccessGate) + }) +} + +func (j *jwtMethod) Shutdown() { + j.ticker.Stop() + close(j.stopCh) + <-j.doneCh +} + +func (j *jwtMethod) runWatcher() { + defer close(j.doneCh) + + select { + case <-j.stopCh: + return + + case <-j.credSuccessGate: + // We only start the next loop once we're initially successful, + // since at startup Authenticate will be called, and we don't want + // to end up immediately re-authenticating by having found a new + // value + } + + for { + select { + case <-j.stopCh: + return + + case <-j.ticker.C: + latestToken := j.latestToken.Load().(string) + j.ingressToken() + newToken := j.latestToken.Load().(string) + if newToken != latestToken { + j.logger.Debug("new jwt file found") + j.credsFound <- struct{}{} + } + } + } +} + +func (j *jwtMethod) ingressToken() { + fi, err := os.Lstat(j.path) + if err != nil { + if os.IsNotExist(err) { + return + } + j.logger.Error("error encountered stat'ing jwt file", "error", err) + return + } + + // Check that the path refers to a file. + // If it's a symlink, it could still be a symlink to a directory, + // but os.ReadFile below will return a descriptive error. + evalSymlinkPath := j.path + switch mode := fi.Mode(); { + case mode.IsRegular(): + // regular file + case mode&fs.ModeSymlink != 0: + // If our file path is a symlink, we should also return early (like above) without error + // if the file that is linked to is not present, otherwise we will error when trying + // to read that file by following the link in the os.ReadFile call. 
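+ // filepath.EvalSymlinks resolves the link target so that the stat check and + // the optional removal below operate on the real file.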
+ evalSymlinkPath, err = filepath.EvalSymlinks(j.path) + if err != nil { + j.logger.Error("error encountered evaluating symlinks", "error", err) + return + } + _, err := os.Stat(evalSymlinkPath) + if err != nil { + if os.IsNotExist(err) { + return + } + j.logger.Error("error encountered stat'ing jwt file after evaluating symlinks", "error", err) + return + } + default: + j.logger.Error("jwt file is not a regular file or symlink") + return + } + + token, err := os.ReadFile(j.path) + if err != nil { + j.logger.Error("failed to read jwt file", "error", err) + return + } + + switch len(token) { + case 0: + j.logger.Warn("empty jwt file read") + + default: + j.latestToken.Store(string(token)) + } + + if j.removeJWTAfterReading { + pathToRemove := j.path + if j.removeJWTFollowsSymlinks { + // If removeJWTFollowsSymlinks is set, we follow the symlink and delete the jwt, + // not just the symlink that links to the jwt + pathToRemove = evalSymlinkPath + } + if err := os.Remove(pathToRemove); err != nil { + j.logger.Error("error removing jwt file", "error", err) + } + } +} diff --git a/command/agentproxyshared/auth/jwt/jwt_test.go b/command/agentproxyshared/auth/jwt/jwt_test.go new file mode 100644 index 000000000000..62fbc24e8110 --- /dev/null +++ b/command/agentproxyshared/auth/jwt/jwt_test.go @@ -0,0 +1,262 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package jwt + +import ( + "bytes" + "os" + "path" + "strings" + "sync/atomic" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +func TestIngressToken(t *testing.T) { + const ( + dir = "dir" + file = "file" + empty = "empty" + missing = "missing" + symlinked = "symlinked" + ) + + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + + setupTestDir := func() string { + testDir, err := os.MkdirTemp(rootDir, "") + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + _, err = os.Create(path.Join(testDir, empty)) + if err != nil { + t.Fatal(err) + } + err = os.Mkdir(path.Join(testDir, dir), 0o755) + if err != nil { + t.Fatal(err) + } + err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked)) + if err != nil { + t.Fatal(err) + } + + return testDir + } + + for _, tc := range []struct { + name string + path string + errString string + }{ + { + "happy path", + file, + "", + }, + { + "path is directory", + dir, + "[ERROR] jwt file is not a regular file or symlink", + }, + { + "path is symlink", + symlinked, + "", + }, + { + "path is missing (implies nothing for ingressToken to do)", + missing, + "", + }, + { + "path is empty file", + empty, + "[WARN] empty jwt file read", + }, + } { + testDir := setupTestDir() + logBuffer := bytes.Buffer{} + jwtAuth := &jwtMethod{ + logger: hclog.New(&hclog.LoggerOptions{ + Output: &logBuffer, + }), + latestToken: new(atomic.Value), + path: path.Join(testDir, tc.path), + } + + jwtAuth.ingressToken() + + if tc.errString != "" { + if !strings.Contains(logBuffer.String(), tc.errString) { + t.Fatal("logs did not contain expected error", tc.errString, logBuffer.String()) + } + } else { + if strings.Contains(logBuffer.String(), "[ERROR]") || strings.Contains(logBuffer.String(), "[WARN]") { + t.Fatal("logs contained unexpected error", logBuffer.String()) + } + } + } +} + +func TestDeleteAfterReading(t *testing.T) { + for _,
tc := range map[string]struct { + configValue string + shouldDelete bool + }{ + "default": { + "", + true, + }, + "explicit true": { + "true", + true, + }, + "false": { + "false", + false, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + tokenPath := path.Join(rootDir, "token") + err = os.WriteFile(tokenPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "path": tokenPath, + "role": "unusedrole", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_jwt_after_reading"] = tc.configValue + } + + jwtAuth, err := NewJWTAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + jwtAuth.(*jwtMethod).ingressToken() + + if _, err := os.Lstat(tokenPath); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestDeleteAfterReadingSymlink(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + removeJWTFollowsSymlinks bool + }{ + "default": { + "", + true, + false, + }, + "explicit true": { + "true", + true, + false, + }, + "false": { + "false", + false, + false, + }, + "default + removeJWTFollowsSymlinks": { + "", + true, + true, + }, + "explicit true + removeJWTFollowsSymlinks": { + "true", + true, + true, + }, + "false + removeJWTFollowsSymlinks": { + "false", + false, + true, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + tokenPath := path.Join(rootDir, "token") + err = os.WriteFile(tokenPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(tokenPath, symlinkName) + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "path": symlinkName, + "role": "unusedrole", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_jwt_after_reading"] = tc.configValue + } + config.Config["remove_jwt_follows_symlinks"] = tc.removeJWTFollowsSymlinks + + jwtAuth, err := NewJWTAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + jwtAuth.(*jwtMethod).ingressToken() + + pathToCheck := symlinkName + if tc.removeJWTFollowsSymlinks { + pathToCheck = tokenPath + } + if _, err := os.Lstat(pathToCheck); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/command/agent/auth/kerberos/integtest/integrationtest.sh b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh similarity index 98% rename from command/agent/auth/kerberos/integtest/integrationtest.sh rename to command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh index 28da55f599cf..6b8a6925de95 100755 --- a/command/agent/auth/kerberos/integtest/integrationtest.sh +++ b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh @@ -1,4 +1,7 @@ #!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # Instructions # This integration test is for the Vault Kerberos agent. 
# Before running, execute: diff --git a/command/agent/auth/kerberos/kerberos.go b/command/agentproxyshared/auth/kerberos/kerberos.go similarity index 95% rename from command/agent/auth/kerberos/kerberos.go rename to command/agentproxyshared/auth/kerberos/kerberos.go index 894c177d5c8a..566fa222a47f 100644 --- a/command/agent/auth/kerberos/kerberos.go +++ b/command/agentproxyshared/auth/kerberos/kerberos.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kerberos import ( @@ -10,7 +13,7 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" kerberos "github.com/hashicorp/vault-plugin-auth-kerberos" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/jcmturner/gokrb5/v8/spnego" ) diff --git a/command/agent/auth/kerberos/kerberos_test.go b/command/agentproxyshared/auth/kerberos/kerberos_test.go similarity index 94% rename from command/agent/auth/kerberos/kerberos_test.go rename to command/agentproxyshared/auth/kerberos/kerberos_test.go index 4cfe3479ed4c..819cb7dff4ec 100644 --- a/command/agent/auth/kerberos/kerberos_test.go +++ b/command/agentproxyshared/auth/kerberos/kerberos_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kerberos import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) func TestNewKerberosAuthMethod(t *testing.T) { diff --git a/command/agent/auth/kubernetes/kubernetes.go b/command/agentproxyshared/auth/kubernetes/kubernetes.go similarity index 95% rename from command/agent/auth/kubernetes/kubernetes.go rename to command/agentproxyshared/auth/kubernetes/kubernetes.go index c30f3cb5a68b..639226505f25 100644 --- a/command/agent/auth/kubernetes/kubernetes.go +++ b/command/agentproxyshared/auth/kubernetes/kubernetes.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( @@ -12,7 +15,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" ) const ( diff --git a/command/agent/auth/kubernetes/kubernetes_test.go b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go similarity index 97% rename from command/agent/auth/kubernetes/kubernetes_test.go rename to command/agentproxyshared/auth/kubernetes/kubernetes_test.go index 34f965c77096..93b348c7f521 100644 --- a/command/agent/auth/kubernetes/kubernetes_test.go +++ b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( @@ -9,7 +12,7 @@ import ( "github.com/hashicorp/errwrap" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agentproxyshared/auth/ldap/ldap.go b/command/agentproxyshared/auth/ldap/ldap.go new file mode 100644 index 000000000000..d654f21898e4 --- /dev/null +++ b/command/agentproxyshared/auth/ldap/ldap.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package ldap + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +type ldapMethod struct { + logger hclog.Logger + mountPath string + + username string + passwordFilePath string + removePasswordAfterReading bool + removePasswordFollowsSymlinks bool + credsFound chan struct{} + watchCh chan string + stopCh chan struct{} + doneCh chan struct{} + credSuccessGate chan struct{} + ticker *time.Ticker + once *sync.Once + latestPass *atomic.Value +} + +// NewLdapAuthMethod reads the user configuration and returns a configured +// AuthMethod for LDAP authentication. +func NewLdapAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + k := &ldapMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + removePasswordAfterReading: true, + credsFound: make(chan struct{}), + watchCh: make(chan string), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + credSuccessGate: make(chan struct{}), + once: new(sync.Once), + latestPass: new(atomic.Value), + } + + k.latestPass.Store("") + usernameRaw, ok := conf.Config["username"] + if !ok { + return nil, errors.New("missing 'username' value") + } + k.username, ok = usernameRaw.(string) + if !ok { + return nil, errors.New("could not convert 'username' config value to string") + } + + passFilePathRaw, ok := conf.Config["password_file_path"] + if !ok { + return nil, errors.New("missing 'password_file_path' value") + } + k.passwordFilePath, ok = passFilePathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'password_file_path' config value to string") + } + if removePassAfterReadingRaw, ok := conf.Config["remove_password_after_reading"]; ok { + removePassAfterReading, err := parseutil.ParseBool(removePassAfterReadingRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_password_after_reading' value: %w", err) + } + k.removePasswordAfterReading = removePassAfterReading + } + + if removePassFollowsSymlinksRaw, ok := conf.Config["remove_password_follows_symlinks"]; ok { + removePassFollowsSymlinks, err := parseutil.ParseBool(removePassFollowsSymlinksRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_password_follows_symlinks' value: %w", err) + } + k.removePasswordFollowsSymlinks = removePassFollowsSymlinks + } + switch { + case k.passwordFilePath == "": + return nil, errors.New("'password_file_path' value is empty") + case k.username == "": + return nil, errors.New("'username' value is empty") + } + + // Default readPeriod + readPeriod := 1 * time.Minute + + if passReadPeriodRaw, ok := conf.Config["password_read_period"]; ok { + passReadPeriod, err := parseutil.ParseDurationSecond(passReadPeriodRaw) + if err != nil || passReadPeriod <= 0 { + return nil, fmt.Errorf("error parsing 'password_read_period' value into a positive value: %w", err) + } + readPeriod = passReadPeriod + } else { + // If we don't delete the password after reading, use a slower reload period, + // otherwise we would re-read the whole file every 500ms, instead of just + // doing a stat on the file every 500ms.
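+ // Net effect when password_read_period is unset: the file is polled every + // 500ms with the default remove_password_after_reading = true, and every + // minute otherwise.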
+ if k.removePasswordAfterReading { + readPeriod = 500 * time.Millisecond + } + } + + k.ticker = time.NewTicker(readPeriod) + + go k.runWatcher() + + k.logger.Info("ldap auth method created", "password_file_path", k.passwordFilePath) + + return k, nil +} + +func (k *ldapMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + k.logger.Trace("beginning authentication") + + k.ingressPass() + + latestPass := k.latestPass.Load().(string) + + if latestPass == "" { + return "", nil, nil, errors.New("latest known password is empty, cannot authenticate") + } + return fmt.Sprintf("%s/login/%s", k.mountPath, k.username), nil, map[string]interface{}{ + "password": latestPass, + }, nil +} + +func (k *ldapMethod) NewCreds() chan struct{} { + return k.credsFound +} + +func (k *ldapMethod) CredSuccess() { + k.once.Do(func() { + close(k.credSuccessGate) + }) +} + +func (k *ldapMethod) Shutdown() { + k.ticker.Stop() + close(k.stopCh) + <-k.doneCh +} + +func (k *ldapMethod) runWatcher() { + defer close(k.doneCh) + + select { + case <-k.stopCh: + return + + case <-k.credSuccessGate: + // We only start the next loop once we're initially successful, + // since at startup Authenticate will be called, and we don't want + // to end up immediately re-authenticating by having found a new + // value + } + + for { + select { + case <-k.stopCh: + return + + case <-k.ticker.C: + latestPass := k.latestPass.Load().(string) + k.ingressPass() + newPass := k.latestPass.Load().(string) + if newPass != latestPass { + k.logger.Debug("new password file found") + k.credsFound <- struct{}{} + } + } + } +} + +func (k *ldapMethod) ingressPass() { + fi, err := os.Lstat(k.passwordFilePath) + if err != nil { + if os.IsNotExist(err) { + return + } + k.logger.Error("error encountered stat'ing password file", "error", err) + return + } + + // Check that the path refers to a file. + // If it's a symlink, it could still be a symlink to a directory, + // but os.ReadFile below will return a descriptive error. + evalSymlinkPath := k.passwordFilePath + switch mode := fi.Mode(); { + case mode.IsRegular(): + // regular file + case mode&fs.ModeSymlink != 0: + // If our file path is a symlink, we should also return early (like above) without error + // if the file that is linked to is not present, otherwise we will error when trying + // to read that file by following the link in the os.ReadFile call.
+ evalSymlinkPath, err = filepath.EvalSymlinks(k.passwordFilePath) + if err != nil { + k.logger.Error("error encountered evaluating symlinks", "error", err) + return + } + _, err := os.Stat(evalSymlinkPath) + if err != nil { + if os.IsNotExist(err) { + return + } + k.logger.Error("error encountered stat'ing password file after evaluating symlinks", "error", err) + return + } + default: + k.logger.Error("password file is not a regular file or symlink") + return + } + + pass, err := os.ReadFile(k.passwordFilePath) + if err != nil { + k.logger.Error("failed to read password file", "error", err) + return + } + + switch len(pass) { + case 0: + k.logger.Warn("empty password file read") + + default: + k.latestPass.Store(string(pass)) + } + + if k.removePasswordAfterReading { + pathToRemove := k.passwordFilePath + if k.removePasswordFollowsSymlinks { + // If removePassFollowsSymlinks is set, we follow the symlink and delete the password, + // not just the symlink that links to the password file + pathToRemove = evalSymlinkPath + } + if err := os.Remove(pathToRemove); err != nil { + k.logger.Error("error removing password file", "error", err) + } + } +} diff --git a/command/agentproxyshared/auth/ldap/ldap_test.go b/command/agentproxyshared/auth/ldap/ldap_test.go new file mode 100644 index 000000000000..8f612db8c572 --- /dev/null +++ b/command/agentproxyshared/auth/ldap/ldap_test.go @@ -0,0 +1,262 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package ldap + +import ( + "bytes" + "os" + "path" + "strings" + "sync/atomic" + "testing" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +func TestIngressPass(t *testing.T) { + const ( + dir = "dir" + file = "file" + empty = "empty" + missing = "missing" + symlinked = "symlinked" + ) + + rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + + setupTestDir := func() string { + testDir, err := os.MkdirTemp(rootDir, "") + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + _, err = os.Create(path.Join(testDir, empty)) + if err != nil { + t.Fatal(err) + } + err = os.Mkdir(path.Join(testDir, dir), 0o755) + if err != nil { + t.Fatal(err) + } + err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked)) + if err != nil { + t.Fatal(err) + } + + return testDir + } + + for _, tc := range []struct { + name string + path string + errString string + }{ + { + "happy path", + file, + "", + }, + { + "path is directory", + dir, + "[ERROR] password file is not a regular file or symlink", + }, + { + "password file path is symlink", + symlinked, + "", + }, + { + "password file path is missing (implies nothing for ingressPass to do)", + missing, + "", + }, + { + "password file path is empty file", + empty, + "[WARN] empty password file read", + }, + } { + testDir := setupTestDir() + logBuffer := bytes.Buffer{} + ldapAuth := &ldapMethod{ + logger: hclog.New(&hclog.LoggerOptions{ + Output: &logBuffer, + }), + latestPass: new(atomic.Value), + passwordFilePath: path.Join(testDir, tc.path), + } + + ldapAuth.ingressPass() + + if tc.errString != "" { + if !strings.Contains(logBuffer.String(), tc.errString) { + t.Fatal("logs did not contain expected error", tc.errString, logBuffer.String()) + } + } else { + if strings.Contains(logBuffer.String(), "[ERROR]") ||
strings.Contains(logBuffer.String(), "[WARN]") { + t.Fatal("logs contained unexpected error", logBuffer.String()) + } + } + } +} + +func TestDeleteAfterReading(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + }{ + "default": { + "", + true, + }, + "explicit true": { + "true", + true, + }, + "false": { + "false", + false, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + passPath := path.Join(rootDir, "pass") + err = os.WriteFile(passPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "password_file_path": passPath, + "username": "testuser", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_password_after_reading"] = tc.configValue + } + + ldapAuth, err := NewLdapAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + ldapAuth.(*ldapMethod).ingressPass() + + if _, err := os.Lstat(passPath); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestDeleteAfterReadingSymlink(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + removePassFollowsSymlinks bool + }{ + "default": { + "", + true, + false, + }, + "explicit true": { + "true", + true, + false, + }, + "false": { + "false", + false, + false, + }, + "default + removePassFollowsSymlinks": { + "", + true, + true, + }, + "explicit true + removePassFollowsSymlinks": { + "true", + true, + true, + }, + "false + removePassFollowsSymlinks": { + "false", + false, + true, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-ldap-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + passPath := path.Join(rootDir, "pass") + err = os.WriteFile(passPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + symlink, err := os.CreateTemp("", "auth.ldap.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(passPath, symlinkName) + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "password_file_path": symlinkName, + "username": "testuser", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_password_after_reading"] = tc.configValue + } + config.Config["remove_password_follows_symlinks"] = tc.removePassFollowsSymlinks + + ldapAuth, err := NewLdapAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + ldapAuth.(*ldapMethod).ingressPass() + + pathToCheck := symlinkName + if tc.removePassFollowsSymlinks { + pathToCheck = passPath + } + if _, err := os.Lstat(pathToCheck); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/command/agentproxyshared/auth/oci/oci.go b/command/agentproxyshared/auth/oci/oci.go new file mode 100644 index 000000000000..40294065366e --- /dev/null +++ b/command/agentproxyshared/auth/oci/oci.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package oci + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "os/user" + "path" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/oracle/oci-go-sdk/common" + ociAuth "github.com/oracle/oci-go-sdk/common/auth" +) + +const ( + typeAPIKey = "apikey" + typeInstance = "instance" + + /* + + IAM creds can be inferred from instance metadata or the container + identity service, and those creds expire at varying intervals with + new creds becoming available at likewise varying intervals. Let's + default to polling once a minute so all changes can be picked up + rather quickly. This is configurable, however. + + */ + defaultCredCheckFreqSeconds = 60 * time.Second + + defaultConfigFileName = "config" + defaultConfigDirName = ".oci" + configFilePathEnvVarName = "OCI_CONFIG_FILE" + secondaryConfigDirName = ".oraclebmc" +) + +func NewOCIAuthMethod(conf *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &ociMethod{ + logger: conf.Logger, + vaultAddress: vaultAddress, + mountPath: conf.MountPath, + credsFound: make(chan struct{}), + stopCh: make(chan struct{}), + } + + typeRaw, ok := conf.Config["type"] + if !ok { + return nil, errors.New("missing 'type' value") + } + authType, ok := typeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'type' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + // Check for an optional custom frequency at which we should poll for creds. 
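+ // parseutil.ParseDurationSecond accepts either a bare number of seconds or + // a duration string, so credential_poll_interval = 30 and + // credential_poll_interval = "45s" (hypothetical values) should both parse.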
+ credCheckFreqSec := defaultCredCheckFreqSeconds + if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok { + checkFreq, err := parseutil.ParseDurationSecond(checkFreqRaw) + if err != nil { + return nil, fmt.Errorf("could not parse credential_poll_interval: %v", err) + } + credCheckFreqSec = checkFreq + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case authType == "": + return nil, errors.New("'type' value is empty") + case authType != typeAPIKey && authType != typeInstance: + return nil, errors.New("'type' value is invalid") + case authType == typeAPIKey: + defaultConfigFile := getDefaultConfigFilePath() + homeFolder := getHomeFolder() + secondaryConfigFile := path.Join(homeFolder, secondaryConfigDirName, defaultConfigFileName) + + environmentProvider := common.ConfigurationProviderEnvironmentVariables("OCI", "") + defaultFileProvider, _ := common.ConfigurationProviderFromFile(defaultConfigFile, "") + secondaryFileProvider, _ := common.ConfigurationProviderFromFile(secondaryConfigFile, "") + + provider, _ := common.ComposingConfigurationProvider([]common.ConfigurationProvider{environmentProvider, defaultFileProvider, secondaryFileProvider}) + a.configurationProvider = provider + case authType == typeInstance: + configurationProvider, err := ociAuth.InstancePrincipalConfigurationProvider() + if err != nil { + return nil, fmt.Errorf("failed to create instance principal configuration provider: %v", err) + } + a.configurationProvider = configurationProvider + } + + // Do an initial population of the creds because we want to err right away if we can't + // even get a first set. + creds, err := a.configurationProvider.KeyID() + if err != nil { + return nil, err + } + a.lastCreds = creds + + go a.pollForCreds(credCheckFreqSec) + + return a, nil +} + +type ociMethod struct { + logger hclog.Logger + vaultAddress string + mountPath string + + configurationProvider common.ConfigurationProvider + role string + + // These are used to share the latest creds safely across goroutines. + credLock sync.Mutex + lastCreds string + + // Notifies the outer environment that it should call Authenticate again. + credsFound chan struct{} + + // Detects that the outer environment is closing. 
+ stopCh chan struct{} +} + +func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) { + a.credLock.Lock() + defer a.credLock.Unlock() + + a.logger.Trace("beginning authentication") + + requestPath := fmt.Sprintf("/v1/%s/login/%s", a.mountPath, a.role) + requestURL := fmt.Sprintf("%s%s", a.vaultAddress, requestPath) + + request, err := http.NewRequest("GET", requestURL, nil) + if err != nil { + return "", nil, nil, fmt.Errorf("error creating authentication request: %w", err) + } + + request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + + signer := common.DefaultRequestSigner(a.configurationProvider) + + err = signer.Sign(request) + if err != nil { + return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err) + } + + parsedVaultAddress, err := url.Parse(a.vaultAddress) + if err != nil { + return "", nil, nil, fmt.Errorf("unable to parse vault address: %w", err) + } + + request.Header.Set("Host", parsedVaultAddress.Host) + request.Header.Set("(request-target)", fmt.Sprintf("%s %s", "get", requestPath)) + + data := map[string]interface{}{ + "request_headers": request.Header, + } + + return fmt.Sprintf("%s/login/%s", a.mountPath, a.role), nil, data, nil +} + +func (a *ociMethod) NewCreds() chan struct{} { + return a.credsFound +} + +func (a *ociMethod) CredSuccess() {} + +func (a *ociMethod) Shutdown() { + close(a.credsFound) + close(a.stopCh) +} + +func (a *ociMethod) pollForCreds(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + for { + select { + case <-a.stopCh: + a.logger.Trace("shutdown triggered, stopping OCI auth handler") + return + case <-ticker.C: + if err := a.checkCreds(); err != nil { + a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err) + } + } + } +} + +func (a *ociMethod) checkCreds() error { + a.credLock.Lock() + defer a.credLock.Unlock() + + a.logger.Trace("checking for new credentials") + currentCreds, err := a.configurationProvider.KeyID() + if err != nil { + return err + } + // KeyID returns a plain string, so a simple equality check is enough to + // detect whether the credentials have changed.
+ if currentCreds == a.lastCreds { + a.logger.Trace("credentials are unchanged") + return nil + } + a.lastCreds = currentCreds + a.logger.Trace("new credentials detected, triggering Authenticate") + a.credsFound <- struct{}{} + return nil +} + +func getHomeFolder() string { + current, e := user.Current() + if e != nil { + // Give up and try to return something sensible + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return home + } + return current.HomeDir +} + +func getDefaultConfigFilePath() string { + homeFolder := getHomeFolder() + defaultConfigFile := path.Join(homeFolder, defaultConfigDirName, defaultConfigFileName) + if _, err := os.Stat(defaultConfigFile); err == nil { + return defaultConfigFile + } + + // Read configuration file path from OCI_CONFIG_FILE env var + fallbackConfigFile, existed := os.LookupEnv(configFilePathEnvVarName) + if !existed { + return defaultConfigFile + } + if _, err := os.Stat(fallbackConfigFile); os.IsNotExist(err) { + return defaultConfigFile + } + return fallbackConfigFile +} diff --git a/command/agentproxyshared/auth/token-file/token_file.go b/command/agentproxyshared/auth/token-file/token_file.go new file mode 100644 index 000000000000..c2154f7ab960 --- /dev/null +++ b/command/agentproxyshared/auth/token-file/token_file.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package token_file + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +type tokenFileMethod struct { + logger hclog.Logger + mountPath string + + cachedToken string + tokenFilePath string +} + +func NewTokenFileAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &tokenFileMethod{ + logger: conf.Logger, + mountPath: "auth/token", + } + + tokenFilePathRaw, ok := conf.Config["token_file_path"] + if !ok { + return nil, errors.New("missing 'token_file_path' value") + } + a.tokenFilePath, ok = tokenFilePathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'token_file_path' config value to string") + } + if a.tokenFilePath == "" { + return nil, errors.New("'token_file_path' value is empty") + } + + return a, nil +} + +func (a *tokenFileMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + token, err := os.ReadFile(a.tokenFilePath) + if err != nil { + if a.cachedToken == "" { + return "", nil, nil, fmt.Errorf("error reading token file and no cached token known: %w", err) + } + a.logger.Warn("error reading token file", "error", err) + } + if len(token) == 0 { + if a.cachedToken == "" { + return "", nil, nil, errors.New("token file empty and no cached token known") + } + a.logger.Warn("token file exists but read empty value, re-using cached value") + } else { + a.cachedToken = strings.TrimSpace(string(token)) + } + + // i.e. 
auth/token/lookup-self + return fmt.Sprintf("%s/lookup-self", a.mountPath), nil, map[string]interface{}{ + "token": a.cachedToken, + }, nil +} + +func (a *tokenFileMethod) NewCreds() chan struct{} { + return nil +} + +func (a *tokenFileMethod) CredSuccess() { +} + +func (a *tokenFileMethod) Shutdown() { +} diff --git a/command/agentproxyshared/auth/token-file/token_file_test.go b/command/agentproxyshared/auth/token-file/token_file_test.go new file mode 100644 index 000000000000..7e6e8982b245 --- /dev/null +++ b/command/agentproxyshared/auth/token-file/token_file_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package token_file + +import ( + "os" + "path/filepath" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func TestNewTokenFileAuthMethodEmptyConfig(t *testing.T) { + logger := logging.NewVaultLogger(log.Trace) + _, err := NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{}, + }) + if err == nil { + t.Fatal("Expected error due to empty config") + } +} + +func TestNewTokenFileEmptyFilePath(t *testing.T) { + logger := logging.NewVaultLogger(log.Trace) + _, err := NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": "", + }, + }) + if err == nil { + t.Fatalf("Expected error when giving empty file path") + } +} + +func TestNewTokenFileAuthenticate(t *testing.T) { + tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file")) + tokenFileContents := "super-secret-token" + if err != nil { + t.Fatal(err) + } + tokenFileName := tokenFile.Name() + tokenFile.Close() // WriteFile doesn't need it open + os.WriteFile(tokenFileName, []byte(tokenFileContents), 0o666) + defer os.Remove(tokenFileName) + + logger := logging.NewVaultLogger(log.Trace) + am, err := NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": tokenFileName, + }, + }) + if err != nil { + t.Fatal(err) + } + + path, headers, data, err := am.Authenticate(nil, nil) + if err != nil { + t.Fatal(err) + } + if path != "auth/token/lookup-self" { + t.Fatalf("Incorrect path, was %s", path) + } + if headers != nil { + t.Fatalf("Expected no headers, instead got %v", headers) + } + if data == nil { + t.Fatal("Data was nil") + } + tokenDataFromAuthMethod := data["token"].(string) + if tokenDataFromAuthMethod != tokenFileContents { + t.Fatalf("Incorrect token file contents returned by auth method, expected %s, got %s", tokenFileContents, tokenDataFromAuthMethod) + } + + _, err = os.Stat(tokenFileName) + if err != nil { + t.Fatal("Token file removed") + } +} diff --git a/command/agentproxyshared/cache/api_proxy.go b/command/agentproxyshared/cache/api_proxy.go new file mode 100644 index 000000000000..f0bd743b88b4 --- /dev/null +++ b/command/agentproxyshared/cache/api_proxy.go @@ -0,0 +1,191 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "fmt" + gohttp "net/http" + "sync" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/http" +) + +//go:generate enumer -type=EnforceConsistency -trimprefix=EnforceConsistency +type EnforceConsistency int + +const ( + EnforceConsistencyNever EnforceConsistency = iota + EnforceConsistencyAlways +) + +//go:generate enumer -type=WhenInconsistentAction -trimprefix=WhenInconsistent +type WhenInconsistentAction int + +const ( + WhenInconsistentFail WhenInconsistentAction = iota + WhenInconsistentRetry + WhenInconsistentForward +) + +// APIProxy is an implementation of the proxier interface that is used to +// forward the request to Vault and get the response. +type APIProxy struct { + client *api.Client + logger hclog.Logger + enforceConsistency EnforceConsistency + whenInconsistentAction WhenInconsistentAction + l sync.RWMutex + lastIndexStates []string + userAgentString string + userAgentStringFunction func(string) string + // clientNamespace is a one-time set representation of the namespace of the client + // (i.e. client.Namespace()) to avoid repeated calls and lock usage. + clientNamespace string + prependConfiguredNamespace bool +} + +var _ Proxier = &APIProxy{} + +type APIProxyConfig struct { + Client *api.Client + Logger hclog.Logger + EnforceConsistency EnforceConsistency + WhenInconsistentAction WhenInconsistentAction + // UserAgentString is used as the User Agent when the proxied client + // does not have a user agent of its own. + UserAgentString string + // UserAgentStringFunction is the function to transform the proxied client's + // user agent into one that includes Vault-specific information. + UserAgentStringFunction func(string) string + // PrependConfiguredNamespace configures whether the client's namespace + // should be prepended to proxied requests + PrependConfiguredNamespace bool +} + +func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { + if config.Client == nil { + return nil, fmt.Errorf("nil API client") + } + return &APIProxy{ + client: config.Client, + logger: config.Logger, + enforceConsistency: config.EnforceConsistency, + whenInconsistentAction: config.WhenInconsistentAction, + userAgentString: config.UserAgentString, + userAgentStringFunction: config.UserAgentStringFunction, + prependConfiguredNamespace: config.PrependConfiguredNamespace, + clientNamespace: namespace.Canonicalize(config.Client.Namespace()), + }, nil +} + +func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + client, err := ap.client.Clone() + if err != nil { + return nil, err + } + client.SetToken(req.Token) + + // Derive and set a logger for the client + clientLogger := ap.logger.Named("client") + client.SetLogger(clientLogger) + + // http.Transport will transparently request gzip and decompress the response, but only if + // the client doesn't manually set the header. Removing any Accept-Encoding header allows the + // transparent compression to occur. + req.Request.Header.Del("Accept-Encoding") + + if req.Request.Header == nil { + req.Request.Header = make(gohttp.Header) + } + + // Set our User-Agent to be one indicating we are Vault Agent's API proxy. + // If the sending client had one, preserve it. 
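The branch that follows implements the comment above: wrap the proxied client's User-Agent if it supplied one, otherwise fall back to the proxy's own string. In isolation the decision is just this (a sketch; `chooseUserAgent` and `wrap` are illustrative stand-ins for the `UserAgentString` and `UserAgentStringFunction` config fields):

```go
package main

import "fmt"

// chooseUserAgent mirrors the branch below: if the proxied client supplied
// its own User-Agent, wrap it; otherwise fall back to the proxy's default.
func chooseUserAgent(incoming, proxyDefault string, wrap func(string) string) string {
	if incoming != "" {
		return wrap(incoming)
	}
	return proxyDefault
}

func main() {
	wrap := func(ua string) string { return fmt.Sprintf("vault-proxy; %s", ua) }
	fmt.Println(chooseUserAgent("curl/8.0", "vault-proxy", wrap)) // vault-proxy; curl/8.0
	fmt.Println(chooseUserAgent("", "vault-proxy", wrap))         // vault-proxy
}
```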
+ if req.Request.Header.Get("User-Agent") != "" { + initialUserAgent := req.Request.Header.Get("User-Agent") + req.Request.Header.Set("User-Agent", ap.userAgentStringFunction(initialUserAgent)) + } else { + req.Request.Header.Set("User-Agent", ap.userAgentString) + } + + client.SetHeaders(req.Request.Header) + if ap.prependConfiguredNamespace && ap.clientNamespace != "" { + currentNamespace := namespace.Canonicalize(client.Namespace()) + newNamespace := namespace.Canonicalize(ap.clientNamespace + currentNamespace) + client.SetNamespace(newNamespace) + } + + fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) + fwReq.BodyBytes = req.RequestBody + + query := req.Request.URL.Query() + if len(query) != 0 { + fwReq.Params = query + } + + var newState string + manageState := ap.enforceConsistency == EnforceConsistencyAlways && + req.Request.Header.Get(http.VaultIndexHeaderName) == "" && + req.Request.Header.Get(http.VaultForwardHeaderName) == "" && + req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" + + if manageState { + client = client.WithResponseCallbacks(api.RecordState(&newState)) + ap.l.RLock() + lastStates := ap.lastIndexStates + ap.l.RUnlock() + if len(lastStates) != 0 { + client = client.WithRequestCallbacks(api.RequireState(lastStates...)) + switch ap.whenInconsistentAction { + case WhenInconsistentFail: + // In this mode we want to delegate handling of inconsistency + // failures to the external client talking to Agent. + client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) + case WhenInconsistentRetry: + // In this mode we want to handle retries due to inconsistency + // internally. This is the default api.Client behaviour so + // we needn't do anything. + case WhenInconsistentForward: + fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) + } + } + } + + // Make the request to Vault and get the response + ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) + + resp, err := client.RawRequestWithContext(ctx, fwReq) + if resp == nil && err != nil { + // We don't want to cache nil responses, so we simply return the error + return nil, err + } + + if newState != "" { + ap.l.Lock() + // We want to be using the "newest" states seen, but newer isn't well + // defined here. There can be two states S1 and S2 which aren't strictly ordered: + // S1 could have a newer localindex and S2 could have a newer replicatedindex. So + // we need to merge them. But we can't merge them because we wouldn't be able to + // "sign" the resulting header because we don't have access to the HMAC key that + // Vault uses to do so. So instead we compare any of the 0-2 saved states + // we have to the new header, keeping the newest 1-2 of these, and sending + // them to Vault to evaluate. + ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) + ap.l.Unlock() + } + + // Before error checking from the request call, we'd want to initialize a SendResponse to + // potentially return + sendResponse, newErr := NewSendResponse(resp, nil) + if newErr != nil { + return nil, newErr + } + + // Bubble back the api.Response as well for error checking/handling at the handler layer. 
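As the comment above notes, `Send` can return a usable response and a non-nil error at the same time: when Vault itself answered with an error status, the `api.Response` is handed back alongside the error so the handler layer can inspect it. Callers therefore cannot use the usual return-early-on-error idiom. A sketch of the caller-side contract, with `send` as a stub rather than the real method:

```go
package main

import (
	"errors"
	"fmt"
)

// sendResponse stands in for cache.SendResponse.
type sendResponse struct{ statusCode int }

// send stubs APIProxy.Send's contract: when Vault answers with an error
// status, it returns BOTH a usable response and a non-nil error.
func send() (*sendResponse, error) {
	return &sendResponse{statusCode: 403}, errors.New("permission denied")
}

func main() {
	resp, err := send()
	if resp == nil && err != nil {
		// Transport-level failure: there is nothing to forward.
		fmt.Println("hard failure:", err)
		return
	}
	// A response plus an error means Vault answered; forward the status
	// and let the caller see the error too.
	fmt.Println("forwarding status", resp.statusCode, "err:", err)
}
```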
+ return sendResponse, err +} diff --git a/command/agent/cache/api_proxy_test.go b/command/agentproxyshared/cache/api_proxy_test.go similarity index 86% rename from command/agent/cache/api_proxy_test.go rename to command/agentproxyshared/cache/api_proxy_test.go index aec0b72d1fc9..9e7035918cb3 100644 --- a/command/agent/cache/api_proxy_test.go +++ b/command/agentproxyshared/cache/api_proxy_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cache import ( @@ -9,16 +12,16 @@ import ( "testing" "time" - "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/userpass" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" ) const policyAdmin = ` @@ -32,8 +35,10 @@ func TestAPIProxy(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -68,8 +73,10 @@ func TestAPIProxyNoCache(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -106,8 +113,10 @@ func TestAPIProxy_queryParams(t *testing.T) { defer cleanup() proxier, err := NewAPIProxy(&APIProxyConfig{ - Client: client, - Logger: logging.NewVaultLogger(hclog.Trace), + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -177,15 +186,9 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v ctx = context.Background() } - // Handle sane defaults if coreConfig == nil { - coreConfig = &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: logging.NewVaultLogger(hclog.Trace), - } + coreConfig = &vault.CoreConfig{} } - // Always set up the userpass backend since we use that to generate an admin // token for the client that will make proxied requests to through the agent. 
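Stepping back to the consistency handling in `Send` above: when `EnforceConsistencyAlways` is set, the proxy records the newest replication state seen on each response and replays the remembered states on later requests, merging them via `api.MergeReplicationStates`. A much-simplified sketch of that bookkeeping, using plain integers where Vault uses opaque signed header values:

```go
package main

import (
	"fmt"
	"sync"
)

// stateTracker sketches the bookkeeping in Send: remember the newest
// consistency state observed on responses and attach the remembered state
// to future requests. Real Vault states are opaque signed strings merged
// with api.MergeReplicationStates; plain ints keep the sketch simple.
type stateTracker struct {
	mu    sync.RWMutex
	state int
}

func (t *stateTracker) record(newState int) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if newState > t.state { // "merge": keep only the newest
		t.state = newState
	}
}

func (t *stateTracker) require() int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.state
}

func main() {
	t := &stateTracker{}
	t.record(3)
	t.record(2) // older state, ignored by the merge
	fmt.Println("attach state to next request:", t.require()) // 3
}
```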
if coreConfig.CredentialBackends == nil || coreConfig.CredentialBackends["userpass"] == nil { @@ -246,12 +249,14 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v t.Fatal(err) } - apiProxyLogger := logging.NewVaultLogger(hclog.Trace).Named("apiproxy") + apiProxyLogger := cluster.Logger.Named("apiproxy") // Create the API proxier apiProxy, err := NewAPIProxy(&APIProxyConfig{ - Client: clienToUse, - Logger: apiProxyLogger, + Client: clienToUse, + Logger: apiProxyLogger, + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), }) if err != nil { t.Fatal(err) @@ -262,15 +267,17 @@ func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *v var leaseCache *LeaseCache if useCache { - cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + cacheLogger := cluster.Logger.Named("cache") // Create the lease cache proxier and set its underlying proxier to // the API proxier. leaseCache, err = NewLeaseCache(&LeaseCacheConfig{ - Client: clienToUse, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), + Client: clienToUse, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", }) if err != nil { t.Fatal(err) diff --git a/command/agentproxyshared/cache/cache_test.go b/command/agentproxyshared/cache/cache_test.go new file mode 100644 index 000000000000..12e1e18e3a40 --- /dev/null +++ b/command/agentproxyshared/cache/cache_test.go @@ -0,0 +1,1208 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/sink/mock" + "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { + t.Helper() + for val, valType := range sampleSpace { + index, err := leaseCache.db.Get(valType, val) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + t.Fatal(err) + } + if expected[val] == "" && index != nil { + t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val) + } + if expected[val] != "" && index == nil { + t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val) + } + } +} + +func TestCache_AutoAuthTokenStripping(t *testing.T) { + response1 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup-self"}}` + response2 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup"}}` + response3 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + response4 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, response1), + newTestSendResponse(http.StatusOK, response2), + newTestSendResponse(http.StatusOK, 
response3), + newTestSendResponse(http.StatusOK, response4), + } + + leaseCache := testNewLeaseCache(t, responses) + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ctx := namespace.RootContext(nil) + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Empty the token in the client. Auto-auth token should be put to use. + testClient.SetToken("") + secret, err := testClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup-self" { + t.Fatalf("failed to strip off auto-auth token on lookup-self") + } + + secret, err = testClient.Auth().Token().Lookup("") + if err != nil { + t.Fatal(err) + } + if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup" { + t.Fatalf("failed to strip off auto-auth token on lookup") + } + + secret, err = testClient.Auth().Token().RenewSelf(1) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil { + secretJson, _ := json.Marshal(secret) + t.Fatalf("Expected secret to have Auth but was %s", secretJson) + } + if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { + t.Fatalf("failed to strip off auto-auth token on renew-self") + } + + secret, err = testClient.Auth().Token().Renew("testid", 1) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil { + secretJson, _ := json.Marshal(secret) + t.Fatalf("Expected secret to have Auth but was %s", secretJson) + } + if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" { + t.Fatalf("failed to strip off auto-auth token on renew") + } +} + +func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) { + leaseCache := &mockTokenVerifierProxier{} + dummyToken := "DUMMY" + realToken := "testid" + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ctx := namespace.RootContext(nil) + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + // mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false)) + server := &http.Server{ + Handler: mux, + 
ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Set a dummy token in the client. The proxy should replace it with the real auto-auth token. + testClient.SetToken(dummyToken) + _, err = testClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if leaseCache.currentToken != realToken { + t.Fatalf("failed to use real token from auto-auth") + } +} + +func TestCache_ConcurrentRequests(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + wg := &sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + key := fmt.Sprintf("kv/foo/%d_%d", i, rand.Int()) + _, err := testClient.Logical().Write(key, map[string]interface{}{ + "key": key, + }) + if err != nil { + t.Fatal(err) + } + secret, err := testClient.Logical().Read(key) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data["key"].(string) != key { + t.Fatal(fmt.Sprintf("failed to read value for key: %q", key)) + } + }(i) + + } + wg.Wait() +} + +func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke-orphan the intermediate token.
This should result in its own + // eviction and evictions of the revoked token's leases. All other things + // including the child tokens and leases of the child tokens should be + // untouched. + testClient.SetToken(token2) + err = testClient.Auth().Token().RevokeOrphan(token2) + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + token3: "token", + lease3: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the leaf token. This should evict all the leases belonging to this + // token, evict entries for all the child tokens and their respective + // leases.
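This test and the leaf-, intermediate-, and top-level variants that follow all assert the same invariant: evicting a token also evicts its leases and, transitively, its child tokens and their leases. A toy model of that walk, with plain maps standing in for the memdb indexes the lease cache really uses:

```go
package main

import "fmt"

// cache is a toy model of the lease cache's eviction invariant: revoking a
// token removes its leases and, recursively, its child tokens and theirs.
type cache struct {
	children map[string][]string // token -> child tokens
	leases   map[string][]string // token -> lease IDs
	entries  map[string]bool     // everything currently cached
}

func (c *cache) evictToken(token string) {
	delete(c.entries, token)
	for _, lease := range c.leases[token] {
		delete(c.entries, lease)
	}
	for _, child := range c.children[token] {
		c.evictToken(child) // recurse down the token tree
	}
}

func main() {
	c := &cache{
		children: map[string][]string{"token1": {"token2"}, "token2": {"token3"}},
		leases:   map[string][]string{"token1": {"lease1"}, "token2": {"lease2"}, "token3": {"lease3"}},
		entries:  map[string]bool{"token1": true, "token2": true, "token3": true, "lease1": true, "lease2": true, "lease3": true},
	}
	c.evictToken("token2") // intermediate token: token3, lease2, lease3 also go
	fmt.Println(c.entries) // map[lease1:true token1:true]
}
```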
+ testClient.SetToken(token3) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + token2: "token", + lease2: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the second level token. This should evict all the leases + // belonging to this token, evict entries for all the child tokens and + // their respective leases. 
+ testClient.SetToken(token2) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_TopLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the top level token. This should evict all the leases belonging + // to this token, evict entries for all the child tokens and their + // respective leases. 
+ testClient.SetToken(token1) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_Shutdown(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil)) + cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + rootCancelFunc() + time.Sleep(1 * time.Second) + + // Ensure that all the entries are now gone + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = 
testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Cancel the base context of the lease cache. This should trigger + // evictions of all the entries from the cache. + leaseCache.baseCtxInfo.CancelFunc() + time.Sleep(1 * time.Second) + + // Ensure that all the entries are now gone + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_NonCacheable(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + } + + cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + // Query mounts first + origMounts, err := testClient.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + // Mount a kv backend + if err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + t.Fatal(err) + } + + // Query mounts again + newMounts, err := testClient.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(origMounts, newMounts); diff == nil { + t.Logf("response #1: %#v", origMounts) + t.Logf("response #2: %#v", newMounts) + t.Fatal("expected requests to be not cached") + } + + // Query a non-existing mount, expect an error from api.Response + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + r := testClient.NewRequest("GET", "/v1/kv-invalid") + + apiResp, err := testClient.RawRequestWithContext(ctx, r) + if apiResp != nil { + defer apiResp.Body.Close() + } + if apiResp == nil || apiResp.Error() == nil || apiResp.StatusCode != 404 { + t.Fatalf("expected an error response and a 404 from requesting an invalid path, got: %#v", apiResp) + } + if err == nil { + t.Fatal("expected an error from requesting an invalid path") + } +} + +func TestCache_Caching_AuthResponse(t *testing.T) { + cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + testClient.SetToken(token) + + authTokenCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret { + resp, err := testClient.Logical().Write("auth/token/create", policies) + if err != nil { + t.Fatal(err) + } + if resp.Auth == nil || resp.Auth.ClientToken == "" { + t.Fatalf("expected a valid client token in the response, got = %#v", resp) + } + + return resp + } + + // Test on auth response by creating a child token + { + proxiedResp := authTokenCreateReq(t, map[string]interface{}{ + "policies": "default", + }) + + cachedResp := authTokenCreateReq(t, map[string]interface{}{ + "policies": "default", + }) + + if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { + t.Fatal(diff) + } + } + + // Test on *non-renewable* auth response by creating a child root token + { + proxiedResp := authTokenCreateReq(t, nil) + + cachedResp := authTokenCreateReq(t, nil) + + if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil { + t.Fatal(diff) + } + } +} + +func TestCache_Caching_LeaseResponse(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Test proxy by issuing two different requests + { + // Write data to the lease-kv backend + _, err := testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + _, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + firstResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + + secondResp, err := testClient.Logical().Read("kv/foobar") + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(firstResp, secondResp); diff == nil { + t.Logf("response: %#v", firstResp) + t.Fatal("expected proxied responses, got cached response on second request") + } + } + + // Test caching behavior by issuing the same request twice + { + _, err := testClient.Logical().Write("kv/baz", map[string]interface{}{ + "value": "foo", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + proxiedResp, err := testClient.Logical().Read("kv/baz") + if err != nil { + t.Fatal(err) + } + + cachedResp, err := testClient.Logical().Read("kv/baz") + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(proxiedResp, cachedResp); diff != nil { + t.Fatal(diff) + } + } +} + +func TestCache_Caching_CacheClear(t *testing.T) { + t.Run("request_path", func(t *testing.T) { + testCachingCacheClearCommon(t, "request_path") + }) + + t.Run("lease", func(t *testing.T) { + testCachingCacheClearCommon(t, "lease") + }) + + t.Run("token", func(t *testing.T) { + testCachingCacheClearCommon(t, "token") + }) + + t.Run("token_accessor", func(t *testing.T) { + testCachingCacheClearCommon(t, "token_accessor") + }) + + t.Run("all", func(t *testing.T) { + testCachingCacheClearCommon(t, "all") + }) +} + +func testCachingCacheClearCommon(t *testing.T, clearType string) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + cleanup, client, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Write data to the lease-kv backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Proxy this request, agent should cache the response + resp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + gotLeaseID := resp.LeaseID + + // Verify the entry exists + idx, err := leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) + if err != nil { + t.Fatal(err) + } + + if idx == nil { + t.Fatalf("expected cached entry, got: %v", idx) + } + + data := map[string]interface{}{
+ "type": clearType, + } + + // We need to set the value here depending on what we're trying to test. + // Some values are be static, but others are dynamically generated at runtime. + switch clearType { + case "request_path": + data["value"] = "/v1/kv/foo" + case "lease": + data["value"] = resp.LeaseID + case "token": + data["value"] = testClient.Token() + case "token_accessor": + lookupResp, err := client.Auth().Token().Lookup(testClient.Token()) + if err != nil { + t.Fatal(err) + } + data["value"] = lookupResp.Data["accessor"] + case "all": + default: + t.Fatalf("invalid type provided: %v", clearType) + } + + r := testClient.NewRequest("PUT", consts.AgentPathCacheClear) + if err := r.SetJSONBody(data); err != nil { + t.Fatal(err) + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + apiResp, err := testClient.RawRequestWithContext(ctx, r) + if apiResp != nil { + defer apiResp.Body.Close() + } + if apiResp != nil && apiResp.StatusCode == 404 { + _, parseErr := api.ParseSecret(apiResp.Body) + switch parseErr { + case nil: + case io.EOF: + default: + t.Fatal(err) + } + } + if err != nil { + t.Fatal(err) + } + + time.Sleep(100 * time.Millisecond) + + // Verify the entry is cleared + idx, err = leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID) + if err != cachememdb.ErrCacheItemNotFound { + t.Fatal("expected entry to be nil, got", err) + } +} + +func TestCache_AuthTokenCreateOrphan(t *testing.T) { + t.Run("create", func(t *testing.T) { + t.Run("managed", func(t *testing.T) { + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + NoParent: true, + } + resp, err := testClient.Auth().Token().Create(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + + t.Run("non-managed", func(t *testing.T) { + cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + NoParent: true, + } + + // Use the test client but set the token to one that's not managed by agent + testClient.SetToken(clusterClient.Token()) + + resp, err := testClient.Auth().Token().Create(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + }) + + t.Run("create-orphan", func(t *testing.T) { + t.Run("managed", func(t *testing.T) { + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + } + resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + + t.Run("non-managed", func(t *testing.T) { + cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer 
cleanup() + + reqOpts := &api.TokenCreateRequest{ + Policies: []string{"default"}, + } + + // Use the test client but set the token to one that's not managed by agent + testClient.SetToken(clusterClient.Token()) + + resp, err := testClient.Auth().Token().CreateOrphan(reqOpts) + if err != nil { + t.Fatal(err) + } + token := resp.Auth.ClientToken + + idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + t.Fatal(err) + } + if idx == nil { + t.Fatalf("expected entry to be non-nil, got: %#v", idx) + } + }) + }) +} diff --git a/command/agent/cache/cacheboltdb/bolt.go b/command/agentproxyshared/cache/cacheboltdb/bolt.go similarity index 95% rename from command/agent/cache/cacheboltdb/bolt.go rename to command/agentproxyshared/cache/cacheboltdb/bolt.go index 72cb7f3b8246..05d5ad93637a 100644 --- a/command/agent/cache/cacheboltdb/bolt.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cacheboltdb import ( @@ -9,10 +12,10 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping/v2" "github.com/hashicorp/go-multierror" - bolt "go.etcd.io/bbolt" ) const ( @@ -36,6 +39,14 @@ const ( // TokenType - Bucket/type for auto-auth tokens TokenType = "token" + // StaticSecretType - Bucket/type for static secrets + StaticSecretType = "static-secret" + + // TokenCapabilitiesType - Bucket/type for the token capabilities that + // are used to govern access to static secrets. These will be updated + // periodically to ensure that access to the cached secret remains. + TokenCapabilitiesType = "token-capabilities" + // LeaseType - v2 Bucket/type for auth AND secret leases. // // This bucket stores keys in the same order they were created using @@ -154,7 +165,7 @@ func createV1BoltSchema(tx *bolt.Tx) error { func createV2BoltSchema(tx *bolt.Tx) error { // Create the buckets for tokens and leases. 
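The hunk below simply adds the two new static-secret buckets to this loop. For context, idempotent bucket creation inside a single bolt write transaction looks like the following standalone sketch; it targets the upstream `go.etcd.io/bbolt` API, which the `hashicorp-forge/bbolt` fork imported above mirrors:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

func main() {
	dir, err := os.MkdirTemp("", "bolt-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	db, err := bolt.Open(filepath.Join(dir, "demo.db"), 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mirrors createV2BoltSchema: one CreateBucketIfNotExists per bucket,
	// all inside a single writable transaction. Bucket names echo the
	// constants above but are hard-coded here for the demo.
	buckets := []string{"token", "lease", "static-secret", "token-capabilities"}
	err = db.Update(func(tx *bolt.Tx) error {
		for _, name := range buckets {
			if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
				return fmt.Errorf("failed to create %s bucket: %w", name, err)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("buckets created")
}
```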
- for _, bucket := range []string{TokenType, LeaseType, lookupType} { + for _, bucket := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { return fmt.Errorf("failed to create %s bucket: %w", bucket, err) } @@ -256,6 +267,10 @@ func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, inde if err := meta.Put([]byte(AutoAuthToken), protoBlob); err != nil { return fmt.Errorf("failed to set latest auto-auth token: %w", err) } + case StaticSecretType: + key = []byte(id) + case TokenCapabilitiesType: + key = []byte(id) default: return fmt.Errorf("called Set for unsupported type %q", indexType) } @@ -408,7 +423,7 @@ func (b *BoltStorage) Close() error { // the schema/layout func (b *BoltStorage) Clear() error { return b.db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{TokenType, LeaseType, lookupType} { + for _, name := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { b.logger.Trace("deleting bolt bucket", "name", name) if err := tx.DeleteBucket([]byte(name)); err != nil { return err diff --git a/command/agent/cache/cacheboltdb/bolt_test.go b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go similarity index 87% rename from command/agent/cache/cacheboltdb/bolt_test.go rename to command/agentproxyshared/cache/cacheboltdb/bolt_test.go index d6f5a742ef34..06a31780b5ad 100644 --- a/command/agent/cache/cacheboltdb/bolt_test.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go @@ -1,9 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cacheboltdb import ( "context" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -12,11 +14,11 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/cache/keymanager" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - bolt "go.etcd.io/bbolt" ) func getTestKeyManager(t *testing.T) keymanager.KeyManager { @@ -31,7 +33,7 @@ func getTestKeyManager(t *testing.T) keymanager.KeyManager { func TestBolt_SetGet(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -57,7 +59,7 @@ func TestBolt_SetGet(t *testing.T) { func TestBoltDelete(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -89,7 +91,7 @@ func TestBoltDelete(t *testing.T) { func TestBoltClear(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -123,6 +125,20 @@ func TestBoltClear(t *testing.T) { require.Len(t, tokens, 1) assert.Equal(t, []byte("hello"), tokens[0]) + err = b.Set(ctx, "static-secret", []byte("hello"), StaticSecretType) + require.NoError(t, err) + staticSecrets, err := b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 1) + assert.Equal(t, []byte("hello"), staticSecrets[0]) + + err = b.Set(ctx, "capabilities-index", []byte("hello"), TokenCapabilitiesType) + require.NoError(t, err) + capabilities, err := 
b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 1) + assert.Equal(t, []byte("hello"), capabilities[0]) + // Clear the bolt db, and check that it's indeed clear err = b.Clear() require.NoError(t, err) @@ -132,12 +148,18 @@ func TestBoltClear(t *testing.T) { tokens, err = b.GetByType(ctx, TokenType) require.NoError(t, err) assert.Len(t, tokens, 0) + staticSecrets, err = b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 0) + capabilities, err = b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 0) } func TestBoltSetAutoAuthToken(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -207,11 +229,11 @@ func TestDBFileExists(t *testing.T) { var tmpPath string var err error if tc.mkDir { - tmpPath, err = ioutil.TempDir("", "test-db-path") + tmpPath, err = os.MkdirTemp("", "test-db-path") require.NoError(t, err) } if tc.createFile { - err = ioutil.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) + err = os.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) require.NoError(t, err) } exists, err := DBFileExists(tmpPath) @@ -241,7 +263,7 @@ func Test_SetGetRetrievalToken(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -267,7 +289,7 @@ func Test_SetGetRetrievalToken(t *testing.T) { func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) @@ -339,7 +361,7 @@ func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { func TestBolt_MigrateFromInvalidToV2Schema(t *testing.T) { ctx := context.Background() - path, err := ioutil.TempDir("", "bolt-test") + path, err := os.MkdirTemp("", "bolt-test") require.NoError(t, err) defer os.RemoveAll(path) diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb.go b/command/agentproxyshared/cache/cachememdb/cache_memdb.go new file mode 100644 index 000000000000..ed2cd0ac8001 --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb.go @@ -0,0 +1,328 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cachememdb + +import ( + "errors" + "fmt" + "sync/atomic" + + memdb "github.com/hashicorp/go-memdb" +) + +const ( + tableNameIndexer = "indexer" + tableNameCapabilitiesIndexer = "capabilities-indexer" +) + +// ErrCacheItemNotFound is returned on Get and GetCapabilitiesIndex calls +// when the entry is not found in the cache. +var ErrCacheItemNotFound = errors.New("cache item not found") + +// CacheMemDB is the underlying cache database for storing indexes. +type CacheMemDB struct { + db *atomic.Value +} + +// New creates a new instance of CacheMemDB. 
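Note the `db *atomic.Value` field in the struct above: readers `Load` the current `*memdb.MemDB` without taking a lock, and `Flush` (at the end of this file) swaps in a fresh database wholesale. The same pattern in isolation, with a trivial map standing in for the memdb instance:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type store map[string]string

// holder keeps the current store behind an atomic.Value: readers Load it
// without locking, and flush replaces it wholesale with a fresh one.
type holder struct{ v atomic.Value }

func newHolder() *holder {
	h := &holder{}
	h.v.Store(store{})
	return h
}

func (h *holder) get(k string) string { return h.v.Load().(store)[k] }

// flush atomically swaps in an empty store, like CacheMemDB.Flush.
func (h *holder) flush() { h.v.Store(store{}) }

func main() {
	h := newHolder()
	// Direct map write, safe only because this demo is single-goroutine;
	// the real code delegates concurrent access to memdb transactions.
	h.v.Load().(store)["k"] = "v"
	fmt.Println(h.get("k"))       // v
	h.flush()
	fmt.Println(h.get("k") == "") // true
}
```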
+func New() (*CacheMemDB, error) { + db, err := newDB() + if err != nil { + return nil, err + } + + c := &CacheMemDB{ + db: new(atomic.Value), + } + c.db.Store(db) + + return c, nil +} + +func newDB() (*memdb.MemDB, error) { + cacheSchema := &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + tableNameIndexer: { + Name: tableNameIndexer, + Indexes: map[string]*memdb.IndexSchema{ + // This index enables fetching the cached item based on the + // identifier of the index. + IndexNameID: { + Name: IndexNameID, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + // This index enables fetching all the entries in cache for + // a given request path, in a given namespace. + IndexNameRequestPath: { + Name: IndexNameRequestPath, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "RequestPath", + }, + }, + }, + }, + // This index enables fetching all the entries in cache + // belonging to the leases of a given token. + IndexNameLeaseToken: { + Name: IndexNameLeaseToken, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "LeaseToken", + }, + }, + // This index enables fetching all the entries in cache + // that are tied to the given token, regardless of the + // entries belonging to the token or belonging to the + // lease. + IndexNameToken: { + Name: IndexNameToken, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Token", + }, + }, + // This index enables fetching all the entries in cache for + // the given parent token. + IndexNameTokenParent: { + Name: IndexNameTokenParent, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenParent", + }, + }, + // This index enables fetching all the entries in cache for + // the given accessor. + IndexNameTokenAccessor: { + Name: IndexNameTokenAccessor, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenAccessor", + }, + }, + // This index enables fetching all the entries in cache for + // the given lease identifier. + IndexNameLease: { + Name: IndexNameLease, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Lease", + }, + }, + }, + }, + tableNameCapabilitiesIndexer: { + Name: tableNameCapabilitiesIndexer, + Indexes: map[string]*memdb.IndexSchema{ + // This index enables fetching the cached item based on the + // identifier of the index. + CapabilitiesIndexNameID: { + Name: CapabilitiesIndexNameID, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + }, + }, + }, + } + + db, err := memdb.NewMemDB(cacheSchema) + if err != nil { + return nil, err + } + return db, nil +} + +// Get returns the index based on the indexer and the index values provided. +// If the index isn't present, it will return nil, ErrCacheItemNotFound +func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + if raw == nil { + return nil, ErrCacheItemNotFound + } + + index, ok := raw.(*Index) + if !ok { + return nil, errors.New("unable to parse index value from the cache") + } + + return index, nil +} + +// Set stores the index into the cache.
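`Get` above and `Set` below are thin wrappers over go-memdb's transaction API. End to end, the same insert-then-lookup flow against a single-table schema looks like this sketch (the `entry` type and table name are illustrative, not the cache's real schema):

```go
package main

import (
	"fmt"
	"log"

	memdb "github.com/hashicorp/go-memdb"
)

type entry struct {
	ID       string
	Response string
}

func main() {
	// One table, one unique "id" index, matching the shape of newDB above.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"indexer": {
				Name: "indexer",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		log.Fatal(err)
	}

	// Set: writable transaction, insert, commit.
	txn := db.Txn(true)
	if err := txn.Insert("indexer", &entry{ID: "test_id", Response: "hello"}); err != nil {
		txn.Abort()
		log.Fatal(err)
	}
	txn.Commit()

	// Get: read-only transaction, First by index name and value.
	read := db.Txn(false)
	raw, err := read.First("indexer", "id", "test_id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(raw.(*entry).Response) // hello
}
```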
+func (c *CacheMemDB) Set(index *Index) error { + if index == nil { + return errors.New("nil index provided") + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Insert(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to insert index into cache: %v", err) + } + + txn.Commit() + + return nil +} + +// GetCapabilitiesIndex returns the CapabilitiesIndex from the cache. +// If the capabilities index isn't present, it will return nil, ErrCacheItemNotFound +func (c *CacheMemDB) GetCapabilitiesIndex(indexName string, indexValues ...interface{}) (*CapabilitiesIndex, error) { + if !validCapabilitiesIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameCapabilitiesIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + if raw == nil { + return nil, ErrCacheItemNotFound + } + + index, ok := raw.(*CapabilitiesIndex) + if !ok { + return nil, errors.New("unable to parse capabilities index value from the cache") + } + + return index, nil +} + +// SetCapabilitiesIndex stores the CapabilitiesIndex index into the cache. +func (c *CacheMemDB) SetCapabilitiesIndex(index *CapabilitiesIndex) error { + if index == nil { + return errors.New("nil capabilities index provided") + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Insert(tableNameCapabilitiesIndexer, index); err != nil { + return fmt.Errorf("unable to insert index into cache: %v", err) + } + + txn.Commit() + + return nil +} + +// EvictCapabilitiesIndex removes a capabilities index from the cache based on index name and value. +func (c *CacheMemDB) EvictCapabilitiesIndex(indexName string, indexValues ...interface{}) error { + index, err := c.GetCapabilitiesIndex(indexName, indexValues...) + if err == ErrCacheItemNotFound { + return nil + } + if err != nil { + return fmt.Errorf("unable to fetch index on cache deletion: %v", err) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Delete(tableNameCapabilitiesIndexer, index); err != nil { + return fmt.Errorf("unable to delete index from cache: %v", err) + } + + txn.Commit() + + return nil +} + +// GetByPrefix returns all the cached indexes based on the index name and the +// value prefix. +func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + indexName = indexName + "_prefix" + + // Get all the objects + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + var indexes []*Index + for { + obj := iter.Next() + if obj == nil { + break + } + index, ok := obj.(*Index) + if !ok { + return nil, fmt.Errorf("failed to cast cached index") + } + + indexes = append(indexes, index) + } + + return indexes, nil +} + +// Evict removes an index from the cache based on index name and value. +func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { + index, err := c.Get(indexName, indexValues...) 
+ if errors.Is(err, ErrCacheItemNotFound) { + return nil + } + if err != nil { + return fmt.Errorf("unable to fetch index on cache deletion: %v", err) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Delete(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to delete index from cache: %v", err) + } + + txn.Commit() + + return nil +} + +// Flush resets the underlying cache object. +func (c *CacheMemDB) Flush() error { + newDB, err := newDB() + if err != nil { + return err + } + + c.db.Store(newDB) + + return nil +} diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go new file mode 100644 index 000000000000..63959141d6d4 --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go @@ -0,0 +1,486 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cachememdb + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/go-test/deep" +) + +func testContextInfo() *ContextInfo { + ctx, cancelFunc := context.WithCancel(context.Background()) + + return &ContextInfo{ + Ctx: ctx, + CancelFunc: cancelFunc, + } +} + +func TestNew(t *testing.T) { + _, err := New() + if err != nil { + t.Fatal(err) + } +} + +func TestCacheMemDB_Get(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.Get("foo", "bar") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.Get(IndexNameID, "foo") + if err != ErrCacheItemNotFound { + t.Fatal("expected cache item to be not found", err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_accessor", + Lease: "test_lease", + Response: []byte("hello world"), + Tokens: map[string]struct{}{}, + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_index_id", + "id", + []interface{}{in.ID}, + }, + { + "by_request_path", + "request_path", + []interface{}{in.Namespace, in.RequestPath}, + }, + { + "by_lease", + "lease", + []interface{}{in.Lease}, + }, + { + "by_token", + "token", + []interface{}{in.Token}, + }, + { + "by_token_accessor", + "token_accessor", + []interface{}{in.TokenAccessor}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := cache.Get(tc.indexName, tc.indexValues...) 
+ if err != nil && err != ErrCacheItemNotFound { + t.Fatal(err) + } + if diff := deep.Equal(in, out); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestCacheMemDB_GetByPrefix(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.GetByPrefix("foo", "bar", "baz") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar") + if err != nil { + t.Fatal(err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/1", + Token: "test_token", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor", + Lease: "path/to/test_lease/1", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Populate cache + in2 := &Index{ + ID: "test_id_2", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/2", + Token: "test_token2", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor2", + Lease: "path/to/test_lease/2", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in2); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_request_path", + IndexNameRequestPath, + []interface{}{"test_ns/", "/v1/request/path"}, + }, + { + "by_lease", + IndexNameLease, + []interface{}{"path/to/test_lease"}, + }, + { + "by_token_parent", + IndexNameTokenParent, + []interface{}{"test_token_parent"}, + }, + { + "by_lease_token", + IndexNameLeaseToken, + []interface{}{"test_lease_token"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := cache.GetByPrefix(tc.indexName, tc.indexValues...) 
+ if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal([]*Index{in, in2}, out); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestCacheMemDB_Set(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + index *Index + wantErr bool + }{ + { + "nil", + nil, + true, + }, + { + "empty_fields", + &Index{}, + true, + }, + { + "missing_required_fields", + &Index{ + Lease: "foo", + }, + true, + }, + { + "all_fields", + &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_accessor", + Lease: "test_lease", + RenewCtxInfo: testContextInfo(), + }, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if err := cache.Set(tc.index); (err != nil) != tc.wantErr { + t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr) + } + }) + } +} + +func TestCacheMemDB_Evict(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test on empty cache + if err := cache.Evict(IndexNameID, "foo"); err != nil { + t.Fatal(err) + } + + testIndex := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_token_accessor", + Lease: "test_lease", + RenewCtxInfo: testContextInfo(), + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + insertIndex *Index + wantErr bool + }{ + { + "empty_params", + "", + []interface{}{""}, + nil, + true, + }, + { + "invalid_params", + "foo", + []interface{}{"bar"}, + nil, + true, + }, + { + "by_id", + "id", + []interface{}{"test_id"}, + testIndex, + false, + }, + { + "by_request_path", + "request_path", + []interface{}{"test_ns/", "/v1/request/path"}, + testIndex, + false, + }, + { + "by_token", + "token", + []interface{}{"test_token"}, + testIndex, + false, + }, + { + "by_token_accessor", + "token_accessor", + []interface{}{"test_accessor"}, + testIndex, + false, + }, + { + "by_lease", + "lease", + []interface{}{"test_lease"}, + testIndex, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.insertIndex != nil { + if err := cache.Set(tc.insertIndex); err != nil { + t.Fatal(err) + } + } + + if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr { + t.Fatal(err) + } + + // Verify that the cache doesn't contain the entry any more + index, err := cache.Get(tc.indexName, tc.indexValues...) + if err != ErrCacheItemNotFound && !tc.wantErr { + t.Fatal("expected cache item to be not found", err) + } + if index != nil { + t.Fatalf("expected nil entry, got = %#v", index) + } + }) + } +} + +func TestCacheMemDB_Flush(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Token: "test_token", + Lease: "test_lease", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Reset the cache + if err := cache.Flush(); err != nil { + t.Fatal(err) + } + + // Check the cache doesn't contain inserted index + out, err := cache.Get(IndexNameID, "test_id") + if err != ErrCacheItemNotFound { + t.Fatal("expected cache item to be not found", err) + } + if out != nil { + t.Fatalf("expected cache to be empty, got = %v", out) + } +} + +// TestCacheMemDB_EvictCapabilitiesIndex tests EvictCapabilitiesIndex works as expected. 
+func TestCacheMemDB_EvictCapabilitiesIndex(t *testing.T) {
+	cache, err := New()
+	require.Nil(t, err)
+
+	// Test on empty cache
+	err = cache.EvictCapabilitiesIndex(IndexNameID, "foo")
+	require.Nil(t, err)
+
+	capabilitiesIndex := &CapabilitiesIndex{
+		ID:    "id",
+		Token: "token",
+	}
+
+	err = cache.SetCapabilitiesIndex(capabilitiesIndex)
+	require.Nil(t, err)
+
+	err = cache.EvictCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID)
+	require.Nil(t, err)
+
+	// Verify that the cache doesn't contain the entry anymore
+	index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID)
+	require.Equal(t, ErrCacheItemNotFound, err)
+	require.Nil(t, index)
+}
+
+// TestCacheMemDB_GetCapabilitiesIndex tests GetCapabilitiesIndex works as expected.
+func TestCacheMemDB_GetCapabilitiesIndex(t *testing.T) {
+	cache, err := New()
+	require.Nil(t, err)
+
+	capabilitiesIndex := &CapabilitiesIndex{
+		ID:    "id",
+		Token: "token",
+	}
+
+	err = cache.SetCapabilitiesIndex(capabilitiesIndex)
+	require.Nil(t, err)
+
+	// Verify that we can retrieve the index
+	index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID)
+	require.Nil(t, err)
+	require.Equal(t, capabilitiesIndex, index)
+
+	// Verify behaviour on a non-existing ID
+	index, err = cache.GetCapabilitiesIndex(IndexNameID, "not a real id")
+	require.Equal(t, ErrCacheItemNotFound, err)
+	require.Nil(t, index)
+
+	// Verify behaviour with a non-existing index name
+	index, err = cache.GetCapabilitiesIndex("not a real name", capabilitiesIndex.ID)
+	require.NotNil(t, err)
+}
+
+// TestCacheMemDB_SetCapabilitiesIndex tests SetCapabilitiesIndex works as expected.
+func TestCacheMemDB_SetCapabilitiesIndex(t *testing.T) {
+	cache, err := New()
+	require.Nil(t, err)
+
+	capabilitiesIndex := &CapabilitiesIndex{
+		ID:    "id",
+		Token: "token",
+	}
+
+	err = cache.SetCapabilitiesIndex(capabilitiesIndex)
+	require.Nil(t, err)
+
+	// Verify we can retrieve the index
+	index, err := cache.GetCapabilitiesIndex(IndexNameID, capabilitiesIndex.ID)
+	require.Nil(t, err)
+	require.Equal(t, capabilitiesIndex, index)
+
+	// Verify behaviour on a nil index
+	err = cache.SetCapabilitiesIndex(nil)
+	require.NotNil(t, err)
+
+	// Verify behaviour on an index without an ID
+	err = cache.SetCapabilitiesIndex(&CapabilitiesIndex{
+		Token: "token",
+	})
+	require.NotNil(t, err)
+
+	// Verify behaviour on an index with only an ID
+	err = cache.SetCapabilitiesIndex(&CapabilitiesIndex{
+		ID: "id",
+	})
+	require.Nil(t, err)
+}
diff --git a/command/agentproxyshared/cache/cachememdb/index.go b/command/agentproxyshared/cache/cachememdb/index.go
new file mode 100644
index 000000000000..484409a57954
--- /dev/null
+++ b/command/agentproxyshared/cache/cachememdb/index.go
@@ -0,0 +1,225 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cachememdb
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"sync"
+	"time"
+)
+
+// Index holds the response to be cached along with multiple other values that
+// serve as pointers to refer back to this index.
+type Index struct {
+	// ID is a value that uniquely represents the request held by this
+	// index. This is computed by serializing and hashing the request object.
+	// Required: true, Unique: true
+	ID string
+
+	// Token is the token that fetched the response held by this index
+	// Required: true, Unique: true
+	Token string
+
+	// Tokens is a set of tokens that can access this cached response,
+	// which is used for static secret caching, enabling multiple
+	// tokens to access the same cache entry for static secrets.
+	// Implemented as a map so that all values are unique.
+	// Required: false, Unique: false
+	Tokens map[string]struct{}
+
+	// TokenParent is the parent token of the token held by this index
+	// Required: false, Unique: false
+	TokenParent string
+
+	// TokenAccessor is the accessor of the token being cached in this index
+	// Required: true, Unique: true
+	TokenAccessor string
+
+	// Namespace is the namespace that was provided in the request path as the
+	// Vault namespace to query
+	Namespace string
+
+	// RequestPath is the path of the request that resulted in the response
+	// held by this index.
+	// For dynamic secrets, this will be the actual path sent in the request,
+	// e.g. /v1/foo/bar (which will not include the namespace if it was included
+	// in the headers).
+	// For static secrets, this will be the canonical path to the secret (i.e.
+	// after calling getStaticSecretPathFromRequest--see its godocs for more
+	// information).
+	// Required: true, Unique: false
+	RequestPath string
+
+	// Lease is the identifier of the lease in Vault, that belongs to the
+	// response held by this index.
+	// Required: false, Unique: true
+	Lease string
+
+	// LeaseToken is the identifier of the token that created the lease held by
+	// this index.
+	// Required: false, Unique: false
+	LeaseToken string
+
+	// Response is the serialized response object that the agent is caching.
+	Response []byte
+
+	// RenewCtxInfo holds the context and the corresponding cancel func for the
+	// goroutine that manages the renewal of the secret belonging to the
+	// response in this index.
+	RenewCtxInfo *ContextInfo
+
+	// RequestMethod is the HTTP method of the request
+	RequestMethod string
+
+	// RequestToken is the token used in the request
+	RequestToken string
+
+	// RequestHeader is the header used in the request
+	RequestHeader http.Header
+
+	// LastRenewed is the timestamp of last renewal
+	LastRenewed time.Time
+
+	// Type is the index type (token, auth-lease, secret-lease, static-secret)
+	Type string
+
+	// IndexLock is a lock held for some indexes to prevent data
+	// races upon update.
+	IndexLock sync.RWMutex
+}
+
+// CapabilitiesIndex holds the capabilities for cached static secrets.
+// This type of index does not represent a response.
+type CapabilitiesIndex struct {
+	// ID is a value that uniquely identifies this index. This is computed
+	// by hashing the token whose capabilities this index represents.
+	// Required: true, Unique: true
+	ID string
+
+	// Token is the token whose capabilities are cached in this index
+	// Required: true, Unique: true
+	Token string
+
+	// ReadablePaths is a set of paths with read capabilities for the given token.
+	// Implemented as a map for uniqueness. The key to the map is a path (such as
+	// `foo/bar`) that we've demonstrated we can read.
+	ReadablePaths map[string]struct{}
+
+	// IndexLock is a lock held for some indexes to prevent data
+	// races upon update.
+	IndexLock sync.RWMutex
+}
+
+type IndexName uint32
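A small sketch of how a consumer might consult `ReadablePaths` under the lock; it assumes package cachememdb, and the path key is hypothetical:

```go
func tokenCanRead(idx *CapabilitiesIndex, path string) bool {
	idx.IndexLock.RLock()
	defer idx.IndexLock.RUnlock()
	_, ok := idx.ReadablePaths[path] // e.g. path == "ns1/secret/data/app"
	return ok
}
```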
+
+const (
+	// IndexNameID is the ID of the index constructed from the serialized request.
+	IndexNameID = "id"
+
+	// IndexNameLease is the lease of the index.
+	IndexNameLease = "lease"
+
+	// IndexNameRequestPath is the request path of the index.
+	IndexNameRequestPath = "request_path"
+
+	// IndexNameToken is the token of the index.
+	IndexNameToken = "token"
+
+	// IndexNameTokenAccessor is the token accessor of the index.
+	IndexNameTokenAccessor = "token_accessor"
+
+	// IndexNameTokenParent is the token parent of the index.
+	IndexNameTokenParent = "token_parent"
+
+	// IndexNameLeaseToken is the token that created the lease.
+	IndexNameLeaseToken = "lease_token"
+
+	// CapabilitiesIndexNameID is the ID of the capabilities index.
+	CapabilitiesIndexNameID = "id"
+)
+
+func validIndexName(indexName string) bool {
+	switch indexName {
+	case IndexNameID:
+	case IndexNameLease:
+	case IndexNameRequestPath:
+	case IndexNameToken:
+	case IndexNameTokenAccessor:
+	case IndexNameTokenParent:
+	case IndexNameLeaseToken:
+	default:
+		return false
+	}
+	return true
+}
+
+func validCapabilitiesIndexName(indexName string) bool {
+	switch indexName {
+	case CapabilitiesIndexNameID:
+	default:
+		return false
+	}
+	return true
+}
+
+type ContextInfo struct {
+	Ctx        context.Context
+	CancelFunc context.CancelFunc
+	DoneCh     chan struct{}
+}
+
+func NewContextInfo(ctx context.Context) *ContextInfo {
+	if ctx == nil {
+		return nil
+	}
+
+	ctxInfo := new(ContextInfo)
+	ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx)
+	ctxInfo.DoneCh = make(chan struct{})
+	return ctxInfo
+}
+
+// Serialize returns a JSON-marshaled Index object, without the RenewCtxInfo
+func (i Index) Serialize() ([]byte, error) {
+	i.RenewCtxInfo = nil
+
+	indexBytes, err := json.Marshal(i)
+	if err != nil {
+		return nil, err
+	}
+
+	return indexBytes, nil
+}
+
+// Deserialize converts JSON bytes to an Index object
+// Note: RenewCtxInfo will need to be reconstructed elsewhere.
+func Deserialize(indexBytes []byte) (*Index, error) {
+	index := new(Index)
+	if err := json.Unmarshal(indexBytes, index); err != nil {
+		return nil, err
+	}
+	return index, nil
+}
+
+// SerializeCapabilitiesIndex returns a JSON-marshaled CapabilitiesIndex object
+func (i CapabilitiesIndex) SerializeCapabilitiesIndex() ([]byte, error) {
+	indexBytes, err := json.Marshal(i)
+	if err != nil {
+		return nil, err
+	}
+
+	return indexBytes, nil
+}
+
+// DeserializeCapabilitiesIndex converts JSON bytes to a CapabilitiesIndex object
+func DeserializeCapabilitiesIndex(indexBytes []byte) (*CapabilitiesIndex, error) {
+	index := new(CapabilitiesIndex)
+	if err := json.Unmarshal(indexBytes, index); err != nil {
+		return nil, err
+	}
+	return index, nil
+}
diff --git a/command/agent/cache/cachememdb/index_test.go b/command/agentproxyshared/cache/cachememdb/index_test.go
similarity index 89%
rename from command/agent/cache/cachememdb/index_test.go
rename to command/agentproxyshared/cache/cachememdb/index_test.go
index 577e37d647cd..7b348e3402bc 100644
--- a/command/agent/cache/cachememdb/index_test.go
+++ b/command/agentproxyshared/cache/cachememdb/index_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package cachememdb import ( @@ -14,6 +17,7 @@ func TestSerializeDeserialize(t *testing.T) { testIndex := &Index{ ID: "testid", Token: "testtoken", + Tokens: map[string]struct{}{"token1": {}, "token2": {}}, TokenParent: "parent token", TokenAccessor: "test accessor", Namespace: "test namespace", diff --git a/command/agentproxyshared/cache/enforceconsistency_enumer.go b/command/agentproxyshared/cache/enforceconsistency_enumer.go new file mode 100644 index 000000000000..e2354111df3d --- /dev/null +++ b/command/agentproxyshared/cache/enforceconsistency_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=EnforceConsistency -trimprefix=EnforceConsistency"; DO NOT EDIT. + +package cache + +import ( + "fmt" +) + +const _EnforceConsistencyName = "NeverAlways" + +var _EnforceConsistencyIndex = [...]uint8{0, 5, 11} + +func (i EnforceConsistency) String() string { + if i < 0 || i >= EnforceConsistency(len(_EnforceConsistencyIndex)-1) { + return fmt.Sprintf("EnforceConsistency(%d)", i) + } + return _EnforceConsistencyName[_EnforceConsistencyIndex[i]:_EnforceConsistencyIndex[i+1]] +} + +var _EnforceConsistencyValues = []EnforceConsistency{0, 1} + +var _EnforceConsistencyNameToValueMap = map[string]EnforceConsistency{ + _EnforceConsistencyName[0:5]: 0, + _EnforceConsistencyName[5:11]: 1, +} + +// EnforceConsistencyString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func EnforceConsistencyString(s string) (EnforceConsistency, error) { + if val, ok := _EnforceConsistencyNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to EnforceConsistency values", s) +} + +// EnforceConsistencyValues returns all values of the enum +func EnforceConsistencyValues() []EnforceConsistency { + return _EnforceConsistencyValues +} + +// IsAEnforceConsistency returns "true" if the value is listed in the enum definition. "false" otherwise +func (i EnforceConsistency) IsAEnforceConsistency() bool { + for _, v := range _EnforceConsistencyValues { + if i == v { + return true + } + } + return false +} diff --git a/command/agent/cache/handler.go b/command/agentproxyshared/cache/handler.go similarity index 97% rename from command/agent/cache/handler.go rename to command/agentproxyshared/cache/handler.go index e634174c61ae..25acaee01cb8 100644 --- a/command/agent/cache/handler.go +++ b/command/agentproxyshared/cache/handler.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cache import ( @@ -15,7 +18,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) diff --git a/command/agentproxyshared/cache/keymanager/manager.go b/command/agentproxyshared/cache/keymanager/manager.go new file mode 100644 index 000000000000..46fc499d2e14 --- /dev/null +++ b/command/agentproxyshared/cache/keymanager/manager.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package keymanager + +import ( + "context" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" +) + +const ( + KeyID = "root" +) + +type KeyManager interface { + // Returns a wrapping.Wrapper which can be used to perform key-related operations. 
+ Wrapper() wrapping.Wrapper + // RetrievalToken is the material returned which can be used to source back the + // encryption key. Depending on the implementation, the token can be the + // encryption key itself or a token/identifier used to exchange the token. + RetrievalToken(ctx context.Context) ([]byte, error) +} diff --git a/command/agent/cache/keymanager/passthrough.go b/command/agentproxyshared/cache/keymanager/passthrough.go similarity index 96% rename from command/agent/cache/keymanager/passthrough.go rename to command/agentproxyshared/cache/keymanager/passthrough.go index 68a1fc221b62..f88d2787a725 100644 --- a/command/agent/cache/keymanager/passthrough.go +++ b/command/agentproxyshared/cache/keymanager/passthrough.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package keymanager import ( diff --git a/command/agent/cache/keymanager/passthrough_test.go b/command/agentproxyshared/cache/keymanager/passthrough_test.go similarity index 93% rename from command/agent/cache/keymanager/passthrough_test.go rename to command/agentproxyshared/cache/keymanager/passthrough_test.go index 084a71a143f2..b3dc9b72525c 100644 --- a/command/agent/cache/keymanager/passthrough_test.go +++ b/command/agentproxyshared/cache/keymanager/passthrough_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package keymanager import ( diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agentproxyshared/cache/lease_cache.go new file mode 100644 index 000000000000..6e8b564a620a --- /dev/null +++ b/command/agentproxyshared/cache/lease_cache.go @@ -0,0 +1,1753 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "bufio" + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/helper/namespace" + nshelper "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/cryptoutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" + gocache "github.com/patrickmn/go-cache" + "go.uber.org/atomic" +) + +const ( + vaultPathTokenCreate = "/v1/auth/token/create" + vaultPathTokenRevoke = "/v1/auth/token/revoke" + vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self" + vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor" + vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan" + vaultPathTokenLookup = "/v1/auth/token/lookup" + vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self" + vaultPathTokenRenew = "/v1/auth/token/renew" + vaultPathTokenRenewSelf = "/v1/auth/token/renew-self" + vaultPathLeaseRevoke = "/v1/sys/leases/revoke" + vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force" + vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix" +) + +var ( + contextIndexID = contextIndex{} + errInvalidType = errors.New("invalid type provided") + revocationPaths = []string{ + strings.TrimPrefix(vaultPathTokenRevoke, "/v1"), + 
strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"), + strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"), + strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"), + strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"), + } +) + +type contextIndex struct{} + +type cacheClearRequest struct { + Type string `json:"type"` + Value string `json:"value"` + Namespace string `json:"namespace"` +} + +// LeaseCache is an implementation of Proxier that handles +// the caching of responses. It passes the incoming request +// to an underlying Proxier implementation. +type LeaseCache struct { + client *api.Client + proxier Proxier + logger hclog.Logger + db *cachememdb.CacheMemDB + baseCtxInfo *cachememdb.ContextInfo + l *sync.RWMutex + + // userAgentToUse is the user agent to use when making independent requests + // to Vault. + userAgentToUse string + + // idLocks is used during cache lookup to ensure that identical requests made + // in parallel won't trigger multiple renewal goroutines. + idLocks []*locksutil.LockEntry + + // inflightCache keeps track of inflight requests + inflightCache *gocache.Cache + + // ps is the persistent storage for tokens and leases + ps *cacheboltdb.BoltStorage + + // shuttingDown is used to determine if cache needs to be evicted or not + // when the context is cancelled + shuttingDown atomic.Bool + + // cacheStaticSecrets is used to determine if the cache should also + // cache static secrets, as well as dynamic secrets. + cacheStaticSecrets bool + + // cacheDynamicSecrets is used to determine if the cache should + // cache dynamic secrets + cacheDynamicSecrets bool + + // capabilityManager is used when static secrets are enabled to + // manage the capabilities of cached tokens. + capabilityManager *StaticSecretCapabilityManager +} + +// LeaseCacheConfig is the configuration for initializing a new +// LeaseCache. +type LeaseCacheConfig struct { + Client *api.Client + BaseContext context.Context + Proxier Proxier + Logger hclog.Logger + UserAgentToUse string + Storage *cacheboltdb.BoltStorage + CacheStaticSecrets bool + CacheDynamicSecrets bool +} + +type inflightRequest struct { + // ch is closed by the request that ends up processing the set of + // parallel request + ch chan struct{} + + // remaining is the number of remaining inflight request that needs to + // be processed before this object can be cleaned up + remaining *atomic.Uint64 +} + +func newInflightRequest() *inflightRequest { + return &inflightRequest{ + ch: make(chan struct{}), + remaining: atomic.NewUint64(0), + } +} + +// NewLeaseCache creates a new instance of a LeaseCache. 
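A wiring sketch for the constructor that follows, under the assumption that a client, proxier, and logger already exist; the user-agent string here is a placeholder, not the value the agent actually sends:

```go
func newProxyCache(ctx context.Context, client *api.Client, proxier Proxier, logger hclog.Logger) (*LeaseCache, error) {
	return NewLeaseCache(&LeaseCacheConfig{
		Client:              client,
		BaseContext:         ctx,
		Proxier:             proxier,
		Logger:              logger.Named("cache.leasecache"),
		UserAgentToUse:      "example-proxy/0.0", // placeholder UA string
		CacheStaticSecrets:  true,
		CacheDynamicSecrets: true,
		// Storage is optional here; it can also be supplied later via
		// SetPersistentStorage.
	})
}
```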
+func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) {
+	if conf == nil {
+		return nil, errors.New("nil configuration provided")
+	}
+
+	if conf.Proxier == nil || conf.Logger == nil {
+		return nil, fmt.Errorf("missing required configuration params: %v", conf)
+	}
+
+	if conf.Client == nil {
+		return nil, fmt.Errorf("nil API client")
+	}
+
+	if conf.UserAgentToUse == "" {
+		return nil, fmt.Errorf("no user agent specified -- see useragent.go")
+	}
+
+	db, err := cachememdb.New()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a base context for the lease cache layer
+	baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext)
+
+	return &LeaseCache{
+		client:              conf.Client,
+		proxier:             conf.Proxier,
+		logger:              conf.Logger,
+		userAgentToUse:      conf.UserAgentToUse,
+		db:                  db,
+		baseCtxInfo:         baseCtxInfo,
+		l:                   &sync.RWMutex{},
+		idLocks:             locksutil.CreateLocks(),
+		inflightCache:       gocache.New(gocache.NoExpiration, gocache.NoExpiration),
+		ps:                  conf.Storage,
+		cacheStaticSecrets:  conf.CacheStaticSecrets,
+		cacheDynamicSecrets: conf.CacheDynamicSecrets,
+	}, nil
+}
+
+// SetCapabilityManager is a setter for CapabilityManager. If set, it will manage
+// capabilities for capability indexes.
+func (c *LeaseCache) SetCapabilityManager(capabilityManager *StaticSecretCapabilityManager) {
+	c.capabilityManager = capabilityManager
+}
+
+// SetShuttingDown is a setter for the shuttingDown field
+func (c *LeaseCache) SetShuttingDown(in bool) {
+	c.shuttingDown.Store(in)
+
+	// Since we're shutting down, also stop the capability manager's jobs.
+	// We can do this forcibly since there's no reason to update
+	// the cache when we're shutting down.
+	if c.capabilityManager != nil {
+		c.capabilityManager.Stop()
+	}
+}
+
+// SetPersistentStorage is a setter for the persistent storage field in
+// LeaseCache
+func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) {
+	c.ps = storageIn
+}
+
+// PersistentStorage is a getter for the persistent storage field in
+// LeaseCache
+func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage {
+	return c.ps
+}
+
+// checkCacheForDynamicSecretRequest checks the cache for a particular request based on its
+// computed ID. It returns a non-nil *SendResponse if an entry is found.
+func (c *LeaseCache) checkCacheForDynamicSecretRequest(id string) (*SendResponse, error) {
+	return c.checkCacheForRequest(id, nil)
+}
+
+// checkCacheForStaticSecretRequest checks the cache for a particular request based on its
+// computed ID. It returns a non-nil *SendResponse if an entry is found.
+// If a request is provided, it will validate that the request's token is allowed
+// to retrieve this cache entry, and return nil if it isn't.
+func (c *LeaseCache) checkCacheForStaticSecretRequest(id string, req *SendRequest) (*SendResponse, error) {
+	return c.checkCacheForRequest(id, req)
+}
+
+// checkCacheForRequest checks the cache for a particular request based on its
+// computed ID. It returns a non-nil *SendResponse if an entry is found.
+// If a token is provided, it will validate that the token is allowed to retrieve this
+// cache entry, and return nil if it isn't.
+func (c *LeaseCache) checkCacheForRequest(id string, req *SendRequest) (*SendResponse, error) { + index, err := c.db.Get(cachememdb.IndexNameID, id) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return nil, nil + } + if err != nil { + return nil, err + } + + index.IndexLock.RLock() + defer index.IndexLock.RUnlock() + + var token string + if req != nil { + // Req will be non-nil if we're checking for a static secret. + // Token might still be "" if it's going to an unauthenticated + // endpoint, or similar. For static secrets, we only care about + // requests with tokens attached, as KV is authenticated. + token = req.Token + } + + if token != "" { + // We are checking for a static secret. We need to ensure that this token + // has previously demonstrated access to this static secret. + // We could check the capabilities cache here, but since these + // indexes should be in sync, this saves us an extra cache get. + if _, ok := index.Tokens[token]; !ok { + // We don't have access to this static secret, so + // we do not return the cached response. + return nil, nil + } + } + + // Cached request is found, deserialize the response + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + c.logger.Error("failed to deserialize response", "error", err) + return nil, err + } + + sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) + if err != nil { + c.logger.Error("failed to create new send response", "error", err) + return nil, err + } + sendResp.CacheMeta.Hit = true + + respTime, err := http.ParseTime(resp.Header.Get("Date")) + if err != nil { + c.logger.Error("failed to parse cached response date", "error", err) + return nil, err + } + sendResp.CacheMeta.Age = time.Now().Sub(respTime) + + return sendResp, nil +} + +// Send performs a cache lookup on the incoming request. If it's a cache hit, +// it will return the cached response, otherwise it will delegate to the +// underlying Proxier and cache the received response. +func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + // Compute the index ID for both static and dynamic secrets. + // The primary difference is that for dynamic secrets, the + // Vault token forms part of the index. + dynamicSecretCacheId, err := computeIndexID(req) + if err != nil { + c.logger.Error("failed to compute cache key", "error", err) + return nil, err + } + staticSecretCacheId := computeStaticSecretCacheIndex(req) + + // Check the inflight cache to see if there are other inflight requests + // of the same kind, based on the computed ID. If so, we increment a counter + + // Note: we lock both the dynamic secret cache ID and the static secret cache ID + // as at this stage, we don't know what kind of secret it is. + var inflight *inflightRequest + + defer func() { + // Cleanup on the cache if there are no remaining inflight requests. + // This is the last step, so we defer the call first + if inflight != nil && inflight.remaining.Load() == 0 { + c.inflightCache.Delete(dynamicSecretCacheId) + if staticSecretCacheId != "" { + c.inflightCache.Delete(staticSecretCacheId) + } + } + }() + + idLockDynamicSecret := locksutil.LockForKey(c.idLocks, dynamicSecretCacheId) + + // Briefly grab an ID-based lock in here to emulate a load-or-store behavior + // and prevent concurrent cacheable requests from being proxied twice if + // they both miss the cache due to it being clean when peeking the cache + // entry. 
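The "load-or-store" emulation described in that comment, reduced to a runnable toy. A plain mutex and map stand in for the per-key `locksutil` locks and the go-cache instance used in the real code:

```go
package main

import (
	"fmt"
	"sync"
)

// loadOrStore peeks the inflight map under the lock and stores a fresh
// channel only when no entry exists, so exactly one caller becomes the
// "leader" that proxies the request; followers wait on the channel.
func loadOrStore(mu *sync.Mutex, inflight map[string]chan struct{}, id string) (ch chan struct{}, leader bool) {
	mu.Lock()
	defer mu.Unlock()
	if existing, ok := inflight[id]; ok {
		return existing, false // follower: wait for the leader's channel to close
	}
	ch = make(chan struct{})
	inflight[id] = ch
	return ch, true // leader: proxy the request, then close(ch)
}

func main() {
	var mu sync.Mutex
	inflight := map[string]chan struct{}{}

	_, leader := loadOrStore(&mu, inflight, "req-1")
	fmt.Println(leader) // true: first caller proxies the request

	_, leader = loadOrStore(&mu, inflight, "req-1")
	fmt.Println(leader) // false: the duplicate joins the in-flight request
}
```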
+ idLockDynamicSecret.Lock() + inflightRaw, found := c.inflightCache.Get(dynamicSecretCacheId) + if found { + idLockDynamicSecret.Unlock() + inflight = inflightRaw.(*inflightRequest) + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + // If found it means that there's an inflight request being processed. + // We wait until that's finished before proceeding further. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.ch: + } + } else { + if inflight == nil { + inflight = newInflightRequest() + inflight.remaining.Inc() + defer inflight.remaining.Dec() + defer close(inflight.ch) + } + + c.inflightCache.Set(dynamicSecretCacheId, inflight, gocache.NoExpiration) + idLockDynamicSecret.Unlock() + } + + if staticSecretCacheId != "" { + idLockStaticSecret := locksutil.LockForKey(c.idLocks, staticSecretCacheId) + + // Briefly grab an ID-based lock in here to emulate a load-or-store behavior + // and prevent concurrent cacheable requests from being proxied twice if + // they both miss the cache due to it being clean when peeking the cache + // entry. + idLockStaticSecret.Lock() + inflightRaw, found = c.inflightCache.Get(staticSecretCacheId) + if found { + idLockStaticSecret.Unlock() + inflight = inflightRaw.(*inflightRequest) + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + // If found it means that there's an inflight request being processed. + // We wait until that's finished before proceeding further. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.ch: + } + } else { + if inflight == nil { + inflight = newInflightRequest() + inflight.remaining.Inc() + defer inflight.remaining.Dec() + defer close(inflight.ch) + } + + c.inflightCache.Set(staticSecretCacheId, inflight, gocache.NoExpiration) + idLockStaticSecret.Unlock() + } + } + + // Check if the response for this request is already in the dynamic secret cache + cachedResp, err := c.checkCacheForDynamicSecretRequest(dynamicSecretCacheId) + if err != nil { + return nil, err + } + if cachedResp != nil { + c.logger.Debug("returning cached dynamic secret response", "path", req.Request.URL.Path) + return cachedResp, nil + } + + // Check if the response for this request is already in the static secret cache + if staticSecretCacheId != "" && req.Request.Method == http.MethodGet && req.Token != "" { + cachedResp, err = c.checkCacheForStaticSecretRequest(staticSecretCacheId, req) + if err != nil { + return nil, err + } + if cachedResp != nil { + c.logger.Debug("returning cached static secret response", "id", staticSecretCacheId, "path", req.Request.URL.Path) + return cachedResp, nil + } + } + + c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Pass the request down and get a response + resp, err := c.proxier.Send(ctx, req) + if err != nil { + return resp, err + } + + // If this is a non-2xx or if the returned response does not contain JSON payload, + // we skip caching + if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { + return resp, err + } + + // Get the namespace from the request header + namespace := req.Request.Header.Get(consts.NamespaceHeaderName) + // We need to populate an empty value since go-memdb will skip over indexes + // that contain empty values. 
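Why that default matters, in a runnable sketch: on my reading of go-memdb, a `StringFieldIndex` produces no value for an empty field, so an object with an empty Namespace cannot be inserted under a non-AllowMissing compound index like `request_path`. The schema below is a cut-down imitation of the one in cachememdb, not the real one:

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type entry struct{ ID, Namespace, RequestPath string }

func main() {
	schema := &memdb.DBSchema{Tables: map[string]*memdb.TableSchema{
		"indexer": {Name: "indexer", Indexes: map[string]*memdb.IndexSchema{
			"id": {Name: "id", Unique: true, Indexer: &memdb.StringFieldIndex{Field: "ID"}},
			"request_path": {Name: "request_path", Indexer: &memdb.CompoundIndex{
				Indexes: []memdb.Indexer{
					&memdb.StringFieldIndex{Field: "Namespace"},
					&memdb.StringFieldIndex{Field: "RequestPath"},
				},
			}},
		}},
	}}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true)
	// Namespace is empty, so the compound request_path index has no value
	// for this object and the insert should be rejected.
	err = txn.Insert("indexer", &entry{ID: "x", Namespace: "", RequestPath: "/v1/x"})
	fmt.Println(err)
}
```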
+ if namespace == "" { + namespace = "root/" + } + + // Build the index to cache based on the response received + index := &cachememdb.Index{ + Namespace: namespace, + RequestPath: req.Request.URL.Path, + LastRenewed: time.Now().UTC(), + } + + secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) + if err != nil { + c.logger.Error("failed to parse response as secret", "error", err) + return nil, err + } + + isRevocation, err := c.handleRevocationRequest(ctx, req, resp) + if err != nil { + c.logger.Error("failed to process the response", "error", err) + return nil, err + } + + // If this is a revocation request, do not go through cache logic. + if isRevocation { + return resp, nil + } + + // Fast path for responses with no secrets + if secret == nil { + c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // There shouldn't be a situation where secret.MountType == "kv" and + // staticSecretCacheId == "", but just in case. + // We restrict this to GETs as those are all we want to cache. + if c.cacheStaticSecrets && secret.MountType == "kv" && + staticSecretCacheId != "" && req.Request.Method == http.MethodGet { + index.Type = cacheboltdb.StaticSecretType + index.ID = staticSecretCacheId + // We set the request path to be the canonical static secret path, so that + // two differently shaped (but equivalent) requests to the same path + // will be the same. + // This differs slightly from dynamic secrets, where the /v1/ will be + // included in the request path. + index.RequestPath = getStaticSecretPathFromRequest(req) + + err := c.cacheStaticSecret(ctx, req, resp, index) + if err != nil { + return nil, err + } + return resp, nil + } else { + // Since it's not a static secret, set the ID to be the dynamic id + index.ID = dynamicSecretCacheId + } + + // Short-circuit if we've been configured to not cache dynamic secrets + if !c.cacheDynamicSecrets { + return resp, nil + } + + // Short-circuit if the secret is not renewable + tokenRenewable, err := secret.TokenIsRenewable() + if err != nil { + c.logger.Error("failed to parse renewable param", "error", err) + return nil, err + } + if !secret.Renewable && !tokenRenewable { + c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + var renewCtxInfo *cachememdb.ContextInfo + switch { + case secret.LeaseID != "": + c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // If the lease belongs to a token that is not managed by the lease cache, + // return the response without caching it. 
+ c.logger.Debug("pass-through lease response; token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + if err != nil { + return nil, err + } + + // Derive a context for renewal using the token's context + renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) + + index.Lease = secret.LeaseID + index.LeaseToken = req.Token + + index.Type = cacheboltdb.LeaseType + + case secret.Auth != nil: + c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Check if this token creation request resulted in a non-orphan token, and if so + // correctly set the parentCtx to the request's token context. + var parentCtx context.Context + if !secret.Auth.Orphan { + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // If the lease belongs to a token that is not managed by the lease cache, + // return the response without caching it. + c.logger.Debug("pass-through lease response; parent token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + if err != nil { + return nil, err + } + + c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) + parentCtx = entry.RenewCtxInfo.Ctx + + index.TokenParent = req.Token + } + + renewCtxInfo = c.createCtxInfo(parentCtx) + index.Token = secret.Auth.ClientToken + index.TokenAccessor = secret.Auth.Accessor + + index.Type = cacheboltdb.LeaseType + + default: + // We shouldn't be hitting this, but will err on the side of caution and + // simply proxy. + c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return nil, err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + index.Response = respBytes.Bytes() + + // Store the index ID in the lifetimewatcher context + renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) + + // Store the lifetime watcher context in the index + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: renewCtx, + CancelFunc: renewCtxInfo.CancelFunc, + DoneCh: renewCtxInfo.DoneCh, + } + + // Add extra information necessary for restoring from persisted cache + index.RequestMethod = req.Request.Method + index.RequestToken = req.Token + index.RequestHeader = req.Request.Header + + if index.Type != cacheboltdb.StaticSecretType { + // Store the index in the cache + c.logger.Debug("storing dynamic secret response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path, "id", index.ID) + err = c.Set(ctx, index) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return nil, err + } + + // Start renewing the secret in the response + go c.startRenewing(renewCtx, index, req, secret) + } + + return resp, nil +} + +func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, resp *SendResponse, index *cachememdb.Index) error { + // If a cached version of this secret exists, we now have 
access, so + // we don't need to re-cache, just update index.Tokens + indexFromCache, err := c.db.Get(cachememdb.IndexNameID, index.ID) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + return err + } + + // The index already exists, so all we need to do is add our token + // to the index's allowed token list, then re-store it. + if indexFromCache != nil { + // We must hold a lock for the index while it's being updated. + // We keep the two locking mechanisms distinct, so that it's only writes + // that have to be serial. + indexFromCache.IndexLock.Lock() + defer indexFromCache.IndexLock.Unlock() + indexFromCache.Tokens[req.Token] = struct{}{} + + return c.storeStaticSecretIndex(ctx, req, indexFromCache) + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + index.Response = respBytes.Bytes() + + // Initialize the token map and add this token to it. + index.Tokens = map[string]struct{}{req.Token: {}} + + // Set the index type + index.Type = cacheboltdb.StaticSecretType + + return c.storeStaticSecretIndex(ctx, req, index) +} + +func (c *LeaseCache) storeStaticSecretIndex(ctx context.Context, req *SendRequest, index *cachememdb.Index) error { + // Store the index in the cache + c.logger.Debug("storing static secret response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path, "id", index.ID) + err := c.Set(ctx, index) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return err + } + + capabilitiesIndex, created, err := c.retrieveOrCreateTokenCapabilitiesEntry(req.Token) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return err + } + + path := getStaticSecretPathFromRequest(req) + + // Extra caution -- avoid potential nil + if capabilitiesIndex.ReadablePaths == nil { + capabilitiesIndex.ReadablePaths = make(map[string]struct{}) + } + + // update the index with the new capability: + capabilitiesIndex.ReadablePaths[path] = struct{}{} + + err = c.SetCapabilitiesIndex(ctx, capabilitiesIndex) + if err != nil { + c.logger.Error("failed to cache token capabilities as part of caching the proxied response", "error", err) + return err + } + + // Lastly, ensure that we start renewing this index, if it's new. + // We require the 'created' check so that we don't renew the same + // index multiple times. + if c.capabilityManager != nil && created { + c.capabilityManager.StartRenewingCapabilities(capabilitiesIndex) + } + + return nil +} + +// retrieveOrCreateTokenCapabilitiesEntry will either retrieve the token +// capabilities entry from the cache, or create a new, empty one. +// The bool represents if a new token capability has been created. +func (c *LeaseCache) retrieveOrCreateTokenCapabilitiesEntry(token string) (*cachememdb.CapabilitiesIndex, bool, error) { + // The index ID is a hash of the token. 
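The hashing itself, sketched standalone. Per the code below, the capabilities-index ID is a hex-encoded Blake2b-256 digest of the token; `cryptoutil.Blake2b256Hash` is assumed here to be a thin wrapper over something equivalent to this:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	token := "hvs.example" // made-up token value
	sum := blake2b.Sum256([]byte(token))
	fmt.Println(hex.EncodeToString(sum[:])) // 64 hex chars identifying the token
}
```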
+ indexId := hashStaticSecretIndex(token) + indexFromCache, err := c.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + return nil, false, err + } + + if indexFromCache != nil { + return indexFromCache, false, nil + } + + // Build the index to cache based on the response received + index := &cachememdb.CapabilitiesIndex{ + ID: indexId, + Token: token, + ReadablePaths: make(map[string]struct{}), + } + + return index, true, nil +} + +func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { + if ctx == nil { + c.l.RLock() + ctx = c.baseCtxInfo.Ctx + c.l.RUnlock() + } + return cachememdb.NewContextInfo(ctx) +} + +func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { + defer func() { + id := ctx.Value(contextIndexID).(string) + if c.shuttingDown.Load() { + c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) + return + } + c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) + err := c.Evict(index) + if err != nil { + c.logger.Error("failed to evict index", "id", id, "error", err) + return + } + }() + + client, err := c.client.Clone() + if err != nil { + c.logger.Error("failed to create API client in the lifetime watcher", "error", err) + return + } + client.SetToken(req.Token) + + headers := client.Headers() + if headers == nil { + headers = make(http.Header) + } + + // We do not preserve any initial User-Agent here since these requests are from + // the proxy subsystem, but are made by the lease cache's lifetime watcher, + // not triggered by a specific request. + headers.Set("User-Agent", c.userAgentToUse) + client.SetHeaders(headers) + + watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + c.logger.Error("failed to create secret lifetime watcher", "error", err) + return + } + + c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) + go watcher.Start() + defer watcher.Stop() + + for { + select { + case <-ctx.Done(): + // This is the case which captures context cancellations from token + // and leases. Since all the contexts are derived from the agent's + // context, this will also cover the shutdown scenario. + c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path) + return + case err := <-watcher.DoneCh(): + // This case covers renewal completion and renewal errors + if err != nil { + c.logger.Error("failed to renew secret", "error", err) + return + } + c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path) + return + case <-watcher.RenewCh(): + c.logger.Debug("secret renewed", "path", req.Request.URL.Path) + if c.ps != nil { + if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil { + c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID) + } + } + case <-index.RenewCtxInfo.DoneCh: + // This case indicates the renewal process to shutdown and evict + // the cache entry. This is triggered when a specific secret + // renewal needs to be killed without affecting any of the derived + // context renewals. 
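The context hierarchy that makes this selective shutdown possible, in isolation: each lease context is derived from its token's context (see `cachememdb.NewContextInfo` usage above), so cancelling a token context stops every lease renewal under it, while closing a single index's DoneCh stops only that one:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	tokenCtx, cancelToken := context.WithCancel(context.Background())
	leaseCtx, cancelLease := context.WithCancel(tokenCtx) // lease derives from token
	defer cancelLease()

	cancelToken() // revoking the token...

	<-leaseCtx.Done()
	fmt.Println(leaseCtx.Err()) // context.Canceled: ...stops the lease watcher too
}
```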
+ c.logger.Debug("done channel closed") + return + } + } +} + +func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error { + idLock := locksutil.LockForKey(c.idLocks, index.ID) + idLock.Lock() + defer idLock.Unlock() + + getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID) + if err != nil && err != cachememdb.ErrCacheItemNotFound { + return err + } + index.LastRenewed = t + if err := c.Set(ctx, getIndex); err != nil { + return err + } + return nil +} + +// computeIndexID results in a value that uniquely identifies a request +// received by the agent. It does so by SHA256 hashing the serialized request +// object containing the request path, query parameters and body parameters. +func computeIndexID(req *SendRequest) (string, error) { + var b bytes.Buffer + + cloned := req.Request.Clone(context.Background()) + cloned.Header.Del(vaulthttp.VaultIndexHeaderName) + cloned.Header.Del(vaulthttp.VaultForwardHeaderName) + cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) + // Serialize the request + if err := cloned.Write(&b); err != nil { + return "", fmt.Errorf("failed to serialize request: %v", err) + } + + // Reset the request body after it has been closed by Write + req.Request.Body = io.NopCloser(bytes.NewReader(req.RequestBody)) + + // Append req.Token into the byte slice. This is needed since auto-auth'ed + // requests sets the token directly into SendRequest.Token + if _, err := b.WriteString(req.Token); err != nil { + return "", fmt.Errorf("failed to write token to hash input: %w", err) + } + + return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil +} + +// canonicalizeStaticSecretPath takes an API request path such as +// /v1/foo/bar and a namespace, and turns it into a canonical representation +// of the secret's path in Vault. +// We opt for this form as namespace.Canonicalize returns a namespace in the +// form of "ns1/", so we keep consistent with path canonicalization. +func canonicalizeStaticSecretPath(requestPath string, ns string) string { + // /sys/capabilities accepts both requests that look like foo/bar + // and /foo/bar but not /v1/foo/bar. + // We trim the /v1/ from the start of the URL to get the foo/bar form. + // This means that we can use the paths we retrieve from the + // /sys/capabilities endpoint to access this index + // without having to re-add the /v1/ + path := strings.TrimPrefix(requestPath, "/v1/") + // Trim any leading slashes, as we never want those. + // This ensures /foo/bar gets turned to foo/bar + path = strings.TrimPrefix(path, "/") + + // If a namespace was provided in a way that wasn't directly in the path, + // it must be added to the path. + path = namespace.Canonicalize(ns) + path + + return path +} + +// getStaticSecretPathFromRequest gets the canonical path for a +// request, taking into account intricacies relating to /v1/ and namespaces +// in the header. +// Returns a path like foo/bar or ns1/foo/bar. +// We opt for this form as namespace.Canonicalize returns a namespace in the +// form of "ns1/", so we keep consistent with path canonicalization. +func getStaticSecretPathFromRequest(req *SendRequest) string { + path := req.Request.URL.Path + // Static secrets always have /v1 as a prefix. This enables us to + // enable a pass-through and never attempt to cache or view-from-cache + // any request without the /v1 prefix. 
+	if !strings.HasPrefix(path, "/v1") {
+		return ""
+	}
+	var namespace string
+	if header := req.Request.Header; header != nil {
+		namespace = header.Get(api.NamespaceHeaderName)
+	}
+	return canonicalizeStaticSecretPath(path, namespace)
+}
+
+// hashStaticSecretIndex is a simple function that hashes the path into
+// a cache index ID. This is kept as a helper function for ease of use by
+// downstream functions.
+func hashStaticSecretIndex(unhashedIndex string) string {
+	return hex.EncodeToString(cryptoutil.Blake2b256Hash(unhashedIndex))
+}
+
+// computeStaticSecretCacheIndex results in a value that uniquely identifies a static
+// secret's cached ID. Notably, we intentionally ignore headers (for example,
+// the X-Vault-Token header) to remain agnostic to which token is being
+// used in the request. We care only about the path.
+// This will return "" if the index does not have a /v1 prefix, and therefore
+// cannot be a static secret.
+func computeStaticSecretCacheIndex(req *SendRequest) string {
+	path := getStaticSecretPathFromRequest(req)
+	if path == "" {
+		return path
+	}
+	return hashStaticSecretIndex(path)
+}
+
+// HandleCacheClear returns a handlerFunc that can perform cache clearing operations.
+func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// If the cache is not enabled, return a 200
+		if c == nil {
+			return
+		}
+
+		// Only handle POST/PUT requests
+		switch r.Method {
+		case http.MethodPost:
+		case http.MethodPut:
+		default:
+			return
+		}
+
+		req := new(cacheClearRequest)
+		if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil {
+			if err == io.EOF {
+				err = errors.New("empty JSON provided")
+			}
+			logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err))
+			return
+		}
+
+		c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value)
+
+		in, err := parseCacheClearInput(req)
+		if err != nil {
+			c.logger.Error("unable to parse clear input", "error", err)
+			logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err))
+			return
+		}
+
+		if err := c.handleCacheClear(ctx, in); err != nil {
+			// Default to 500 on error, unless the user provided an invalid type,
+			// which would then be a 400.
+			httpStatus := http.StatusInternalServerError
+			if err == errInvalidType {
+				httpStatus = http.StatusBadRequest
+			}
+			logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err))
+			return
+		}
+
+		return
+	})
+}
+
+func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error {
+	if in == nil {
+		return errors.New("no value(s) provided to clear corresponding cache entries")
+	}
+
+	switch in.Type {
+	case "request_path":
+		// For this particular case, we need to ensure that there are 2 provided
+		// indexers for the proper lookup.
+		if in.RequestPath == "" {
+			return errors.New("request path not provided")
+		}
+
+		// The first value provided for this case will be the namespace, but if it's
+		// an empty value we need to overwrite it with "root/" to ensure proper
+		// cache lookup.
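+		// Note that the GetByPrefix lookup below takes both the namespace and
+		// the request path, so a usable namespace value is required either way.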
+		if in.Namespace == "" {
+			in.Namespace = "root/"
+		}
+
+		// Find all the cached entries which have the given request path and
+		// cancel the contexts of all the respective lifetime watchers
+		indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath)
+		if err != nil {
+			return err
+		}
+		for _, index := range indexes {
+			// If it's a static secret, we must remove directly, as there
+			// is no renew func to cancel.
+			if index.Type == cacheboltdb.StaticSecretType {
+				err = c.db.Evict(cachememdb.IndexNameID, index.ID)
+				if err != nil {
+					return err
+				}
+			} else {
+				if index.RenewCtxInfo != nil {
+					if index.RenewCtxInfo.CancelFunc != nil {
+						index.RenewCtxInfo.CancelFunc()
+					}
+				}
+			}
+		}
+
+	case "token":
+		if in.Token == "" {
+			return errors.New("token not provided")
+		}
+
+		// Get the context for the given token and cancel its context
+		index, err := c.db.Get(cachememdb.IndexNameToken, in.Token)
+		if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		c.logger.Debug("canceling context of index attached to token")
+
+		index.RenewCtxInfo.CancelFunc()
+
+	case "token_accessor":
+		if in.TokenAccessor == "" {
+			return errors.New("token accessor not provided")
+		}
+
+		// Get the cached index and cancel the corresponding lifetime watcher
+		// context
+		index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor)
+		if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		c.logger.Debug("canceling context of index attached to accessor")
+
+		index.RenewCtxInfo.CancelFunc()
+
+	case "lease":
+		if in.Lease == "" {
+			return errors.New("lease not provided")
+		}
+
+		// Get the cached index and cancel the corresponding lifetime watcher
+		// context
+		index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease)
+		if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		c.logger.Debug("canceling context of index attached to lease")
+
+		index.RenewCtxInfo.CancelFunc()
+
+	case "all":
+		// Cancel the base context which triggers all the goroutines to
+		// stop and evict entries from cache.
+		c.logger.Debug("canceling base context")
+		c.l.Lock()
+		c.baseCtxInfo.CancelFunc()
+		// Reset the base context
+		baseCtx, baseCancel := context.WithCancel(ctx)
+		c.baseCtxInfo = &cachememdb.ContextInfo{
+			Ctx:        baseCtx,
+			CancelFunc: baseCancel,
+		}
+		c.l.Unlock()
+
+		// Reset the memdb instance (and persistent storage if enabled)
+		if err := c.Flush(); err != nil {
+			return err
+		}
+
+	default:
+		return errInvalidType
+	}
+
+	c.logger.Debug("successfully cleared matching cache entries")
+
+	return nil
+}
+
+// handleRevocationRequest checks whether the originating request is a
+// revocation request, and if so, performs applicable cache cleanups.
+// Returns true if this is a revocation request.
+func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) {
+	// Lease and token revocations return 204's on success. Fast-path if that's
+	// not the case.
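+	// Any other status means the request either failed or was not a
+	// revocation, so there is nothing to evict.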
+ if resp.Response.StatusCode != http.StatusNoContent { + return false, nil + } + + _, path := deriveNamespaceAndRevocationPath(req) + + switch { + case path == vaultPathTokenRevoke: + // Get the token from the request body + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + tokenRaw, ok := jsonBody["token"] + if !ok { + return false, fmt.Errorf("failed to get token from request body") + } + token, ok := tokenRaw.(string) + if !ok { + return false, fmt.Errorf("expected token in the request body to be string") + } + + // Clear the cache entry associated with the token and all the other + // entries belonging to the leases derived from this token. + in := &cacheClearInput{ + Type: "token", + Token: token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeSelf: + // Clear the cache entry associated with the token and all the other + // entries belonging to the leases derived from this token. + in := &cacheClearInput{ + Type: "token", + Token: req.Token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeAccessor: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + accessorRaw, ok := jsonBody["accessor"] + if !ok { + return false, fmt.Errorf("failed to get accessor from request body") + } + accessor, ok := accessorRaw.(string) + if !ok { + return false, fmt.Errorf("expected accessor in the request body to be string") + } + + in := &cacheClearInput{ + Type: "token_accessor", + TokenAccessor: accessor, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeOrphan: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + tokenRaw, ok := jsonBody["token"] + if !ok { + return false, fmt.Errorf("failed to get token from request body") + } + token, ok := tokenRaw.(string) + if !ok { + return false, fmt.Errorf("expected token in the request body to be string") + } + + // Kill the lifetime watchers of all the leases attached to the revoked + // token + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.RenewCtxInfo.CancelFunc() + } + + // Kill the lifetime watchers of the revoked token + index, err := c.db.Get(cachememdb.IndexNameToken, token) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + return true, nil + } + if err != nil { + return false, err + } + + // Indicate the lifetime watcher goroutine for this index to return. + // This will not affect the child tokens because the context is not + // getting cancelled. + close(index.RenewCtxInfo.DoneCh) + + // Clear the parent references of the revoked token in the entries + // belonging to the child tokens of the revoked token. + indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.TokenParent = "" + err = c.db.Set(index) + if err != nil { + c.logger.Error("failed to persist index", "error", err) + return false, err + } + } + + case path == vaultPathLeaseRevoke: + // TODO: Should lease present in the URL itself be considered here? 
+		// Get the lease from the request body
+		jsonBody := map[string]interface{}{}
+		if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+			return false, err
+		}
+		leaseIDRaw, ok := jsonBody["lease_id"]
+		if !ok {
+			return false, fmt.Errorf("failed to get lease_id from request body")
+		}
+		leaseID, ok := leaseIDRaw.(string)
+		if !ok {
+			return false, fmt.Errorf("expected lease_id in the request body to be a string")
+		}
+		in := &cacheClearInput{
+			Type:  "lease",
+			Lease: leaseID,
+		}
+		if err := c.handleCacheClear(ctx, in); err != nil {
+			return false, err
+		}
+
+	case strings.HasPrefix(path, vaultPathLeaseRevokeForce):
+		// Trim the URL path to get the request path prefix
+		prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce)
+		// Get all the cache indexes that use the request path containing the
+		// prefix and cancel the lifetime watcher context of each.
+		indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+		if err != nil {
+			return false, err
+		}
+
+		_, tokenNSID := namespace.SplitIDFromString(req.Token)
+		for _, index := range indexes {
+			_, leaseNSID := namespace.SplitIDFromString(index.Lease)
+			// Only evict leases that match the token's namespace
+			if tokenNSID == leaseNSID {
+				index.RenewCtxInfo.CancelFunc()
+			}
+		}
+
+	case strings.HasPrefix(path, vaultPathLeaseRevokePrefix):
+		// Trim the URL path to get the request path prefix
+		prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix)
+		// Get all the cache indexes that use the request path containing the
+		// prefix and cancel the lifetime watcher context of each.
+		indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+		if err != nil {
+			return false, err
+		}
+
+		_, tokenNSID := namespace.SplitIDFromString(req.Token)
+		for _, index := range indexes {
+			_, leaseNSID := namespace.SplitIDFromString(index.Lease)
+			// Only evict leases that match the token's namespace
+			if tokenNSID == leaseNSID {
+				index.RenewCtxInfo.CancelFunc()
+			}
+		}
+
+	default:
+		return false, nil
+	}
+
+	c.logger.Debug("triggered caching eviction from revocation request")
+
+	return true, nil
+}
+
+// Set stores the index in the cachememdb, and also stores it in the persistent
+// cache (if enabled)
+func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error {
+	if err := c.db.Set(index); err != nil {
+		return err
+	}
+
+	if c.ps != nil {
+		plaintext, err := index.Serialize()
+		if err != nil {
+			return err
+		}
+
+		if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil {
+			return err
+		}
+		c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID)
+	}
+
+	return nil
+}
+
+// SetCapabilitiesIndex stores the capabilities index in the cachememdb, and also stores it in the persistent
+// cache (if enabled)
+func (c *LeaseCache) SetCapabilitiesIndex(ctx context.Context, index *cachememdb.CapabilitiesIndex) error {
+	if err := c.db.SetCapabilitiesIndex(index); err != nil {
+		return err
+	}
+
+	if c.ps != nil {
+		plaintext, err := index.SerializeCapabilitiesIndex()
+		if err != nil {
+			return err
+		}
+
+		if err := c.ps.Set(ctx, index.ID, plaintext, cacheboltdb.TokenCapabilitiesType); err != nil {
+			return err
+		}
+		c.logger.Trace("set entry in persistent storage", "type", cacheboltdb.TokenCapabilitiesType, "id", index.ID)
+	}
+
+	return nil
+}
+
+// Evict removes an Index from the cachememdb, and also removes it from the
+// persistent cache (if enabled)
+func (c *LeaseCache) Evict(index *cachememdb.Index) error {
+	if err := 
c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil { + return err + } + + if c.ps != nil { + if err := c.ps.Delete(index.ID, index.Type); err != nil { + return err + } + c.logger.Trace("deleted item from persistent storage", "id", index.ID) + } + + return nil +} + +// Flush the cachememdb and persistent cache (if enabled) +func (c *LeaseCache) Flush() error { + if err := c.db.Flush(); err != nil { + return err + } + + if c.ps != nil { + c.logger.Trace("clearing persistent storage") + return c.ps.Clear() + } + + return nil +} + +// Restore loads the cachememdb from the persistent storage passed in. Loads +// tokens first, since restoring a lease's renewal context and watcher requires +// looking up the token in the cachememdb. +// Restore also restarts any capability management for managed static secret +// tokens. +func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { + var errs *multierror.Error + + // Process tokens first + tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + if err := c.restoreTokens(tokens); err != nil { + errs = multierror.Append(errs, err) + } + } + + // Then process leases + leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, lease := range leases { + newIndex, err := cachememdb.Deserialize(lease) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) + + // Check if this lease has already expired + expired, err := c.hasExpired(time.Now().UTC(), newIndex) + if err != nil { + c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) + } + if expired { + continue + } + + if err := c.restoreLeaseRenewCtx(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + if err := c.db.Set(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) + } + } + + // Then process static secrets and their capabilities + if c.cacheStaticSecrets { + staticSecrets, err := storage.GetByType(ctx, cacheboltdb.StaticSecretType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, staticSecret := range staticSecrets { + newIndex, err := cachememdb.Deserialize(staticSecret) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring static secret index", "id", newIndex.ID, "path", newIndex.RequestPath) + if err := c.db.Set(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + } + } + + capabilityIndexes, err := storage.GetByType(ctx, cacheboltdb.TokenCapabilitiesType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, capabilityIndex := range capabilityIndexes { + newIndex, err := cachememdb.DeserializeCapabilitiesIndex(capabilityIndex) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring capability index", "id", newIndex.ID) + if err := c.db.SetCapabilitiesIndex(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + + if c.capabilityManager != nil { + c.capabilityManager.StartRenewingCapabilities(newIndex) + } + } + } + } + + return errs.ErrorOrNil() +} + +func (c *LeaseCache) restoreTokens(tokens [][]byte) error { + var errors *multierror.Error + + for _, 
token := range tokens {
+		newIndex, err := cachememdb.Deserialize(token)
+		if err != nil {
+			errors = multierror.Append(errors, err)
+			continue
+		}
+		newIndex.RenewCtxInfo = c.createCtxInfo(nil)
+		if err := c.db.Set(newIndex); err != nil {
+			errors = multierror.Append(errors, err)
+			continue
+		}
+		c.logger.Trace("restored token", "id", newIndex.ID)
+	}
+
+	return errors.ErrorOrNil()
+}
+
+// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts
+// the watcher goroutine
+func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error {
+	if index.Response == nil {
+		return fmt.Errorf("cached response was nil for %s", index.ID)
+	}
+
+	// Parse the secret to determine which type it is
+	reader := bufio.NewReader(bytes.NewReader(index.Response))
+	resp, err := http.ReadResponse(reader, nil)
+	if err != nil {
+		c.logger.Error("failed to deserialize response", "error", err)
+		return err
+	}
+	secret, err := api.ParseSecret(resp.Body)
+	if err != nil {
+		c.logger.Error("failed to parse response as secret", "error", err)
+		return err
+	}
+
+	var renewCtxInfo *cachememdb.ContextInfo
+	switch {
+	case secret.LeaseID != "":
+		entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken)
+		if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+			return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
+		}
+		if err != nil {
+			return err
+		}
+
+		// Derive a context for renewal using the token's context
+		renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx)
+
+	case secret.Auth != nil:
+		var parentCtx context.Context
+		if !secret.Auth.Orphan {
+			entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken)
+			if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+				// If the parent token is not managed by the cache, the child
+				// shouldn't be either.
+				return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
+			}
+			if err != nil {
+				return err
+			}
+
+			c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath)
+			parentCtx = entry.RenewCtxInfo.Ctx
+		}
+		renewCtxInfo = c.createCtxInfo(parentCtx)
+	default:
+		// This isn't a renewable cache entry, i.e. a static secret cache entry.
+		// We return, because there's nothing to do.
+		return nil
+	}
+
+	renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
+	index.RenewCtxInfo = &cachememdb.ContextInfo{
+		Ctx:        renewCtx,
+		CancelFunc: renewCtxInfo.CancelFunc,
+		DoneCh:     renewCtxInfo.DoneCh,
+	}
+
+	sendReq := &SendRequest{
+		Token: index.RequestToken,
+		Request: &http.Request{
+			Header: index.RequestHeader,
+			Method: index.RequestMethod,
+			URL: &url.URL{
+				Path: index.RequestPath,
+			},
+		},
+	}
+	go c.startRenewing(renewCtx, index, sendReq, secret)
+
+	return nil
+}
+
+// deriveNamespaceAndRevocationPath returns the namespace and relative path for
+// revocation paths.
+//
+// If the path contains a namespace, but it's not a revocation path, it will be
+// returned as-is, since there's no way to tell where the namespace ends and
+// where the request path begins purely based off a string.
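+// Namespaces can therefore only be split out of the path for the known
+// revocation paths checked below, where the path suffix is fixed.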
+//
+// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar
+// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar
+func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
+	namespace := "root/"
+	nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
+	if nsHeader != "" {
+		namespace = nsHeader
+	}
+
+	fullPath := req.Request.URL.Path
+	nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
+
+	for _, pathToCheck := range revocationPaths {
+		// We use strings.Contains here for paths that can contain
+		// vars in the path, e.g. /v1/lease/revoke-prefix/:prefix
+		i := strings.Index(nonVersionedPath, pathToCheck)
+		// If there's no match, move on to the next check
+		if i == -1 {
+			continue
+		}
+
+		// If the index is 0, this is a relative path with no namespace prepended,
+		// so we can break early
+		if i == 0 {
+			break
+		}
+
+		// We need to turn /ns1 into ns1/ to match the canonical namespace form
+		namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
+
+		// If it's root, we replace, otherwise we join
+		if namespace == "root/" {
+			namespace = namespaceInPath
+		} else {
+			namespace = namespace + namespaceInPath
+		}
+
+		return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
+	}
+
+	return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
+}
+
+// RegisterAutoAuthToken adds the provided auto-auth token into the cache. This
+// is primarily used to register the auto-auth token and should only be called
+// within a sink's WriteToken func.
+func (c *LeaseCache) RegisterAutoAuthToken(token string) error {
+	// Get the token from the cache
+	oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token)
+	if err != nil && err != cachememdb.ErrCacheItemNotFound {
+		return err
+	}
+
+	// If the index is found, just keep it in the cache and ignore the incoming
+	// token (since they're the same)
+	if oldIndex != nil {
+		c.logger.Trace("auto-auth token already exists in cache; no need to store it again")
+		return nil
+	}
+
+	// The following randomly generated values are required for the index stored
+	// by the cache, but are not actually used. We use random values to prevent
+	// accidental access.
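+	// Only the Token and Type fields below carry real data; ID, Namespace,
+	// and RequestPath are random placeholders.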
+ id, err := base62.Random(5) + if err != nil { + return err + } + namespace, err := base62.Random(5) + if err != nil { + return err + } + requestPath, err := base62.Random(5) + if err != nil { + return err + } + + index := &cachememdb.Index{ + ID: id, + Token: token, + Namespace: namespace, + RequestPath: requestPath, + Type: cacheboltdb.TokenType, + } + + // Derive a context off of the lease cache's base context + ctxInfo := c.createCtxInfo(nil) + + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: ctxInfo.Ctx, + CancelFunc: ctxInfo.CancelFunc, + DoneCh: ctxInfo.DoneCh, + } + + // Store the index in the cache + c.logger.Debug("storing auto-auth token into the cache") + err = c.Set(c.baseCtxInfo.Ctx, index) + if err != nil { + c.logger.Error("failed to cache the auto-auth token", "error", err) + return err + } + + return nil +} + +type cacheClearInput struct { + Type string + + RequestPath string + Namespace string + Token string + TokenAccessor string + Lease string +} + +func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { + if req == nil { + return nil, errors.New("nil request options provided") + } + + if req.Type == "" { + return nil, errors.New("no type provided") + } + + in := &cacheClearInput{ + Type: req.Type, + Namespace: req.Namespace, + } + + switch req.Type { + case "request_path": + in.RequestPath = req.Value + case "token": + in.Token = req.Value + case "token_accessor": + in.TokenAccessor = req.Value + case "lease": + in.Lease = req.Value + } + + return in, nil +} + +func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + return false, fmt.Errorf("failed to deserialize response: %w", err) + } + secret, err := api.ParseSecret(resp.Body) + if err != nil { + return false, fmt.Errorf("failed to parse response as secret: %w", err) + } + + elapsed := currentTime.Sub(index.LastRenewed) + var leaseDuration int + switch { + case secret.LeaseID != "": + leaseDuration = secret.LeaseDuration + case secret.Auth != nil: + leaseDuration = secret.Auth.LeaseDuration + default: + return false, errors.New("secret without lease encountered in expiration check") + } + + if int(elapsed.Seconds()) > leaseDuration { + c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) + return true, nil + } + return false, nil +} diff --git a/command/agentproxyshared/cache/lease_cache_test.go b/command/agentproxyshared/cache/lease_cache_test.go new file mode 100644 index 000000000000..7bb454828659 --- /dev/null +++ b/command/agentproxyshared/cache/lease_cache_test.go @@ -0,0 +1,1676 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/cryptoutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func testNewLeaseCacheWithDelay(t *testing.T, cacheable bool, delay int) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: &mockDelayProxier{cacheable, delay}, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + + return lc +} + +func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, storage *cacheboltdb.BoltStorage) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + require.NoError(t, err) + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + Storage: storage, + CacheStaticSecrets: true, + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + require.NoError(t, err) + + return lc +} + +func TestCache_ComputeIndexID(t *testing.T) { + tests := []struct { + name string + req *SendRequest + want string + wantErr bool + }{ + { + "basic", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + { + "ignore consistency headers", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + Header: http.Header{ + vaulthttp.VaultIndexHeaderName: []string{"foo"}, + vaulthttp.VaultInconsistentHeaderName: []string{"foo"}, + vaulthttp.VaultForwardHeaderName: []string{"foo"}, + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := computeIndexID(tt.req) + if (err != nil) != tt.wantErr { + t.Errorf("actual_error: %v, expected_error: %v", err, 
tt.wantErr) + return + } + if !reflect.DeepEqual(got, string(tt.want)) { + t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want)) + } + }) + } +} + +// TestCache_ComputeStaticSecretIndexID ensures that +// computeStaticSecretCacheIndex works correctly. If this test breaks, then our +// hashing algorithm has changed, and we risk breaking backwards compatibility. +func TestCache_ComputeStaticSecretIndexID(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/foo/bar", + }, + }, + } + + index := computeStaticSecretCacheIndex(req) + // We expect this to be "", as it doesn't start with /v1 + expectedIndex := "" + require.Equal(t, expectedIndex, index) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + }, + } + + expectedIndex = "b117a962f19f17fa372c8681cadcd6fd370d28ee6e0a7012196b780bef601b53" + index2 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index2) +} + +// Test_GetStaticSecretPathFromRequestNoNamespaces tests that getStaticSecretPathFromRequest +// behaves as expected when no namespaces are involved. +func Test_GetStaticSecretPathFromRequestNoNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + }, + } + + path := getStaticSecretPathFromRequest(req) + require.Equal(t, "foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + // Paths like this are not static secrets, so we should return "" + Path: "foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "", path) +} + +// Test_GetStaticSecretPathFromRequestNamespaces tests that getStaticSecretPathFromRequest +// behaves as expected when namespaces are involved. +func Test_GetStaticSecretPathFromRequestNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + path := getStaticSecretPathFromRequest(req) + require.Equal(t, "ns1/foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "ns1/foo/bar", path) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + // Paths like this are not static secrets, so we should return "" + Path: "ns1/foo/bar", + }, + }, + } + + path = getStaticSecretPathFromRequest(req) + require.Equal(t, "", path) +} + +// TestCache_CanonicalizeStaticSecretPath ensures that +// canonicalizeStaticSecretPath works as expected with all kinds of inputs. 
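+// Inputs cover the /v1 prefix, leading slashes, and namespaces supplied both
+// in the path and as a separate argument, with and without slashes.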
+func TestCache_CanonicalizeStaticSecretPath(t *testing.T) { + expected := "foo/bar" + actual := canonicalizeStaticSecretPath("/v1/foo/bar", "") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("foo/bar", "") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("/foo/bar", "") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/ns1/foo/bar", "") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("ns1/foo/bar", "") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("/ns1/foo/bar", "") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "ns1") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "ns1") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "ns1") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "ns1/") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "ns1/") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "ns1/") + require.Equal(t, expected, actual) + + expected = "ns1/foo/bar" + actual = canonicalizeStaticSecretPath("/v1/foo/bar", "/ns1/") + require.Equal(t, expected, actual) + + actual = canonicalizeStaticSecretPath("/foo/bar", "/ns1/") + require.Equal(t, expected, actual) + actual = canonicalizeStaticSecretPath("foo/bar", "/ns1/") + require.Equal(t, expected, actual) +} + +// TestCache_ComputeStaticSecretIndexIDNamespaces ensures that +// computeStaticSecretCacheIndex correctly identifies that a request +// with a namespace header and a request specifying the namespace in the path +// are equivalent. 
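+// Both forms should hash to the same cache index.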
+func TestCache_ComputeStaticSecretIndexIDNamespaces(t *testing.T) { + req := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + index := computeStaticSecretCacheIndex(req) + // Paths like this are not static secrets, so we should expect "" + require.Equal(t, "", index) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "ns1/foo/bar", + }, + }, + } + + // Paths like this are not static secrets, so we should expect "" + index2 := computeStaticSecretCacheIndex(req) + require.Equal(t, "", index2) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/foo/bar", + }, + }, + } + + expectedIndex := "a4605679d269aa1bebac7079a471a33403413f388f63bf0da3c771b225857932" + // We expect that computeStaticSecretCacheIndex will compute the same index + index3 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index3) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1"}}, + }, + } + + index4 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index4) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1/"}}, + }, + } + + // Paths like this are not static secrets, so we should expect "" + index5 := computeStaticSecretCacheIndex(req) + require.Equal(t, "", index5) + + req = &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/foo/bar", + }, + Header: map[string][]string{api.NamespaceHeaderName: {"ns1/"}}, + }, + } + + index6 := computeStaticSecretCacheIndex(req) + require.Equal(t, expectedIndex, index6) +} + +func TestLeaseCache_EmptyToken(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), + } + lc := testNewLeaseCache(t, responses) + + // Even if the send request doesn't have a token on it, a successful + // cacheable response should result in the index properly getting populated + // with a token and memdb shouldn't complain while inserting the index. + urlPath := "http://example.com/v1/sample/api" + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected a non empty response") + } +} + +func TestLeaseCache_SendCacheable(t *testing.T) { + // Emulate 2 responses from the api proxy. One returns a new token and the + // other returns a lease. + responses := []*SendResponse{ + newTestSendResponse(http.StatusCreated, `{"auth": {"client_token": "testtoken", "renewable": true}}`), + newTestSendResponse(http.StatusOK, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), + } + + lc := testNewLeaseCache(t, responses) + // Register a token so that the token and lease requests are cached + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + // Make a request. A response with a new token is returned to the lease + // cache and that will be cached. 
+ urlPath := "http://example.com/v1/sample/api" + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Send the same request again to get the cached response + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Check TokenParent + cachedItem, err := lc.db.Get(cachememdb.IndexNameToken, "testtoken") + if err != nil { + t.Fatal(err) + } + if cachedItem == nil { + t.Fatalf("expected token entry from cache") + } + if cachedItem.TokenParent != "autoauthtoken" { + t.Fatalf("unexpected value for tokenparent: %s", cachedItem.TokenParent) + } + + // Modify the request a little bit to ensure the second response is + // returned to the lease cache. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Make the same request again and ensure that the same response is returned + // again. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +// TestLeaseCache_StoreCacheableStaticSecret tests that cacheStaticSecret works +// as expected, creating the two expected cache entries, and also ensures +// that we can evict the cache entry with the cache clear API afterwards. +func TestLeaseCache_StoreCacheableStaticSecret(t *testing.T) { + request := &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/secrets/foo/bar", + }, + }, + Token: "token", + } + response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`) + responses := []*SendResponse{ + response, + } + index := &cachememdb.Index{ + Type: cacheboltdb.StaticSecretType, + RequestPath: request.Request.URL.Path, + Namespace: "root/", + Token: "token", + ID: computeStaticSecretCacheIndex(request), + } + + lc := testNewLeaseCache(t, responses) + + // We expect two entries to be stored by this: + // 1. The actual static secret + // 2. 
The capabilities index
+	err := lc.cacheStaticSecret(context.Background(), request, response, index)
+	require.NoError(t, err)
+
+	indexFromDB, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.NoError(t, err)
+
+	require.NotNil(t, indexFromDB)
+	require.Equal(t, "token", indexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"token": {}}, indexFromDB.Tokens)
+	require.Equal(t, cacheboltdb.StaticSecretType, indexFromDB.Type)
+	require.Equal(t, request.Request.URL.Path, indexFromDB.RequestPath)
+	require.Equal(t, "root/", indexFromDB.Namespace)
+
+	capabilitiesIndexFromDB, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, hex.EncodeToString(cryptoutil.Blake2b256Hash(index.Token)))
+	require.NoError(t, err)
+
+	require.NotNil(t, capabilitiesIndexFromDB)
+	require.Equal(t, "token", capabilitiesIndexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"secrets/foo/bar": {}}, capabilitiesIndexFromDB.ReadablePaths)
+
+	err = lc.handleCacheClear(context.Background(), &cacheClearInput{
+		Type:        "request_path",
+		RequestPath: request.Request.URL.Path,
+	})
+	require.NoError(t, err)
+
+	expectedClearedIndex, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedIndex)
+}
+
+// TestLeaseCache_StaticSecret_CacheClear_All tests that static secrets are
+// stored correctly, as well as removed from the cache by a cache clear with
+// "all" specified as the type.
+func TestLeaseCache_StaticSecret_CacheClear_All(t *testing.T) {
+	request := &SendRequest{
+		Request: &http.Request{
+			URL: &url.URL{
+				Path: "/v1/secrets/foo/bar",
+			},
+		},
+		Token: "token",
+	}
+	response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`)
+	responses := []*SendResponse{
+		response,
+	}
+	index := &cachememdb.Index{
+		Type:        cacheboltdb.StaticSecretType,
+		RequestPath: request.Request.URL.Path,
+		Namespace:   "root/",
+		Token:       "token",
+		ID:          computeStaticSecretCacheIndex(request),
+	}
+
+	lc := testNewLeaseCache(t, responses)
+
+	// We expect two entries to be stored by this:
+	// 1. The actual static secret
+	// 2. The capabilities index
+	err := lc.cacheStaticSecret(context.Background(), request, response, index)
+	require.NoError(t, err)
+
+	indexFromDB, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.NoError(t, err)
+
+	require.NotNil(t, indexFromDB)
+	require.Equal(t, "token", indexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"token": {}}, indexFromDB.Tokens)
+	require.Equal(t, cacheboltdb.StaticSecretType, indexFromDB.Type)
+	require.Equal(t, request.Request.URL.Path, indexFromDB.RequestPath)
+	require.Equal(t, "root/", indexFromDB.Namespace)
+
+	capabilitiesIndexFromDB, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, hex.EncodeToString(cryptoutil.Blake2b256Hash(index.Token)))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	require.NotNil(t, capabilitiesIndexFromDB)
+	require.Equal(t, "token", capabilitiesIndexFromDB.Token)
+	require.Equal(t, map[string]struct{}{"secrets/foo/bar": {}}, capabilitiesIndexFromDB.ReadablePaths)
+
+	err = lc.handleCacheClear(context.Background(), &cacheClearInput{
+		Type: "all",
+	})
+	require.NoError(t, err)
+
+	expectedClearedIndex, err := lc.db.Get(cachememdb.IndexNameID, index.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedIndex)
+
+	expectedClearedCapabilitiesIndex, err := lc.db.GetCapabilitiesIndex(cachememdb.IndexNameID, capabilitiesIndexFromDB.ID)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, expectedClearedCapabilitiesIndex)
+}
+
+// TestLeaseCache_SendCacheableStaticSecret tests that the cache has no issue returning
+// static secret style responses. It's similar to TestLeaseCache_SendCacheable in that it
+// only tests the surface level of the functionality, but there are other tests that
+// test the rest.
+func TestLeaseCache_SendCacheableStaticSecret(t *testing.T) {
+	response := newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`)
+	responses := []*SendResponse{
+		response,
+		response,
+		response,
+		response,
+	}
+
+	lc := testNewLeaseCache(t, responses)
+
+	// Register a token
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	// Make a request. A response with a new token is returned to the lease
+	// cache and that will be cached.
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Send the same request again to get the cached response
+	sendReq = &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Modify the request a little to ensure the second response is
+	// returned to the lease cache.
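+	// (All mock responses are identical in this test, so only the cached
+	// versus proxied code path differs between sends.)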
+ sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Make the same request again and ensure that the same response is returned + // again. + sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, response.Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +func TestLeaseCache_SendNonCacheable(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, `{"value": "output"}`), + newTestSendResponse(http.StatusNotFound, `{"value": "invalid"}`), + newTestSendResponse(http.StatusOK, `Hello`), + newTestSendResponse(http.StatusTemporaryRedirect, ""), + } + + lc := testNewLeaseCache(t, responses) + + // Send a request through the lease cache which is not cacheable (there is + // no lease information or auth information in the response) + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the second response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the third response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[2].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the fourth response will be + // returned. 
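+	// (A 307 with an empty body carries neither lease nor auth data, so it is
+	// likewise non-cacheable.)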
+ sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[3].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +func TestLeaseCache_SendNonCacheableNonTokenLease(t *testing.T) { + // Create the cache + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, `{"value": "output", "lease_id": "foo"}`), + newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), + } + lc := testNewLeaseCache(t, responses) + + // Send a request through lease cache which returns a response containing + // lease_id. Response will not be cached because it doesn't belong to a + // token that is managed by the lease cache. + urlPath := "http://example.com/v1/sample/api" + sendReq := &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + _, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) + if err != cachememdb.ErrCacheItemNotFound { + t.Fatal("expected entry to be nil, got", err) + } + + // Verify that the response is not cached by sending the same request and + // by expecting a different response. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + _, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath) + if err != cachememdb.ErrCacheItemNotFound { + t.Fatal("expected entry to be nil, got", err) + } +} + +func TestLeaseCache_HandleCacheClear(t *testing.T) { + lc := testNewLeaseCache(t, nil) + + handler := lc.HandleCacheClear(context.Background()) + ts := httptest.NewServer(handler) + defer ts.Close() + + // Test missing body, should return 400 + resp, err := http.Post(ts.URL, "application/json", nil) + if err != nil { + t.Fatal() + } + if resp.StatusCode != http.StatusBadRequest { + t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode) + } + + testCases := []struct { + name string + reqType string + reqValue string + expectedStatusCode int + }{ + { + "invalid_type", + "foo", + "", + http.StatusBadRequest, + }, + { + "invalid_value", + "", + "bar", + http.StatusBadRequest, + }, + { + "all", + "all", + "", + http.StatusOK, + }, + { + "by_request_path", + "request_path", + "foo", + http.StatusOK, + }, + { + "by_token", + "token", + "foo", + http.StatusOK, + }, + { + "by_lease", + "lease", + "foo", + http.StatusOK, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue) + resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody)) + if err != nil { + t.Fatal(err) + } + if tc.expectedStatusCode != resp.StatusCode { + t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode, 
resp.StatusCode) + } + }) + } +} + +func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) { + tests := []struct { + name string + req *SendRequest + wantNamespace string + wantRelativePath string + }{ + { + "non_revocation_full_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/sys/mounts", + }, + }, + }, + "root/", + "/v1/ns1/sys/mounts", + }, + { + "non_revocation_relative_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/sys/mounts", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/", + "/v1/sys/mounts", + }, + { + "non_revocation_relative_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns2/sys/mounts", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/", + "/v1/ns2/sys/mounts", + }, + { + "revocation_full_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/sys/leases/revoke", + }, + }, + }, + "ns1/", + "/v1/sys/leases/revoke", + }, + { + "revocation_relative_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/sys/leases/revoke", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/", + "/v1/sys/leases/revoke", + }, + { + "revocation_relative_partial_ns", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns2/sys/leases/revoke", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/ns2/", + "/v1/sys/leases/revoke", + }, + { + "revocation_prefix_full_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns1/sys/leases/revoke-prefix/foo", + }, + }, + }, + "ns1/", + "/v1/sys/leases/revoke-prefix/foo", + }, + { + "revocation_prefix_relative_path", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/sys/leases/revoke-prefix/foo", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/", + "/v1/sys/leases/revoke-prefix/foo", + }, + { + "revocation_prefix_partial_ns", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "/v1/ns2/sys/leases/revoke-prefix/foo", + }, + Header: http.Header{ + consts.NamespaceHeaderName: []string{"ns1/"}, + }, + }, + }, + "ns1/ns2/", + "/v1/sys/leases/revoke-prefix/foo", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req) + if gotNamespace != tt.wantNamespace { + t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace) + } + if gotRelativePath != tt.wantRelativePath { + t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath) + } + }) + } +} + +func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) { + lc := testNewLeaseCacheWithDelay(t, false, 50) + + // We are going to send 100 requests, each taking 50ms to process. If these + // requests are processed serially, it will take ~5seconds to finish. we + // use a ContextWithTimeout to tell us if this is the case by giving ample + // time for it process them concurrently but time out if they get processed + // serially. 
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + wgDoneCh := make(chan struct{}) + errCh := make(chan error) + + go func() { + var wg sync.WaitGroup + // 100 concurrent requests + for i := 0; i < 100; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + // Send a request through the lease cache which is not cacheable (there is + // no lease information or auth information in the response) + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + + _, err := lc.Send(ctx, sendReq) + if err != nil { + errCh <- err + } + }() + } + + wg.Wait() + close(wgDoneCh) + }() + + select { + case <-ctx.Done(): + t.Fatalf("request timed out: %s", ctx.Err()) + case <-wgDoneCh: + case err := <-errCh: + t.Fatal(err) + } +} + +func TestLeaseCache_Concurrent_Cacheable(t *testing.T) { + lc := testNewLeaseCacheWithDelay(t, true, 50) + + if err := lc.RegisterAutoAuthToken("autoauthtoken"); err != nil { + t.Fatal(err) + } + + // We are going to send 100 requests, each taking 50ms to process. If these + // requests are processed serially, it will take ~5seconds to finish, so we + // use a ContextWithTimeout to tell us if this is the case. + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + var cacheCount atomic.Uint32 + wgDoneCh := make(chan struct{}) + errCh := make(chan error) + + go func() { + var wg sync.WaitGroup + // Start 100 concurrent requests + for i := 0; i < 100; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", nil), + } + + resp, err := lc.Send(ctx, sendReq) + if err != nil { + errCh <- err + } + + if resp.CacheMeta != nil && resp.CacheMeta.Hit { + cacheCount.Inc() + } + }() + } + + wg.Wait() + close(wgDoneCh) + }() + + select { + case <-ctx.Done(): + t.Fatalf("request timed out: %s", ctx.Err()) + case <-wgDoneCh: + case err := <-errCh: + t.Fatal(err) + } + + // Ensure that all but one request got proxied. The other 99 should be + // returned from the cache. 
+	if cacheCount.Load() != 99 {
+		t.Fatalf("Should have returned a cached response 99 times, got %d", cacheCount.Load())
+	}
+}
+
+func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) {
+	t.Helper()
+
+	km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil)
+	require.NoError(t, err)
+
+	tempCacheDir, err = ioutil.TempDir("", "agent-cache-test")
+	require.NoError(t, err)
+	boltStorage, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+		Path:    tempCacheDir,
+		Logger:  hclog.Default(),
+		Wrapper: km.Wrapper(),
+	})
+	require.NoError(t, err)
+	require.NotNil(t, boltStorage)
+	// The calling function should `defer boltStorage.Close()` and `defer os.RemoveAll(tempCacheDir)`
+	return tempCacheDir, boltStorage
+}
+
+func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, afterLen int) {
+	beforeDB, err := before.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, beforeDB, beforeLen)
+	afterDB, err := after.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, afterDB, afterLen)
+	for _, cachedItem := range beforeDB {
+		if strings.Contains(cachedItem.RequestPath, "expect-missing") {
+			continue
+		}
+		restoredItem, err := after.db.Get(cachememdb.IndexNameID, cachedItem.ID)
+		require.NoError(t, err)
+
+		assert.Equal(t, cachedItem.ID, restoredItem.ID)
+		assert.Equal(t, cachedItem.Lease, restoredItem.Lease)
+		assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken)
+		assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace)
+		assert.EqualValues(t, cachedItem.RequestHeader, restoredItem.RequestHeader)
+		assert.Equal(t, cachedItem.RequestMethod, restoredItem.RequestMethod)
+		assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath)
+		assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken)
+		assert.Equal(t, cachedItem.Response, restoredItem.Response)
+		assert.Equal(t, cachedItem.Token, restoredItem.Token)
+		assert.Equal(t, cachedItem.TokenAccessor, restoredItem.TokenAccessor)
+		assert.Equal(t, cachedItem.TokenParent, restoredItem.TokenParent)
+
+		// check what we can in the renewal context
+		assert.NotEmpty(t, restoredItem.RenewCtxInfo.CancelFunc)
+		assert.NotZero(t, restoredItem.RenewCtxInfo.DoneCh)
+		require.NotEmpty(t, restoredItem.RenewCtxInfo.Ctx)
+		assert.Equal(t,
+			cachedItem.RenewCtxInfo.Ctx.Value(contextIndexID),
+			restoredItem.RenewCtxInfo.Ctx.Value(contextIndexID),
+		)
+	}
+}
+
+func TestLeaseCache_PersistAndRestore(t *testing.T) {
+	// Emulate responses from the api proxy. The first two use the auto-auth
+	// token, and the others use another token.
+	// The test re-sends each request to ensure that the response is cached,
+	// so the number of responses and cacheTests specified should always be equal.
+	responses := []*SendResponse{
+		newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 600}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 600}`),
+		// The auth token will get manually deleted from the bolt DB storage, causing both of the following two responses
+		// to be missing from the cache after a restore, because the lease is a child of the auth token.
+ newTestSendResponse(202, `{"auth": {"client_token": "testtoken2", "renewable": true, "orphan": true, "lease_duration": 600}}`), + newTestSendResponse(203, `{"lease_id": "secret2-lease", "renewable": true, "data": {"number": "two"}, "lease_duration": 600}`), + // 204 No content gets special handling - avoid. + newTestSendResponse(250, `{"auth": {"client_token": "testtoken3", "renewable": true, "orphan": true, "lease_duration": 600}}`), + newTestSendResponse(251, `{"lease_id": "secret3-lease", "renewable": true, "data": {"number": "three"}, "lease_duration": 600}`), + newTestSendResponse(http.StatusCreated, `{"data": {"foo": "bar"}, "mount_type": "kvv2"}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached + err := lc.RegisterAutoAuthToken("autoauthtoken") + require.NoError(t, err) + + cacheTests := []struct { + token string + method string + urlPath string + body string + deleteFromPersistentStore bool // If true, will be deleted from bolt DB to induce an error on restore + expectMissingAfterRestore bool // If true, the response is not expected to be present in the restored cache + }{ + { + // Make a request. A response with a new token is returned to the + // lease cache and that will be cached. + token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input"}`, + }, + { + // Modify the request a little bit to ensure the second response is + // returned to the lease cache. + token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input_changed"}`, + }, + { + // Simulate an approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle-expect-missing/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + deleteFromPersistentStore: true, + expectMissingAfterRestore: true, + }, + { + // Test caching with the token acquired from the approle login + token: "testtoken2", + method: "GET", + urlPath: "http://example.com/v1/sample-expect-missing/api", + body: `{"second": "input"}`, + // This will be missing from the restored cache because its parent token was deleted + expectMissingAfterRestore: true, + }, + { + // Simulate another approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + }, + { + // Test caching with the token acquired from the latest approle login + token: "testtoken3", + method: "GET", + urlPath: "http://example.com/v1/sample3/api", + body: `{"third": "input"}`, + }, + } + + var deleteIDs []string + for i, ct := range cacheTests { + // Send once to cache + req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + + sendReq := &SendRequest{ + Token: ct.token, + Request: req, + } + if ct.deleteFromPersistentStore { + deleteID, err := computeIndexID(sendReq) + require.NoError(t, err) + deleteIDs = append(deleteIDs, deleteID) + // Now reset the body after calculating the index + req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendReq.Request = req + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + 
assert.Equal(t, responses[i].Response.StatusCode, resp.Response.StatusCode, "expected proxied response")
+		assert.Nil(t, resp.CacheMeta)
+
+		// Send again to test cache. If this isn't cached, the response returned
+		// will be the next in the list and the status code will not match.
+		req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body))
+		req.Header.Set("User-Agent", useragent.AgentProxyString())
+		sendCacheReq := &SendRequest{
+			Token:   ct.token,
+			Request: req,
+		}
+		respCached, err := lc.Send(context.Background(), sendCacheReq)
+		require.NoError(t, err, "failed to send request %+v", ct)
+		assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response")
+		require.NotNil(t, respCached.CacheMeta)
+		assert.True(t, respCached.CacheMeta.Hit)
+	}
+
+	require.NotEmpty(t, deleteIDs)
+	for _, deleteID := range deleteIDs {
+		err = boltStorage.Delete(deleteID, cacheboltdb.LeaseType)
+		require.NoError(t, err)
+	}
+
+	// Now we know the cache is working, so try restoring from the persisted
+	// cache's storage. Responses 3 and 4 have been cleared from the cache, so
+	// re-send those.
+	restoredCache := testNewLeaseCache(t, responses[2:4])
+
+	err = restoredCache.Restore(context.Background(), boltStorage)
+	merr, ok := err.(*multierror.Error)
+	require.True(t, ok)
+	assert.Len(t, merr.Errors, 1)
+	assert.Contains(t, merr.Error(), "could not find parent Token testtoken2")
+
+	// Now compare the cache contents before and after
+	compareBeforeAndAfter(t, lc, restoredCache, 7, 5)
+
+	// And finally send the cache requests once to make sure they're all being
+	// served from the restoredCache unless they were intended to be missing after restore.
+	for i, ct := range cacheTests {
+		req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body))
+		req.Header.Set("User-Agent", useragent.AgentProxyString())
+		sendCacheReq := &SendRequest{
+			Token:   ct.token,
+			Request: req,
+		}
+		respCached, err := restoredCache.Send(context.Background(), sendCacheReq)
+		require.NoError(t, err, "failed to send request %+v", ct)
+		assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response")
+		if ct.expectMissingAfterRestore {
+			require.Nil(t, respCached.CacheMeta)
+		} else {
+			require.NotNil(t, respCached.CacheMeta)
+			assert.True(t, respCached.CacheMeta.Hit)
+		}
+	}
+}
+
+func TestLeaseCache_PersistAndRestore_WithManyDependencies(t *testing.T) {
+	tempDir, boltStorage := setupBoltStorage(t)
+	defer os.RemoveAll(tempDir)
+	defer boltStorage.Close()
+
+	var requests []*SendRequest
+	var responses []*SendResponse
+	var orderedRequestPaths []string
+
+	// helper func to generate new auth leases with a child secret lease attached
+	authAndSecretLease := func(id int, parentToken, newToken string) {
+		t.Helper()
+		path := fmt.Sprintf("/v1/auth/approle-%d/login", id)
+		orderedRequestPaths = append(orderedRequestPaths, path)
+		requests = append(requests, &SendRequest{
+			Token:   parentToken,
+			Request: httptest.NewRequest("PUT", "http://example.com"+path, strings.NewReader("")),
+		})
+		responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"auth": {"client_token": "%s", "renewable": true, "lease_duration": 600}}`, newToken)))
+
+		// Fetch a leased secret using the new token
+		path = fmt.Sprintf("/v1/kv/%d", id)
+		orderedRequestPaths = append(orderedRequestPaths, path)
+		requests = append(requests, &SendRequest{
+			Token: newToken,
+			Request: httptest.NewRequest("GET",
"http://example.com"+path, strings.NewReader("")), + }) + responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"lease_id": "secret-%d-lease", "renewable": true, "data": {"number": %d}, "lease_duration": 600}`, id, id))) + } + + // Pathological case: a long chain of child tokens + authAndSecretLease(0, "autoauthtoken", "many-ancestors-token;0") + for i := 1; i <= 50; i++ { + // Create a new generation of child token + authAndSecretLease(i, fmt.Sprintf("many-ancestors-token;%d", i-1), fmt.Sprintf("many-ancestors-token;%d", i)) + } + + // Lots of sibling tokens with auto auth token as their parent + for i := 51; i <= 100; i++ { + authAndSecretLease(i, "autoauthtoken", fmt.Sprintf("many-siblings-token;%d", i)) + } + + // Also create some extra siblings for an auth token further down the chain + for i := 101; i <= 110; i++ { + authAndSecretLease(i, "many-ancestors-token;25", fmt.Sprintf("many-siblings-for-ancestor-token;%d", i)) + } + + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached + err := lc.RegisterAutoAuthToken("autoauthtoken") + require.NoError(t, err) + + for _, req := range requests { + // Send once to cache + resp, err := lc.Send(context.Background(), req) + require.NoError(t, err) + assert.Equal(t, 200, resp.Response.StatusCode, "expected success") + assert.Nil(t, resp.CacheMeta) + } + + // Ensure leases are retrieved in the correct order + var processed int + + leases, err := boltStorage.GetByType(context.Background(), cacheboltdb.LeaseType) + require.NoError(t, err) + for _, lease := range leases { + index, err := cachememdb.Deserialize(lease) + require.NoError(t, err) + require.Equal(t, orderedRequestPaths[processed], index.RequestPath) + processed++ + } + + assert.Equal(t, len(orderedRequestPaths), processed) + + restoredCache := testNewLeaseCache(t, nil) + err = restoredCache.Restore(context.Background(), boltStorage) + require.NoError(t, err) + + // Now compare the cache contents before and after + compareBeforeAndAfter(t, lc, restoredCache, 223, 223) +} + +func TestEvictPersistent(t *testing.T) { + ctx := context.Background() + + responses := []*SendResponse{ + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + // populate cache by sending request through + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(`{"value": "some_input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, resp.Response.StatusCode, 201, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // Check bolt for the cached lease + secrets, err := lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 1) + + // Call clear for the request path + err = lc.handleCacheClear(context.Background(), &cacheClearInput{ + Type: "request_path", + RequestPath: "/v1/sample/api", + }) + require.NoError(t, err) + + time.Sleep(2 * time.Second) + + // Check that cached item is gone + secrets, err = lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 0) +} + +func 
TestRegisterAutoAuth_sameToken(t *testing.T) {
+	// If the auto-auth token already exists in the cache, it should not be
+	// stored again in a new index.
+	lc := testNewLeaseCache(t, nil)
+	err := lc.RegisterAutoAuthToken("autoauthtoken")
+	assert.NoError(t, err)
+
+	oldTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+	oldTokenID := oldTokenIndex.ID
+
+	// register the same token again
+	err = lc.RegisterAutoAuthToken("autoauthtoken")
+	assert.NoError(t, err)
+
+	// check that there's only one index for autoauthtoken
+	entries, err := lc.db.GetByPrefix(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+	assert.Len(t, entries, 1)
+
+	newTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken")
+	assert.NoError(t, err)
+
+	// compare the IDs, since those are randomly generated when an index for a
+	// token is added to the cache; if a new token was added, the IDs will
+	// not match.
+	assert.Equal(t, oldTokenID, newTokenIndex.ID)
+}
+
+func Test_hasExpired(t *testing.T) {
+	responses := []*SendResponse{
+		newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`),
+	}
+	lc := testNewLeaseCache(t, responses)
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	cacheTests := []struct {
+		token          string
+		urlPath        string
+		leaseType      string
+		wantStatusCode int
+	}{
+		{
+			// auth lease
+			token:          "autoauthtoken",
+			urlPath:        "/v1/sample/auth",
+			leaseType:      cacheboltdb.LeaseType,
+			wantStatusCode: responses[0].Response.StatusCode,
+		},
+		{
+			// secret lease
+			token:          "autoauthtoken",
+			urlPath:        "/v1/sample/secret",
+			leaseType:      cacheboltdb.LeaseType,
+			wantStatusCode: responses[1].Response.StatusCode,
+		},
+	}
+
+	for _, ct := range cacheTests {
+		// Send once to cache
+		urlPath := "http://example.com" + ct.urlPath
+		sendReq := &SendRequest{
+			Token:   ct.token,
+			Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+		}
+		resp, err := lc.Send(context.Background(), sendReq)
+		require.NoError(t, err)
+		assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response")
+		assert.Nil(t, resp.CacheMeta)
+
+		// get the Index out of the mem cache
+		index, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", ct.urlPath)
+		require.NoError(t, err)
+		assert.Equal(t, ct.leaseType, index.Type)
+
+		// The lease duration is 60 seconds, so the index should not have
+		// expired as of time.Now()
+		expired, err := lc.hasExpired(time.Now().UTC(), index)
+		require.NoError(t, err)
+		assert.False(t, expired)
+
+		// In 90 seconds the index should be "expired"
+		futureTime := time.Now().UTC().Add(time.Second * 90)
+		expired, err = lc.hasExpired(futureTime, index)
+		require.NoError(t, err)
+		assert.True(t, expired)
+	}
+}
+
+func TestLeaseCache_hasExpired_wrong_type(t *testing.T) {
+	index := &cachememdb.Index{
+		Type: cacheboltdb.TokenType,
+		Response: []byte(`HTTP/0.0 200 OK
+Content-Type: application/json
+Date: Tue, 02 Mar 2021 17:54:16 GMT
+
+{}`),
+	}
+
+	lc := testNewLeaseCache(t, nil)
+	expired, err := lc.hasExpired(time.Now().UTC(), index)
+	assert.False(t, expired)
+	assert.EqualError(t, err, `secret without lease encountered in expiration check`)
+}
+
+func TestLeaseCacheRestore_expired(t *testing.T) {
+	// Emulate 2 responses from the api proxy, both expired
+	responses := []*SendResponse{
+		newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": -600}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": -600}`),
+	}
+
+	tempDir, boltStorage := setupBoltStorage(t)
+	defer os.RemoveAll(tempDir)
+	defer boltStorage.Close()
+	lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage)
+
+	// Register an auto-auth token so that the token and lease requests are cached in mem
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	cacheTests := []struct {
+		token          string
+		method         string
+		urlPath        string
+		body           string
+		wantStatusCode int
+	}{
+		{
+			// Make a request. A response with a new token is returned to the
+			// lease cache and that will be cached.
+			token:          "autoauthtoken",
+			method:         "GET",
+			urlPath:        "http://example.com/v1/sample/api",
+			body:           `{"value": "input"}`,
+			wantStatusCode: responses[0].Response.StatusCode,
+		},
+		{
+			// Modify the request a little bit to ensure the second response is
+			// returned to the lease cache.
+			token:          "autoauthtoken",
+			method:         "GET",
+			urlPath:        "http://example.com/v1/sample/api",
+			body:           `{"value": "input_changed"}`,
+			wantStatusCode: responses[1].Response.StatusCode,
+		},
+	}
+
+	for _, ct := range cacheTests {
+		// Send once to cache
+		sendReq := &SendRequest{
+			Token:   ct.token,
+			Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)),
+		}
+		resp, err := lc.Send(context.Background(), sendReq)
+		require.NoError(t, err)
+		assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response")
+		assert.Nil(t, resp.CacheMeta)
+	}
+
+	// Restore from the persisted cache's storage
+	restoredCache := testNewLeaseCache(t, nil)
+
+	err := restoredCache.Restore(context.Background(), boltStorage)
+	assert.NoError(t, err)
+
+	// The original mem cache should have between one and three items.
+	// This will usually be three, but could be fewer if any renewals
+	// happen before this check, which will evict the expired cache entries.
+	// E.g. if you add a time.Sleep before this, it will be 1. We check
+	// the range to reduce flakiness.
+	beforeDB, err := lc.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.LessOrEqual(t, len(beforeDB), 3)
+	assert.LessOrEqual(t, 1, len(beforeDB))
+
+	// There should only be one item in the restored cache: the autoauth token
+	afterDB, err := restoredCache.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, afterDB, 1)
+
+	// Just verify that the one item in the restored mem cache matches one in
+	// the original mem cache, and that it's the auto-auth token
+	beforeItem, err := lc.db.Get(cachememdb.IndexNameID, afterDB[0].ID)
+	require.NoError(t, err)
+	assert.NotNil(t, beforeItem)
+
+	assert.Equal(t, "autoauthtoken", afterDB[0].Token)
+	assert.Equal(t, cacheboltdb.TokenType, afterDB[0].Type)
+}
diff --git a/command/agentproxyshared/cache/listener.go b/command/agentproxyshared/cache/listener.go
new file mode 100644
index 000000000000..c962a2c8c370
--- /dev/null
+++ b/command/agentproxyshared/cache/listener.go
@@ -0,0 +1,84 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/hashicorp/go-secure-stdlib/reloadutil"
+	"github.com/hashicorp/vault/command/server"
+	"github.com/hashicorp/vault/internalshared/configutil"
+	"github.com/hashicorp/vault/internalshared/listenerutil"
+)
+
+type ListenerBundle struct {
+	Listener      net.Listener
+	TLSConfig     *tls.Config
+	TLSReloadFunc reloadutil.ReloadFunc
+}
+
+func StartListener(lnConfig *configutil.Listener) (*ListenerBundle, error) {
+	addr := lnConfig.Address
+
+	var ln net.Listener
+	var err error
+	switch lnConfig.Type {
+	case "tcp":
+		if addr == "" {
+			addr = "127.0.0.1:8200"
+		}
+
+		bindProto := "tcp"
+		// If they've passed 0.0.0.0, we only want to bind on IPv4
+		// rather than Go's dual-stack default
+		if strings.HasPrefix(addr, "0.0.0.0:") {
+			bindProto = "tcp4"
+		}
+
+		ln, err = net.Listen(bindProto, addr)
+		if err != nil {
+			return nil, err
+		}
+		ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)}
+
+	case "unix":
+		var uConfig *listenerutil.UnixSocketsConfig
+		if lnConfig.SocketMode != "" &&
+			lnConfig.SocketUser != "" &&
+			lnConfig.SocketGroup != "" {
+			uConfig = &listenerutil.UnixSocketsConfig{
+				Mode:  lnConfig.SocketMode,
+				User:  lnConfig.SocketUser,
+				Group: lnConfig.SocketGroup,
+			}
+		}
+		ln, err = listenerutil.UnixSocketListener(addr, uConfig)
+		if err != nil {
+			return nil, err
+		}
+
+	default:
+		return nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type)
+	}
+
+	props := map[string]string{"addr": ln.Addr().String()}
+	tlsConf, reloadFunc, err := listenerutil.TLSConfig(lnConfig, props, nil)
+	if err != nil {
+		return nil, err
+	}
+	if tlsConf != nil {
+		ln = tls.NewListener(ln, tlsConf)
+	}
+
+	cfg := &ListenerBundle{
+		Listener:      ln,
+		TLSConfig:     tlsConf,
+		TLSReloadFunc: reloadFunc,
+	}
+
+	return cfg, nil
+}
diff --git a/command/agentproxyshared/cache/proxy.go b/command/agentproxyshared/cache/proxy.go
new file mode 100644
index 000000000000..503d981d60e3
--- /dev/null
+++ b/command/agentproxyshared/cache/proxy.go
@@ -0,0 +1,79 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+)
+
+// SendRequest is the input for Proxier.Send.
+type SendRequest struct {
+	Token   string
+	Request *http.Request
+
+	// RequestBody is the stored body bytes from Request.Body. It is set here to
+	// avoid reading and re-setting the stream multiple times.
+	RequestBody []byte
+}
+
+// SendResponse is the output from Proxier.Send.
+type SendResponse struct {
+	Response *api.Response
+
+	// ResponseBody is the stored body bytes from Response.Body. It is set here to
+	// avoid reading and re-setting the stream multiple times.
+	ResponseBody []byte
+	CacheMeta    *CacheMeta
+}
+
+// CacheMeta contains metadata information about the response,
+// such as whether it was a cache hit or miss, and the age of the
+// cached entry.
+type CacheMeta struct {
+	Hit bool
+	Age time.Duration
+}
+
+// Proxier is the interface implemented by different components that are
+// responsible for performing specific tasks, such as caching and proxying.
+// Together, these tasks serve the request received by the agent.
+type Proxier interface {
+	Send(ctx context.Context, req *SendRequest) (*SendResponse, error)
+}
+
+// NewSendResponse creates a new SendResponse and takes care of initializing its
+// fields properly.
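+//
+// A typical (hypothetical, for illustration only) call site wraps a raw API
+// response once and then reuses the buffered body:
+//
+//	resp, err := NewSendResponse(apiResp, nil)
+//	if err != nil {
+//		return nil, err
+//	}
+//	// resp.ResponseBody now holds the body bytes, and apiResp.Body has been
+//	// re-set so it can still be read by downstream consumers.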
+func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) {
+	resp := &SendResponse{
+		Response:  apiResponse,
+		CacheMeta: &CacheMeta{},
+	}
+
+	// If a response body is separately provided we set that as the SendResponse.ResponseBody,
+	// otherwise we will do an io.ReadAll to extract the response body from apiResponse.
+	switch {
+	case len(responseBody) > 0:
+		resp.ResponseBody = responseBody
+	case apiResponse.Body != nil:
+		respBody, err := io.ReadAll(apiResponse.Body)
+		if err != nil {
+			return nil, err
+		}
+		// Close the old body
+		apiResponse.Body.Close()
+
+		// Re-set the response body after reading from the Reader
+		apiResponse.Body = io.NopCloser(bytes.NewReader(respBody))
+
+		resp.ResponseBody = respBody
+	}
+
+	return resp, nil
+}
diff --git a/command/agentproxyshared/cache/static_secret_cache_updater.go b/command/agentproxyshared/cache/static_secret_cache_updater.go
new file mode 100644
index 000000000000..fbae0f3f24d7
--- /dev/null
+++ b/command/agentproxyshared/cache/static_secret_cache_updater.go
@@ -0,0 +1,422 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/sink"
+	"github.com/hashicorp/vault/helper/useragent"
+	"golang.org/x/exp/maps"
+	"nhooyr.io/websocket"
+)
+
+// Example Event:
+//{
+//  "id": "a3be9fb1-b514-519f-5b25-b6f144a8c1ce",
+//  "source": "https://vaultproject.io/",
+//  "specversion": "1.0",
+//  "type": "*",
+//  "data": {
+//    "event": {
+//      "id": "a3be9fb1-b514-519f-5b25-b6f144a8c1ce",
+//      "metadata": {
+//        "current_version": "1",
+//        "data_path": "secret/data/foo",
+//        "modified": "true",
+//        "oldest_version": "0",
+//        "operation": "data-write",
+//        "path": "secret/data/foo"
+//      }
+//    },
+//    "event_type": "kv-v2/data-write",
+//    "plugin_info": {
+//      "mount_class": "secret",
+//      "mount_accessor": "kv_5dc4d18e",
+//      "mount_path": "secret/",
+//      "plugin": "kv"
+//    }
+//  },
+//  "datacontentype": "application/cloudevents",
+//  "time": "2023-09-12T15:19:49.394915-07:00"
+//}
+
+// StaticSecretCacheUpdater is a struct that utilizes
+// the event system to keep the static secret cache up to date.
+type StaticSecretCacheUpdater struct {
+	client     *api.Client
+	leaseCache *LeaseCache
+	logger     hclog.Logger
+	tokenSink  sink.Sink
+}
+
+// StaticSecretCacheUpdaterConfig is the configuration for initializing a new
+// StaticSecretCacheUpdater.
+type StaticSecretCacheUpdaterConfig struct {
+	Client     *api.Client
+	LeaseCache *LeaseCache
+	Logger     hclog.Logger
+	// TokenSink is a token sink that will have the latest
+	// token from auto-auth in it, to be used in event system
+	// connections.
+	TokenSink sink.Sink
+}
+
+// NewStaticSecretCacheUpdater creates a new instance of a StaticSecretCacheUpdater.
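+//
+// A minimal construction sketch (the variable names here are assumed, not
+// part of this package):
+//
+//	updater, err := NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{
+//		Client:     client,     // *api.Client pointed at Vault
+//		LeaseCache: leaseCache, // the shared *LeaseCache
+//		Logger:     logger,
+//		TokenSink:  tokenSink,  // sink holding the auto-auth token
+//	})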
+func NewStaticSecretCacheUpdater(conf *StaticSecretCacheUpdaterConfig) (*StaticSecretCacheUpdater, error) {
+	if conf == nil {
+		return nil, errors.New("nil configuration provided")
+	}
+
+	if conf.LeaseCache == nil {
+		return nil, fmt.Errorf("nil Lease Cache (a required parameter): %v", conf)
+	}
+
+	if conf.Logger == nil {
+		return nil, fmt.Errorf("nil Logger (a required parameter): %v", conf)
+	}
+
+	if conf.Client == nil {
+		return nil, fmt.Errorf("nil API client (a required parameter): %v", conf)
+	}
+
+	if conf.TokenSink == nil {
+		return nil, fmt.Errorf("nil token sink (a required parameter): %v", conf)
+	}
+
+	return &StaticSecretCacheUpdater{
+		client:     conf.Client,
+		leaseCache: conf.LeaseCache,
+		logger:     conf.Logger,
+		tokenSink:  conf.TokenSink,
+	}, nil
+}
+
+// streamStaticSecretEvents streams static secret events and updates
+// the cache when updates are notified. This method will return errors in cases
+// of failed updates, malformed events, and other failures.
+// For best results, the caller of this function should retry on error with backoff,
+// if it is desired for the cache to always remain up to date.
+func (updater *StaticSecretCacheUpdater) streamStaticSecretEvents(ctx context.Context) error {
+	// First, ensure our token is up-to-date:
+	updater.client.SetToken(updater.tokenSink.(sink.SinkReader).Token())
+	conn, err := updater.openWebSocketConnection(ctx)
+	if err != nil {
+		return err
+	}
+	defer conn.Close(websocket.StatusNormalClosure, "")
+
+	err = updater.preEventStreamUpdate(ctx)
+	if err != nil {
+		return fmt.Errorf("error when performing pre-event stream secret update: %w", err)
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			_, message, err := conn.Read(ctx)
+			if err != nil {
+				// The caller of this function should make the decision on if to retry. If it does, then
+				// the websocket connection will be retried, and we will check for missed events.
+				return fmt.Errorf("error when attempting to read from event stream, reopening websocket: %w", err)
+			}
+			updater.logger.Trace("received event", "message", string(message))
+			messageMap := make(map[string]interface{})
+			err = json.Unmarshal(message, &messageMap)
+			if err != nil {
+				return fmt.Errorf("error when unmarshaling event, message: %s\nerror: %w", string(message), err)
+			}
+			data, ok := messageMap["data"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'data' element, message: %s", string(message))
+			}
+			event, ok := data["event"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'event' element, message: %s", string(message))
+			}
+			metadata, ok := event["metadata"].(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("unexpected event format when decoding 'metadata' element, message: %s", string(message))
+			}
+			modified, ok := metadata["modified"].(string)
+			if ok && modified == "true" {
+				path, ok := metadata["path"].(string)
+				if !ok {
+					return fmt.Errorf("unexpected event format when decoding 'path' element, message: %s", string(message))
+				}
+				err := updater.updateStaticSecret(ctx, path)
+				if err != nil {
+					// While we are kind of 'missing' an event this way, re-calling this function will
+					// result in the secret remaining up to date.
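+					// (On retry, preEventStreamUpdate re-syncs every cached
+					// static secret before new events are consumed, so the
+					// missed update is recovered.)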
+ return fmt.Errorf("error updating static secret: path: %q, message: %s error: %w", path, message, err) + } + } else { + // This is an event we're not interested in, ignore it and + // carry on. + continue + } + } + } + + return nil +} + +// preEventStreamUpdate is called after successful connection to the event system but before +// we process any events, to ensure we don't miss any updates. +// In some cases, this will result in multiple processing of the same updates, but +// this ensures that we don't lose any updates to secrets that might have been sent +// while the connection is forming. +func (updater *StaticSecretCacheUpdater) preEventStreamUpdate(ctx context.Context) error { + indexes, err := updater.leaseCache.db.GetByPrefix(cachememdb.IndexNameID) + if err != nil { + return err + } + + updater.logger.Debug("starting pre-event stream update of static secrets") + + var errs *multierror.Error + for _, index := range indexes { + if index.Type != cacheboltdb.StaticSecretType { + continue + } + err = updater.updateStaticSecret(ctx, index.RequestPath) + if err != nil { + errs = multierror.Append(errs, err) + } + } + + updater.logger.Debug("finished pre-event stream update of static secrets") + + return errs.ErrorOrNil() +} + +// updateStaticSecret checks for updates for a static secret on the path given, +// and updates the cache if appropriate +func (updater *StaticSecretCacheUpdater) updateStaticSecret(ctx context.Context, path string) error { + // We clone the client, as we won't be using the same token. + client, err := updater.client.Clone() + if err != nil { + return err + } + + indexId := hashStaticSecretIndex(path) + + updater.logger.Debug("received update static secret request", "path", path, "indexId", indexId) + + index, err := updater.leaseCache.db.Get(cachememdb.IndexNameID, indexId) + if errors.Is(err, cachememdb.ErrCacheItemNotFound) { + // This event doesn't correspond to a secret in our cache + // so this is a no-op. + return nil + } + if err != nil { + return err + } + + // We use a raw request so that we can store all the + // request information, just like we do in the Proxier Send methods. + request := client.NewRequest(http.MethodGet, "/v1/"+path) + if request.Headers == nil { + request.Headers = make(http.Header) + } + request.Headers.Set("User-Agent", useragent.ProxyString()) + + var resp *api.Response + var tokensToRemove []string + var successfulAttempt bool + for _, token := range maps.Keys(index.Tokens) { + client.SetToken(token) + request.Headers.Set(api.AuthHeaderName, token) + resp, err = client.RawRequestWithContext(ctx, request) + if err != nil { + updater.logger.Trace("received error when trying to update cache", "path", path, "err", err, "token", token) + // We cannot access this secret with this token for whatever reason, + // so token for removal. + tokensToRemove = append(tokensToRemove, token) + continue + } else { + // We got our updated secret! + successfulAttempt = true + break + } + } + + if successfulAttempt { + // We need to update the index, so first, hold the lock. 
+ index.IndexLock.Lock() + defer index.IndexLock.Unlock() + + // First, remove the tokens we noted couldn't access the secret from the token index + for _, token := range tokensToRemove { + delete(index.Tokens, token) + } + + sendResponse, err := NewSendResponse(resp, nil) + if err != nil { + return err + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = sendResponse.Response.Write(&respBytes) + if err != nil { + updater.logger.Error("failed to serialize response", "error", err) + return err + } + + // Set the index's Response + index.Response = respBytes.Bytes() + index.LastRenewed = time.Now().UTC() + + // Lastly, store the secret + updater.logger.Debug("storing response into the cache due to update", "path", path) + err = updater.leaseCache.db.Set(index) + if err != nil { + return err + } + } else { + // No token could successfully update the secret, or secret was deleted. + // We should evict the cache instead of re-storing the secret. + updater.logger.Debug("evicting response from cache", "path", path) + err = updater.leaseCache.db.Evict(cachememdb.IndexNameID, indexId) + if err != nil { + return err + } + } + + return nil +} + +// openWebSocketConnection opens a websocket connection to the event system for +// the events that the static secret cache updater is interested in. +func (updater *StaticSecretCacheUpdater) openWebSocketConnection(ctx context.Context) (*websocket.Conn, error) { + // We parse this into a URL object to get the specific host and scheme + // information without nasty string parsing. + vaultURL, err := url.Parse(updater.client.Address()) + if err != nil { + return nil, err + } + vaultHost := vaultURL.Host + // If we're using https, use wss, otherwise ws + scheme := "wss" + if vaultURL.Scheme == "http" { + scheme = "ws" + } + + webSocketURL := url.URL{ + Path: "/v1/sys/events/subscribe/kv*", + Host: vaultHost, + Scheme: scheme, + } + query := webSocketURL.Query() + query.Set("json", "true") + webSocketURL.RawQuery = query.Encode() + + updater.client.AddHeader(api.AuthHeaderName, updater.client.Token()) + updater.client.AddHeader(api.NamespaceHeaderName, updater.client.Namespace()) + + // Populate these now to avoid recreating them in the upcoming for loop. + headers := updater.client.Headers() + wsURL := webSocketURL.String() + httpClient := updater.client.CloneConfig().HttpClient + + // We do ten attempts, to ensure we follow forwarding to the leader. 
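+	// (Standby nodes answer with a 307 Temporary Redirect whose Location
+	// header points at the active node, so on each redirect we retry the
+	// dial against the new URL.)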
+	var conn *websocket.Conn
+	var resp *http.Response
+dialLoop:
+	for attempt := 0; attempt < 10; attempt++ {
+		conn, resp, err = websocket.Dial(ctx, wsURL, &websocket.DialOptions{
+			HTTPClient: httpClient,
+			HTTPHeader: headers,
+		})
+		if err == nil {
+			break
+		}
+
+		switch {
+		case resp == nil:
+			// No response to inspect, so stop retrying. Note that a plain
+			// break here would only exit the switch, not the loop, hence
+			// the label.
+			break dialLoop
+		case resp.StatusCode == http.StatusTemporaryRedirect:
+			wsURL = resp.Header.Get("Location")
+			continue
+		default:
+			break dialLoop
+		}
+	}
+
+	if err != nil {
+		if resp != nil {
+			if resp.StatusCode == http.StatusNotFound {
+				return nil, fmt.Errorf("received 404 when opening web socket to %s, ensure Vault is Enterprise version 1.16 or above", wsURL)
+			}
+		}
+		return nil, fmt.Errorf("error returned when opening event stream web socket to %s, ensure auto-auth token"+
+			" has correct permissions and Vault is Enterprise version 1.16 or above: %w", wsURL, err)
+	}
+
+	if conn == nil {
+		return nil, fmt.Errorf("too many redirects as part of establishing web socket connection to %s", wsURL)
+	}
+
+	return conn, nil
+}
+
+// Run is intended to be the method called by Vault Proxy that runs the subsystem.
+// Once a token is provided to the sink, we will start the websocket and start consuming
+// events and updating secrets.
+// Run will shut down gracefully when the context is cancelled.
+func (updater *StaticSecretCacheUpdater) Run(ctx context.Context) error {
+	updater.logger.Info("starting static secret cache updater subsystem")
+	defer func() {
+		updater.logger.Info("static secret cache updater subsystem stopped")
+	}()
+
+tokenLoop:
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			// Wait for the auto-auth token to be populated...
+			if updater.tokenSink.(sink.SinkReader).Token() != "" {
+				break tokenLoop
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
+	shouldBackoff := false
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+			// If we're erroring and the context isn't done, we should add
+			// a little backoff to make sure we don't accidentally overload
+			// Vault or similar.
+			if shouldBackoff {
+				time.Sleep(10 * time.Second)
+			}
+			err := updater.streamStaticSecretEvents(ctx)
+			if err != nil {
+				updater.logger.Error("error occurred during streaming static secret cache update events", "err", err)
+				shouldBackoff = true
+				continue
+			}
+		}
+	}
+}
diff --git a/command/agentproxyshared/cache/static_secret_cache_updater_test.go b/command/agentproxyshared/cache/static_secret_cache_updater_test.go
new file mode 100644
index 000000000000..f77ad4168a60
--- /dev/null
+++ b/command/agentproxyshared/cache/static_secret_cache_updater_test.go
@@ -0,0 +1,734 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	kv "github.com/hashicorp/vault-plugin-secrets-kv"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/sink"
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/hashicorp/vault/helper/testhelpers/minimal"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/atomic"
+	"nhooyr.io/websocket"
+)
+
+// Avoiding a circular dependency in the test.
+type mockSink struct { + token *atomic.String +} + +func (m *mockSink) Token() string { + return m.token.Load() +} + +func (m *mockSink) WriteToken(token string) error { + m.token.Store(token) + return nil +} + +func newMockSink(t *testing.T) sink.Sink { + t.Helper() + + return &mockSink{ + token: atomic.NewString(""), + } +} + +// testNewStaticSecretCacheUpdater returns a new StaticSecretCacheUpdater +// for use in tests. +func testNewStaticSecretCacheUpdater(t *testing.T, client *api.Client) *StaticSecretCacheUpdater { + t.Helper() + + lc := testNewLeaseCache(t, []*SendResponse{}) + tokenSink := newMockSink(t) + tokenSink.WriteToken(client.Token()) + + updater, err := NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: tokenSink, + }) + if err != nil { + t.Fatal(err) + } + return updater +} + +// TestNewStaticSecretCacheUpdater tests the NewStaticSecretCacheUpdater method, +// to ensure it errors out when appropriate. +func TestNewStaticSecretCacheUpdater(t *testing.T) { + t.Parallel() + + lc := testNewLeaseCache(t, []*SendResponse{}) + config := api.DefaultConfig() + logger := logging.NewVaultLogger(hclog.Trace).Named("cache.updater") + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + tokenSink := newMockSink(t) + + // Expect an error if any of the arguments are nil: + updater, err := NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: nil, + LeaseCache: lc, + Logger: logger, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: nil, + Logger: logger, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: nil, + TokenSink: tokenSink, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: nil, + }) + require.Error(t, err) + require.Nil(t, updater) + + // Don't expect an error if the arguments are as expected + updater, err = NewStaticSecretCacheUpdater(&StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.updater"), + TokenSink: tokenSink, + }) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, updater) +} + +// TestOpenWebSocketConnection tests that the openWebSocketConnection function +// works as expected (fails on CE, succeeds on ent). +// This uses a TLS enabled (wss) WebSocket connection. +func TestOpenWebSocketConnection(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. 
+ cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + updater.tokenSink.WriteToken(client.Token()) + + conn, err := updater.openWebSocketConnection(context.Background()) + if constants.IsEnterprise { + require.NoError(t, err) + require.NotNil(t, conn) + } else { + require.Nil(t, conn) + require.Errorf(t, err, "ensure Vault is Enterprise version 1.16 or above") + } +} + +// TestOpenWebSocketConnectionReceivesEventsDefaultMount tests that the openWebSocketConnection function +// works as expected with the default KVV1 mount, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. +func TestOpenWebSocketConnectionReceivesEventsDefaultMount(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + // Put a secret, which should trigger an event + err = client.KVv1("secret").Put(context.Background(), "foo", makeData(100)) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + err = client.KVv1("secret").Put(context.Background(), "foo", makeData(i)) + if err != nil { + t.Fatal(err) + } + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + _, message, err := conn.Read(context.Background()) + if err != nil { + t.Fatal(err) + } + t.Log(string(message)) + } +} + +// TestOpenWebSocketConnectionReceivesEventsKVV1 tests that the openWebSocketConnection function +// works as expected with KVV1, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. +func TestOpenWebSocketConnectionReceivesEventsKVV1(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. 
+ cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + err = client.Sys().Mount("secret-v1", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + // Put a secret, which should trigger an event + err = client.KVv1("secret-v1").Put(context.Background(), "foo", makeData(100)) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + err = client.KVv1("secret-v1").Put(context.Background(), "foo", makeData(i)) + if err != nil { + t.Fatal(err) + } + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + _, _, err := conn.Read(context.Background()) + if err != nil { + t.Fatal(err) + } + } +} + +// TestOpenWebSocketConnectionReceivesEvents tests that the openWebSocketConnection function +// works as expected with KVV2, and then the connection can be used to receive an event. +// This acts as more of an event system sanity check than a test of the updater +// logic. It's still important coverage, though. +func TestOpenWebSocketConnectionReceivesEventsKVV2(t *testing.T) { + if !constants.IsEnterprise { + t.Skip("test can only run on enterprise due to requiring the event notification system") + } + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + conn, err := updater.openWebSocketConnection(context.Background()) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, conn) + + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + makeData := func(i int) map[string]interface{} { + return map[string]interface{}{ + "foo": fmt.Sprintf("bar%d", i), + } + } + + err = client.Sys().Mount("secret-v2", &api.MountInput{ + Type: "kv-v2", + }) + if err != nil { + t.Fatal(err) + } + + // Put a secret, which should trigger an event + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", makeData(100)) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 5; i++ { + // Do a fresh PUT just to refresh the secret and send a new message + _, err = client.KVv2("secret-v2").Put(context.Background(), "foo", makeData(i)) + if err != nil { + t.Fatal(err) + } + + // This method blocks until it gets a secret, so this test + // will only pass if we're receiving events correctly. + _, _, err := conn.Read(context.Background()) + if err != nil { + t.Fatal(err) + } + } +} + +// TestOpenWebSocketConnectionTestServer tests that the openWebSocketConnection function +// works as expected using vaulthttp.TestServer. This server isn't TLS enabled, so tests +// the ws path (as opposed to the wss) path. 
+func TestOpenWebSocketConnectionTestServer(t *testing.T) {
+	if !constants.IsEnterprise {
+		t.Skip("test can only run on enterprise due to requiring the event notification system")
+	}
+	t.Parallel()
+	// We need a valid cluster for the connection to succeed.
+	core := vault.TestCoreWithConfig(t, &vault.CoreConfig{})
+	ln, addr := vaulthttp.TestServer(t, core)
+	defer ln.Close()
+
+	keys, rootToken := vault.TestCoreInit(t, core)
+	for _, key := range keys {
+		_, err := core.Unseal(key)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	config := api.DefaultConfig()
+	config.Address = addr
+	client, err := api.NewClient(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client.SetToken(rootToken)
+	updater := testNewStaticSecretCacheUpdater(t, client)
+
+	conn, err := updater.openWebSocketConnection(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.NotNil(t, conn)
+}
+
+// Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets tests that an event will
+// properly update the corresponding secret in Proxy's cache. This is a little more end-to-end-y
+// than TestUpdateStaticSecret, and essentially is testing a similar thing, though is
+// ensuring that updateStaticSecret gets called by the event arriving
+// (as part of streamStaticSecretEvents) instead of testing calling it explicitly.
+func Test_StreamStaticSecretEvents_UpdatesCacheWithNewSecrets(t *testing.T) {
+	if !constants.IsEnterprise {
+		t.Skip("test can only run on enterprise due to requiring the event notification system")
+	}
+	t.Parallel()
+	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"kv": kv.VersionedKVFactory,
+		},
+	}, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	client := cluster.Cores[0].Client
+
+	updater := testNewStaticSecretCacheUpdater(t, client)
+	leaseCache := updater.leaseCache
+
+	wg := &sync.WaitGroup{}
+	// Add to the WaitGroup before starting the goroutine to avoid racing
+	// with any waiter.
+	wg.Add(1)
+	runStreamStaticSecretEvents := func() {
+		err := updater.streamStaticSecretEvents(context.Background())
+		if err != nil {
+			// t.Fatal must not be called from a goroutine other than the one
+			// running the test, so report the failure with t.Error instead.
+			t.Error(err)
+		}
+	}
+	go runStreamStaticSecretEvents()
+
+	// First, create the secret in the cache that we expect to be updated:
+	path := "secret-v2/data/foo"
+	indexId := hashStaticSecretIndex(path)
+	initialTime := time.Now().UTC()
+	// pre-populate the leaseCache with a secret to update
+	index := &cachememdb.Index{
+		Namespace:   "root/",
+		RequestPath: path,
+		LastRenewed: initialTime,
+		ID:          indexId,
+		// Valid token provided, so update should work.
+		Tokens:   map[string]struct{}{client.Token(): {}},
+		Response: []byte{},
+	}
+	err := leaseCache.db.Set(index)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	secretData := map[string]interface{}{
+		"foo": "bar",
+	}
+
+	err = client.Sys().Mount("secret-v2", &api.MountInput{
+		Type: "kv-v2",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the event stream to be fully up and running. Should be faster than this in reality, but
+	// we make it five seconds to protect against CI flakiness.
+	time.Sleep(5 * time.Second)
+
+	// Put a secret, which should trigger an event
+	_, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the event to arrive. Events are usually much, much faster
+	// than this, but we make it five seconds to protect against CI flakiness.
+ time.Sleep(5 * time.Second) + + // Then, do a GET to see if the index got updated by the event + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, newIndex) + require.NotEqual(t, []byte{}, newIndex.Response) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) + + wg.Done() +} + +// TestUpdateStaticSecret tests that updateStaticSecret works as expected, reaching out +// to Vault to get an updated secret when called. +func TestUpdateStaticSecret(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + path := "secret/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: "secret/foo", + LastRenewed: initialTime, + ID: indexId, + // Valid token provided, so update should work. + Tokens: map[string]struct{}{client.Token(): {}}, + Response: []byte{}, + } + err := leaseCache.db.Set(index) + if err != nil { + t.Fatal(err) + } + + secretData := map[string]interface{}{ + "foo": "bar", + } + + // create the secret in Vault. n.b. the test cluster has already mounted the KVv1 backend at "secret" + err = client.KVv1("secret").Put(context.Background(), "foo", secretData) + if err != nil { + t.Fatal(err) + } + + // attempt the update + err = updater.updateStaticSecret(context.Background(), path) + if err != nil { + t.Fatal(err) + } + + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, newIndex) + require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index") + require.NotEqual(t, []byte{}, newIndex.Response) + require.Equal(t, index.RequestPath, newIndex.RequestPath) + require.Equal(t, index.Tokens, newIndex.Tokens) +} + +// TestUpdateStaticSecret_EvictsIfInvalidTokens tests that updateStaticSecret will +// evict secrets from the cache if no valid tokens are left. +func TestUpdateStaticSecret_EvictsIfInvalidTokens(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + path := "secret/foo" + indexId := hashStaticSecretIndex(path) + renewTime := time.Now().UTC() + + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: "secret/foo", + LastRenewed: renewTime, + ID: indexId, + // Note: invalid Tokens value provided, so this secret cannot be updated, and must be evicted + Tokens: map[string]struct{}{"invalid token": {}}, + } + err := leaseCache.db.Set(index) + if err != nil { + t.Fatal(err) + } + + secretData := map[string]interface{}{ + "foo": "bar", + } + + // create the secret in Vault. n.b. 
the test cluster has already mounted the KVv1 backend at "secret" + err = client.KVv1("secret").Put(context.Background(), "foo", secretData) + if err != nil { + t.Fatal(err) + } + + // attempt the update + err = updater.updateStaticSecret(context.Background(), path) + if err != nil { + t.Fatal(err) + } + + newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId) + require.Equal(t, cachememdb.ErrCacheItemNotFound, err) + require.Nil(t, newIndex) +} + +// TestUpdateStaticSecret_HandlesNonCachedPaths tests that updateStaticSecret +// doesn't fail or error if we try and give it an update to a path that isn't cached. +func TestUpdateStaticSecret_HandlesNonCachedPaths(t *testing.T) { + t.Parallel() + // We need a valid cluster for the connection to succeed. + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + + path := "secret/foo" + + // attempt the update + err := updater.updateStaticSecret(context.Background(), path) + if err != nil { + t.Fatal(err) + } + require.Nil(t, err) +} + +// TestPreEventStreamUpdate tests that preEventStreamUpdate correctly +// updates old static secrets in the cache. +func TestPreEventStreamUpdate(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + client := cluster.Cores[0].Client + + updater := testNewStaticSecretCacheUpdater(t, client) + leaseCache := updater.leaseCache + + // First, create the secret in the cache that we expect to be updated: + path := "secret-v2/data/foo" + indexId := hashStaticSecretIndex(path) + initialTime := time.Now().UTC() + // pre-populate the leaseCache with a secret to update + index := &cachememdb.Index{ + Namespace: "root/", + RequestPath: path, + LastRenewed: initialTime, + ID: indexId, + // Valid token provided, so update should work. 
+		Tokens:   map[string]struct{}{client.Token(): {}},
+		Response: []byte{},
+		Type:     cacheboltdb.StaticSecretType,
+	}
+	err := leaseCache.db.Set(index)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	secretData := map[string]interface{}{
+		"foo": "bar",
+	}
+
+	err = client.Sys().Mount("secret-v2", &api.MountInput{
+		Type: "kv-v2",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Put a secret (with different values from what's currently in the cache)
+	_, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// perform the pre-event stream update:
+	err = updater.preEventStreamUpdate(context.Background())
+	require.Nil(t, err)
+
+	// Then, do a GET to see if the index got updated
+	newIndex, err := leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+	require.Nil(t, err)
+	require.NotNil(t, newIndex)
+	require.NotEqual(t, []byte{}, newIndex.Response)
+	require.Truef(t, initialTime.Before(newIndex.LastRenewed), "last updated time not updated on index")
+	require.Equal(t, index.RequestPath, newIndex.RequestPath)
+	require.Equal(t, index.Tokens, newIndex.Tokens)
+}
+
+// TestPreEventStreamUpdateErrorUpdating tests that preEventStreamUpdate correctly responds
+// to errors on secret updates.
+func TestPreEventStreamUpdateErrorUpdating(t *testing.T) {
+	t.Parallel()
+	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"kv": kv.VersionedKVFactory,
+		},
+	}, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	client := cluster.Cores[0].Client
+
+	updater := testNewStaticSecretCacheUpdater(t, client)
+	leaseCache := updater.leaseCache
+
+	// First, create the secret in the cache that we expect to be updated:
+	path := "secret-v2/data/foo"
+	indexId := hashStaticSecretIndex(path)
+	initialTime := time.Now().UTC()
+	// pre-populate the leaseCache with a secret to update
+	index := &cachememdb.Index{
+		Namespace:   "root/",
+		RequestPath: path,
+		LastRenewed: initialTime,
+		ID:          indexId,
+		// Valid token provided, so update should work.
+		Tokens:   map[string]struct{}{client.Token(): {}},
+		Response: []byte{},
+		Type:     cacheboltdb.StaticSecretType,
+	}
+	err := leaseCache.db.Set(index)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	secretData := map[string]interface{}{
+		"foo": "bar",
+	}
+
+	err = client.Sys().Mount("secret-v2", &api.MountInput{
+		Type: "kv-v2",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Put a secret (with different values from what's currently in the cache)
+	_, err = client.KVv2("secret-v2").Put(context.Background(), "foo", secretData)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Seal Vault, so that the update will fail
+	cluster.EnsureCoresSealed(t)
+
+	// perform the pre-event stream update:
+	err = updater.preEventStreamUpdate(context.Background())
+	require.Nil(t, err)
+
+	// Then, we expect the index to be evicted since the secret failed to update
+	_, err = leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+} diff --git a/command/agentproxyshared/cache/static_secret_capability_manager.go b/command/agentproxyshared/cache/static_secret_capability_manager.go new file mode 100644 index 000000000000..46de4740dd9b --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_capability_manager.go @@ -0,0 +1,261 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/gammazero/workerpool"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
+	"github.com/mitchellh/mapstructure"
+	"golang.org/x/exp/maps"
+)
+
+const (
+	// DefaultWorkers is the default number of workers for the worker pool.
+	DefaultWorkers = 5
+
+	// DefaultStaticSecretTokenCapabilityRefreshInterval is the default time
+	// between each capability poll. This is configured with the following config value:
+	// static_secret_token_capability_refresh_interval
+	DefaultStaticSecretTokenCapabilityRefreshInterval = 5 * time.Minute
+)
+
+// StaticSecretCapabilityManager uses a worker pool to keep the cached token
+// capabilities for static secrets up to date.
+type StaticSecretCapabilityManager struct {
+	client                                     *api.Client
+	leaseCache                                 *LeaseCache
+	logger                                     hclog.Logger
+	workerPool                                 *workerpool.WorkerPool
+	staticSecretTokenCapabilityRefreshInterval time.Duration
+}
+
+// StaticSecretCapabilityManagerConfig is the configuration for initializing a new
+// StaticSecretCapabilityManager.
+type StaticSecretCapabilityManagerConfig struct {
+	LeaseCache *LeaseCache
+	Logger     hclog.Logger
+	Client     *api.Client
+	StaticSecretTokenCapabilityRefreshInterval time.Duration
+}
+
+// NewStaticSecretCapabilityManager creates a new instance of a StaticSecretCapabilityManager.
+func NewStaticSecretCapabilityManager(conf *StaticSecretCapabilityManagerConfig) (*StaticSecretCapabilityManager, error) {
+	if conf == nil {
+		return nil, errors.New("nil configuration provided")
+	}
+
+	if conf.LeaseCache == nil {
+		return nil, fmt.Errorf("nil Lease Cache (a required parameter): %v", conf)
+	}
+
+	if conf.Logger == nil {
+		return nil, fmt.Errorf("nil Logger (a required parameter): %v", conf)
+	}
+
+	if conf.Client == nil {
+		return nil, fmt.Errorf("nil Client (a required parameter): %v", conf)
+	}
+
+	if conf.StaticSecretTokenCapabilityRefreshInterval == 0 {
+		conf.StaticSecretTokenCapabilityRefreshInterval = DefaultStaticSecretTokenCapabilityRefreshInterval
+	}
+
+	workerPool := workerpool.New(DefaultWorkers)
+
+	return &StaticSecretCapabilityManager{
+		client:     conf.Client,
+		leaseCache: conf.LeaseCache,
+		logger:     conf.Logger,
+		workerPool: workerPool,
+		staticSecretTokenCapabilityRefreshInterval: conf.StaticSecretTokenCapabilityRefreshInterval,
+	}, nil
+}
+
+// submitWorkToPoolAfterInterval submits work to the pool after the defined
+// staticSecretTokenCapabilityRefreshInterval.
+func (sscm *StaticSecretCapabilityManager) submitWorkToPoolAfterInterval(work func()) {
+	time.AfterFunc(sscm.staticSecretTokenCapabilityRefreshInterval, func() {
+		if !sscm.workerPool.Stopped() {
+			sscm.workerPool.Submit(work)
+		}
+	})
+}
+
+// Stop stops all ongoing jobs and ensures future jobs will not
+// get added to the worker pool.
+func (sscm *StaticSecretCapabilityManager) Stop() {
+	sscm.workerPool.Stop()
+}
+
+// StartRenewingCapabilities creates a poll job for indexToRenew (the capabilities index
+// whose capabilities we'll renew) and submits it to the worker pool.
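+// The job reschedules itself every staticSecretTokenCapabilityRefreshInterval until the
+// capabilities index no longer exists or is evicted, or until the worker pool is stopped.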
+func (sscm *StaticSecretCapabilityManager) StartRenewingCapabilities(indexToRenew *cachememdb.CapabilitiesIndex) {
+	var work func()
+	work = func() {
+		if sscm.workerPool.Stopped() {
+			sscm.logger.Trace("worker pool stopped, stopping renewal")
+			return
+		}
+
+		capabilitiesIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexToRenew.ID)
+		if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+			// This cache entry no longer exists, so there is no more work to do.
+			sscm.logger.Trace("cache item not found for capabilities refresh, stopping the process")
+			return
+		}
+		if err != nil {
+			sscm.logger.Error("error when attempting to get capabilities index to refresh token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err)
+			sscm.submitWorkToPoolAfterInterval(work)
+			return
+		}
+
+		capabilitiesIndex.IndexLock.RLock()
+		token := capabilitiesIndex.Token
+		indexReadablePathsMap := capabilitiesIndex.ReadablePaths
+		capabilitiesIndex.IndexLock.RUnlock()
+		indexReadablePaths := maps.Keys(indexReadablePathsMap)
+
+		client, err := sscm.client.Clone()
+		if err != nil {
+			sscm.logger.Error("error when attempting to clone client to refresh token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err)
+			sscm.submitWorkToPoolAfterInterval(work)
+			return
+		}
+
+		client.SetToken(token)
+
+		capabilities, err := getCapabilities(indexReadablePaths, client)
+		if err != nil {
+			sscm.logger.Error("error when attempting to retrieve updated token capabilities", "indexToRenew.ID", indexToRenew.ID, "err", err)
+			sscm.submitWorkToPoolAfterInterval(work)
+			return
+		}
+
+		newReadablePaths := reconcileCapabilities(indexReadablePaths, capabilities)
+		if maps.Equal(indexReadablePathsMap, newReadablePaths) {
+			sscm.logger.Trace("capabilities were the same for index, nothing to do", "indexToRenew.ID", indexToRenew.ID)
+			// there's nothing to update!
+			sscm.submitWorkToPoolAfterInterval(work)
+			return
+		}
+
+		// Before updating or evicting the capabilities index, we must update the tokens on
+		// the indexes for each affected path: for each path the token can no longer read,
+		// remove the token from the corresponding index.
+		for _, path := range indexReadablePaths {
+			// If the old path isn't contained in the new readable paths,
+			// we must delete it from the tokens map for its corresponding
+			// path index.
+			if _, ok := newReadablePaths[path]; !ok {
+				indexId := hashStaticSecretIndex(path)
+				index, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, indexId)
+				if errors.Is(err, cachememdb.ErrCacheItemNotFound) {
+					// Nothing to update!
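+					// The path index has already been evicted, so there is no token entry left to remove.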
+					continue
+				}
+				if err != nil {
+					sscm.logger.Error("error when attempting to update corresponding paths for capabilities index", "indexToRenew.ID", indexToRenew.ID, "err", err)
+					sscm.submitWorkToPoolAfterInterval(work)
+					return
+				}
+				sscm.logger.Trace("updating tokens for index, as capability has been lost", "index.ID", index.ID, "request_path", index.RequestPath)
+				index.IndexLock.Lock()
+				delete(index.Tokens, capabilitiesIndex.Token)
+				err = sscm.leaseCache.Set(context.Background(), index)
+				if err != nil {
+					sscm.logger.Error("error when attempting to update index in cache", "index.ID", index.ID, "err", err)
+				}
+				index.IndexLock.Unlock()
+			}
+		}
+
+		// Lastly, we should update the capabilities index, either evicting it or updating it in place
+		capabilitiesIndex.IndexLock.Lock()
+		defer capabilitiesIndex.IndexLock.Unlock()
+		if len(newReadablePaths) == 0 {
+			err := sscm.leaseCache.db.EvictCapabilitiesIndex(cachememdb.IndexNameID, indexToRenew.ID)
+			if err != nil {
+				sscm.logger.Error("error when attempting to evict capabilities from cache", "index.ID", indexToRenew.ID, "err", err)
+				sscm.submitWorkToPoolAfterInterval(work)
+				return
+			}
+			// If we successfully evicted the index, no need to re-submit the work to the pool.
+			return
+		}
+
+		// The token still has some capabilities, so update the capabilities index:
+		capabilitiesIndex.ReadablePaths = newReadablePaths
+		err = sscm.leaseCache.SetCapabilitiesIndex(context.Background(), capabilitiesIndex)
+		if err != nil {
+			sscm.logger.Error("error when attempting to update capabilities index in cache", "index.ID", indexToRenew.ID, "err", err)
+		}
+
+		// Finally, put ourselves back on the worker pool after the refresh interval
+		sscm.submitWorkToPoolAfterInterval(work)
+		return
+	}
+
+	sscm.submitWorkToPoolAfterInterval(work)
+}
+
+// getCapabilities is a wrapper around a /sys/capabilities-self call that returns
+// capabilities as a map with paths as keys, and capabilities as values.
+func getCapabilities(paths []string, client *api.Client) (map[string][]string, error) {
+	body := make(map[string]interface{})
+	body["paths"] = paths
+	capabilities := make(map[string][]string)
+
+	secret, err := client.Logical().Write("sys/capabilities-self", body)
+	if err != nil && strings.Contains(err.Error(), "permission denied") {
+		// The token is invalid or has expired. Return an empty set of capabilities:
+		return capabilities, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if secret == nil || secret.Data == nil {
+		return nil, errors.New("data from server response is empty")
+	}
+
+	for _, path := range paths {
+		var res []string
+		err = mapstructure.Decode(secret.Data[path], &res)
+		if err != nil {
+			return nil, err
+		}
+
+		capabilities[path] = res
+	}
+
+	return capabilities, nil
+}
+
+// reconcileCapabilities takes a set of known readable paths and a set of capabilities (a response
+// from the sys/capabilities-self endpoint), and returns, as a set (a map of strings to empty
+// structs), the subset of readablePaths that still have a "root" or "read" capability listed in
+// the capabilities map.
+func reconcileCapabilities(readablePaths []string, capabilities map[string][]string) map[string]struct{} {
+	newReadablePaths := make(map[string]struct{})
+	for pathName, permissions := range capabilities {
+		if slices.Contains(permissions, "read") || slices.Contains(permissions, "root") {
+			// We do this as an additional sanity check.
We never want to + // add permissions that weren't there before. + if slices.Contains(readablePaths, pathName) { + newReadablePaths[pathName] = struct{}{} + } + } + } + + return newReadablePaths +} diff --git a/command/agentproxyshared/cache/static_secret_capability_manager_test.go b/command/agentproxyshared/cache/static_secret_capability_manager_test.go new file mode 100644 index 000000000000..ca15ca43f18f --- /dev/null +++ b/command/agentproxyshared/cache/static_secret_capability_manager_test.go @@ -0,0 +1,436 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package cache + +import ( + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/stretchr/testify/require" +) + +// testNewStaticSecretCapabilityManager returns a new StaticSecretCapabilityManager +// for use in tests. +func testNewStaticSecretCapabilityManager(t *testing.T, client *api.Client) *StaticSecretCapabilityManager { + t.Helper() + + lc := testNewLeaseCache(t, []*SendResponse{}) + + updater, err := NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"), + Client: client, + StaticSecretTokenCapabilityRefreshInterval: 250 * time.Millisecond, + }) + if err != nil { + t.Fatal(err) + } + + return updater +} + +// TestNewStaticSecretCapabilityManager tests the NewStaticSecretCapabilityManager method, +// to ensure it errors out when appropriate. +func TestNewStaticSecretCapabilityManager(t *testing.T) { + t.Parallel() + + lc := testNewLeaseCache(t, []*SendResponse{}) + logger := logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager") + client, err := api.NewClient(api.DefaultConfig()) + require.Nil(t, err) + + // Expect an error if any of the arguments are nil: + updater, err := NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: nil, + Logger: logger, + Client: client, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: nil, + Client: client, + }) + require.Error(t, err) + require.Nil(t, updater) + + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logger, + Client: nil, + }) + require.Error(t, err) + require.Nil(t, updater) + + // Don't expect an error if the arguments are as expected + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"), + Client: client, + }) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, updater) + require.NotNil(t, updater.workerPool) + require.NotNil(t, updater.staticSecretTokenCapabilityRefreshInterval) + require.NotNil(t, updater.client) + require.NotNil(t, updater.leaseCache) + require.NotNil(t, updater.logger) + require.Equal(t, DefaultStaticSecretTokenCapabilityRefreshInterval, updater.staticSecretTokenCapabilityRefreshInterval) + + // Lastly, double check that the refresh interval can be properly set + updater, err = NewStaticSecretCapabilityManager(&StaticSecretCapabilityManagerConfig{ + LeaseCache: lc, + Logger: 
logging.NewVaultLogger(hclog.Trace).Named("cache.capabilitiesmanager"),
+		Client: client,
+		StaticSecretTokenCapabilityRefreshInterval: time.Hour,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.NotNil(t, updater)
+	require.NotNil(t, updater.workerPool)
+	require.NotNil(t, updater.staticSecretTokenCapabilityRefreshInterval)
+	require.NotNil(t, updater.client)
+	require.NotNil(t, updater.leaseCache)
+	require.NotNil(t, updater.logger)
+	require.Equal(t, time.Hour, updater.staticSecretTokenCapabilityRefreshInterval)
+}
+
+// TestGetCapabilitiesRootToken tests the getCapabilities function with the root
+// token, expecting to get "root" capabilities on valid paths
+func TestGetCapabilitiesRootToken(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	capabilitiesToCheck := []string{"auth/token/create", "sys/health"}
+	capabilities, err := getCapabilities(capabilitiesToCheck, client)
+	require.NoError(t, err)
+
+	expectedCapabilities := map[string][]string{
+		"auth/token/create": {"root"},
+		"sys/health":        {"root"},
+	}
+	require.Equal(t, expectedCapabilities, capabilities)
+}
+
+// TestGetCapabilitiesLowPrivilegeToken tests the getCapabilities function with
+// a low privilege token, expecting to get deny or non-root capabilities
+func TestGetCapabilitiesLowPrivilegeToken(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	renewable := true
+	// Set the token's policies to 'default' and nothing else
+	tokenCreateRequest := &api.TokenCreateRequest{
+		Policies:  []string{"default"},
+		TTL:       "30m",
+		Renewable: &renewable,
+	}
+
+	secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest)
+	require.NoError(t, err)
+	token := secret.Auth.ClientToken
+
+	client.SetToken(token)
+
+	capabilitiesToCheck := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"}
+	capabilities, err := getCapabilities(capabilitiesToCheck, client)
+	require.NoError(t, err)
+
+	expectedCapabilities := map[string][]string{
+		"auth/token/create":      {"deny"},
+		"sys/capabilities-self":  {"update"},
+		"auth/token/lookup-self": {"read"},
+	}
+	require.Equal(t, expectedCapabilities, capabilities)
+}
+
+// TestGetCapabilitiesBadClientToken tests that getCapabilities
+// returns an empty set of capabilities if the token is bad (and it gets a 403)
+func TestGetCapabilitiesBadClientToken(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+	client.SetToken("")
+
+	capabilitiesToCheck := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"}
+	capabilities, err := getCapabilities(capabilitiesToCheck, client)
+	require.Nil(t, err)
+	require.Equal(t, map[string][]string{}, capabilities)
+}
+
+// TestGetCapabilitiesEmptyPaths tests that getCapabilities will error on an empty
+// set of paths to check
+func TestGetCapabilitiesEmptyPaths(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	var capabilitiesToCheck []string
+	_, err := getCapabilities(capabilitiesToCheck, client)
+	require.Error(t, err)
+}
+
+// TestReconcileCapabilities tests that reconcileCapabilities correctly removes
+// previously readable paths that we no longer have read access to.
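+// For example, given paths {"a", "b"} and a capabilities response of
+// {"a": {"read"}, "b": {"deny"}}, only "a" survives in the returned set.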
+func TestReconcileCapabilities(t *testing.T) { + t.Parallel() + paths := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities := map[string][]string{ + "auth/token/create": {"deny"}, + "sys/capabilities-self": {"update"}, + "auth/token/lookup-self": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "auth/token/lookup-self": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestReconcileCapabilitiesNoOp tests that reconcileCapabilities will +// correctly not remove capabilities when they all remain readable. +func TestReconcileCapabilitiesNoOp(t *testing.T) { + t.Parallel() + paths := []string{"foo/bar", "bar/baz", "baz/foo"} + capabilities := map[string][]string{ + "foo/bar": {"read"}, + "bar/baz": {"root"}, + "baz/foo": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "foo/bar": {}, + "bar/baz": {}, + "baz/foo": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestReconcileCapabilitiesNoAdding tests that reconcileCapabilities will +// not add any capabilities that weren't present in the first argument to the function +func TestReconcileCapabilitiesNoAdding(t *testing.T) { + t.Parallel() + paths := []string{"auth/token/create", "sys/capabilities-self", "auth/token/lookup-self"} + capabilities := map[string][]string{ + "auth/token/create": {"deny"}, + "sys/capabilities-self": {"update"}, + "auth/token/lookup-self": {"read"}, + "some/new/path": {"read"}, + } + + updatedCapabilities := reconcileCapabilities(paths, capabilities) + expectedUpdatedCapabilities := map[string]struct{}{ + "auth/token/lookup-self": {}, + } + require.Equal(t, expectedUpdatedCapabilities, updatedCapabilities) +} + +// TestSubmitWorkNoOp tests that we will gracefully end if the capabilities index +// does not exist in the cache +func TestSubmitWorkNoOp(t *testing.T) { + t.Parallel() + client, err := api.NewClient(api.DefaultConfig()) + require.Nil(t, err) + sscm := testNewStaticSecretCapabilityManager(t, client) + // This index will be a no-op, as this does not exist in the cache + index := &cachememdb.CapabilitiesIndex{ + ID: "test", + } + sscm.StartRenewingCapabilities(index) + + // Wait for the job to complete... + time.Sleep(1 * time.Second) + require.Equal(t, 0, sscm.workerPool.WaitingQueueSize()) +} + +// TestSubmitWorkUpdatesIndex tests that an index will be correctly updated if the capabilities differ. 
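+// After the poll job runs, the index's ReadablePaths should shrink to only the paths
+// the token can still read.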
+func TestSubmitWorkUpdatesIndex(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	// Create a low permission token
+	renewable := true
+	// Set the token's policies to 'default' and nothing else
+	tokenCreateRequest := &api.TokenCreateRequest{
+		Policies:  []string{"default"},
+		TTL:       "30m",
+		Renewable: &renewable,
+	}
+
+	secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest)
+	require.NoError(t, err)
+	token := secret.Auth.ClientToken
+	indexId := hashStaticSecretIndex(token)
+
+	sscm := testNewStaticSecretCapabilityManager(t, client)
+	index := &cachememdb.CapabilitiesIndex{
+		ID:    indexId,
+		Token: token,
+		// The token will (perhaps obviously) not have read access
+		// to /foo/bar, but will have it to /auth/token/lookup-self
+		ReadablePaths: map[string]struct{}{
+			"foo/bar":                {},
+			"auth/token/lookup-self": {},
+		},
+	}
+	err = sscm.leaseCache.db.SetCapabilitiesIndex(index)
+	require.Nil(t, err)
+
+	sscm.StartRenewingCapabilities(index)
+
+	// Wait for the job to complete at least once...
+	time.Sleep(3 * time.Second)
+
+	newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId)
+	require.Nil(t, err)
+	newIndex.IndexLock.RLock()
+	require.Equal(t, map[string]struct{}{
+		"auth/token/lookup-self": {},
+	}, newIndex.ReadablePaths)
+	newIndex.IndexLock.RUnlock()
+
+	// Forcefully stop any remaining workers
+	sscm.workerPool.Stop()
+}
+
+// TestSubmitWorkUpdatesIndexWithBadToken tests that an index will be correctly evicted if the token
+// has expired (or is invalid) and we cannot access the sys capabilities endpoint.
+func TestSubmitWorkUpdatesIndexWithBadToken(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	token := "not real token"
+	indexId := hashStaticSecretIndex(token)
+
+	sscm := testNewStaticSecretCapabilityManager(t, client)
+	index := &cachememdb.CapabilitiesIndex{
+		ID:    indexId,
+		Token: token,
+		ReadablePaths: map[string]struct{}{
+			"foo/bar":                {},
+			"auth/token/lookup-self": {},
+		},
+	}
+	err := sscm.leaseCache.db.SetCapabilitiesIndex(index)
+	require.Nil(t, err)
+
+	sscm.StartRenewingCapabilities(index)
+
+	// Wait for the job to complete at least once...
+	time.Sleep(3 * time.Second)
+
+	// This entry should be evicted.
+	newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId)
+	require.Equal(t, cachememdb.ErrCacheItemNotFound, err)
+	require.Nil(t, newIndex)
+
+	// Forcefully stop any remaining workers
+	sscm.workerPool.Stop()
+}
+
+// TestSubmitWorkUpdatesAllIndexes tests that the capabilities index is correctly updated when
+// capabilities differ, and that the indexes for the affected paths are updated as well.
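+// Losing read access to a path should also remove the token from that path's cached index.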
+func TestSubmitWorkUpdatesAllIndexes(t *testing.T) {
+	t.Parallel()
+	cluster := minimal.NewTestSoloCluster(t, nil)
+	client := cluster.Cores[0].Client
+
+	// Create a low permission token
+	renewable := true
+	// Set the token's policies to 'default' and nothing else
+	tokenCreateRequest := &api.TokenCreateRequest{
+		Policies:  []string{"default"},
+		TTL:       "30m",
+		Renewable: &renewable,
+	}
+
+	secret, err := client.Auth().Token().CreateOrphan(tokenCreateRequest)
+	require.NoError(t, err)
+	token := secret.Auth.ClientToken
+	indexId := hashStaticSecretIndex(token)
+
+	sscm := testNewStaticSecretCapabilityManager(t, client)
+	index := &cachememdb.CapabilitiesIndex{
+		ID:    indexId,
+		Token: token,
+		// The token will (perhaps obviously) not have read access
+		// to /foo/bar, but will have it to /auth/token/lookup-self
+		ReadablePaths: map[string]struct{}{
+			"foo/bar":                {},
+			"auth/token/lookup-self": {},
+		},
+	}
+	err = sscm.leaseCache.db.SetCapabilitiesIndex(index)
+	require.Nil(t, err)
+
+	pathIndexId1 := hashStaticSecretIndex("foo/bar")
+	pathIndex1 := &cachememdb.Index{
+		ID:        pathIndexId1,
+		Namespace: "root/",
+		Tokens: map[string]struct{}{
+			token: {},
+		},
+		RequestPath: "foo/bar",
+		Response:    []byte{},
+	}
+
+	pathIndexId2 := hashStaticSecretIndex("auth/token/lookup-self")
+	pathIndex2 := &cachememdb.Index{
+		ID:        pathIndexId2,
+		Namespace: "root/",
+		Tokens: map[string]struct{}{
+			token: {},
+		},
+		RequestPath: "auth/token/lookup-self",
+		Response:    []byte{},
+	}
+
+	err = sscm.leaseCache.db.Set(pathIndex1)
+	require.Nil(t, err)
+
+	err = sscm.leaseCache.db.Set(pathIndex2)
+	require.Nil(t, err)
+
+	sscm.StartRenewingCapabilities(index)
+
+	// Wait for the job to complete at least once...
+	time.Sleep(1 * time.Second)
+
+	newIndex, err := sscm.leaseCache.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId)
+	require.Nil(t, err)
+	newIndex.IndexLock.RLock()
+	require.Equal(t, map[string]struct{}{
+		"auth/token/lookup-self": {},
+	}, newIndex.ReadablePaths)
+	newIndex.IndexLock.RUnlock()
+
+	// For this, we expect the token to have been deleted
+	newPathIndex1, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, pathIndexId1)
+	require.Nil(t, err)
+	require.Equal(t, map[string]struct{}{}, newPathIndex1.Tokens)
+
+	// For this, we expect no change
+	newPathIndex2, err := sscm.leaseCache.db.Get(cachememdb.IndexNameID, pathIndexId2)
+	require.Nil(t, err)
+	require.Equal(t, pathIndex2, newPathIndex2)
+
+	// Forcefully stop any remaining workers
+	sscm.workerPool.Stop()
+} diff --git a/command/agentproxyshared/cache/testing.go b/command/agentproxyshared/cache/testing.go new file mode 100644 index 000000000000..4bc2e1d02521 --- /dev/null +++ b/command/agentproxyshared/cache/testing.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package cache
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/helper/useragent"
+
+	"github.com/hashicorp/vault/api"
+)
+
+// mockProxier is a mock implementation of the Proxier interface, used for testing purposes.
+// The mock returns the provided responses, in order, on each call to its Send method,
+// erroring once they are exhausted. This lets tests control what the next/underlying
+// Proxier layer will return.
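+// A minimal usage sketch (in a test, assuming ctx and a SendRequest req are in scope):
+//
+//	proxier := NewMockProxier([]*SendResponse{newTestSendResponse(http.StatusOK, `{"value": "output"}`)})
+//	resp, err := proxier.Send(ctx, req) // returns the first (and only) canned response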
+type mockProxier struct { + proxiedResponses []*SendResponse + responseIndex int +} + +func NewMockProxier(responses []*SendResponse) *mockProxier { + return &mockProxier{ + proxiedResponses: responses, + } +} + +func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.responseIndex >= len(p.proxiedResponses) { + return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses)) + } + resp := p.proxiedResponses[p.responseIndex] + + p.responseIndex++ + + return resp, nil +} + +func (p *mockProxier) ResponseIndex() int { + return p.responseIndex +} + +func newTestSendResponse(status int, body string) *SendResponse { + headers := make(http.Header) + headers.Add("User-Agent", useragent.AgentProxyString()) + resp := &SendResponse{ + Response: &api.Response{ + Response: &http.Response{ + StatusCode: status, + Header: headers, + }, + }, + } + resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat)) + + if body != "" { + resp.Response.Body = io.NopCloser(strings.NewReader(body)) + resp.ResponseBody = []byte(body) + } + + if json.Valid([]byte(body)) { + resp.Response.Header.Set("content-type", "application/json") + } + + return resp +} + +type mockTokenVerifierProxier struct { + currentToken string +} + +func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + p.currentToken = req.Token + resp := newTestSendResponse(http.StatusOK, + `{"data": {"id": "`+p.currentToken+`"}}`) + + return resp, nil +} + +func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string { + return p.currentToken +} + +type mockDelayProxier struct { + cacheableResp bool + delay int +} + +func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + if p.delay > 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(time.Duration(p.delay) * time.Millisecond): + } + } + + // If this is a cacheable response, we return a unique response every time + if p.cacheableResp { + rand.Seed(time.Now().Unix()) + s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int()) + return newTestSendResponse(http.StatusOK, s), nil + } + + return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil +} diff --git a/command/agentproxyshared/cache/wheninconsistentaction_enumer.go b/command/agentproxyshared/cache/wheninconsistentaction_enumer.go new file mode 100644 index 000000000000..fdbf58e1baf7 --- /dev/null +++ b/command/agentproxyshared/cache/wheninconsistentaction_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=WhenInconsistentAction -trimprefix=WhenInconsistent"; DO NOT EDIT. 
+ +package cache + +import ( + "fmt" +) + +const _WhenInconsistentActionName = "FailRetryForward" + +var _WhenInconsistentActionIndex = [...]uint8{0, 4, 9, 16} + +func (i WhenInconsistentAction) String() string { + if i < 0 || i >= WhenInconsistentAction(len(_WhenInconsistentActionIndex)-1) { + return fmt.Sprintf("WhenInconsistentAction(%d)", i) + } + return _WhenInconsistentActionName[_WhenInconsistentActionIndex[i]:_WhenInconsistentActionIndex[i+1]] +} + +var _WhenInconsistentActionValues = []WhenInconsistentAction{0, 1, 2} + +var _WhenInconsistentActionNameToValueMap = map[string]WhenInconsistentAction{ + _WhenInconsistentActionName[0:4]: 0, + _WhenInconsistentActionName[4:9]: 1, + _WhenInconsistentActionName[9:16]: 2, +} + +// WhenInconsistentActionString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func WhenInconsistentActionString(s string) (WhenInconsistentAction, error) { + if val, ok := _WhenInconsistentActionNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to WhenInconsistentAction values", s) +} + +// WhenInconsistentActionValues returns all values of the enum +func WhenInconsistentActionValues() []WhenInconsistentAction { + return _WhenInconsistentActionValues +} + +// IsAWhenInconsistentAction returns "true" if the value is listed in the enum definition. "false" otherwise +func (i WhenInconsistentAction) IsAWhenInconsistentAction() bool { + for _, v := range _WhenInconsistentActionValues { + if i == v { + return true + } + } + return false +} diff --git a/command/agentproxyshared/helpers.go b/command/agentproxyshared/helpers.go new file mode 100644 index 000000000000..f1ef47cfee33 --- /dev/null +++ b/command/agentproxyshared/helpers.go @@ -0,0 +1,240 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package agentproxyshared + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" + "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" + "github.com/hashicorp/vault/command/agentproxyshared/auth/azure" + "github.com/hashicorp/vault/command/agentproxyshared/auth/cert" + "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" + "github.com/hashicorp/vault/command/agentproxyshared/auth/gcp" + "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" + "github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos" + "github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes" + "github.com/hashicorp/vault/command/agentproxyshared/auth/ldap" + "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" + token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" +) + +// GetAutoAuthMethodFromConfig Calls the appropriate NewAutoAuthMethod function, initializing +// the auto-auth method, based on the auto-auth method type. Returns an error if one happens or +// the method type is invalid. 
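+// For example (a sketch; the method types are exactly those handled in the switch below),
+// passing autoAuthMethodType "approle" returns approle.NewApproleAuthMethod(authConfig).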
+func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) {
+	switch autoAuthMethodType {
+	case "alicloud":
+		return alicloud.NewAliCloudAuthMethod(authConfig)
+	case "aws":
+		return aws.NewAWSAuthMethod(authConfig)
+	case "azure":
+		return azure.NewAzureAuthMethod(authConfig)
+	case "cert":
+		return cert.NewCertAuthMethod(authConfig)
+	case "cf":
+		return cf.NewCFAuthMethod(authConfig)
+	case "gcp":
+		return gcp.NewGCPAuthMethod(authConfig)
+	case "jwt":
+		return jwt.NewJWTAuthMethod(authConfig)
+	case "kerberos":
+		return kerberos.NewKerberosAuthMethod(authConfig)
+	case "kubernetes":
+		return kubernetes.NewKubernetesAuthMethod(authConfig)
+	case "approle":
+		return approle.NewApproleAuthMethod(authConfig)
+	case "oci":
+		return oci.NewOCIAuthMethod(authConfig, vaultAddress)
+	case "token_file":
+		return token_file.NewTokenFileAuthMethod(authConfig)
+	case "pcf": // Deprecated.
+		return cf.NewCFAuthMethod(authConfig)
+	case "ldap":
+		return ldap.NewLdapAuthMethod(authConfig)
+	default:
+		return nil, fmt.Errorf("unknown auth method %q", autoAuthMethodType)
+	}
+}
+
+// PersistConfig contains configuration needed for persistent caching
+type PersistConfig struct {
+	Type                    string
+	Path                    string `hcl:"path"`
+	KeepAfterImport         bool   `hcl:"keep_after_import"`
+	ExitOnErr               bool   `hcl:"exit_on_err"`
+	ServiceAccountTokenFile string `hcl:"service_account_token_file"`
+}
+
+// AddPersistentStorageToLeaseCache adds persistence to a lease cache, based on a given PersistConfig.
+// Returns a close function to be deferred and the old token, if found, or an error.
+func AddPersistentStorageToLeaseCache(ctx context.Context, leaseCache *cache.LeaseCache, persistConfig *PersistConfig, logger log.Logger) (func() error, string, error) {
+	if persistConfig == nil {
+		return nil, "", errors.New("persist config was nil")
+	}
+
+	if persistConfig.Path == "" {
+		return nil, "", errors.New("must specify persistent cache path")
+	}
+
+	// Set AAD based on key protection type
+	var aad string
+	var err error
+	switch persistConfig.Type {
+	case "kubernetes":
+		aad, err = getServiceAccountJWT(persistConfig.ServiceAccountTokenFile)
+		if err != nil {
+			tokenFileName := persistConfig.ServiceAccountTokenFile
+			if len(tokenFileName) == 0 {
+				tokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+			}
+			return nil, "", fmt.Errorf("failed to read service account token from %s: %w", tokenFileName, err)
+		}
+	default:
+		return nil, "", fmt.Errorf("persistent key protection type %q not supported", persistConfig.Type)
+	}
+
+	// Check if bolt file exists already
+	dbFileExists, err := cacheboltdb.DBFileExists(persistConfig.Path)
+	if err != nil {
+		return nil, "", fmt.Errorf("failed to check if bolt file exists at path %s: %w", persistConfig.Path, err)
+	}
+	if dbFileExists {
+		// Open the bolt file, but wait to set up encryption
+		ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+			Path:   persistConfig.Path,
+			Logger: logger.Named("cacheboltdb"),
+		})
+		if err != nil {
+			return nil, "", fmt.Errorf("error opening persistent cache: %w", err)
+		}
+
+		// Get the token from bolt for retrieving the encryption key,
+		// then set up encryption so that restore is possible
+		token, err := ps.GetRetrievalToken()
+		if err != nil {
+			return nil, "", fmt.Errorf("error getting retrieval token from persistent cache: %w", err)
+		}
+
+		if err := ps.Close(); err != nil {
+			return nil, "", fmt.Errorf("failed to close persistent
cache file after getting retrieval token: %w", err)
+		}
+
+		km, err := keymanager.NewPassthroughKeyManager(ctx, token)
+		if err != nil {
+			return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err)
+		}
+
+		// Open the bolt file with the wrapper provided
+		ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+			Path:    persistConfig.Path,
+			Logger:  logger.Named("cacheboltdb"),
+			Wrapper: km.Wrapper(),
+			AAD:     aad,
+		})
+		if err != nil {
+			return nil, "", fmt.Errorf("error opening persistent cache with wrapper: %w", err)
+		}
+
+		// Restore anything in the persistent cache to the memory cache
+		if err := leaseCache.Restore(ctx, ps); err != nil {
+			logger.Error(fmt.Sprintf("error restoring in-memory cache from persisted file: %v", err))
+			if persistConfig.ExitOnErr {
+				return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+			}
+		}
+		logger.Info("loaded memcache from persistent storage")
+
+		// Check for previous auto-auth token
+		oldTokenBytes, err := ps.GetAutoAuthToken(ctx)
+		if err != nil {
+			logger.Error(fmt.Sprintf("error in fetching previous auto-auth token: %v", err))
+			if persistConfig.ExitOnErr {
+				return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+			}
+		}
+		var previousToken string
+		if len(oldTokenBytes) > 0 {
+			oldToken, err := cachememdb.Deserialize(oldTokenBytes)
+			if err != nil {
+				logger.Error(fmt.Sprintf("error in deserializing previous auto-auth token cache entry: %v", err))
+				if persistConfig.ExitOnErr {
+					return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+				}
+			}
+			previousToken = oldToken.Token
+		}
+
+		// If keep_after_import is true, set the persistent storage layer in
+		// leaseCache, else remove the db file
+		if persistConfig.KeepAfterImport {
+			leaseCache.SetPersistentStorage(ps)
+			return ps.Close, previousToken, nil
+		} else {
+			if err := ps.Close(); err != nil {
+				logger.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err))
+			}
+			dbFile := filepath.Join(persistConfig.Path, cacheboltdb.DatabaseFileName)
+			if err := os.Remove(dbFile); err != nil {
+				logger.Error(fmt.Sprintf("failed to remove persistent storage file %s: %v", dbFile, err))
+				if persistConfig.ExitOnErr {
+					return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+				}
+			}
+			return nil, previousToken, nil
+		}
+	} else {
+		km, err := keymanager.NewPassthroughKeyManager(ctx, nil)
+		if err != nil {
+			return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err)
+		}
+		ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+			Path:    persistConfig.Path,
+			Logger:  logger.Named("cacheboltdb"),
+			Wrapper: km.Wrapper(),
+			AAD:     aad,
+		})
+		if err != nil {
+			return nil, "", fmt.Errorf("error creating persistent cache: %w", err)
+		}
+		logger.Info("configured persistent storage", "path", persistConfig.Path)
+
+		// Stash the key material in bolt
+		token, err := km.RetrievalToken(ctx)
+		if err != nil {
+			return nil, "", fmt.Errorf("error getting persistence key: %w", err)
+		}
+		if err := ps.StoreRetrievalToken(token); err != nil {
+			return nil, "", fmt.Errorf("error setting key in persistent cache: %w", err)
+		}
+
+		leaseCache.SetPersistentStorage(ps)
+		return ps.Close, "", nil
+	}
+}
+
+// getServiceAccountJWT attempts to read the service account JWT from the specified token file path.
+// Defaults to using the Kubernetes default service account file path if token file path is empty.
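+// The returned token is whitespace-trimmed; above, it is used as the AAD for persistence encryption.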
+func getServiceAccountJWT(tokenFile string) (string, error) { + if len(tokenFile) == 0 { + tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + token, err := os.ReadFile(tokenFile) + if err != nil { + return "", err + } + return strings.TrimSpace(string(token)), nil +} diff --git a/command/agentproxyshared/helpers_test.go b/command/agentproxyshared/helpers_test.go new file mode 100644 index 000000000000..e5f1d6007c86 --- /dev/null +++ b/command/agentproxyshared/helpers_test.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package agentproxyshared + +import ( + "context" + "os" + "testing" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func testNewLeaseCache(t *testing.T, responses []*cache.SendResponse) *cache.LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: cache.NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: "test", + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func populateTempFile(t *testing.T, name, contents string) *os.File { + t.Helper() + + file, err := os.CreateTemp(t.TempDir(), name) + if err != nil { + t.Fatal(err) + } + + _, err = file.WriteString(contents) + if err != nil { + t.Fatal(err) + } + + err = file.Close() + if err != nil { + t.Fatal(err) + } + + return file +} + +// Test_AddPersistentStorageToLeaseCache Tests that AddPersistentStorageToLeaseCache() correctly +// adds persistent storage to a lease cache +func Test_AddPersistentStorageToLeaseCache(t *testing.T) { + tempDir := t.TempDir() + serviceAccountTokenFile := populateTempFile(t, "proxy-config.hcl", "token") + + persistConfig := &PersistConfig{ + Type: "kubernetes", + Path: tempDir, + KeepAfterImport: false, + ExitOnErr: false, + ServiceAccountTokenFile: serviceAccountTokenFile.Name(), + } + + leaseCache := testNewLeaseCache(t, nil) + if leaseCache.PersistentStorage() != nil { + t.Fatal("persistent storage was available before ours was added") + } + + deferFunc, token, err := AddPersistentStorageToLeaseCache(context.Background(), leaseCache, persistConfig, logging.NewVaultLogger(hclog.Info)) + if err != nil { + t.Fatal(err) + } + + if leaseCache.PersistentStorage() == nil { + t.Fatal("persistent storage was not added") + } + + if token != "" { + t.Fatal("expected token to be empty") + } + + if deferFunc == nil { + t.Fatal("expected deferFunc to not be nil") + } +} diff --git a/command/agent/sink/file/file_sink.go b/command/agentproxyshared/sink/file/file_sink.go similarity index 96% rename from command/agent/sink/file/file_sink.go rename to command/agentproxyshared/sink/file/file_sink.go index f2faf5641797..bf26a86b3c7b 100644 --- a/command/agent/sink/file/file_sink.go +++ b/command/agentproxyshared/sink/file/file_sink.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package file import ( @@ -9,7 +12,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" ) // fileSink is a Sink implementation that writes a token to a file diff --git a/command/agent/sink/file/file_sink_test.go b/command/agentproxyshared/sink/file/file_sink_test.go similarity index 95% rename from command/agent/sink/file/file_sink_test.go rename to command/agentproxyshared/sink/file/file_sink_test.go index 9749522b4931..e603c6a32d3d 100644 --- a/command/agent/sink/file/file_sink_test.go +++ b/command/agentproxyshared/sink/file/file_sink_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package file import ( @@ -9,7 +12,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agent/sink/file/sink_test.go b/command/agentproxyshared/sink/file/sink_test.go similarity index 95% rename from command/agent/sink/file/sink_test.go rename to command/agentproxyshared/sink/file/sink_test.go index 839340f0c88d..d061342af979 100644 --- a/command/agent/sink/file/sink_test.go +++ b/command/agentproxyshared/sink/file/sink_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package file import ( @@ -12,7 +15,7 @@ import ( hclog "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "github.com/hashicorp/vault/sdk/helper/logging" ) diff --git a/command/agent/sink/inmem/inmem_sink.go b/command/agentproxyshared/sink/inmem/inmem_sink.go similarity index 81% rename from command/agent/sink/inmem/inmem_sink.go rename to command/agentproxyshared/sink/inmem/inmem_sink.go index 2dfa09115ca7..a9e7ee9a3036 100644 --- a/command/agent/sink/inmem/inmem_sink.go +++ b/command/agentproxyshared/sink/inmem/inmem_sink.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package inmem import ( "errors" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agent/cache" - "github.com/hashicorp/vault/command/agent/sink" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" "go.uber.org/atomic" ) diff --git a/command/agentproxyshared/sink/mock/mock_sink.go b/command/agentproxyshared/sink/mock/mock_sink.go new file mode 100644 index 000000000000..c660da790624 --- /dev/null +++ b/command/agentproxyshared/sink/mock/mock_sink.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package mock + +import ( + "github.com/hashicorp/vault/command/agentproxyshared/sink" +) + +type mockSink struct { + token string +} + +func NewSink(token string) sink.Sink { + return &mockSink{ + token: token, + } +} + +func (m *mockSink) WriteToken(token string) error { + m.token = token + return nil +} + +func (m *mockSink) Token() string { + return m.token +} diff --git a/command/agent/sink/sink.go b/command/agentproxyshared/sink/sink.go similarity index 97% rename from command/agent/sink/sink.go rename to command/agentproxyshared/sink/sink.go index 75ea91dc306b..571153cd5f0b 100644 --- a/command/agent/sink/sink.go +++ b/command/agentproxyshared/sink/sink.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package sink import ( @@ -151,10 +154,12 @@ func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*Si if err := writeSink(st.sink, st.token); err != nil { backoff := 2*time.Second + time.Duration(ss.random.Int63()%int64(time.Second*2)-int64(time.Second)) ss.logger.Error("error returned by sink function, retrying", "error", err, "backoff", backoff.String()) + timer := time.NewTimer(backoff) select { case <-ctx.Done(): + timer.Stop() return nil - case <-time.After(backoff): + case <-timer.C: atomic.AddInt32(ss.remaining, 1) sinkCh <- st } diff --git a/command/agent/winsvc/service.go b/command/agentproxyshared/winsvc/service.go similarity index 76% rename from command/agent/winsvc/service.go rename to command/agentproxyshared/winsvc/service.go index c8d21f5c7d0a..df15971c9ab8 100644 --- a/command/agent/winsvc/service.go +++ b/command/agentproxyshared/winsvc/service.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package winsvc var chanGraceExit = make(chan int) diff --git a/command/agent/winsvc/service_windows.go b/command/agentproxyshared/winsvc/service_windows.go similarity index 93% rename from command/agent/winsvc/service_windows.go rename to command/agentproxyshared/winsvc/service_windows.go index 69177e01fd66..f3807d9ec7db 100644 --- a/command/agent/winsvc/service_windows.go +++ b/command/agentproxyshared/winsvc/service_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build windows package winsvc diff --git a/command/approle_concurrency_integ_test.go b/command/approle_concurrency_integ_test.go index 5dbcce064c8d..b40dd352c989 100644 --- a/command/approle_concurrency_integ_test.go +++ b/command/approle_concurrency_integ_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,6 @@ import ( "sync" "testing" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" auth "github.com/hashicorp/vault/api/auth/approle" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" @@ -17,9 +19,6 @@ import ( func TestAppRole_Integ_ConcurrentLogins(t *testing.T) { var err error coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: log.NewNullLogger(), CredentialBackends: map[string]logical.Factory{ "approle": credAppRole.Factory, }, diff --git a/command/audit.go b/command/audit.go index 8acea78f36c4..1c59140a9a98 100644 --- a/command/audit.go +++ b/command/audit.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*AuditCommand)(nil) diff --git a/command/audit_disable.go b/command/audit_disable.go index ddebfcbeda1b..79914ea5d993 100644 --- a/command/audit_disable.go +++ b/command/audit_disable.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/audit_disable_test.go b/command/audit_disable_test.go index 0a7e8e4dcd99..786140ee326e 100644 --- a/command/audit_disable_test.go +++ b/command/audit_disable_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuditDisableCommand(tb testing.TB) (*cli.MockUi, *AuditDisableCommand) { diff --git a/command/audit_enable.go b/command/audit_enable.go index 9ed7d5d30694..a163f471cc4f 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go index 7d19f086ad58..6c0c769e23ed 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuditEnableCommand(tb testing.TB) (*cli.MockUi, *AuditEnableCommand) { diff --git a/command/audit_list.go b/command/audit_list.go index a2dde2180a84..cf3a16f0f079 100644 --- a/command/audit_list.go +++ b/command/audit_list.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "sort" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/audit_list_test.go b/command/audit_list_test.go index 9cbb0af5eee3..43ddbacf91f2 100644 --- a/command/audit_list_test.go +++ b/command/audit_list_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuditListCommand(tb testing.TB) (*cli.MockUi, *AuditListCommand) { diff --git a/command/auth.go b/command/auth.go index 3c47b2b889c4..57489a186f1d 100644 --- a/command/auth.go +++ b/command/auth.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*AuthCommand)(nil) diff --git a/command/auth_disable.go b/command/auth_disable.go index 773486107a51..1476b71d0f07 100644 --- a/command/auth_disable.go +++ b/command/auth_disable.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_disable_test.go b/command/auth_disable_test.go index 51419b86637a..f9da8a7d770c 100644 --- a/command/auth_disable_test.go +++ b/command/auth_disable_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuthDisableCommand(tb testing.TB) (*cli.MockUi, *AuthDisableCommand) { diff --git a/command/auth_enable.go b/command/auth_enable.go index bddf11cb56aa..dcea5141fcf0 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,9 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -38,6 +40,7 @@ type AuthEnableCommand struct { flagTokenType string flagVersion int flagPluginVersion string + flagIdentityTokenKey string } func (c *AuthEnableCommand) Synopsis() string { @@ -152,7 +155,7 @@ func (c *AuthEnableCommand) Flags() *FlagSets { f.StringVar(&StringVar{ Name: "plugin-name", Target: &c.flagPluginName, - Completion: c.PredictVaultPlugins(consts.PluginTypeCredential), + Completion: c.PredictVaultPlugins(api.PluginTypeCredential), Usage: "Name of the auth method plugin. This plugin name must already " + "exist in the Vault server's plugin catalog.", }) @@ -207,6 +210,13 @@ func (c *AuthEnableCommand) Flags() *FlagSets { Usage: "Select the semantic version of the plugin to enable.", }) + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -310,6 +320,10 @@ func (c *AuthEnableCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { authOpts.Config.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + authOpts.Config.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().EnableAuthWithOptions(authPath, authOpts); err != nil { diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go index 211c1da7ddb4..3467c9b00657 100644 --- a/command/auth_enable_test.go +++ b/command/auth_enable_test.go @@ -1,14 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "io/ioutil" + "sort" "strings" "testing" "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/sdk/helper/strutil" ) func testAuthEnableCommand(tb testing.TB) (*cli.MockUi, *AuthEnableCommand) { @@ -49,18 +55,6 @@ func TestAuthEnableCommand_Run(t *testing.T) { "", 2, }, - { - "deprecated builtin with standard mount", - []string{"app-id"}, - "mount entry associated with pending removal builtin", - 2, - }, - { - "deprecated builtin with different mount", - []string{"-path=/tmp", "app-id"}, - "mount entry associated with pending removal builtin", - 2, - }, } for _, tc := range cases { @@ -105,6 +99,7 @@ func TestAuthEnableCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization", "-listing-visibility", "unauth", + "-identity-token-key", "default", "userpass", }) if exp := 0; code != exp { @@ -144,6 +139,9 @@ func TestAuthEnableCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) } + if diff := deep.Equal("default", authInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff) + } }) t.Run("communication_failure", func(t *testing.T) { @@ -189,7 +187,7 @@ func TestAuthEnableCommand_Run(t *testing.T) { var backends []string for _, f := range files { - if f.IsDir() { + if f.IsDir() && f.Name() != "token" { backends = append(backends, f.Name()) } } @@ -214,12 +212,11 @@ func TestAuthEnableCommand_Run(t *testing.T) { // of credential backends. backends = append(backends, "pcf") - // Add 1 to account for the "token" backend, which is visible when you walk the filesystem but - // is treated as special and excluded from the registry. - // Subtract 1 to account for "oidc" which is an alias of "jwt" and not a separate plugin. - expected := len(builtinplugins.Registry.Keys(consts.PluginTypeCredential)) - if len(backends) != expected { - t.Fatalf("expected %d credential backends, got %d", expected, len(backends)) + regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeCredential), "oidc") + sort.Strings(regkeys) + sort.Strings(backends) + if d := cmp.Diff(regkeys, backends); len(d) > 0 { + t.Fatalf("found credential registry mismatch: %v", d) } for _, b := range backends { diff --git a/command/auth_help.go b/command/auth_help.go index 41ea7be5f5d1..68365e737c3b 100644 --- a/command/auth_help.go +++ b/command/auth_help.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_help_test.go b/command/auth_help_test.go index 0c0d36f168f2..e437f29199e3 100644 --- a/command/auth_help_test.go +++ b/command/auth_help_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" ) diff --git a/command/auth_list.go b/command/auth_list.go index f8b53d1518f3..d095156e155b 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/auth_list_test.go b/command/auth_list_test.go index decf6e9b06f0..087010a8ce35 100644 --- a/command/auth_list_test.go +++ b/command/auth_list_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testAuthListCommand(tb testing.TB) (*cli.MockUi, *AuthListCommand) { diff --git a/command/auth_move.go b/command/auth_move.go index 9e591ba64f0d..3ede5fc49fc9 100644 --- a/command/auth_move.go +++ b/command/auth_move.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/auth_move_test.go b/command/auth_move_test.go index 035938efe5aa..0b585e7e0031 100644 --- a/command/auth_move_test.go +++ b/command/auth_move_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testAuthMoveCommand(tb testing.TB) (*cli.MockUi, *AuthMoveCommand) { diff --git a/command/auth_test.go b/command/auth_test.go index f0fd5d065d8b..9071c0c157a0 100644 --- a/command/auth_test.go +++ b/command/auth_test.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/command/token" ) diff --git a/command/auth_tune.go b/command/auth_tune.go index 6e3d3e7bce8f..56c2d25fae96 100644 --- a/command/auth_tune.go +++ b/command/auth_tune.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -36,6 +39,7 @@ type AuthTuneCommand struct { flagUserLockoutDuration time.Duration flagUserLockoutCounterResetDuration time.Duration flagUserLockoutDisable bool + flagIdentityTokenKey string } func (c *AuthTuneCommand) Synopsis() string { @@ -192,6 +196,13 @@ func (c *AuthTuneCommand) Flags() *FlagSets { "the plugin catalog, and will not start running until the plugin is reloaded.", }) + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -291,6 +302,10 @@ func (c *AuthTuneCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { mountConfigInput.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + mountConfigInput.IdentityTokenKey = c.flagIdentityTokenKey + } }) // Append /auth (since that's where auths live) and a trailing slash to diff --git a/command/auth_tune_test.go b/command/auth_tune_test.go index 635a70f44be8..c9b7923d83de 100644 --- a/command/auth_tune_test.go +++ b/command/auth_tune_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,10 +8,9 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" ) func testAuthTuneCommand(tb testing.TB) (*cli.MockUi, *AuthTuneCommand) { @@ -76,8 +78,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -105,7 +106,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) } - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", consts.PluginTypeCredential) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", api.PluginTypeCredential) code := cmd.Run([]string{ "-description", "new description", @@ -118,6 +119,7 @@ func TestAuthTuneCommand_Run(t *testing.T) { "-allowed-response-headers", "authorization,www-authentication", "-listing-visibility", "unauth", "-plugin-version", version, + "-identity-token-key", "default", "my-auth/", }) if exp := 0; code != exp { @@ -166,6 +168,9 @@ func TestAuthTuneCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. 
Difference is: %v", diff) + } }) t.Run("flags_description", func(t *testing.T) { diff --git a/command/base.go b/command/base.go index 3825db0b1c68..e431036c64fe 100644 --- a/command/base.go +++ b/command/base.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,17 +9,22 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "os" "regexp" "strings" "sync" "time" + "github.com/hashicorp/vault/command/config" + + "github.com/hashicorp/cli" + hcpvlib "github.com/hashicorp/vault-hcp-lib" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/token" "github.com/hashicorp/vault/helper/namespace" "github.com/mattn/go-isatty" - "github.com/mitchellh/cli" + "github.com/mitchellh/go-homedir" "github.com/pkg/errors" "github.com/posener/complete" ) @@ -39,20 +47,20 @@ type BaseCommand struct { flags *FlagSets flagsOnce sync.Once - flagAddress string - flagAgentAddress string - flagCACert string - flagCAPath string - flagClientCert string - flagClientKey string - flagNamespace string - flagNS string - flagPolicyOverride bool - flagTLSServerName string - flagTLSSkipVerify bool - flagDisableRedirects bool - flagWrapTTL time.Duration - flagUnlockKey string + flagAddress string + flagAgentProxyAddress string + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagNamespace string + flagNS string + flagPolicyOverride bool + flagTLSServerName string + flagTLSSkipVerify bool + flagDisableRedirects bool + flagWrapTTL time.Duration + flagUnlockKey string flagFormat string flagField string @@ -60,12 +68,14 @@ type BaseCommand struct { flagOutputCurlString bool flagOutputPolicy bool flagNonInteractive bool + addrWarning string flagMFA []string flagHeader map[string]string - tokenHelper token.TokenHelper + tokenHelper token.TokenHelper + hcpTokenHelper hcpvlib.HCPTokenHelper client *api.Client } @@ -75,6 +85,15 @@ type BaseCommand struct { func (c *BaseCommand) Client() (*api.Client, error) { // Read the test client if present if c.client != nil { + // Ignoring homedir errors here and moving on to avoid + // spamming user with warnings/errors that homedir isn't set. + path, err := homedir.Dir() + if err == nil { + if err := c.applyHCPConfig(path); err != nil { + return nil, err + } + } + return c.client, nil } @@ -87,8 +106,8 @@ func (c *BaseCommand) Client() (*api.Client, error) { if c.flagAddress != "" { config.Address = c.flagAddress } - if c.flagAgentAddress != "" { - config.Address = c.flagAgentAddress + if c.flagAgentProxyAddress != "" { + config.Address = c.flagAgentProxyAddress } if c.flagOutputCurlString { @@ -183,9 +202,58 @@ func (c *BaseCommand) Client() (*api.Client, error) { c.client = client + // Ignoring homedir errors here and moving on to avoid + // spamming user with warnings/errors that homedir isn't set. 
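The `-identity-token-key` wiring in the `auth enable` and `auth tune` hunks above follows the existing pattern of only sending fields whose flags were explicitly set, so mounts that never pass the flag keep their current key. A minimal sketch of the equivalent API call (the mount path and key name are hypothetical, and this assumes an `api` package version carrying the new `IdentityTokenKey` field):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Rough equivalent of `vault auth enable -identity-token-key=my-key userpass`:
	// IdentityTokenKey is only populated because we set it explicitly here.
	err = client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
		Type: "userpass",
		Config: api.AuthConfigInput{
			IdentityTokenKey: "my-key", // hypothetical named key
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("userpass enabled with identity token key")
}
```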
+ path, err := homedir.Dir() + if err == nil { + if err := c.applyHCPConfig(path); err != nil { + return nil, err + } + } + + if c.addrWarning != "" && c.UI != nil { + if os.Getenv("VAULT_ADDR") == "" { + if !c.flagNonInteractive && isatty.IsTerminal(os.Stdin.Fd()) { + c.UI.Warn(wrapAtLength(c.addrWarning)) + } + } + } + return client, nil } +func (c *BaseCommand) applyHCPConfig(path string) error { + if c.hcpTokenHelper == nil { + c.hcpTokenHelper = c.HCPTokenHelper() + } + + hcpToken, err := c.hcpTokenHelper.GetHCPToken(path) + if err != nil { + return err + } + + if hcpToken != nil { + cookie := &http.Cookie{ + Name: "hcp_access_token", + Value: hcpToken.AccessToken, + Expires: hcpToken.AccessTokenExpiry, + } + + if err := c.client.SetHCPCookie(cookie); err != nil { + return fmt.Errorf("unable to correctly connect to the HCP Vault cluster; please reconnect to HCP: %w", err) + } + + if err := c.client.SetAddress(hcpToken.ProxyAddr); err != nil { + return fmt.Errorf("unable to correctly set the HCP address: %w", err) + } + + // remove address warning since address was set to HCP's address + c.addrWarning = "" + } + + return nil +} + // SetAddress sets the token helper on the command; useful for the demo server and other outside cases. func (c *BaseCommand) SetAddress(addr string) { c.flagAddress = addr @@ -209,6 +277,14 @@ func (c *BaseCommand) TokenHelper() (token.TokenHelper, error) { return helper, nil } +// HCPTokenHelper returns the HCPToken helper attached to the command. +func (c *BaseCommand) HCPTokenHelper() hcpvlib.HCPTokenHelper { + if c.hcpTokenHelper != nil { + return c.hcpTokenHelper + } + return config.DefaultHCPTokenHelper() +} + // DefaultWrappingLookupFunc is the default wrapping function based on the // CLI flag. func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { @@ -219,7 +295,7 @@ func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { return api.DefaultWrappingLookupFunc(operation, path) } -// getValidationRequired checks to see if the secret exists and has an MFA +// getMFAValidationRequired checks to see if the secret exists and has an MFA // requirement. If MFA is required and the number of constraints is greater than // 1, we can assert that interactive validation is not required. func (c *BaseCommand) getMFAValidationRequired(secret *api.Secret) bool { @@ -318,16 +394,18 @@ func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { Completion: complete.PredictAnything, Usage: "Address of the Vault server.", } + if c.flagAddress != "" { addrStringVar.Default = c.flagAddress } else { addrStringVar.Default = "https://127.0.0.1:8200" + c.addrWarning = fmt.Sprintf("WARNING! VAULT_ADDR and -address unset. Defaulting to %s.", addrStringVar.Default) } f.StringVar(addrStringVar) agentAddrStringVar := &StringVar{ Name: "agent-address", - Target: &c.flagAgentAddress, + Target: &c.flagAgentProxyAddress, EnvVar: api.EnvVaultAgentAddr, Completion: complete.PredictAnything, Usage: "Address of the Agent.", @@ -585,6 +663,7 @@ func (f *FlagSets) Completions() complete.Flags { type ( ParseOptions interface{} ParseOptionAllowRawFormat bool + DisableDisplayFlagWarning bool ) // Parse parses the given flags, returning any errors. 
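The new `DisableDisplayFlagWarning` entry in the `ParseOptions` type set is consumed by the variadic `Parse` signature in the hunk that follows. A standalone, runnable sketch of the pattern (the names mirror the ones above purely for illustration):

```go
package main

import "fmt"

// Mirrors the ParseOptions pattern above: an empty-interface option type
// plus named bool types gives callers typed, self-documenting options
// without changing the function's signature for existing call sites.
type (
	ParseOptions              interface{}
	DisableDisplayFlagWarning bool
)

func parse(opts ...ParseOptions) {
	warningsDisabled := false
	for _, opt := range opts {
		// Type-assert each option, much as FlagSets.Parse does below.
		if v, ok := opt.(DisableDisplayFlagWarning); ok {
			warningsDisabled = bool(v)
		}
	}
	fmt.Println("flag warnings disabled:", warningsDisabled)
}

func main() {
	parse()                                // default: warnings stay on
	parse(DisableDisplayFlagWarning(true)) // caller opts out
}
```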
@@ -592,9 +671,17 @@ type ( func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { err := f.mainSet.Parse(args) - warnings := generateFlagWarnings(f.Args()) - if warnings != "" && Format(f.ui) == "table" { - f.ui.Warn(warnings) + displayFlagWarningsDisabled := false + for _, opt := range opts { + if value, ok := opt.(DisableDisplayFlagWarning); ok { + displayFlagWarningsDisabled = bool(value) + } + } + if !displayFlagWarningsDisabled { + warnings := generateFlagWarnings(f.Args()) + if warnings != "" && Format(f.ui) == "table" { + f.ui.Warn(warnings) + } } if err != nil { diff --git a/command/base_flags.go b/command/base_flags.go index 5ec0af3cbe9a..3f3fc1abdad1 100644 --- a/command/base_flags.go +++ b/command/base_flags.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -249,14 +252,14 @@ func (i *intValue) Set(s string) error { return err } if v >= math.MinInt && v <= math.MaxInt { - *i.target = int(v) + *i.target = v return nil } return fmt.Errorf("Incorrect conversion of a 64-bit integer to a lower bit size. Value %d is not within bounds for int32", v) } -func (i *intValue) Get() interface{} { return int(*i.target) } -func (i *intValue) String() string { return strconv.Itoa(int(*i.target)) } +func (i *intValue) Get() interface{} { return *i.target } +func (i *intValue) String() string { return strconv.Itoa(*i.target) } func (i *intValue) Example() string { return "int" } func (i *intValue) Hidden() bool { return i.hidden } @@ -591,7 +594,7 @@ type DurationVar struct { func (f *FlagSet) DurationVar(i *DurationVar) { initial := i.Default if v, exist := os.LookupEnv(i.EnvVar); exist { - if d, err := time.ParseDuration(appendDurationSuffix(v)); err == nil { + if d, err := parseutil.ParseDurationSecond(v); err == nil { initial = d } } @@ -631,7 +634,7 @@ func (d *durationValue) Set(s string) error { s = "-1" } - v, err := time.ParseDuration(appendDurationSuffix(s)) + v, err := parseutil.ParseDurationSecond(s) if err != nil { return err } @@ -986,33 +989,3 @@ func (d *timeValue) Get() interface{} { return *d.target } func (d *timeValue) String() string { return (*d.target).String() } func (d *timeValue) Example() string { return "time" } func (d *timeValue) Hidden() bool { return d.hidden } - -// -- helpers -func envDefault(key, def string) string { - if v, exist := os.LookupEnv(key); exist { - return v - } - return def -} - -func envBoolDefault(key string, def bool) bool { - if v, exist := os.LookupEnv(key); exist { - b, err := strconv.ParseBool(v) - if err != nil { - panic(err) - } - return b - } - return def -} - -func envDurationDefault(key string, def time.Duration) time.Duration { - if v, exist := os.LookupEnv(key); exist { - d, err := time.ParseDuration(v) - if err != nil { - panic(err) - } - return d - } - return def -} diff --git a/command/base_flags_test.go b/command/base_flags_test.go index 39777d24a953..ea3561ef8f10 100644 --- a/command/base_flags_test.go +++ b/command/base_flags_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( diff --git a/command/base_helpers.go b/command/base_helpers.go index 60ad8af20cb1..06e8b8021932 100644 --- a/command/base_helpers.go +++ b/command/base_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
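For reference on the `base_flags.go` change above: `parseutil.ParseDurationSecond` keeps the old convenience of treating a bare integer as seconds (what `appendDurationSuffix` used to bolt on) while still accepting standard Go duration strings. A quick, runnable check, assuming `go-secure-stdlib/parseutil` from the module's existing dependencies:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// "30" parses as 30s rather than failing the way time.ParseDuration would.
	for _, in := range []string{"30", "90s", "2h45m"} {
		d, err := parseutil.ParseDurationSecond(in)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%q -> %s\n", in, d)
	}
}
```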
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -77,7 +80,7 @@ func ensureNoLeadingSlash(s string) string { return s } -// columnOuput prints the list of items as a table with no headers. +// columnOutput prints the list of items as a table with no headers. func columnOutput(list []string, c *columnize.Config) string { if len(list) == 0 { return "" } diff --git a/command/base_helpers_test.go b/command/base_helpers_test.go index dee93b4bf44a..c5268007368b 100644 --- a/command/base_helpers_test.go +++ b/command/base_helpers_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -58,7 +61,7 @@ func TestParseArgsData(t *testing.T) { if err != nil { t.Fatal(err) } - f.Write([]byte(`{"foo":"bar"}`)) + f.WriteString(`{"foo":"bar"}`) f.Close() defer os.Remove(f.Name()) @@ -79,7 +82,7 @@ if err != nil { t.Fatal(err) } - f.Write([]byte(`bar`)) + f.WriteString(`bar`) f.Close() defer os.Remove(f.Name()) diff --git a/command/base_predict.go b/command/base_predict.go index 61cbe092d61d..72ba402fe97f 100644 --- a/command/base_predict.go +++ b/command/base_predict.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,6 @@ import ( "sync" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" "github.com/posener/complete" ) @@ -155,7 +157,7 @@ func (b *BaseCommand) PredictVaultAuths() complete.Predictor { } // PredictVaultPlugins returns a predictor for installed plugins. -func (b *BaseCommand) PredictVaultPlugins(pluginTypes ...consts.PluginType) complete.Predictor { +func (b *BaseCommand) PredictVaultPlugins(pluginTypes ...api.PluginType) complete.Predictor { return NewPredict().VaultPlugins(pluginTypes...) } @@ -218,7 +220,7 @@ func (p *Predict) VaultAuths() complete.Predictor { // VaultPlugins returns a predictor for Vault's plugin catalog. This is a public // API for consumers, but you probably want BaseCommand.PredictVaultPlugins // instead. -func (p *Predict) VaultPlugins(pluginTypes ...consts.PluginType) complete.Predictor { +func (p *Predict) VaultPlugins(pluginTypes ...api.PluginType) complete.Predictor { filterFunc := func() []string { return p.plugins(pluginTypes...) } @@ -395,12 +397,12 @@ func (p *Predict) auths() []string { } // plugins returns a sorted list of the plugins in the catalog. -func (p *Predict) plugins(pluginTypes ...consts.PluginType) []string { +func (p *Predict) plugins(pluginTypes ...api.PluginType) []string { // This method's signature doesn't enforce that a pluginType must be passed in. // If it's not, it's likely the caller's intent is to get a list of all of them, // so let's help them out.
if len(pluginTypes) == 0 { - pluginTypes = append(pluginTypes, consts.PluginTypeUnknown) + pluginTypes = append(pluginTypes, api.PluginTypeUnknown) } client := p.Client() @@ -411,7 +413,7 @@ func (p *Predict) plugins(pluginTypes ...consts.PluginType) []string { var plugins []string pluginsAdded := make(map[string]bool) for _, pluginType := range pluginTypes { - result, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: pluginType}) + result, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: api.PluginType(pluginType)}) if err != nil { return nil } diff --git a/command/base_predict_test.go b/command/base_predict_test.go index 644a366673d4..778245830ff1 100644 --- a/command/base_predict_test.go +++ b/command/base_predict_test.go @@ -1,9 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/api" "github.com/posener/complete" @@ -343,11 +348,9 @@ func TestPredict_Plugins(t *testing.T) { []string{ "ad", "alicloud", - "app-id", "approle", "aws", "azure", - "cassandra", "cassandra-database-plugin", "centrify", "cert", @@ -367,13 +370,10 @@ func TestPredict_Plugins(t *testing.T) { "kubernetes", "kv", "ldap", - "mongodb", "mongodb-database-plugin", "mongodbatlas", "mongodbatlas-database-plugin", - "mssql", "mssql-database-plugin", - "mysql", "mysql-aurora-database-plugin", "mysql-database-plugin", "mysql-legacy-database-plugin", @@ -385,13 +385,13 @@ func TestPredict_Plugins(t *testing.T) { "openldap", "pcf", // Deprecated. "pki", - "postgresql", "postgresql-database-plugin", "rabbitmq", "radius", "redis-database-plugin", "redis-elasticache-database-plugin", "redshift-database-plugin", + "saml", "snowflake-database-plugin", "ssh", "terraform", @@ -438,8 +438,16 @@ func TestPredict_Plugins(t *testing.T) { } } } - if !reflect.DeepEqual(act, tc.exp) { - t.Errorf("expected:%q, got: %q", tc.exp, act) + if !strutil.StrListContains(act, "saml") { + for i, v := range tc.exp { + if v == "saml" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } + if d := cmp.Diff(act, tc.exp); len(d) > 0 { + t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, d) } }) } diff --git a/command/base_test.go b/command/base_test.go index b3f75e0eb05d..028f13fb44cc 100644 --- a/command/base_test.go +++ b/command/base_test.go @@ -1,9 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "net/http" "reflect" "testing" + + "github.com/hashicorp/vault/api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + hcpvlib "github.com/hashicorp/vault-hcp-lib" ) func getDefaultCliHeaders(t *testing.T) http.Header { @@ -67,3 +76,37 @@ func TestClient_FlagHeader(t *testing.T) { } } } + +// TestClient_HCPConfiguration tests that the HCP configuration is applied correctly when it exists in cache. 
+func TestClient_HCPConfiguration(t *testing.T) { + cases := map[string]struct { + Valid bool + ExpectedAddr string + }{ + "valid hcp configuration": { + Valid: true, + ExpectedAddr: "https://hcp-proxy.addr:8200", + }, + "empty hcp configuration": { + Valid: false, + ExpectedAddr: api.DefaultAddress, + }, + } + + for n, tst := range cases { + t.Run(n, func(t *testing.T) { + bc := &BaseCommand{hcpTokenHelper: &hcpvlib.TestingHCPTokenHelper{tst.Valid}} + cli, err := bc.Client() + assert.NoError(t, err) + + if tst.Valid { + require.Equal(t, tst.ExpectedAddr, cli.Address()) + require.NotEmpty(t, cli.HCPCookie()) + require.Contains(t, cli.HCPCookie(), "hcp_access_token=Test.Access.Token") + } else { + require.Equal(t, tst.ExpectedAddr, cli.Address()) + require.Empty(t, cli.HCPCookie()) + } + }) + } +} diff --git a/command/command_stubs_oss.go b/command/command_stubs_oss.go new file mode 100644 index 000000000000..bb199f373c0c --- /dev/null +++ b/command/command_stubs_oss.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package command + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +import ( + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/vault" +) + +func entInitCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) { +} + +func entEnableFourClusterDev(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, tempDir string) int { + c.logger.Error("-dev-four-cluster only supported in enterprise Vault") + return 1 +} + +func entAdjustCoreConfig(config *server.Config, coreConfig *vault.CoreConfig) { +} + +func entCheckStorageType(coreConfig *vault.CoreConfig) bool { + return true +} + +func entGetFIPSInfoKey() string { + return "" +} + +func entGetRequestLimiterStatus(coreConfig vault.CoreConfig) string { + return "" +} diff --git a/command/command_test.go b/command/command_test.go index f4b8c7fd251a..5ef880b78742 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -9,6 +12,7 @@ import ( "testing" "time" + "github.com/hashicorp/cli" log "github.com/hashicorp/go-hclog" kv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" @@ -18,11 +22,10 @@ import ( "github.com/hashicorp/vault/builtin/logical/transit" "github.com/hashicorp/vault/helper/benchhelpers" "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/vault/seal" auditFile "github.com/hashicorp/vault/builtin/audit/file" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" @@ -67,10 +70,54 @@ func testVaultServer(tb testing.TB) (*api.Client, func()) { return client, closer } +func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerUnseal(tb) + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + tb.Fatal(err) + } + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + tb.Fatal(err) + } + + // populate dummy secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/baz", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + } + + return client, closer +} + func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { tb.Helper() - client, _, closer := testVaultServerUnsealWithKVVersion(tb, kvVersion) + client, _, closer := testVaultServerUnsealWithKVVersionWithSeal(tb, kvVersion, nil) return client, closer } @@ -78,9 +125,6 @@ func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { tb.Helper() client, _, closer := testVaultServerCoreConfig(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, CredentialBackends: credentialBackends, AuditBackends: auditBackends, LogicalBackends: logicalBackends, @@ -89,28 +133,29 @@ func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { return client, closer } +// testVaultServerAutoUnseal creates a test vault cluster and sets it up with auto unseal +// the function returns a client, the recovery keys, and a closer function +func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, []string, func()) { + testSeal, _ := seal.NewTestSeal(nil) + autoSeal := vault.NewAutoSeal(testSeal) + return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal) +} + // testVaultServerUnseal creates a test vault cluster and returns a configured // API client, list of unseal keys (as strings), and a closer function. 
func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { - return testVaultServerUnsealWithKVVersion(tb, "1") + return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", nil) } -func testVaultServerUnsealWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, []string, func()) { +func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) { tb.Helper() - logger := log.NewInterceptLogger(&log.LoggerOptions{ - Output: log.DefaultOutput, - Level: log.Debug, - JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat, - }) return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: logger, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, BuiltinRegistry: builtinplugins.Registry, + Seal: seal, }, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, NumCores: 1, @@ -125,9 +170,6 @@ func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []s tb.Helper() return testVaultServerCoreConfig(tb, &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, LogicalBackends: defaultVaultLogicalBackends, @@ -144,7 +186,8 @@ func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*ap } // testVaultServerCoreConfig creates a new vault cluster with the given core -// configuration. This is a lower-level test helper. +// configuration. This is a lower-level test helper. If the seal config supports recovery keys, then +// recovery keys are returned. Otherwise, unseal keys are returned func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { tb.Helper() @@ -159,14 +202,24 @@ func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConf client := cluster.Cores[0].Client client.SetToken(cluster.RootToken) - // Convert the unseal keys to base64 encoded, since these are how the user - // will get them. - unsealKeys := make([]string, len(cluster.BarrierKeys)) - for i := range unsealKeys { - unsealKeys[i] = base64.StdEncoding.EncodeToString(cluster.BarrierKeys[i]) + var keys [][]byte + if coreConfig.Seal != nil && coreConfig.Seal.RecoveryKeySupported() { + keys = cluster.RecoveryKeys + } else { + keys = cluster.BarrierKeys } - return client, unsealKeys, func() { defer cluster.Cleanup() } + return client, encodeKeys(keys), cluster.Cleanup +} + +// Convert the unseal keys to base64 encoded, since these are how the user +// will get them. +func encodeKeys(rawKeys [][]byte) []string { + keys := make([]string, len(rawKeys)) + for i := range rawKeys { + keys[i] = base64.StdEncoding.EncodeToString(rawKeys[i]) + } + return keys } // testVaultServerUninit creates an uninitialized server. 
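Since the seal is now injectable, tests that need recovery keys rather than barrier keys can lean on the new helper directly. A hypothetical test using it might look like this (a sketch only, inside `package command`):

```go
// Hypothetical test in package command: the auto-unseal helper returns
// base64-encoded recovery keys because the test seal supports recovery.
func TestWithAutoUnseal(t *testing.T) {
	client, recoveryKeys, closer := testVaultServerAutoUnseal(t)
	defer closer()

	if len(recoveryKeys) == 0 {
		t.Fatal("expected recovery keys from an auto-unseal cluster")
	}
	if _, err := client.Sys().SealStatus(); err != nil {
		t.Fatal(err)
	}
}
```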
@@ -180,8 +233,6 @@ func testVaultServerUninit(tb testing.TB) (*api.Client, func()) { core, err := vault.NewCore(&vault.CoreConfig{ DisableMlock: true, - DisableCache: true, - Logger: defaultVaultLogger, Physical: inm, CredentialBackends: defaultVaultCredentialBackends, AuditBackends: defaultVaultAuditBackends, diff --git a/command/command_testonly/operator_usage_testonly_test.go b/command/command_testonly/operator_usage_testonly_test.go new file mode 100644 index 000000000000..47bec03720d0 --- /dev/null +++ b/command/command_testonly/operator_usage_testonly_test.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build testonly + +package command_testonly + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command" + "github.com/hashicorp/vault/helper/timeutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/clientcountutil" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" +) + +func testOperatorUsageCommand(tb testing.TB) (*cli.MockUi, *command.OperatorUsageCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &command.OperatorUsageCommand{ + BaseCommand: &command.BaseCommand{ + UI: ui, + }, + } +} + +// TestOperatorUsageCommandRun writes mock activity log data and runs the +// operator usage command. The test verifies that the output contains the +// expected values per client type. +// This test cannot be run in parallel because it sets the VAULT_TOKEN env +// var +func TestOperatorUsageCommandRun(t *testing.T) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + defer cluster.Cleanup() + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + + client := cluster.Cores[0].Client + _, err := client.Logical().Write("sys/internal/counters/config", map[string]interface{}{"enabled": "enable"}) + require.NoError(t, err) + + now := time.Now().UTC() + + _, err = clientcountutil.NewActivityLogData(client). + NewPreviousMonthData(1). + NewClientsSeen(6, clientcountutil.WithClientType("entity")). + NewClientsSeen(4, clientcountutil.WithClientType("non-entity-token")). + NewClientsSeen(2, clientcountutil.WithClientType("secret-sync")). + NewCurrentMonthData(). + NewClientsSeen(3, clientcountutil.WithClientType("entity")). + NewClientsSeen(4, clientcountutil.WithClientType("non-entity-token")). + NewClientsSeen(5, clientcountutil.WithClientType("secret-sync")). 
+ Write(context.Background(), generation.WriteOptions_WRITE_ENTITIES, generation.WriteOptions_WRITE_PRECOMPUTED_QUERIES) + require.NoError(t, err) + + ui, cmd := testOperatorUsageCommand(t) + + t.Setenv("VAULT_TOKEN", client.Token()) + start := timeutil.MonthsPreviousTo(1, now).Format(time.RFC3339) + end := timeutil.EndOfMonth(now).UTC().Format(time.RFC3339) + // Reset and check output + code := cmd.Run([]string{ + "-address", client.Address(), + "-tls-skip-verify", + "-start-time", start, + "-end-time", end, + }) + require.Equal(t, 0, code, ui.ErrorWriter.String()) + output := ui.OutputWriter.String() + outputLines := strings.Split(output, "\n") + require.Equal(t, fmt.Sprintf("Period start: %s", start), outputLines[0]) + require.Equal(t, fmt.Sprintf("Period end: %s", end), outputLines[1]) + + require.Contains(t, outputLines[3], "Secret sync") + nsCounts := strings.Fields(outputLines[5]) + require.Equal(t, "[root]", nsCounts[0]) + require.Equal(t, "9", nsCounts[1]) + require.Equal(t, "8", nsCounts[2]) + require.Equal(t, "7", nsCounts[3]) + require.Equal(t, "24", nsCounts[4]) + + totalCounts := strings.Fields(outputLines[7]) + require.Equal(t, "Total", totalCounts[0]) + require.Equal(t, nsCounts[1:], totalCounts[1:]) +} diff --git a/command/commands.go b/command/commands.go index 9327ae72c38a..b4d3e4eff2cc 100644 --- a/command/commands.go +++ b/command/commands.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,12 +8,13 @@ import ( "os/signal" "syscall" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/plugins/event" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" /* The builtinplugins package is initialized here because it, in turn, @@ -65,6 +69,7 @@ import ( physFile "github.com/hashicorp/vault/sdk/physical/file" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" + hcpvlib "github.com/hashicorp/vault-hcp-lib" sr "github.com/hashicorp/vault/serviceregistration" csr "github.com/hashicorp/vault/serviceregistration/consul" ksr "github.com/hashicorp/vault/serviceregistration/kubernetes" @@ -87,10 +92,15 @@ const ( // EnvVaultLogLevel is used to specify the log level applied to logging // Supported log levels: Trace, Debug, Error, Warn, Info EnvVaultLogLevel = "VAULT_LOG_LEVEL" - - // DisableSSCTokens is an env var used to disable index bearing - // token functionality - DisableSSCTokens = "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS" + // EnvVaultExperiments defines the experiments to enable for a server as a + // comma separated list. See experiments.ValidExperiments() for the list of + // valid experiments. Not mutable or persisted in storage, only read and + // logged at startup _per node_. This was initially introduced for the events + // system being developed over multiple release cycles. + EnvVaultExperiments = "VAULT_EXPERIMENTS" + // EnvVaultPluginTmpdir sets the folder to use for Unix sockets when setting + // up containerized plugins. + EnvVaultPluginTmpdir = "VAULT_PLUGIN_TMPDIR" // flagNameAddress is the flag used in the base command to read in the // address of the Vault server. @@ -131,6 +141,8 @@ const ( flagNameAllowedManagedKeys = "allowed-managed-keys" // flagNamePluginVersion selects what version of a plugin should be used. 
flagNamePluginVersion = "plugin-version" + // flagNameIdentityTokenKey selects the key used to sign plugin identity tokens + flagNameIdentityTokenKey = "identity-token-key" // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter flagNameUserLockoutThreshold = "user-lockout-threshold" // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter @@ -143,6 +155,8 @@ const ( flagNameDisableRedirects = "disable-redirects" // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout flagNameCombineLogs = "combine-logs" + // flagDisableGatedLogs is used to disable gated logs and immediately show the vault logs as they become available + flagDisableGatedLogs = "disable-gated-logs" // flagNameLogFile is used to specify the path to the log file that Vault should use for logging flagNameLogFile = "log-file" // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. @@ -156,6 +170,9 @@ const ( // flagNameLogLevel is used to specify the log level applied to logging // Supported log levels: Trace, Debug, Error, Warn, Info flagNameLogLevel = "log-level" + // flagNameDelegatedAuthAccessors allows operators to specify the allowed mount accessors a backend can delegate + // authentication + flagNameDelegatedAuthAccessors = "delegated-auth-accessors" ) var ( @@ -169,6 +186,8 @@ var ( "plugin": plugin.Factory, } + eventBackends = map[string]event.Factory{} + logicalBackends = map[string]logical.Factory{ "plugin": plugin.Factory, "database": logicalDb.Factory, @@ -213,14 +232,7 @@ var ( "kubernetes": ksr.NewServiceRegistration, } - initCommandsEnt = func(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {} -) - -// Commands is the mapping of all the available commands. 
-var Commands map[string]cli.CommandFactory - -func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { - loginHandlers := map[string]LoginHandler{ + loginHandlers = map[string]LoginHandler{ "alicloud": &credAliCloud.CLIHandler{}, "aws": &credAws.CLIHandler{}, "centrify": &credCentrify.CLIHandler{}, @@ -242,23 +254,32 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { DefaultMount: "userpass", }, } +) +func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { getBaseCommand := func() *BaseCommand { return &BaseCommand{ - UI: ui, - tokenHelper: runOpts.TokenHelper, - flagAddress: runOpts.Address, - client: runOpts.Client, + UI: ui, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + client: runOpts.Client, + hcpTokenHelper: runOpts.HCPTokenHelper, } } - Commands = map[string]cli.CommandFactory{ + commands := map[string]cli.CommandFactory{ "agent": func() (cli.Command, error) { return &AgentCommand{ BaseCommand: &BaseCommand{ UI: serverCmdUi, }, ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, + "agent generate-config": func() (cli.Command, error) { + return &AgentGenerateConfigCommand{ + BaseCommand: getBaseCommand(), }, nil }, "audit": func() (cli.Command, error) { @@ -328,6 +349,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "events subscribe": func() (cli.Command, error) { + return &EventsSubscribeCommands{ + BaseCommand: getBaseCommand(), + }, nil + }, "lease": func() (cli.Command, error) { return &LeaseCommand{ BaseCommand: getBaseCommand(), @@ -471,6 +497,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "operator raft snapshot inspect": func() (cli.Command, error) { + return &OperatorRaftSnapshotInspectCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "operator raft snapshot restore": func() (cli.Command, error) { return &OperatorRaftSnapshotRestoreCommand{ BaseCommand: getBaseCommand(), @@ -506,6 +537,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "operator utilization": func() (cli.Command, error) { + return &OperatorUtilizationCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "operator unseal": func() (cli.Command, error) { return &OperatorUnsealCommand{ BaseCommand: getBaseCommand(), @@ -536,6 +572,26 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "pki issue": func() (cli.Command, error) { + return &PKIIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki list-intermediates": func() (cli.Command, error) { + return &PKIListIntermediateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki reissue": func() (cli.Command, error) { + return &PKIReIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki verify-sign": func() (cli.Command, error) { + return &PKIVerifySignCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "plugin": func() (cli.Command, error) { return &PluginCommand{ BaseCommand: getBaseCommand(), @@ -571,6 +627,40 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "plugin runtime": func() (cli.Command, error) { + return &PluginRuntimeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime register": func() (cli.Command, error) { + return &PluginRuntimeRegisterCommand{ + BaseCommand: 
getBaseCommand(), + }, nil + }, + "plugin runtime deregister": func() (cli.Command, error) { + return &PluginRuntimeDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime info": func() (cli.Command, error) { + return &PluginRuntimeInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime list": func() (cli.Command, error) { + return &PluginRuntimeListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "proxy": func() (cli.Command, error) { + return &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, "policy": func() (cli.Command, error) { return &PolicyCommand{ BaseCommand: getBaseCommand(), @@ -655,6 +745,7 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { }, AuditBackends: auditBackends, CredentialBackends: credentialBackends, + EventBackends: eventBackends, LogicalBackends: logicalBackends, PhysicalBackends: physicalBackends, @@ -675,6 +766,36 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { BaseCommand: getBaseCommand(), }, nil }, + "transform": func() (cli.Command, error) { + return &TransformCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import": func() (cli.Command, error) { + return &TransformImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import-version": func() (cli.Command, error) { + return &TransformImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit": func() (cli.Command, error) { + return &TransitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import": func() (cli.Command, error) { + return &TransitImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import-version": func() (cli.Command, error) { + return &TransitImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "token": func() (cli.Command, error) { return &TokenCommand{ BaseCommand: getBaseCommand(), @@ -809,7 +930,23 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) { }, } - initCommandsEnt(ui, serverCmdUi, runOpts) + entInitCommands(ui, serverCmdUi, runOpts, commands) + initHCPCommands(ui, commands) + + return commands +} + +func initHCPCommands(ui cli.Ui, commands map[string]cli.CommandFactory) { + for cmd, cmdFactory := range hcpvlib.InitHCPCommand(ui) { + // check for conflicts and only put command in the map in case it doesn't conflict with existing one + _, ok := commands[cmd] + if !ok { + commands[cmd] = cmdFactory + } else { + ui.Error("Failed to initialize HCP commands.") + break + } + } } // MakeShutdownCh returns a channel that can be used for shutdown diff --git a/command/commands_nonwindows.go b/command/commands_nonwindows.go index c94c485f4907..90cfa253a2bc 100644 --- a/command/commands_nonwindows.go +++ b/command/commands_nonwindows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !windows package command diff --git a/command/commands_test.go b/command/commands_test.go new file mode 100644 index 000000000000..ac057f737cce --- /dev/null +++ b/command/commands_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "testing" + + "github.com/hashicorp/cli" + "github.com/stretchr/testify/require" +) + +func Test_Commands_HCPInit(t *testing.T) { + tests := map[string]struct { + expectError bool + expectedErrorMsg string + }{ + "initialize with success": { + expectError: false, + }, + "initialize with error: existing commands conflict with init commands": { + expectError: true, + expectedErrorMsg: "Failed to initialize HCP commands.", + }, + } + + for n, tst := range tests { + t.Run(n, func(t *testing.T) { + t.Parallel() + + mockUi := cli.NewMockUi() + commands := initCommands(mockUi, nil, nil) + if tst.expectError { + initHCPCommands(mockUi, commands) + errMsg := mockUi.ErrorWriter.String() + require.NotEmpty(t, errMsg) + require.Contains(t, errMsg, tst.expectedErrorMsg) + } else { + errMsg := mockUi.ErrorWriter.String() + require.Empty(t, errMsg) + require.NotEmpty(t, commands) + } + }) + } +} diff --git a/command/commands_windows.go b/command/commands_windows.go index ed06a07406f6..016a2d2cb50e 100644 --- a/command/commands_windows.go +++ b/command/commands_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build windows package command diff --git a/command/config.go b/command/config.go index b46581fc80b7..9a5ee7ac5700 100644 --- a/command/config.go +++ b/command/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( diff --git a/command/config/config.go b/command/config/config.go index ef0c4adf6dcd..4421226f24ce 100644 --- a/command/config/config.go +++ b/command/config/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package config import ( diff --git a/command/config/config_test.go b/command/config/config_test.go index b5d41361e9b7..04cb1be9863f 100644 --- a/command/config/config_test.go +++ b/command/config/config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package config import ( diff --git a/command/config/hcp_token.go b/command/config/hcp_token.go new file mode 100644 index 000000000000..58bc7832e212 --- /dev/null +++ b/command/config/hcp_token.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import hcpvlib "github.com/hashicorp/vault-hcp-lib" + +// DefaultHCPTokenHelper returns the HCP token helper that is configured for Vault. +// This helper should only be used for non-server CLI commands. +func DefaultHCPTokenHelper() hcpvlib.HCPTokenHelper { + return &hcpvlib.InternalHCPTokenHelper{} +} diff --git a/command/config/util.go b/command/config/util.go index 1ac47df7e053..3c6f7ca6239d 100644 --- a/command/config/util.go +++ b/command/config/util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package config import ( diff --git a/command/config/validate_listener.go b/command/config/validate_listener.go index 7a56ec699850..09123bdc75b3 100644 --- a/command/config/validate_listener.go +++ b/command/config/validate_listener.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !fips_140_3 package config diff --git a/command/config_test.go b/command/config_test.go index 0ed34992f3a4..187d4ce8b4be 100644 --- a/command/config_test.go +++ b/command/config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( diff --git a/command/debug.go b/command/debug.go index 1ea27690171d..09df88fb4d60 100644 --- a/command/debug.go +++ b/command/debug.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -14,6 +17,7 @@ import ( "sync" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/gatedwriter" "github.com/hashicorp/go-secure-stdlib/strutil" @@ -23,7 +27,6 @@ import ( "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/version" "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" "github.com/oklog/run" "github.com/posener/complete" ) diff --git a/command/debug_test.go b/command/debug_test.go index 046474af8660..279c48f0a5ac 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -13,9 +16,9 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" ) func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) { @@ -534,6 +537,10 @@ func TestDebugCommand_NoConnection(t *testing.T) { t.Fatal(err) } + if err := client.SetAddress(""); err != nil { + t.Fatal(err) + } + _, cmd := testDebugCommand(t) cmd.client = client cmd.skipTimingChecks = true diff --git a/command/delete.go b/command/delete.go index 78d786f11756..6986a84ec343 100644 --- a/command/delete.go +++ b/command/delete.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,7 +9,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/delete_test.go b/command/delete_test.go index e26d393b16fe..6da1d1d7fa91 100644 --- a/command/delete_test.go +++ b/command/delete_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testDeleteCommand(tb testing.TB) (*cli.MockUi, *DeleteCommand) { diff --git a/command/events.go b/command/events.go new file mode 100644 index 000000000000..48f1fdd22681 --- /dev/null +++ b/command/events.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" + "nhooyr.io/websocket" +) + +var ( + _ cli.Command = (*EventsSubscribeCommands)(nil) + _ cli.CommandAutocomplete = (*EventsSubscribeCommands)(nil) +) + +type EventsSubscribeCommands struct { + *BaseCommand + + namespaces []string + bexprFilter string +} + +func (c *EventsSubscribeCommands) Synopsis() string { + return "Subscribe to events" +} + +func (c *EventsSubscribeCommands) Help() string { + helpText := ` +Usage: vault events subscribe [-namespaces=ns1] [-timeout=XYZs] [-filter=filterExpression] eventType + + Subscribe to events of the given event type (topic), which may be a glob + pattern (with "*" treated as a wildcard). The events will be sent to + standard out. + + The output will be a JSON object serialized using the default protobuf + JSON serialization format, with one line per event received. 
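To make the help text concrete, a hypothetical invocation (the `kv-v2/data-write` event type and the `data.path` filter field are illustrative examples, not a guaranteed contract):

```
$ vault events subscribe -namespaces=ns1 -filter='data.path == "secret/my-app"' kv-v2/data-write
```

Each received event is then printed to stdout as a single JSON line, per the description above.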
+` + c.Flags().Help() return strings.TrimSpace(helpText) } + +func (c *EventsSubscribeCommands) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + f := set.NewFlagSet("Subscribe Options") + f.StringVar(&StringVar{ + Name: "filter", + Usage: `A boolean expression to use to filter events. Only events matching + the filter will be subscribed to. This is applied after any filtering + by event type or namespace.`, + Default: "", + Target: &c.bexprFilter, + }) + f.StringSliceVar(&StringSliceVar{ + Name: "namespaces", + Usage: `Specifies one or more patterns of additional child namespaces + to subscribe to. The namespace of the request is automatically + prepended, so specifying 'ns2' when the request is in the 'ns1' + namespace will result in subscribing to 'ns1/ns2', in addition to + 'ns1'. Patterns can include "*" characters to indicate + wildcards. The default is to subscribe only to the request's + namespace.`, + Default: []string{}, + Target: &c.namespaces, + }) + return set +} + +func (c *EventsSubscribeCommands) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *EventsSubscribeCommands) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *EventsSubscribeCommands) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + err = c.subscribeRequest(client, "sys/events/subscribe/"+args[0]) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + return 0 +} + +// cleanNamespace removes leading and trailing space and /'s from the namespace path. +func cleanNamespace(ns string) string { + ns = strings.TrimSpace(ns) + ns = strings.Trim(ns, "/") + return ns +} + +// cleanNamespaces removes leading and trailing space and /'s from the namespace paths. +func cleanNamespaces(namespaces []string) []string { + cleaned := make([]string, len(namespaces)) + for i, ns := range namespaces { + cleaned[i] = cleanNamespace(ns) + } + return cleaned +} + +func (c *EventsSubscribeCommands) subscribeRequest(client *api.Client, path string) error { + r := client.NewRequest("GET", "/v1/"+path) + u := r.URL + if u.Scheme == "http" { + u.Scheme = "ws" + } else { + u.Scheme = "wss" + } + q := u.Query() + q.Set("json", "true") + if len(c.namespaces) > 0 { + q["namespaces"] = cleanNamespaces(c.namespaces) + } + bexprFilter := strings.TrimSpace(c.bexprFilter) + if bexprFilter != "" { + q.Set("filter", bexprFilter) + } + u.RawQuery = q.Encode() + client.AddHeader("X-Vault-Token", client.Token()) + client.AddHeader("X-Vault-Namespace", client.Namespace()) + ctx := context.Background() + + // Follow redirects in case our request is forwarded to the leader.
+ url := u.String() + var conn *websocket.Conn + var err error + for attempt := 0; attempt < 10; attempt++ { + var resp *http.Response + conn, resp, err = websocket.Dial(ctx, url, &websocket.DialOptions{ + HTTPClient: client.CloneConfig().HttpClient, + HTTPHeader: client.Headers(), + }) + + if err == nil { + break + } + + switch { + case resp == nil: + return err + case resp.StatusCode == http.StatusTemporaryRedirect: + url = resp.Header.Get("Location") + continue + case resp.StatusCode == http.StatusNotFound: + return errors.New("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled") + default: + return err + } + } + + if conn == nil { + return fmt.Errorf("too many redirects") + } + defer conn.Close(websocket.StatusNormalClosure, "") + + for { + _, message, err := conn.Read(ctx) + if err != nil { + return err + } + _, err = os.Stdout.Write(message) + if err != nil { + return err + } + } +} diff --git a/command/events_test.go b/command/events_test.go new file mode 100644 index 000000000000..dfeb12d706b2 --- /dev/null +++ b/command/events_test.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testEventsSubscribeCommand(tb testing.TB) (*cli.MockUi, *EventsSubscribeCommands) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &EventsSubscribeCommands{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +// TestEventsSubscribeCommand_Run tests that the command argument parsing is working as expected. +func TestEventsSubscribeCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testEventsSubscribeCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } +} diff --git a/command/format.go b/command/format.go index 6543880fb2d0..548a9a089c85 100644 --- a/command/format.go +++ b/command/format.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
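Editorial aside: for reviewers unfamiliar with nhooyr.io/websocket, a minimal standalone subscriber using the same API surface as the command above might look like the sketch below. The address, token, and event type are placeholders, not values taken from this change.

package main

import (
	"context"
	"fmt"
	"net/http"

	"nhooyr.io/websocket"
)

func main() {
	ctx := context.Background()

	// Placeholder dev-server values; the real command derives these from the
	// configured api.Client instead.
	headers := http.Header{}
	headers.Set("X-Vault-Token", "dev-only-token")

	conn, _, err := websocket.Dial(ctx,
		"ws://127.0.0.1:8200/v1/sys/events/subscribe/kv-v2/data-write?json=true",
		&websocket.DialOptions{HTTPHeader: headers})
	if err != nil {
		panic(err)
	}
	defer conn.Close(websocket.StatusNormalClosure, "")

	// One JSON-serialized event arrives per websocket message.
	for {
		_, msg, err := conn.Read(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(msg))
	}
}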
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -11,8 +14,8 @@ import ( "time" "github.com/ghodss/yaml" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/ryanuber/columnize" ) @@ -323,13 +326,14 @@ func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, data interface{}) error { var status SealStatusOutput = data.(SealStatusOutput) var sealPrefix string - if status.RecoverySeal { - sealPrefix = "Recovery " - } out := []string{} out = append(out, "Key | Value") - out = append(out, fmt.Sprintf("%sSeal Type | %s", sealPrefix, status.Type)) + out = append(out, fmt.Sprintf("Seal Type | %s", status.Type)) + if status.RecoverySeal { + sealPrefix = "Recovery " + out = append(out, fmt.Sprintf("Recovery Seal Type | %s", status.RecoverySealType)) + } out = append(out, fmt.Sprintf("Initialized | %t", status.Initialized)) out = append(out, fmt.Sprintf("Sealed | %t", status.Sealed)) out = append(out, fmt.Sprintf("Total %sShares | %d", sealPrefix, status.N)) @@ -557,6 +561,9 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error { for _, constraint := range constraintSet.Any { out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_id %s %s", k, constraint.Type, hopeDelim, constraint.ID)) out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_uses_passcode %s %t", k, constraint.Type, hopeDelim, constraint.UsesPasscode)) + if constraint.Name != "" { + out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_name %s %s", k, constraint.Type, hopeDelim, constraint.Name)) + } } } } else { // Token information only makes sense if no further MFA requirement (i.e. if we actually have a token) diff --git a/command/format_test.go b/command/format_test.go index 9c950af8f4de..77093a3e29b7 100644 --- a/command/format_test.go +++ b/command/format_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -13,11 +16,10 @@ import ( "github.com/hashicorp/vault/sdk/helper/jsonutil" ) -var output string - type mockUi struct { t *testing.T SampleData string + outputData *string } func (m mockUi) Ask(_ string) (string, error) { @@ -29,14 +31,15 @@ func (m mockUi) AskSecret(_ string) (string, error) { m.t.FailNow() return "", nil } -func (m mockUi) Output(s string) { output = s } +func (m mockUi) Output(s string) { *m.outputData = s } func (m mockUi) Info(s string) { m.t.Log(s) } func (m mockUi) Error(s string) { m.t.Log(s) } func (m mockUi) Warn(s string) { m.t.Log(s) } func TestJsonFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "json") - ui := mockUi{t: t, SampleData: "something"} + var output string + ui := mockUi{t: t, SampleData: "something", outputData: &output} if err := outputWithFormat(ui, nil, ui); err != 0 { t.Fatal(err) } @@ -53,7 +56,8 @@ func TestJsonFormatter(t *testing.T) { func TestYamlFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "yaml") - ui := mockUi{t: t, SampleData: "something"} + var output string + ui := mockUi{t: t, SampleData: "something", outputData: &output} if err := outputWithFormat(ui, nil, ui); err != 0 { t.Fatal(err) } @@ -71,7 +75,8 @@ func TestYamlFormatter(t *testing.T) { func TestTableFormatter(t *testing.T) { os.Setenv(EnvVaultFormat, "table") - ui := mockUi{t: t} + var output string + ui := mockUi{t: t, outputData: &output} // Testing secret formatting s := api.Secret{Data: map[string]interface{}{"k": "something"}} @@ -88,7 +93,8 @@ func TestTableFormatter(t *testing.T) { // fields in the embedded struct explicitly. It also checks the spacing, // indentation, and delimiters of table formatting explicitly. func TestStatusFormat(t *testing.T) { - ui := mockUi{t: t} + var output string + ui := mockUi{t: t, outputData: &output} os.Setenv(EnvVaultFormat, "table") statusHA := getMockStatusData(false) @@ -102,6 +108,7 @@ func TestStatusFormat(t *testing.T) { expectedOutputString := `Key Value --- ----- +Seal Type type Recovery Seal Type type Initialized true Sealed true @@ -134,6 +141,7 @@ Warnings [warning]` expectedOutputString = `Key Value --- ----- +Seal Type type Recovery Seal Type type Initialized true Sealed true @@ -161,21 +169,22 @@ func getMockStatusData(emptyFields bool) SealStatusOutput { var sealStatusResponseMock api.SealStatusResponse if !emptyFields { sealStatusResponseMock = api.SealStatusResponse{ - Type: "type", - Initialized: true, - Sealed: true, - T: 1, - N: 2, - Progress: 3, - Nonce: "nonce", - Version: "version", - BuildDate: "build date", - Migration: true, - ClusterName: "cluster name", - ClusterID: "cluster id", - RecoverySeal: true, - StorageType: "storage type", - Warnings: []string{"warning"}, + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "cluster name", + ClusterID: "cluster id", + RecoverySeal: true, + RecoverySealType: "type", + StorageType: "storage type", + Warnings: []string{"warning"}, } // must initialize this struct without explicit field names due to embedding @@ -194,20 +203,21 @@ func getMockStatusData(emptyFields bool) SealStatusOutput { } } else { sealStatusResponseMock = api.SealStatusResponse{ - Type: "type", - Initialized: true, - Sealed: true, - T: 1, - N: 2, - Progress: 3, - Nonce: "nonce", - Version: "version", - BuildDate: "build date", - Migration: true, - ClusterName: "", - ClusterID: "", - 
RecoverySeal: true, - StorageType: "", + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "", + ClusterID: "", + RecoverySeal: true, + StorageType: "", + RecoverySealType: "type", } // must initialize this struct without explicit field names due to embedding diff --git a/command/generaterootkind_enumer.go b/command/generaterootkind_enumer.go new file mode 100644 index 000000000000..a53d2846de82 --- /dev/null +++ b/command/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=generateRootKind -trimprefix=generateRoot"; DO NOT EDIT. + +package command + +import ( + "fmt" +) + +const _generateRootKindName = "RegularDRRecovery" + +var _generateRootKindIndex = [...]uint8{0, 7, 9, 17} + +func (i generateRootKind) String() string { + if i < 0 || i >= generateRootKind(len(_generateRootKindIndex)-1) { + return fmt.Sprintf("generateRootKind(%d)", i) + } + return _generateRootKindName[_generateRootKindIndex[i]:_generateRootKindIndex[i+1]] +} + +var _generateRootKindValues = []generateRootKind{0, 1, 2} + +var _generateRootKindNameToValueMap = map[string]generateRootKind{ + _generateRootKindName[0:7]: 0, + _generateRootKindName[7:9]: 1, + _generateRootKindName[9:17]: 2, +} + +// generateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func generateRootKindString(s string) (generateRootKind, error) { + if val, ok := _generateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to generateRootKind values", s) +} + +// generateRootKindValues returns all values of the enum +func generateRootKindValues() []generateRootKind { + return _generateRootKindValues +} + +// IsAgenerateRootKind returns "true" if the value is listed in the enum definition. "false" otherwise +func (i generateRootKind) IsAgenerateRootKind() bool { + for _, v := range _generateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/command/healthcheck/healthcheck.go b/command/healthcheck/healthcheck.go index 53910ea446bd..ba5c9b419638 100644 --- a/command/healthcheck/healthcheck.go +++ b/command/healthcheck/healthcheck.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
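Editorial aside: the generated file above follows the usual enumer shape, slicing one packed name string by index. A sketch of the intended round trip from within package command (illustrative; the generateRoot* constants live in the hand-written source):

func demoGenerateRootKind() {
	kind, err := generateRootKindString("Recovery")
	if err != nil {
		panic(err) // name was not one of Regular, DR, Recovery
	}
	fmt.Println(kind.String())              // "Recovery"
	fmt.Println(kind.IsAgenerateRootKind()) // true
}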
+// SPDX-License-Identifier: BUSL-1.1 + /* * The healthcheck package attempts to allow generic checks of arbitrary * engines, while providing a common framework with some performance @@ -25,6 +28,7 @@ package healthcheck import ( + "context" "fmt" "strings" @@ -118,9 +122,13 @@ func (e *Executor) Execute() (map[string][]*Result, error) { return nil, fmt.Errorf("failed to evaluate %v: %w", checker.Name(), err) } + if results == nil { + results = []*Result{} + } + for _, result := range results { result.Endpoint = e.templatePath(result.Endpoint) - result.StatusDisplay = ResultStatusNameMap[result.Status] + result.StatusDisplay = result.Status.String() } ret[checker.Name()] = results @@ -162,7 +170,11 @@ func (e *Executor) FetchIfNotFetched(op logical.Operation, rawPath string) (*Pat return nil, fmt.Errorf("unknown operation: %v on %v", op, path) } - response, err := e.Client.Logical().ReadRawWithData(path, data) + // client.ReadRaw* methods require a manual timeout override + ctx, cancel := context.WithTimeout(context.Background(), e.Client.ClientTimeout()) + defer cancel() + + response, err := e.Client.Logical().ReadRawWithDataWithContext(ctx, path, data) ret.Response = response if err != nil { ret.FetchError = err @@ -240,6 +252,7 @@ type Check interface { Evaluate(e *Executor) ([]*Result, error) } +//go:generate enumer -type=ResultStatus -trimprefix=Result -transform=snake type ResultStatus int const ( @@ -252,16 +265,6 @@ const ( ResultInsufficientPermissions ) -var ResultStatusNameMap = map[ResultStatus]string{ - ResultNotApplicable: "not_applicable", - ResultOK: "ok", - ResultInformational: "informational", - ResultWarning: "warning", - ResultCritical: "critical", - ResultInvalidVersion: "invalid_version", - ResultInsufficientPermissions: "insufficient_permissions", -} - var NameResultStatusMap = map[string]ResultStatus{ "not_applicable": ResultNotApplicable, "ok": ResultOK, diff --git a/command/healthcheck/pki.go b/command/healthcheck/pki.go index edec1523c4b6..6522cdbc83a7 100644 --- a/command/healthcheck/pki.go +++ b/command/healthcheck/pki.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
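Editorial aside: the ReadRaw* change above matters because raw reads bypass the client's configured timeout, so the executor now bounds them explicitly. The same pattern in isolation (sketch; client is assumed to be a configured *api.Client and the path is a placeholder):

ctx, cancel := context.WithTimeout(context.Background(), client.ClientTimeout())
defer cancel()

// The raw response embeds *http.Response, so the body must be closed.
resp, err := client.Logical().ReadRawWithDataWithContext(ctx, "pki/issuer/default", nil)
if err != nil {
	return err
}
defer resp.Body.Close()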
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -47,7 +50,7 @@ func parsePEM(contents string) ([]byte, error) { return pemBlock.Bytes, nil } -func parsePEMCert(contents string) (*x509.Certificate, error) { +func ParsePEMCert(contents string) (*x509.Certificate, error) { parsed, err := parsePEM(contents) if err != nil { return nil, err @@ -89,7 +92,7 @@ func pkiFetchIssuer(e *Executor, issuer string, versionError func()) (bool, *Pat } if len(issuerRet.ParsedCache) == 0 { - cert, err := parsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) if err != nil { return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) } @@ -114,7 +117,7 @@ func pkiFetchIssuerEntry(e *Executor, issuer string, versionError func()) (bool, } if len(issuerRet.ParsedCache) == 0 { - cert, err := parsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) if err != nil { return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) } @@ -222,7 +225,7 @@ func pkiFetchLeaf(e *Executor, serial string, versionError func()) (bool, *PathF } if len(leafRet.ParsedCache) == 0 { - cert, err := parsePEMCert(leafRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(leafRet.Secret.Data["certificate"].(string)) if err != nil { return true, leafRet, nil, fmt.Errorf("unable to parse leaf %v's certificate: %w", serial, err) } diff --git a/command/healthcheck/pki_allow_acme_headers.go b/command/healthcheck/pki_allow_acme_headers.go new file mode 100644 index 000000000000..186cc40af966 --- /dev/null +++ b/command/healthcheck/pki_allow_acme_headers.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. 
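Editorial aside: ParsePEMCert, now exported for reuse outside the package, is essentially the standard-library round trip below (sketch assuming a single CERTIFICATE block; pemContents is a placeholder, imports are encoding/pem, crypto/x509, and errors):

block, _ := pem.Decode([]byte(pemContents))
if block == nil {
	return nil, errors.New("no PEM block found")
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
	return nil, err
}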
+// SPDX-License-Identifier: BUSL-1.1 + +package healthcheck + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type AllowAcmeHeaders struct { + Enabled bool + UnsupportedVersion bool + + TuneFetcher *PathFetch + TuneData map[string]interface{} + + AcmeConfigFetcher *PathFetch +} + +func NewAllowAcmeHeaders() Check { + return &AllowAcmeHeaders{} +} + +func (h *AllowAcmeHeaders) Name() string { + return "allow_acme_headers" +} + +func (h *AllowAcmeHeaders) IsEnabled() bool { + return h.Enabled +} + +func (h *AllowAcmeHeaders) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *AllowAcmeHeaders) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *AllowAcmeHeaders) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + _, h.TuneFetcher, h.TuneData, err = fetchMountTune(e, func() { + h.UnsupportedVersion = true + }) + if err != nil { + return err + } + + return nil +} + +func (h *AllowAcmeHeaders) Evaluate(e *Executor) ([]*Result, error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without read access to ACME configuration, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "ACME is not enabled, no additional response headers required.", + } + return []*Result{&ret}, nil + } + + if h.TuneFetcher.IsSecretPermissionsError() { + msg := "Without access to mount tune information, this health check is unable to function." 
+ return craftInsufficientPermissionResult(e, h.TuneFetcher.Path, msg), nil + } + + resp, err := StringList(h.TuneData["allowed_response_headers"]) + if err != nil { + return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) + } + + requiredResponseHeaders := []string{"Replay-Nonce", "Link", "Location"} + foundResponseHeaders := []string{} + for _, param := range resp { + for _, reqHeader := range requiredResponseHeaders { + if strings.EqualFold(param, reqHeader) { + foundResponseHeaders = append(foundResponseHeaders, reqHeader) + break + } + } + } + + foundAllHeaders := strutil.EquivalentSlices(requiredResponseHeaders, foundResponseHeaders) + + if !foundAllHeaders { + ret := Result{ + Status: ResultWarning, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount hasn't enabled 'Replay-Nonce', 'Link', 'Location' response headers; these are required for ACME to function.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount has enabled 'Replay-Nonce', 'Link', 'Location' response headers.", + } + return []*Result{&ret}, nil +} + +func craftInsufficientPermissionResult(e *Executor, path, errorMsg string) []*Result { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: path, + Message: errorMsg, + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + return []*Result{&ret} +} diff --git a/command/healthcheck/pki_allow_if_modified_since.go b/command/healthcheck/pki_allow_if_modified_since.go index 59f96611b118..38eaee9aab65 100644 --- a/command/healthcheck/pki_allow_if_modified_since.go +++ b/command/healthcheck/pki_allow_if_modified_since.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
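Editorial aside: operators failing the allow_acme_headers check can enable the required headers with a mount tune. A hedged sketch using the Go client ("pki" is a placeholder mount path; note that tuning sets the whole header list rather than appending to it):

err := client.Sys().TuneMount("pki", api.MountConfigInput{
	AllowedResponseHeaders: []string{"Replay-Nonce", "Link", "Location"},
})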
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -12,6 +15,7 @@ type AllowIfModifiedSince struct { UnsupportedVersion bool TuneData map[string]interface{} + Fetcher *PathFetch } func NewAllowIfModifiedSinceCheck() Check { @@ -42,15 +46,16 @@ func (h *AllowIfModifiedSince) LoadConfig(config map[string]interface{}) error { } func (h *AllowIfModifiedSince) FetchResources(e *Executor) error { - exit, _, data, err := fetchMountTune(e, func() { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { h.UnsupportedVersion = true }) - if exit { + + if exit || err != nil { return err } - - h.TuneData = data - return nil } @@ -59,22 +64,39 @@ func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err err ret := Result{ Status: ResultInvalidVersion, Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + Message: "This health check requires Vault 1.12+ but an earlier version of Vault Server was contacted, preventing this health check from running.", } return []*Result{&ret}, nil } - req, err := stringList(h.TuneData["passthrough_request_headers"]) + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + + req, err := StringList(h.TuneData["passthrough_request_headers"]) if err != nil { return nil, fmt.Errorf("unable to parse value from server for passthrough_request_headers: %w", err) } - resp, err := stringList(h.TuneData["allowed_response_headers"]) + resp, err := StringList(h.TuneData["allowed_response_headers"]) if err != nil { return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) } - var foundIMS bool = false + foundIMS := false for _, param := range req { if strings.EqualFold(param, "If-Modified-Since") { foundIMS = true @@ -82,7 +104,7 @@ func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err err } } - var foundLM bool = false + foundLM := false for _, param := range resp { if strings.EqualFold(param, "Last-Modified") { foundLM = true diff --git a/command/healthcheck/pki_audit_visibility.go b/command/healthcheck/pki_audit_visibility.go index 24f9be4f6a1f..e58543cb6f12 100644 --- a/command/healthcheck/pki_audit_visibility.go +++ b/command/healthcheck/pki_audit_visibility.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
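Editorial aside: the If-Modified-Since check above can likewise be satisfied with a single mount tune covering both header lists; sketch ("pki" again a placeholder mount path):

err := client.Sys().TuneMount("pki", api.MountConfigInput{
	PassthroughRequestHeaders: []string{"If-Modified-Since"},
	AllowedResponseHeaders:    []string{"Last-Modified"},
})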
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -58,6 +61,7 @@ type AuditVisibility struct { IgnoredParameters map[string]bool TuneData map[string]interface{} + Fetcher *PathFetch } func NewAuditVisibilityCheck() Check { @@ -83,7 +87,7 @@ func (h *AuditVisibility) DefaultConfig() map[string]interface{} { func (h *AuditVisibility) LoadConfig(config map[string]interface{}) error { var err error - coerced, err := stringList(config["ignored_parameters"]) + coerced, err := StringList(config["ignored_parameters"]) if err != nil { return fmt.Errorf("error parsing %v.ignored_parameters: %v", h.Name(), err) } @@ -100,35 +104,52 @@ } func (h *AuditVisibility) FetchResources(e *Executor) error { - exit, _, data, err := fetchMountTune(e, func() { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { h.UnsupportedVersion = true }) - if exit { + + if exit || err != nil { return err } - - h.TuneData = data - return nil } func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { if h.UnsupportedVersion { - // Shouldn't happen; /certs has been around forever. ret := Result{ Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/certs", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", } return []*Result{&ret}, nil } + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + sourceMap := map[string][]string{ "audit_non_hmac_request_keys": VisibleReqParams, "audit_non_hmac_response_keys": VisibleRespParams, } for source, visibleList := range sourceMap { - actual, err := stringList(h.TuneData[source]) + actual, err := StringList(h.TuneData[source]) if err != nil { return nil, fmt.Errorf("error parsing %v from server: %v", source, err) } @@ -158,7 +179,7 @@ func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { "audit_non_hmac_response_keys": HiddenRespParams, } for source, hiddenList := range sourceMap { - actual, err := stringList(h.TuneData[source]) + actual, err := StringList(h.TuneData[source]) if err != nil { return nil, fmt.Errorf("error parsing %v from server: %v", source, err) } diff --git a/command/healthcheck/pki_ca_validity_period.go b/command/healthcheck/pki_ca_validity_period.go index c971aed52208..9ef56d7b9f15 100644 --- a/command/healthcheck/pki_ca_validity_period.go +++ b/command/healthcheck/pki_ca_validity_period.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -62,9 +65,8 @@ func (h *CAValidityPeriod) LoadConfig(config map[string]interface{}) error { if len(name_split) != 3 || name_split[1] != "expiry" { return fmt.Errorf("bad parameter: %v / %v / %v", parameter, len(name_split), name_split[1]) } - - status, present := NameResultStatusMap[name_split[2]] - if !present { + status, err := ResultStatusString(name_split[2]) + if err != nil { return fmt.Errorf("bad parameter: %v's type %v isn't in name map", parameter, name_split[2]) } diff --git a/command/healthcheck/pki_crl_validity_period.go b/command/healthcheck/pki_crl_validity_period.go index ae31e73324c1..c4d6fd8edb34 100644 --- a/command/healthcheck/pki_crl_validity_period.go +++ b/command/healthcheck/pki_crl_validity_period.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -169,7 +172,7 @@ func (h *CRLValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) ret.Status = ResultCritical ret.Message = fmt.Sprintf("CRL's validity is outside of suggested rotation window: CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) - if crlDisabled == true { + if crlDisabled { ret.Status = ResultInformational ret.Message += " Because the CRL is disabled, this is less of a concern." } @@ -192,7 +195,7 @@ func (h *CRLValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) ret.Status = ResultCritical ret.Message = fmt.Sprintf("Delta CRL's validity is outside of suggested rotation window: Delta CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this Delta CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) - if crlDisabled == true { + if crlDisabled { ret.Status = ResultInformational ret.Message += " Because the CRL is disabled, this is less of a concern." } diff --git a/command/healthcheck/pki_enable_acme_issuance.go b/command/healthcheck/pki_enable_acme_issuance.go new file mode 100644 index 000000000000..853a3475e211 --- /dev/null +++ b/command/healthcheck/pki_enable_acme_issuance.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. 
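Editorial aside: with the switch to the generated ResultStatusString above, configured status names round-trip through the enum. A sketch of the parse step from within package healthcheck (illustrative input):

status, err := ResultStatusString("critical")
if err != nil {
	// the config named a status outside the enum
}
fmt.Println(status.String()) // "critical"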
+// SPDX-License-Identifier: BUSL-1.1 + +package healthcheck + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "net/http" + "net/url" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" + + "golang.org/x/crypto/acme" +) + +type EnableAcmeIssuance struct { + Enabled bool + UnsupportedVersion bool + + AcmeConfigFetcher *PathFetch + ClusterConfigFetcher *PathFetch + TotalIssuers int + RootIssuers int +} + +func NewEnableAcmeIssuance() Check { + return &EnableAcmeIssuance{} +} + +func (h *EnableAcmeIssuance) Name() string { + return "enable_acme_issuance" +} + +func (h *EnableAcmeIssuance) IsEnabled() bool { + return h.Enabled +} + +func (h *EnableAcmeIssuance) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *EnableAcmeIssuance) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *EnableAcmeIssuance) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.ClusterConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/cluster") + if err != nil { + return err + } + + if h.ClusterConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.TotalIssuers, h.RootIssuers, err = doesMountContainOnlyRootIssuers(e) + return err +} + +func doesMountContainOnlyRootIssuers(e *Executor) (int, int, error) { + exit, _, issuers, err := pkiFetchIssuersList(e, func() {}) + if exit || err != nil { + return 0, 0, err + } + + totalIssuers := 0 + rootIssuers := 0 + + for _, issuer := range issuers { + skip, _, cert, err := pkiFetchIssuer(e, issuer, func() {}) + + if skip || err != nil { + if err != nil { + return 0, 0, err + } + continue + } + totalIssuers++ + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + continue + } + if err := cert.CheckSignatureFrom(cert); err != nil { + continue + } + rootIssuers++ + } + + return totalIssuers, rootIssuers, nil +} + +func isAcmeEnabled(fetcher *PathFetch) (bool, error) { + isEnabledRaw, ok := fetcher.Secret.Data["enabled"] + if !ok { + return false, fmt.Errorf("enabled configuration field missing from acme config") + } + + parseBool, err := parseutil.ParseBool(isEnabledRaw) + if err != nil { + return false, fmt.Errorf("failed parsing 'enabled' field from ACME config: %w", err) + } + + return parseBool, nil +} + +func verifyLocalPathUrl(h *EnableAcmeIssuance) error { + localPathRaw, ok := h.ClusterConfigFetcher.Secret.Data["path"] + if !ok { + return fmt.Errorf("'path' field missing from config") + } + + localPath, err := parseutil.ParseString(localPathRaw) + if err != nil { + return fmt.Errorf("failed converting 'path' field from local config: %w", err) + } + + if localPath == "" { + return fmt.Errorf("'path' field not configured within /{{mount}}/config/cluster") + } + + parsedUrl, err := url.Parse(localPath) + if err != nil { + return fmt.Errorf("failed to parse URL from path config: %v: %w", localPathRaw, err) + } + + if parsedUrl.Scheme != "https" { + return fmt.Errorf("the configured 'path' field in /{{mount}}/config/cluster was not using an https scheme") + } + + // Avoid issues with SSL certificates for this 
check; we just want to validate that we would + // hit an ACME server with the path they specified in configuration + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + acmeDirectoryUrl := parsedUrl.JoinPath("/acme/", "directory") + acmeClient := acme.Client{HTTPClient: client, DirectoryURL: acmeDirectoryUrl.String()} + _, err = acmeClient.Discover(context.Background()) + if err != nil { + return fmt.Errorf("using configured 'path' field ('%s') in /{{mount}}/config/cluster failed to reach the ACME"+ + " directory: %s: %w", parsedUrl.String(), acmeDirectoryUrl.String(), err) + } + + return nil +} + +func (h *EnableAcmeIssuance) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + if h.TotalIssuers == 0 { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "No issuers in mount, ACME is not required.", + } + return []*Result{&ret}, nil + } + + if h.TotalIssuers == h.RootIssuers { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Mount contains only root issuers, ACME is not required.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultInformational, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Consider enabling ACME to support a self-rotating PKI infrastructure.", + } + return []*Result{&ret}, nil + } + + if h.ClusterConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.ClusterConfigFetcher.Path, msg), nil + } + + localPathIssue := verifyLocalPathUrl(h) + + if localPathIssue != nil { + ret := Result{ + Status: ResultWarning, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled in config but not functional: " + localPathIssue.Error(), + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled and successfully connected to the ACME directory.", + } + return []*Result{&ret}, nil +} diff --git a/command/healthcheck/pki_enable_auto_tidy.go b/command/healthcheck/pki_enable_auto_tidy.go index dca555fd0326..cabab17a6b16 100644 --- a/command/healthcheck/pki_enable_auto_tidy.go +++ b/command/healthcheck/pki_enable_auto_tidy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( diff --git a/command/healthcheck/pki_hardware_backed_root.go b/command/healthcheck/pki_hardware_backed_root.go index 199a781fe9bb..7fbe306ee746 100644 --- a/command/healthcheck/pki_hardware_backed_root.go +++ b/command/healthcheck/pki_hardware_backed_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
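Editorial aside: the directory probe in verifyLocalPathUrl reduces to the x/crypto/acme calls below (sketch; the URL is a placeholder, and TLS verification is skipped only because the probe targets the operator's own cluster):

tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
ac := acme.Client{
	HTTPClient:   &http.Client{Transport: tr},
	DirectoryURL: "https://vault.example.com/v1/pki/acme/directory",
}
if _, err := ac.Discover(context.Background()); err != nil {
	// the configured cluster 'path' does not serve an ACME directory
}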
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -13,12 +16,14 @@ type HardwareBackedRoot struct { UnsupportedVersion bool + FetchIssues map[string]*PathFetch IssuerKeyMap map[string]string KeyIsManaged map[string]string } func NewHardwareBackedRootCheck() Check { return &HardwareBackedRoot{ + FetchIssues: make(map[string]*PathFetch), IssuerKeyMap: make(map[string]string), KeyIsManaged: make(map[string]string), } @@ -64,6 +69,7 @@ func (h *HardwareBackedRoot) FetchResources(e *Executor) error { if err != nil { return err } + h.FetchIssues[issuer] = ret continue } @@ -83,13 +89,15 @@ } h.IssuerKeyMap[issuer] = keyId - skip, _, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() { + skip, ret, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() { h.UnsupportedVersion = true }) if skip || err != nil || keyEntry == nil { if err != nil { return err } + + h.FetchIssues[issuer] = ret continue } @@ -112,6 +120,25 @@ return []*Result{&ret}, nil } + for issuer, fetchPath := range h.FetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.IssuerKeyMap, issuer) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } + } + for name, keyId := range h.IssuerKeyMap { var ret Result ret.Status = ResultInformational diff --git a/command/healthcheck/pki_role_allows_glob_wildcards.go b/command/healthcheck/pki_role_allows_glob_wildcards.go index c78fad8653c3..f3edd8bd8a12 100644 --- a/command/healthcheck/pki_role_allows_glob_wildcards.go +++ b/command/healthcheck/pki_role_allows_glob_wildcards.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -10,14 +13,16 @@ type RoleAllowsGlobWildcards struct { Enabled bool UnsupportedVersion bool - NoPerms bool - RoleEntryMap map[string]map[string]interface{} + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} } func NewRoleAllowsGlobWildcardsCheck() Check { return &RoleAllowsGlobWildcards{ - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -49,7 +54,7 @@ func (h *RoleAllowsGlobWildcards) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -60,7 +65,7 @@ }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -84,18 +89,37 @@ } return []*Result{&ret}, nil } - if h.NoPerms { + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check.", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission to list the roles. This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } } for role, entry := range h.RoleEntryMap { @@ -141,7 +165,7 @@ func (h *RoleAllowsGlobWildcards) Evaluate(e *Executor) (results []*Result, err ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: fmt.Sprintf("Role currently allows wildcard issuance while allowing globs in allowed_domains (%v). Because globs can expand to one or more wildcard characters, including wildcards under additional subdomains, these options are dangerous to enable together.
If glob domains are required to be enabled, it is suggested to either disable wildcard issuance if not desired, or create two separate roles -- one with wildcard issuance for specified domains and one with glob matching enabled for concrete domain identifiers.", allowedDomains), } diff --git a/command/healthcheck/pki_role_allows_localhost.go b/command/healthcheck/pki_role_allows_localhost.go index 570ffdf90651..aec00dc7942e 100644 --- a/command/healthcheck/pki_role_allows_localhost.go +++ b/command/healthcheck/pki_role_allows_localhost.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -9,14 +12,16 @@ type RoleAllowsLocalhost struct { Enabled bool UnsupportedVersion bool - NoPerms bool - RoleEntryMap map[string]map[string]interface{} + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} } func NewRoleAllowsLocalhostCheck() Check { return &RoleAllowsLocalhost{ - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -48,7 +53,7 @@ func (h *RoleAllowsLocalhost) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -59,7 +64,7 @@ }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -83,18 +88,38 @@ } return []*Result{&ret}, nil } - if h.NoPerms { + + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission to list the roles. This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + } } for role, entry := range h.RoleEntryMap { @@ -115,7 +140,7 @@ func (h *RoleAllowsLocalhost) Evaluate(e *Executor) (results []*Result, err erro ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: fmt.Sprintf("Role currently allows localhost issuance with a non-empty allowed_domains (%v): this role is intended for issuing other hostnames and the allow_localhost=true option may be overlooked by operators. If this role is intended to issue certificates valid for localhost, consider setting allow_localhost=false and explicitly adding localhost to the list of allowed domains.", allowedDomains), } diff --git a/command/healthcheck/pki_role_no_store_false.go b/command/healthcheck/pki_role_no_store_false.go index 6e13e222d2cf..30db9dce1284 100644 --- a/command/healthcheck/pki_role_no_store_false.go +++ b/command/healthcheck/pki_role_no_store_false.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -11,19 +14,20 @@ import ( type RoleNoStoreFalse struct { Enabled bool UnsupportedVersion bool - NoPerms bool AllowedRoles map[string]bool - CertCounts int - RoleEntryMap map[string]map[string]interface{} - CRLConfig *PathFetch + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} + CRLConfig *PathFetch } func NewRoleNoStoreFalseCheck() Check { return &RoleNoStoreFalse{ - AllowedRoles: make(map[string]bool), - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + AllowedRoles: make(map[string]bool), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -64,7 +68,7 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -75,7 +79,7 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -86,14 +90,6 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { h.RoleEntryMap[role] = entry } - exit, _, leaves, err := pkiFetchLeavesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - return err - } - h.CertCounts = len(leaves) - // Check if the issuer is fetched yet. configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl") if err != nil { @@ -116,18 +112,37 @@ func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) return []*Result{&ret}, nil } - if h.NoPerms { + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission either to list the roles. 
This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } } crlAutoRebuild := false @@ -159,7 +174,7 @@ func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: "Role currently stores every issued certificate (no_store=false). Too many issued and/or revoked certificates can exceed Vault's storage limits and make operations slow. It is encouraged to enable auto-rebuild of CRLs to prevent every revocation from creating a new CRL, and to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes and/or BYOC revocation instead.", } diff --git a/command/healthcheck/pki_root_issued_leaves.go b/command/healthcheck/pki_root_issued_leaves.go index 3252a91fb041..85359b2e59a9 100644 --- a/command/healthcheck/pki_root_issued_leaves.go +++ b/command/healthcheck/pki_root_issued_leaves.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
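Editorial aside: the no_store_false check above softens its verdict when CRL auto-rebuild is enabled, and operators can turn that on as the warning suggests. A sketch with the Go client ("pki" is a placeholder mount; auto_rebuild is the PKI CRL config field the check reads):

_, err := client.Logical().Write("pki/config/crl", map[string]interface{}{
	"auto_rebuild": true,
})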
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -14,12 +17,14 @@ type RootIssuedLeaves struct { CertsToFetch int + FetchIssues map[string]*PathFetch RootCertMap map[string]*x509.Certificate LeafCertMap map[string]*x509.Certificate } func NewRootIssuedLeavesCheck() Check { return &RootIssuedLeaves{ + FetchIssues: make(map[string]*PathFetch), RootCertMap: make(map[string]*x509.Certificate), LeafCertMap: make(map[string]*x509.Certificate), } @@ -64,9 +69,10 @@ func (h *RootIssuedLeaves) FetchResources(e *Executor) error { } for _, issuer := range issuers { - skip, _, cert, err := pkiFetchIssuer(e, issuer, func() { + skip, pathFetch, cert, err := pkiFetchIssuer(e, issuer, func() { h.UnsupportedVersion = true }) + h.FetchIssues[issuer] = pathFetch if skip || err != nil { if err != nil { return err @@ -85,10 +91,15 @@ h.RootCertMap[issuer] = cert } - exit, _, leaves, err := pkiFetchLeavesList(e, func() { + exit, f, leaves, err := pkiFetchLeavesList(e, func() { h.UnsupportedVersion = true }) if exit || err != nil { + if f != nil && f.IsSecretPermissionsError() { + for _, issuer := range issuers { + h.FetchIssues[issuer] = f + } + } return err } @@ -130,6 +141,25 @@ func (h *RootIssuedLeaves) Evaluate(e *Executor) (results []*Result, err error) return []*Result{&ret}, nil } + for issuer, fetchPath := range h.FetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RootCertMap, issuer) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } + } + issuerHasLeaf := make(map[string]bool) for serial, leaf := range h.LeafCertMap { if len(issuerHasLeaf) == len(h.RootCertMap) { diff --git a/command/healthcheck/pki_tidy_last_run.go b/command/healthcheck/pki_tidy_last_run.go index e079212333d9..a3f1c929ca0c 100644 --- a/command/healthcheck/pki_tidy_last_run.go +++ b/command/healthcheck/pki_tidy_last_run.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -93,7 +96,7 @@ func (h *TidyLastRun) Evaluate(e *Executor) (results []*Result, err error) { ret := Result{ Status: ResultInsufficientPermissions, Endpoint: "/{{mount}}/tidy-status", - Message: "Without this information, this health check is unable tof unction.", + Message: "Without this information, this health check is unable to function.", } if e.Client.Token() == "" { diff --git a/command/healthcheck/pki_too_many_certs.go b/command/healthcheck/pki_too_many_certs.go index 8bd61003bceb..f7873e640e00 100644 --- a/command/healthcheck/pki_too_many_certs.go +++ b/command/healthcheck/pki_too_many_certs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -14,6 +17,7 @@ type TooManyCerts struct { CountWarning int CertCounts int + FetchIssue *PathFetch } func NewTooManyCertsCheck() Check { @@ -60,7 +64,9 @@ func (h *TooManyCerts) FetchResources(e *Executor) error { exit, leavesRet, _, err := pkiFetchLeavesList(e, func() { h.UnsupportedVersion = true }) - if exit { + h.FetchIssue = leavesRet + + if exit || err != nil { return err } @@ -80,6 +86,23 @@ func (h *TooManyCerts) Evaluate(e *Executor) (results []*Result, err error) { return []*Result{&ret}, nil } + if h.FetchIssue != nil && h.FetchIssue.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: h.FetchIssue.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to list the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to list the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + ret := Result{ Status: ResultOK, Endpoint: "/{{mount}}/certs", diff --git a/command/healthcheck/resultstatus_enumer.go b/command/healthcheck/resultstatus_enumer.go new file mode 100644 index 000000000000..eb8182cb4aca --- /dev/null +++ b/command/healthcheck/resultstatus_enumer.go @@ -0,0 +1,54 @@ +// Code generated by "enumer -type=ResultStatus -trimprefix=Result -transform=snake"; DO NOT EDIT. + +package healthcheck + +import ( + "fmt" +) + +const _ResultStatusName = "not_applicableokinformationalwarningcriticalinvalid_versioninsufficient_permissions" + +var _ResultStatusIndex = [...]uint8{0, 14, 16, 29, 36, 44, 59, 83} + +func (i ResultStatus) String() string { + if i < 0 || i >= ResultStatus(len(_ResultStatusIndex)-1) { + return fmt.Sprintf("ResultStatus(%d)", i) + } + return _ResultStatusName[_ResultStatusIndex[i]:_ResultStatusIndex[i+1]] +} + +var _ResultStatusValues = []ResultStatus{0, 1, 2, 3, 4, 5, 6} + +var _ResultStatusNameToValueMap = map[string]ResultStatus{ + _ResultStatusName[0:14]: 0, + _ResultStatusName[14:16]: 1, + _ResultStatusName[16:29]: 2, + _ResultStatusName[29:36]: 3, + _ResultStatusName[36:44]: 4, + _ResultStatusName[44:59]: 5, + _ResultStatusName[59:83]: 6, +} + +// ResultStatusString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ResultStatusString(s string) (ResultStatus, error) { + if val, ok := _ResultStatusNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ResultStatus values", s) +} + +// ResultStatusValues returns all values of the enum +func ResultStatusValues() []ResultStatus { + return _ResultStatusValues +} + +// IsAResultStatus returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ResultStatus) IsAResultStatus() bool { + for _, v := range _ResultStatusValues { + if i == v { + return true + } + } + return false +} diff --git a/command/healthcheck/shared.go b/command/healthcheck/shared.go index e9d6a5a9964e..611b5337e618 100644 --- a/command/healthcheck/shared.go +++ b/command/healthcheck/shared.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( @@ -6,7 +9,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func stringList(source interface{}) ([]string, error) { +func StringList(source interface{}) ([]string, error) { if source == nil { return nil, nil } @@ -35,7 +38,7 @@ func stringList(source interface{}) ([]string, error) { func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { tuneRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/sys/mounts/{{mount}}/tune") if err != nil { - return true, nil, nil, err + return true, nil, nil, fmt.Errorf("failed to fetch mount tune information: %w", err) } if !tuneRet.IsSecretOK() { @@ -43,7 +46,7 @@ func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[str versionError() } - return true, nil, nil, nil + return true, tuneRet, nil, nil } var data map[string]interface{} = nil diff --git a/command/healthcheck/util.go b/command/healthcheck/util.go index 632fe1a286a3..d8a7ba945e33 100644 --- a/command/healthcheck/util.go +++ b/command/healthcheck/util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package healthcheck import ( diff --git a/command/kv.go b/command/kv.go index 2172576dbd6f..f17baf5d3ab7 100644 --- a/command/kv.go +++ b/command/kv.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*KVCommand)(nil) diff --git a/command/kv_delete.go b/command/kv_delete.go index 5555b9c71a89..67cc56ac4e77 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "path" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -153,7 +156,7 @@ func (c *KVDeleteCommand) Run(args []string) int { var fullPath string if v2 { secret, err = c.deleteV2(partialPath, mountPath, client) - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) } else { // v1 if mountFlagSyntax { @@ -192,13 +195,13 @@ func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) ( var secret *api.Secret switch { case len(c.flagVersions) > 0: - path = addPrefixToKVPath(path, mountPath, "delete") + path = addPrefixToKVPath(path, mountPath, "delete", false) data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } secret, err = client.Logical().Write(path, data) default: - path = addPrefixToKVPath(path, mountPath, "data") + path = addPrefixToKVPath(path, mountPath, "data", false) secret, err = client.Logical().Delete(path) } diff --git a/command/kv_destroy.go b/command/kv_destroy.go index 45cbca02518b..0299be4cea87 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
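Editorial aside: the boolean now threaded through the addPrefixToKVPath call sites above is defined later in kv_helpers.go; tracing that definition gives the following expectations (hypothetical inputs):

// skipIfExists=false always injects the API prefix:
addPrefixToKVPath("secret/foo", "secret/", "data", false) // "secret/data/foo"

// skipIfExists=true leaves an already-prefixed path untouched:
addPrefixToKVPath("secret/data/foo", "secret/", "data", true) // "secret/data/foo"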
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -152,7 +155,7 @@ func (c *KVDestroyCommand) Run(args []string) int { c.UI.Error("Destroy not supported on KV Version 1") return 1 } - destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy") + destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy", false) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go index 9c2a60143284..921c286e3aae 100644 --- a/command/kv_enable_versioning.go +++ b/command/kv_enable_versioning.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/kv_get.go b/command/kv_get.go index 057a787c4e28..e31c8a32a5c7 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -149,7 +152,7 @@ func (c *KVGetCommand) Run(args []string) int { var fullPath string // Add /data to v2 paths only if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) if c.flagVersion > 0 { versionParam = map[string]string{ diff --git a/command/kv_helpers.go b/command/kv_helpers.go index b362c3bb0713..ed3bc38118e5 100644 --- a/command/kv_helpers.go +++ b/command/kv_helpers.go @@ -1,15 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "context" "errors" "fmt" "io" - "path" + paths "path" + "sort" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func kvReadRequest(client *api.Client, path string, params map[string]string) (*api.Secret, error) { @@ -75,7 +80,9 @@ func kvPreflightVersionRequest(client *api.Client, path string) (string, int, er err = fmt.Errorf( `This output flag requires the success of a preflight request to determine the version of a KV secrets engine. Please -re-run this command with a token with read access to %s`, path) +re-run this command with a token with read access to %s. 
+Note that if the path you are trying to reach is a KV v2 path, your token's policy must +allow read access to that path in the format 'mount-path/data/foo', not just 'mount-path/foo'.`, path) } } @@ -121,15 +128,15 @@ func isKVv2(path string, client *api.Client) (string, bool, error) { return mountPath, version == 2, nil } -func addPrefixToKVPath(p, mountPath, apiPrefix string) string { - if p == mountPath || p == strings.TrimSuffix(mountPath, "/") { - return path.Join(mountPath, apiPrefix) +func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) string { + if path == mountPath || path == strings.TrimSuffix(mountPath, "/") { + return paths.Join(mountPath, apiPrefix) } - tp := strings.TrimPrefix(p, mountPath) + pathSuffix := strings.TrimPrefix(path, mountPath) for { // If the entire mountPath is included in the path, we are done - if tp != p { + if pathSuffix != path { break } // Trim the parts of the mountPath that are not included in the @@ -140,10 +147,16 @@ break } mountPath = strings.TrimSuffix(partialMountPath[1], "/") - tp = strings.TrimPrefix(tp, mountPath) + pathSuffix = strings.TrimPrefix(pathSuffix, mountPath) + } + + if skipIfExists { + if strings.HasPrefix(pathSuffix, apiPrefix) || strings.HasPrefix(pathSuffix, "/"+apiPrefix) { + return paths.Join(mountPath, pathSuffix) + } } - return path.Join(mountPath, apiPrefix, tp) + return paths.Join(mountPath, apiPrefix, pathSuffix) } func getHeaderForMap(header string, data map[string]interface{}) string { @@ -192,3 +205,65 @@ func padEqualSigns(header string, totalLen int) string { return fmt.Sprintf("%s %s %s", strings.Repeat("=", equalSigns/2), header, strings.Repeat("=", equalSigns/2)) } + +// walkSecretsTree dfs-traverses the secrets tree rooted at the given path +// and calls the `visit` functor for each of the directory and leaf paths. +// Note: for kv-v2, a "metadata" path is expected and "metadata" paths will be +// returned in the visit functor. +func walkSecretsTree(ctx context.Context, client *api.Client, path string, visit func(path string, directory bool) error) error { + resp, err := client.Logical().ListWithContext(ctx, path) + if err != nil { + return fmt.Errorf("could not list %q path: %w", path, err) + } + + if resp == nil || resp.Data == nil { + return fmt.Errorf("no value found at %q", path) + } + + keysRaw, ok := resp.Data["keys"] + if !ok { + return fmt.Errorf("unexpected list response at %q", path) + } + + keysRawSlice, ok := keysRaw.([]interface{}) + if !ok { + return fmt.Errorf("unexpected list response type %T at %q", keysRaw, path) + } + + keys := make([]string, 0, len(keysRawSlice)) + + for _, keyRaw := range keysRawSlice { + key, ok := keyRaw.(string) + if !ok { + return fmt.Errorf("unexpected key type %T at %q", keyRaw, path) + } + keys = append(keys, key) + } + + // sort the keys for a deterministic output + sort.Strings(keys) + + for _, key := range keys { + // the keys are relative to the current path: combine them + child := paths.Join(path, key) + + if strings.HasSuffix(key, "/") { + // visit the directory + if err := visit(child, true); err != nil { + return err + } + + // this is not a leaf node: we need to go deeper...
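+ // (recursion is bounded by the depth of the tree: each call descends exactly one level, so the walk terminates on any finite secrets tree)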
+ if err := walkSecretsTree(ctx, client, child, visit); err != nil { + return err + } + } else { + // this is a leaf node: add it to the list + if err := visit(child, false); err != nil { + return err + } + } + } + + return nil +} diff --git a/command/kv_helpers_test.go b/command/kv_helpers_test.go new file mode 100644 index 000000000000..06a1bb8ee9ab --- /dev/null +++ b/command/kv_helpers_test.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/api" +) + +// TestAddPrefixToKVPath tests the addPrefixToKVPath helper function +func TestAddPrefixToKVPath(t *testing.T) { + cases := map[string]struct { + path string + mountPath string + apiPrefix string + skipIfExists bool + expected string + }{ + "simple": { + path: "kv-v2/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/foo", + }, + + "multi-part": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "with-namespace": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "skip-if-exists-true": { + path: "kv-v2/data/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: true, + expected: "kv-v2/data/foo", + }, + + "skip-if-exists-false": { + path: "kv-v2/data/foo", + mountPath: "kv-v2", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/data/foo", + }, + + "skip-if-exists-with-namespace": { + path: "my/kv-v2/mount/path/metadata/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: true, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + actual := addPrefixToKVPath( + tc.path, + tc.mountPath, + tc.apiPrefix, + tc.skipIfExists, + ) + + if tc.expected != actual { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, actual) + } + }) + } +} + +// TestWalkSecretsTree tests the walkSecretsTree helper function +func TestWalkSecretsTree(t *testing.T) { + // test setup + client, closer := testVaultServer(t) + defer closer() + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + // populate secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/x/y/z", + "app-1/nested/x/y", + "app-1/nested/bar", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + } + + type treePath struct { + path string + directory bool + } + + cases := map[string]struct { + path string + expected []treePath + expectedError bool + }{ + 
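// Each expected slice encodes the deterministic DFS visit order: keys are sorted lexically at each level, and a name that is both a leaf and a directory (e.g. "x/y") is visited as a leaf first, since the list response returns "y" before "y/". +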
"kv-v1-simple": { + path: "kv-v1/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + path: "kv-v2/metadata/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + path: "kv-v1/app-1/nested/", + expected: []treePath{ + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + path: "kv-v2/metadata/app-1/nested/", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-all": { + path: "kv-v1", + expected: []treePath{ + {path: "kv-v1/app-1", directory: true}, + {path: "kv-v1/app-1/bar", directory: false}, + {path: "kv-v1/app-1/foo", directory: false}, + {path: "kv-v1/app-1/nested", directory: true}, + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + {path: "kv-v1/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-all": { + path: "kv-v2/metadata", + expected: []treePath{ + {path: "kv-v2/metadata/app-1", directory: true}, + {path: "kv-v2/metadata/app-1/bar", directory: false}, + {path: "kv-v2/metadata/app-1/foo", directory: false}, + {path: "kv-v2/metadata/app-1/nested", directory: true}, + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + {path: "kv-v2/metadata/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-not-found": { + path: "kv-v1/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-found": { + path: "kv-v2/metadata/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v1-not-listable-leaf-node": { + path: "kv-v1/foo", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-listable-leaf-node": { + path: "kv-v2/metadata/foo", + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var descendants []treePath + + err := walkSecretsTree(ctx, client, tc.path, func(path string, directory bool) error { + descendants = append(descendants, treePath{ + path: path, + directory: directory, + }) + return nil + }) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, descendants) { + t.Fatalf("unexpected list output; want: %v, got: %v", tc.expected, descendants) + } + } + }) + } +} diff --git a/command/kv_list.go b/command/kv_list.go index 
b6b665c6f55e..4e19d9d7ae3b 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -1,10 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" + "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -15,6 +19,7 @@ var ( type KVListCommand struct { *BaseCommand + flagMount string } func (c *KVListCommand) Synopsis() string { @@ -40,7 +45,23 @@ Usage: vault kv list [options] PATH } func (c *KVListCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /metadata/ automatically appended between KV + v2 secrets.`, + }) + + return set } func (c *KVListCommand) AutocompleteArgs() complete.Predictor { @@ -62,8 +83,11 @@ func (c *KVListCommand) Run(args []string) int { args = f.Args() switch { case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 + if c.flagMount == "" { + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + } + args = []string{""} case len(args) > 1: c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) return 1 @@ -75,31 +99,56 @@ func (c *KVListCommand) Run(args []string) int { return 2 } - // Append trailing slash - path := args[0] - if !strings.HasSuffix(path, "/") { - path += "/" - } - - // Sanitize path - path = sanitizePath(path) - mountPath, v2, err := isKVv2(path, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } - if v2 { - path = addPrefixToKVPath(path, mountPath, "metadata") + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g.
"secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) if err != nil { c.UI.Error(err.Error()) return 2 } } - secret, err := client.Logical().List(path) + // Add /metadata to v2 paths only + var fullPath string + if v2 { + fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata", false) + } else { + // v1 + if mountFlagSyntax { + fullPath = path.Join(mountPath, partialPath) + } else { + fullPath = partialPath + } + } + + secret, err := client.Logical().List(fullPath) if err != nil { - c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) + c.UI.Error(fmt.Sprintf("Error listing %s: %s", fullPath, err)) return 2 } @@ -117,12 +166,12 @@ func (c *KVListCommand) Run(args []string) int { } if secret == nil || secret.Data == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", path)) + c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) return 2 } if !ok { - c.UI.Error(fmt.Sprintf("No entries found at %s", path)) + c.UI.Error(fmt.Sprintf("No entries found at %s", fullPath)) return 2 } diff --git a/command/kv_metadata.go b/command/kv_metadata.go index c4ab37910555..14e1b5bfa3a5 100644 --- a/command/kv_metadata.go +++ b/command/kv_metadata.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*KVMetadataCommand)(nil) diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index cff16f21c6fe..6f672fc6aeac 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -135,7 +138,7 @@ func (c *KVMetadataDeleteCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) if secret, err := client.Logical().Delete(fullPath); err != nil { c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) if secret != nil { diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index 8920340752d5..2722c330efe0 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,7 @@ import ( "strconv" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -137,7 +140,7 @@ func (c *KVMetadataGetCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) secret, err := client.Logical().Read(fullPath) if err != nil { c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) diff --git a/command/kv_metadata_patch.go b/command/kv_metadata_patch.go index 11ffdb4bea26..ff59c12fb7c7 100644 --- a/command/kv_metadata_patch.go +++ b/command/kv_metadata_patch.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -8,7 +11,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -208,7 +211,7 @@ func (c *KVMetadataPatchCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) data := make(map[string]interface{}, 0) diff --git a/command/kv_metadata_patch_test.go b/command/kv_metadata_patch_test.go index 3b15c520294c..1dc0e123773e 100644 --- a/command/kv_metadata_patch_test.go +++ b/command/kv_metadata_patch_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,8 +10,8 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVMetadataPatchCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPatchCommand) { diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 5196b1c79a0a..5b8124229de9 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -196,7 +199,7 @@ func (c *KVMetadataPutCommand) Run(args []string) int { return 1 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata") + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) data := map[string]interface{}{} if c.flagMaxVersions >= 0 { diff --git a/command/kv_metadata_put_test.go b/command/kv_metadata_put_test.go index a952802cc469..50f815ac4878 100644 --- a/command/kv_metadata_put_test.go +++ b/command/kv_metadata_put_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVMetadataPutCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPutCommand) { @@ -142,7 +145,6 @@ func TestKvMetadataPutCommand_CustomMetadata(t *testing.T) { } metadata, err = client.Logical().Read(metaFullPath) - if err != nil { t.Fatalf("Metadata read error: %#v", err) } diff --git a/command/kv_patch.go b/command/kv_patch.go index 8313ec33487c..791273558e4e 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -8,8 +11,8 @@ import ( "path" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -213,11 +216,11 @@ func (c *KVPatchCommand) Run(args []string) int { } if !v2 { - c.UI.Error("K/V engine mount must be version 2 for patch support") + c.UI.Error("KV engine mount must be version 2 for patch support") return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data") + fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) if err != nil { c.UI.Error(err.Error()) return 2 @@ -261,6 +264,11 @@ func (c *KVPatchCommand) Run(args []string) int { return PrintRawField(c.UI, secret, c.flagField) } + // If the secret is wrapped, return the wrapped response. 
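+ // (WrapInfo is only populated when the caller requested response wrapping, e.g. via -wrap-ttl; the response then carries a single-use wrapping token rather than the secret data, so it is printed as-is)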
+ if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + if Format(c.UI) == "table" { outputPath(c.UI, fullPath, "Secret Path") metadata := secret.Data diff --git a/command/kv_put.go b/command/kv_put.go index 5cc7b6fbc67b..b51705791f51 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -178,7 +181,7 @@ func (c *KVPutCommand) Run(args []string) int { // Add /data to v2 paths only var fullPath string if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data") + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) data = map[string]interface{}{ "data": data, "options": map[string]interface{}{}, @@ -216,6 +219,11 @@ func (c *KVPutCommand) Run(args []string) int { return PrintRawField(c.UI, secret, c.flagField) } + // If the secret is wrapped, return the wrapped response. + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + if Format(c.UI) == "table" { outputPath(c.UI, fullPath, "Secret Path") metadata := secret.Data diff --git a/command/kv_rollback.go b/command/kv_rollback.go index 0d782619a832..d1f23caee05f 100644 --- a/command/kv_rollback.go +++ b/command/kv_rollback.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,7 +9,7 @@ import ( "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -157,11 +160,11 @@ func (c *KVRollbackCommand) Run(args []string) int { } if !v2 { - c.UI.Error("K/V engine mount must be version 2 for rollback support") + c.UI.Error("KV engine mount must be version 2 for rollback support") return 2 } - fullPath := addPrefixToKVPath(partialPath, mountPath, "data") + fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) if err != nil { c.UI.Error(err.Error()) return 2 diff --git a/command/kv_test.go b/command/kv_test.go index 830c9cc30770..c5ca555be6be 100644 --- a/command/kv_test.go +++ b/command/kv_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -8,8 +11,8 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testKVPutCommand(tb testing.TB) (*cli.MockUi, *KVPutCommand) { @@ -590,6 +593,131 @@ func TestKVGetCommand(t *testing.T) { }) } +func testKVListCommand(tb testing.TB) (*cli.MockUi, *KVListCommand) { + tb.Helper() + ui := cli.NewMockUi() + cmd := &KVListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } + + return ui, cmd +} + +// TestKVListCommand runs tests for `vault kv list` +func TestKVListCommand(t *testing.T) { + testCases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + name: "default", + args: []string{"kv/my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "not_enough_args", + args: []string{}, + outStrings: []string{"Not enough arguments"}, + code: 1, + }, + { + name: "v2_default_with_mount", + args: []string{"-mount", "kv", "my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "v1_default_with_mount", + args: []string{"kv/my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "v2_not_found", + args: []string{"kv/nope/not/once/never"}, + outStrings: []string{"No value found at kv/metadata/nope/not/once/never"}, + code: 2, + }, + { + name: "v1_mount_only", + args: []string{"kv"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + { + name: "v2_mount_only", + args: []string{"-mount", "kv"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + { + // this is behavior that should be tested + // `kv` here is an explicit mount + // `my-prefix` is not + // the current kv code will ignore `my-prefix` + name: "ignore_multi_part_mounts", + args: []string{"-mount", "kv/my-prefix"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // test setup + client, closer := testVaultServer(t) + defer closer() + + // enable kv-v2 backend + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + ctx := context.Background() + for i := 0; i < 3; i++ { + path := fmt.Sprintf("my-prefix/secret-%d", i) + _, err := client.KVv2("kv/").Put(ctx, path, map[string]interface{}{ + "foo": "bar", + }) + if err != nil { + t.Fatal(err) + } + } + + ui, cmd := testKVListCommand(t) + cmd.client = client + + code := cmd.Run(testCase.args) + if code != testCase.code { + t.Errorf("expected %d to be %d", code, testCase.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + for _, str := range testCase.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) +} + func testKVMetadataGetCommand(tb testing.TB) (*cli.MockUi, *KVMetadataGetCommand) { tb.Helper() @@ -1395,6 +1523,156 @@ func TestPadEqualSigns(t *testing.T) { } } +func testKVUndeleteCommand(tb testing.TB) (*cli.MockUi, *KVUndeleteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVUndeleteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKVUndeleteCommand(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + outStrings []string + code int 
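+ // outStrings must all appear in the combined UI output; code is the exit code expected from Run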
+ }{ + { + "not_enough_args", + []string{}, + []string{"Not enough arguments"}, + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + []string{"Too many arguments"}, + 1, + }, + { + "no_versions", + []string{"-mount", "kv", "/read/foo"}, + []string{"No versions provided"}, + 1, + }, + { + "v2_mount_flag_syntax", + []string{"-versions", "1", "-mount", "kv", "read/foo"}, + []string{"Success! Data written to: kv/undelete/read/foo"}, + 0, + }, + { + "v2_mount_flag_syntax_complex_1", + []string{"-versions", "1", "-mount", "secrets/testapp", "test"}, + []string{"Success! Data written to: secrets/testapp/undelete/test"}, + 0, + }, + { + "v2_mount_flag_syntax_complex_2", + []string{"-versions", "1", "-mount", "secrets/x/testapp", "test"}, + []string{"Success! Data written to: secrets/x/testapp/undelete/test"}, + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + if err := client.Sys().Mount("secrets/testapp", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Additional layer of mount path + if err := client.Sys().Mount("secrets/x/testapp", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Give time for the upgrade code to run/finish + time.Sleep(time.Second) + + if _, err := client.Logical().Write("kv/data/read/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("kv/data/read/foo"); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("secrets/testapp/data/test", map[string]interface{}{ + "data": map[string]interface{}{ + "complex": "yes", + }, + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("secrets/x/testapp/data/test", map[string]interface{}{ + "data": map[string]interface{}{ + "complex": "yes", + }, + }); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("secrets/x/testapp/data/test"); err != nil { + t.Fatal(err) + } + + // Delete the entry so we can undelete it + if _, err := client.Logical().Delete("secrets/testapp/data/test"); err != nil { + t.Fatal(err) + } + + ui, cmd := testKVUndeleteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) +} + func createTokenForPolicy(t *testing.T, client *api.Client, policy string) (*api.SecretAuth, error) { t.Helper() diff --git a/command/kv_undelete.go b/command/kv_undelete.go index 90ea608a7316..7d438387193b 100644 --- a/command/kv_undelete.go +++ b/command/kv_undelete.go @@ -1,10 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" + "path" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -32,12 +36,12 @@ Usage: vault kv undelete [options] KEY This restores the data, allowing it to be returned on get requests. 
To undelete version 3 of key "foo": - + $ vault kv undelete -mount=secret -versions=3 foo - The deprecated path-like syntax can also be used, but this should be avoided, - as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: + The deprecated path-like syntax can also be used, but this should be avoided, + as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: $ vault kv undelete -versions=3 secret/foo @@ -64,10 +68,10 @@ func (c *KVUndeleteCommand) Flags() *FlagSets { Name: "mount", Target: &c.flagMount, Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV v2 secrets.`, }) @@ -131,6 +135,14 @@ func (c *KVUndeleteCommand) Run(args []string) int { c.UI.Error(err.Error()) return 2 } + if v2 { + // Without this join, mountPaths that are deeper + // than the root path E.G. secrets/myapp will get + // pruned down to myapp/undelete/ which + // is incorrect. + // This technique was lifted from kv_delete.go. + partialPath = path.Join(mountPath, partialPath) + } } else { // In this case, this arg is a path-like combination of mountPath/secretPath. // (e.g. "secret/foo") @@ -147,7 +159,7 @@ func (c *KVUndeleteCommand) Run(args []string) int { return 1 } - undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete") + undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete", false) data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } diff --git a/command/lease.go b/command/lease.go index 76f6cc174c41..3e0817ffd0d8 100644 --- a/command/lease.go +++ b/command/lease.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*LeaseCommand)(nil) diff --git a/command/lease_lookup.go b/command/lease_lookup.go index c72c6a174f2c..51b7aab60415 100644 --- a/command/lease_lookup.go +++ b/command/lease_lookup.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/lease_lookup_test.go b/command/lease_lookup_test.go index 4de63200f5ce..2c9b81caf5fa 100644 --- a/command/lease_lookup_test.go +++ b/command/lease_lookup_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseLookupCommand(tb testing.TB) (*cli.MockUi, *LeaseLookupCommand) { diff --git a/command/lease_renew.go b/command/lease_renew.go index 13eb95ed0093..b0671c379682 100644 --- a/command/lease_renew.go +++ b/command/lease_renew.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/lease_renew_test.go b/command/lease_renew_test.go index aa3b32d0d8b3..eac098fe4634 100644 --- a/command/lease_renew_test.go +++ b/command/lease_renew_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseRenewCommand(tb testing.TB) (*cli.MockUi, *LeaseRenewCommand) { diff --git a/command/lease_revoke.go b/command/lease_revoke.go index 1fc90eff7cb5..59a09de597e8 100644 --- a/command/lease_revoke.go +++ b/command/lease_revoke.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/lease_revoke_test.go b/command/lease_revoke_test.go index 1aa58c38ac76..aeb9987e7ddd 100644 --- a/command/lease_revoke_test.go +++ b/command/lease_revoke_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testLeaseRevokeCommand(tb testing.TB) (*cli.MockUi, *LeaseRevokeCommand) { diff --git a/command/list.go b/command/list.go index 9831b6633c8c..6505f76af8c3 100644 --- a/command/list.go +++ b/command/list.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -78,13 +81,7 @@ func (c *ListCommand) Run(args []string) int { return 2 } - // Append trailing slash - path := args[0] - if !strings.HasSuffix(path, "/") { - path += "/" - } - - path = sanitizePath(path) + path := sanitizePath(args[0]) secret, err := client.Logical().List(path) if err != nil { c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err)) diff --git a/command/list_test.go b/command/list_test.go index b1b6680507f1..e7a870d7ffb6 100644 --- a/command/list_test.go +++ b/command/list_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testListCommand(tb testing.TB) (*cli.MockUi, *ListCommand) { diff --git a/command/log_flags.go b/command/log_flags.go index 8b5e8fef77e5..cbde95d33088 100644 --- a/command/log_flags.go +++ b/command/log_flags.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "flag" "os" - "strings" + "strconv" "github.com/hashicorp/vault/internalshared/configutil" "github.com/posener/complete" ) @@ -12,23 +15,22 @@ import ( // logFlags are the 'log' related flags that can be shared across commands. type logFlags struct { flagCombineLogs bool + flagDisableGatedLogs bool flagLogLevel string flagLogFormat string flagLogFile string - flagLogRotateBytes string + flagLogRotateBytes int flagLogRotateDuration string - flagLogRotateMaxFiles string + flagLogRotateMaxFiles int } -type provider = func(key string) (string, bool) - // valuesProvider has the intention of providing a way to supply a func with a // way to retrieve values for flags and environment variables without having to -// directly call a specific implementation. The reasoning for its existence is -// to facilitate testing. +// directly call a specific implementation. +// The reasoning for its existence is to facilitate testing. type valuesProvider struct { - flagProvider provider - envVarProvider provider + flagProvider func(string) (flag.Value, bool) + envVarProvider func(string) (string, bool) } // addLogFlags will add the set of 'log' related flags to a flag set. @@ -40,6 +42,13 @@ func (f *FlagSet) addLogFlags(l *logFlags) { Hidden: true, }) + f.BoolVar(&BoolVar{ + Name: flagDisableGatedLogs, + Target: &l.flagDisableGatedLogs, + Default: false, + Hidden: true, + }) + f.StringVar(&StringVar{ Name: flagNameLogLevel, Target: &l.flagLogLevel, @@ -65,7 +74,7 @@ Usage: "Path to the log file that Vault should use for logging", }) - f.StringVar(&StringVar{ + f.IntVar(&IntVar{ Name: flagNameLogRotateBytes, Target: &l.flagLogRotateBytes, Usage: "Number of bytes that should be written to a log before it needs to be rotated. " + @@ -79,23 +88,34 @@ "Must be a duration value such as 30s", }) - f.StringVar(&StringVar{ + f.IntVar(&IntVar{ Name: flagNameLogRotateMaxFiles, Target: &l.flagLogRotateMaxFiles, Usage: "The maximum number of older log file archives to keep", }) } -// getValue will attempt to find the flag with the corresponding flag name (key) -// and return the value along with a bool representing whether of not the flag had been found/set. -func (f *FlagSets) getValue(flagName string) (string, bool) { - var result string +// envVarValue attempts to get a named value from the environment variables. +// The value will be returned as a string along with a boolean value indicating +// to the caller whether the named env var existed. +func envVarValue(key string) (string, bool) { + if key == "" { + return "", false + } + return os.LookupEnv(key) +} + +// flagValue attempts to find the named flag in a set of FlagSets. +// The flag.Value is returned if it was specified, and the boolean value indicates +// to the caller if the flag was specified by the end user.
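+// Note that flag.Visit only walks flags that were actually set on the command +// line, so a flag left at its default value is reported as not specified.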
+func (f *FlagSets) flagValue(flagName string) (flag.Value, bool) { + var result flag.Value var isFlagSpecified bool if f != nil { f.Visit(func(fl *flag.Flag) { if fl.Name == flagName { - result = fl.Value.String() + result = fl.Value isFlagSpecified = true } }) @@ -104,51 +124,63 @@ func (f *FlagSets) getValue(flagName string) (string, bool) { return result, isFlagSpecified } -// getAggregatedConfigValue uses the provided keys to check CLI flags and environment +// overrideValue uses the provided keys to check CLI flags and environment // variables for values that may be used to override any specified configuration. -// If nothing can be found in flags/env vars or config, the 'fallback' (default) value will be provided. -func (p *valuesProvider) getAggregatedConfigValue(flagKey, envVarKey, current, fallback string) string { +func (p *valuesProvider) overrideValue(flagKey, envVarKey string) (string, bool) { var result string - current = strings.TrimSpace(current) + found := true flg, flgFound := p.flagProvider(flagKey) env, envFound := p.envVarProvider(envVarKey) switch { case flgFound: - result = flg + result = flg.String() case envFound: - // Use value from env var result = env - case current != "": - // Use value from config - result = current default: - // Use the default value - result = fallback + found = false } - return result + return result, found } -// updateLogConfig will accept a shared config and specifically attempt to update the 'log' related config keys. -// For each 'log' key we aggregate file config/env vars and CLI flags to select the one with the highest precedence. +// applyLogConfigOverrides will accept a shared config and specifically attempt to update the 'log' related config keys. +// For each 'log' key, we aggregate file config, env vars and CLI flags to select the one with the highest precedence. // This method mutates the config object passed into it. 
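+// Precedence is highest to lowest: CLI flag, then environment variable; a value +// already present in the parsed config file is kept only when neither override is set. +// For example, with log_level = "debug" in the config file and VAULT_LOG_LEVEL=info +// in the environment, the effective level is "info" unless -log-level is also passed.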
-func (f *FlagSets) updateLogConfig(config *configutil.SharedConfig) { +func (f *FlagSets) applyLogConfigOverrides(config *configutil.SharedConfig) { p := &valuesProvider{ - flagProvider: func(key string) (string, bool) { return f.getValue(key) }, - envVarProvider: func(key string) (string, bool) { - if key == "" { - return "", false - } - return os.LookupEnv(key) - }, + flagProvider: f.flagValue, + envVarProvider: envVarValue, + } + + // Update log level + if val, found := p.overrideValue(flagNameLogLevel, EnvVaultLogLevel); found { + config.LogLevel = val + } + + // Update log format + if val, found := p.overrideValue(flagNameLogFormat, EnvVaultLogFormat); found { + config.LogFormat = val + } + + // Update log file name + if val, found := p.overrideValue(flagNameLogFile, ""); found { + config.LogFile = val + } + + // Update log rotation duration + if val, found := p.overrideValue(flagNameLogRotateDuration, ""); found { + config.LogRotateDuration = val } - config.LogLevel = p.getAggregatedConfigValue(flagNameLogLevel, EnvVaultLogLevel, config.LogLevel, "info") - config.LogFormat = p.getAggregatedConfigValue(flagNameLogFormat, EnvVaultLogFormat, config.LogFormat, "") - config.LogFile = p.getAggregatedConfigValue(flagNameLogFile, "", config.LogFile, "") - config.LogRotateDuration = p.getAggregatedConfigValue(flagNameLogRotateDuration, "", config.LogRotateDuration, "") - config.LogRotateBytes = p.getAggregatedConfigValue(flagNameLogRotateBytes, "", config.LogRotateBytes, "") - config.LogRotateMaxFiles = p.getAggregatedConfigValue(flagNameLogRotateMaxFiles, "", config.LogRotateMaxFiles, "") + // Update log max files + if val, found := p.overrideValue(flagNameLogRotateMaxFiles, ""); found { + config.LogRotateMaxFiles, _ = strconv.Atoi(val) + } + + // Update log rotation max bytes + if val, found := p.overrideValue(flagNameLogRotateBytes, ""); found { + config.LogRotateBytes, _ = strconv.Atoi(val) + } } diff --git a/command/log_flags_test.go b/command/log_flags_test.go index d4924f7360a3..1e54397f87fc 100644 --- a/command/log_flags_test.go +++ b/command/log_flags_test.go @@ -1,6 +1,10 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "flag" "testing" "github.com/stretchr/testify/assert" @@ -10,66 +14,81 @@ func TestLogFlags_ValuesProvider(t *testing.T) { cases := map[string]struct { flagKey string envVarKey string - current string - fallback string - want string + wantValue string + wantFound bool }{ - "only-fallback": { - flagKey: "invalid", - envVarKey: "invalid", - current: "", - fallback: "foo", - want: "foo", - }, - "only-config": { - flagKey: "invalid", - envVarKey: "invalid", - current: "bar", - fallback: "", - want: "bar", - }, "flag-missing": { flagKey: "invalid", envVarKey: "valid-env-var", - current: "my-config-value1", - fallback: "", - want: "envVarValue", + wantValue: "envVarValue", + wantFound: true, }, "envVar-missing": { flagKey: "valid-flag", envVarKey: "invalid", - current: "my-config-value1", - fallback: "", - want: "flagValue", + wantValue: "flagValue", + wantFound: true, }, "all-present": { flagKey: "valid-flag", envVarKey: "valid-env-var", - current: "my-config-value1", - fallback: "foo", - want: "flagValue", + wantValue: "flagValue", + wantFound: true, }, + "all-missing": { + flagKey: "invalid", + envVarKey: "invalid", + wantValue: "", + wantFound: false, + }, + } + + // Sneaky little fake providers + flagFaker := func(key string) (flag.Value, bool) { + var result fakeFlag + var found bool + + if key == "valid-flag" { + result.Set("flagValue") + found = true + } + + return &result, found } - // Sneaky little fake provider - fakeProvider := func(key string) (string, bool) { - switch key { - case "valid-flag": - return "flagValue", true - case "valid-env-var": - return "envVarValue", true + envFaker := func(key string) (string, bool) { + var found bool + var result string + + if key == "valid-env-var" { + result = "envVarValue" + found = true } - return "", false + return result, found } vp := valuesProvider{ - flagProvider: fakeProvider, - envVarProvider: fakeProvider, + flagProvider: flagFaker, + envVarProvider: envFaker, } - for _, tc := range cases { - got := vp.getAggregatedConfigValue(tc.flagKey, tc.envVarKey, tc.current, tc.fallback) - assert.Equal(t, tc.want, got) + for name, tc := range cases { + val, found := vp.overrideValue(tc.flagKey, tc.envVarKey) + assert.Equal(t, tc.wantFound, found, name) + assert.Equal(t, tc.wantValue, val, name) } } + +type fakeFlag struct { + value string +} + +func (v *fakeFlag) String() string { + return v.value +} + +func (v *fakeFlag) Set(raw string) error { + v.value = raw + return nil +} diff --git a/command/login.go b/command/login.go index 30352abbb4e5..6252b7219df8 100644 --- a/command/login.go +++ b/command/login.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( diff --git a/command/login_test.go b/command/login_test.go index 56ed790f3427..b8afeffbf2f6 100644 --- a/command/login_test.go +++ b/command/login_test.go @@ -1,10 +1,16 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "context" + "regexp" "strings" "testing" + "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" credToken "github.com/hashicorp/vault/builtin/credential/token" @@ -37,414 +43,445 @@ func testLoginCommand(tb testing.TB) (*cli.MockUi, *LoginCommand) { } } -func TestLoginCommand_Run(t *testing.T) { +func TestCustomPath(t *testing.T) { t.Parallel() - t.Run("custom_path", func(t *testing.T) { - t.Parallel() + client, closer := testVaultServer(t) + defer closer() - client, closer := testVaultServer(t) - defer closer() + if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + ui, cmd := testLoginCommand(t) + cmd.client = client - ui, cmd := testLoginCommand(t) - cmd.client = client + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + // Emulate an unknown token format present in ~/.vault-token, for example + client.SetToken("a.a") - // Emulate an unknown token format present in ~/.vault-token, for example - client.SetToken("a.a") + code := cmd.Run([]string{ + "-method", "userpass", + "-path", "my-auth", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - code := cmd.Run([]string{ - "-method", "userpass", - "-path", "my-auth", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + expected := "Success! You are now authenticated." + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to be %q", combined, expected) + } - expected := "Success! You are now authenticated." 
- combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to be %q", combined, expected) - } + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } + if l, exp := len(storedToken), minTokenLengthExternal+vault.TokenPrefixLength; l < exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken) + } +} - if l, exp := len(storedToken), minTokenLengthExternal+vault.TokenPrefixLength; l < exp { - t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken) - } +// Do not persist the token to the token helper +func TestNoStore(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken - t.Run("no_store", func(t *testing.T) { - t.Parallel() + _, cmd := testLoginCommand(t) + cmd.client = client - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", - }) - if err != nil { - t.Fatal(err) - } - token := secret.Auth.ClientToken + // Ensure we have no token to start + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Errorf("expected token helper to be empty: %s: %q", err, storedToken) + } - _, cmd := testLoginCommand(t) - cmd.client = client + code := cmd.Run([]string{ + "-no-store", + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } - // Ensure we have no token to start - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - t.Errorf("expected token helper to be empty: %s: %q", err, storedToken) - } + if exp := ""; storedToken != exp { + t.Errorf("expected %q to be %q", storedToken, exp) + } +} - code := cmd.Run([]string{ - "-no-store", - token, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } +func TestStores(t *testing.T) { + t.Parallel() - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } + client, closer := testVaultServer(t) + defer closer() - if exp := ""; storedToken != exp { - t.Errorf("expected %q to be %q", storedToken, exp) - } + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken - t.Run("stores", func(t *testing.T) { - t.Parallel() + _, cmd := testLoginCommand(t) + cmd.client = client - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", - }) - if err != nil { - t.Fatal(err) - } - token := secret.Auth.ClientToken + code := cmd.Run([]string{ + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - _, cmd := testLoginCommand(t) - cmd.client = client + storedToken, err := tokenHelper.Get() + 
if err != nil { + t.Fatal(err) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + if storedToken != token { + t.Errorf("expected %q to be %q", storedToken, token) + } +} - code := cmd.Run([]string{ - token, - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } +func TestTokenOnly(t *testing.T) { + t.Parallel() - storedToken, err := tokenHelper.Get() - if err != nil { - t.Fatal(err) - } + client, closer := testVaultServer(t) + defer closer() - if storedToken != token { - t.Errorf("expected %q to be %q", storedToken, token) - } - }) + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - t.Run("token_only", func(t *testing.T) { - t.Parallel() + ui, cmd := testLoginCommand(t) + cmd.client = client - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - ui, cmd := testLoginCommand(t) - cmd.client = client + // Verify only the token was printed + token := ui.OutputWriter.String() + if l, exp := len(token), minTokenLengthExternal+vault.TokenPrefixLength; l != exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + // Verify the token was not stored + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } +} - code := cmd.Run([]string{ - "-token-only", - "-method", "userpass", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } +func TestFailureNoStore(t *testing.T) { + t.Parallel() - // Verify only the token was printed - token := ui.OutputWriter.String() - if l, exp := len(token), minTokenLengthExternal+vault.TokenPrefixLength; l != exp { - t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token) - } + client, closer := testVaultServer(t) + defer closer() - // Verify the token was not stored - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + ui, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "not-a-real-token", }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - t.Run("failure_no_store", func(t *testing.T) { - t.Parallel() + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } - client, closer := testVaultServer(t) - defer closer() + if storedToken, err := tokenHelper.Get(); err != nil || storedToken 
!= "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } +} - ui, cmd := testLoginCommand(t) - cmd.client = client +func TestWrapAutoUnwrap(t *testing.T) { + t.Parallel() - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } + client, closer := testVaultServer(t) + defer closer() - code := cmd.Run([]string{ - "not-a-real-token", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - expected := "Error authenticating: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } + _, cmd := testLoginCommand(t) + cmd.client = client - if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + + code := cmd.Run([]string{ + "-method", "userpass", + "username=test", + "password=test", }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - t.Run("wrap_auto_unwrap", func(t *testing.T) { - t.Parallel() + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + token, err := tokenHelper.Get() + if err != nil || token == "" { + t.Fatalf("expected token from helper: %s: %q", err, token) + } + client.SetToken(token) - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + // Ensure the resulting token is unwrapped + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Error(err) + } + if secret == nil { + t.Fatal("secret was nil") + } - _, cmd := testLoginCommand(t) - cmd.client = client + if secret.WrapInfo != nil { + t.Errorf("expected to be unwrapped: %#v", secret) + } +} - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed. 
- client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) +func TestWrapTokenOnly(t *testing.T) { + t.Parallel() - code := cmd.Run([]string{ - "-method", "userpass", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + client, closer := testVaultServer(t) + defer closer() - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - token, err := tokenHelper.Get() - if err != nil || token == "" { - t.Fatalf("expected token from helper: %s: %q", err, token) - } - client.SetToken(token) + ui, cmd := testLoginCommand(t) + cmd.client = client - // Ensure the resulting token is unwrapped - secret, err := client.Auth().Token().LookupSelf() - if err != nil { - t.Error(err) - } - if secret == nil { - t.Fatal("secret was nil") - } + // Set the wrapping ttl to 5m. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) - if secret.WrapInfo != nil { - t.Errorf("expected to be unwrapped: %#v", secret) - } + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - t.Run("wrap_token_only", func(t *testing.T) { - t.Parallel() + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + token := strings.TrimSpace(ui.OutputWriter.String()) + if token == "" { + t.Errorf("expected %q to not be %q", token, "") + } - ui, cmd := testLoginCommand(t) - cmd.client = client + // Ensure the resulting token is, in fact, still wrapped. + client.SetToken(token) + secret, err := client.Logical().Unwrap("") + if err != nil { + t.Error(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("expected secret to have auth: %#v", secret) + } +} - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed.
- client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) +func TestWrapNoStore(t *testing.T) { + t.Parallel() - code := cmd.Run([]string{ - "-token-only", - "-method", "userpass", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + client, closer := testVaultServer(t) + defer closer() - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := tokenHelper.Get() - if err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + ui, cmd := testLoginCommand(t) + cmd.client = client - token := strings.TrimSpace(ui.OutputWriter.String()) - if token == "" { - t.Errorf("expected %q to not be %q", token, "") - } + // Set the wrapping ttl to 5m. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) - // Ensure the resulting token is, in fact, still wrapped. - client.SetToken(token) - secret, err := client.Logical().Unwrap("") - if err != nil { - t.Error(err) - } - if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { - t.Fatalf("expected secret to have auth: %#v", secret) - } + code := cmd.Run([]string{ + "-no-store", + "-method", "userpass", + "username=test", + "password=test", }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - t.Run("wrap_no_store", func(t *testing.T) { - t.Parallel() + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) - client, closer := testVaultServer(t) - defer closer() + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ - "password": "test", - "policies": "default", - }); err != nil { - t.Fatal(err) - } + expected := "wrapping_token" + output := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(output, expected) { + t.Errorf("expected %q to contain %q", output, expected) + } +} - ui, cmd := testLoginCommand(t) - cmd.client = client +func TestCommunicationFailure(t *testing.T) { + t.Parallel() - // Set the wrapping ttl to 5s. We can't set this via the flag because we - // override the client object before that particular flag is parsed.
- client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + client, closer := testVaultServerBad(t) + defer closer() - code := cmd.Run([]string{ - "-no-store", - "-method", "userpass", - "username=test", - "password=test", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + ui, cmd := testLoginCommand(t) + cmd.client = client - // Unset the wrapping - client.SetWrappingLookupFunc(func(string, string) string { return "" }) + code := cmd.Run([]string{ + "token", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := tokenHelper.Get() - if err != nil || storedToken != "" { - t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) - } + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } +} - expected := "wrapping_token" - output := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(output, expected) { - t.Errorf("expected %q to contain %q", output, expected) - } - }) +func TestNoTabs(t *testing.T) { + t.Parallel() - t.Run("login_mfa_single_phase", func(t *testing.T) { - t.Parallel() + _, cmd := testLoginCommand(t) + assertNoTabs(t, cmd) +} - client, closer := testVaultServer(t) - defer closer() +func TestLoginMFASinglePhase(t *testing.T) { + t.Parallel() - ui, cmd := testLoginCommand(t) + client, closer := testVaultServer(t) + defer closer() - userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client) - cmd.client = userclient + methodName := "foo" + waitPeriod := 5 + userClient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod) + enginePath := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) - enginePath := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + runCommand := func(methodIdentifier string) { + // the time required for the totp engine to generate a new code + time.Sleep(time.Duration(waitPeriod) * time.Second) totpCode := testhelpers.GetTOTPCodeFromEngine(t, client, enginePath) - + ui, cmd := testLoginCommand(t) + cmd.client = userClient // login command bails early for test clients, so we have to explicitly set this - cmd.client.SetMFACreds([]string{methodID + ":" + totpCode}) + cmd.client.SetMFACreds([]string{methodIdentifier + ":" + totpCode}) code := cmd.Run([]string{ "-method", "userpass", "username=testuser1", @@ -462,85 +499,118 @@ func TestLoginCommand_Run(t *testing.T) { if err != nil { t.Fatal(err) } - output = ui.OutputWriter.String() + ui.ErrorWriter.String() - t.Logf("\n%+v", output) + if storedToken == "" { + t.Fatal("expected non-empty stored token") + } + output := ui.OutputWriter.String() if !strings.Contains(output, storedToken) { t.Fatalf("expected stored token: %q, got: %q", storedToken, output) } - }) + } + runCommand(methodID) + runCommand(methodName) +} - t.Run("login_mfa_two_phase", func(t *testing.T) { - t.Parallel() +func TestLoginMFATwoPhase(t *testing.T) { + t.Parallel() - client, closer := testVaultServer(t) - defer closer() + client, closer := testVaultServer(t) + defer closer() - ui, cmd := testLoginCommand(t) + ui, cmd := testLoginCommand(t) - userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client) - cmd.client = userclient + userclient, entityID, methodID := 
testhelpers.SetupLoginMFATOTP(t, client, "", 5) + cmd.client = userclient - _ = testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + _ = testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) - // clear the MFA creds just to be sure - cmd.client.SetMFACreds([]string{}) + // clear the MFA creds just to be sure + cmd.client.SetMFACreds([]string{}) - code := cmd.Run([]string{ - "-method", "userpass", - "username=testuser1", - "password=testpassword", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + code := cmd.Run([]string{ + "-method", "userpass", + "username=testuser1", + "password=testpassword", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - expected := methodID - output = ui.OutputWriter.String() + ui.ErrorWriter.String() - t.Logf("\n%+v", output) - if !strings.Contains(output, expected) { - t.Fatalf("expected stored token: %q, got: %q", expected, output) - } + expected := methodID + output := ui.OutputWriter.String() + if !strings.Contains(output, expected) { + t.Fatalf("expected stored token: %q, got: %q", expected, output) + } - tokenHelper, err := cmd.TokenHelper() - if err != nil { - t.Fatal(err) - } - storedToken, err := tokenHelper.Get() - if storedToken != "" { - t.Fatal("expected empty stored token") - } - if err != nil { - t.Fatal(err) - } - }) + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if storedToken != "" { + t.Fatal("expected empty stored token") + } + if err != nil { + t.Fatal(err) + } +} - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() +func TestLoginMFATwoPhaseNonInteractiveMethodName(t *testing.T) { + t.Parallel() - client, closer := testVaultServerBad(t) - defer closer() + client, closer := testVaultServer(t) + defer closer() - ui, cmd := testLoginCommand(t) - cmd.client = client + ui, cmd := testLoginCommand(t) - code := cmd.Run([]string{ - "token", + methodName := "foo" + waitPeriod := 5 + userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod) + cmd.client = userclient + + engineName := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + + // clear the MFA creds just to be sure + cmd.client.SetMFACreds([]string{}) + + code := cmd.Run([]string{ + "-method", "userpass", + "-non-interactive", + "username=testuser1", + "password=testpassword", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + output := ui.OutputWriter.String() + + reqIdReg := regexp.MustCompile(`mfa_request_id\s+([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\s+mfa_constraint`) + reqIDRaw := reqIdReg.FindAllStringSubmatch(output, -1) + if len(reqIDRaw) == 0 || len(reqIDRaw[0]) < 2 { + t.Fatal("failed to find MFA request ID in output") + } + mfaReqID := reqIDRaw[0][1] + + validateFunc := func(methodIdentifier string) { + // the time required for the totp engine to generate a new code + time.Sleep(time.Duration(waitPeriod) * time.Second) + totpPasscode1 := "passcode=" + testhelpers.GetTOTPCodeFromEngine(t, client, engineName) + + secret, err := cmd.client.Logical().WriteWithContext(context.Background(), "sys/mfa/validate", map[string]interface{}{ + "mfa_request_id": mfaReqID, + "mfa_payload": map[string][]string{ + methodIdentifier: {totpPasscode1}, + }, }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) + if err != nil { + t.Fatalf("mfa validation failed: %v", err) }
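Aside: the single-phase login MFA flow exercised by these tests can also be driven directly with the Go API client. A minimal sketch, assuming a reachable Vault server configured through the usual environment variables, with userpass enabled and a TOTP MFA method already set up; the method ID, username, password, and passcode below are placeholders:

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Single-phase MFA: supply "<method ID or name>:<passcode>" up front;
	// the client forwards each entry as an X-Vault-MFA header on requests.
	if err := client.SetMFACreds([]string{"totp-method-id:123456"}); err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("auth/userpass/login/testuser1", map[string]interface{}{
		"password": "testpassword",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client token:", secret.Auth.ClientToken)
}
```

The two-phase flow shown in the surrounding test differs only in that no MFA credentials accompany the login; the server instead returns an MFA requirement whose request ID is later validated against sys/mfa/validate.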
- expected := "Error authenticating: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + if secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("mfa validation did not return a client token") } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() + } - _, cmd := testLoginCommand(t) - assertNoTabs(t, cmd) - }) + validateFunc(methodName) } diff --git a/command/main.go b/command/main.go index 1ac72875062e..d6fad4b6c9ae 100644 --- a/command/main.go +++ b/command/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -12,10 +15,11 @@ import ( "text/tabwriter" "github.com/fatih/color" + "github.com/hashicorp/cli" + hcpvlib "github.com/hashicorp/vault-hcp-lib" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/token" - colorable "github.com/mattn/go-colorable" - "github.com/mitchellh/cli" + "github.com/mattn/go-colorable" ) type VaultUI struct { @@ -131,11 +135,12 @@ func getGlobalFlagValue(arg string) string { } type RunOptions struct { - TokenHelper token.TokenHelper - Stdout io.Writer - Stderr io.Writer - Address string - Client *api.Client + TokenHelper token.TokenHelper + HCPTokenHelper hcpvlib.HCPTokenHelper + Stdout io.Writer + Stderr io.Writer + Address string + Client *api.Client } func Run(args []string) int { @@ -217,14 +222,14 @@ func RunCustom(args []string, runOpts *RunOptions) int { return 1 } - initCommands(ui, serverCmdUi, runOpts) + commands := initCommands(ui, serverCmdUi, runOpts) hiddenCommands := []string{"version"} cli := &cli.CLI{ Name: "vault", Args: args, - Commands: Commands, + Commands: commands, HelpFunc: groupedHelpFunc( cli.BasicHelpFunc("vault"), ), diff --git a/command/monitor.go b/command/monitor.go index e6309258973f..f39ca72a360a 100644 --- a/command/monitor.go +++ b/command/monitor.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/monitor_test.go b/command/monitor_test.go index d10547a8c873..fd1b288fd243 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testMonitorCommand(tb testing.TB) (*cli.MockUi, *MonitorCommand) { @@ -72,14 +74,11 @@ func TestMonitorCommand_Run(t *testing.T) { cmd.client = client cmd.ShutdownCh = shutdownCh - stopCh := testhelpers.GenerateDebugLogs(t, client) - go func() { atomic.StoreInt64(&code, int64(cmd.Run(tc.args))) }() <-time.After(3 * time.Second) - stopCh <- struct{}{} close(shutdownCh) if atomic.LoadInt64(&code) != tc.code { @@ -90,8 +89,6 @@ func TestMonitorCommand_Run(t *testing.T) { if !strings.Contains(combined, tc.out) { t.Fatalf("expected %q to contain %q", combined, tc.out) } - - <-stopCh }) } } diff --git a/command/namespace.go b/command/namespace.go index 702395753da8..c47b26648c89 100644 --- a/command/namespace.go +++ b/command/namespace.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*NamespaceCommand)(nil) diff --git a/command/namespace_api_lock.go b/command/namespace_api_lock.go index 48fec344c741..4193508ec4fe 100644 --- a/command/namespace_api_lock.go +++ b/command/namespace_api_lock.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/namespace" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/namespace_api_unlock.go b/command/namespace_api_unlock.go index 38f4a764d4d5..0c9cd22eadd9 100644 --- a/command/namespace_api_unlock.go +++ b/command/namespace_api_unlock.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/helper/namespace" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/namespace_create.go b/command/namespace_create.go index 7d1f52fa8c9b..6499bf2a25c9 100644 --- a/command/namespace_create.go +++ b/command/namespace_create.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_delete.go b/command/namespace_delete.go index a5d18929368b..e7704ca5cd85 100644 --- a/command/namespace_delete.go +++ b/command/namespace_delete.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_list.go b/command/namespace_list.go index 605c4e32e28f..e8581670edb7 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -37,7 +40,18 @@ Usage: vault namespace list [options] } func (c *NamespaceListCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as namespace ID.", + }) + + return set } func (c *NamespaceListCommand) AutocompleteArgs() complete.Predictor { @@ -101,5 +115,9 @@ func (c *NamespaceListCommand) Run(args []string) int { return 2 } + if c.flagDetailed && Format(c.UI) != "table" { + return OutputData(c.UI, secret.Data["key_info"]) + } + return OutputList(c.UI, secret) } diff --git a/command/namespace_lookup.go b/command/namespace_lookup.go index 98d710ea536e..376a0adc419e 100644 --- a/command/namespace_lookup.go +++ b/command/namespace_lookup.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/namespace_patch.go b/command/namespace_patch.go index 3ae6f6bc8b31..d3868c6134fe 100644 --- a/command/namespace_patch.go +++ b/command/namespace_patch.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "net/http" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator.go b/command/operator.go index ad1bb439fc7d..8d918f3492ac 100644 --- a/command/operator.go +++ b/command/operator.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorCommand)(nil) diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go index 0bfe512d844f..47b6183cc5de 100644 --- a/command/operator_diagnose.go +++ b/command/operator_diagnose.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -11,15 +14,12 @@ import ( "sync" "time" - "golang.org/x/term" - - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - - "github.com/docker/docker/pkg/ioutils" + "github.com/hashicorp/cli" "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" "github.com/hashicorp/go-secure-stdlib/reloadutil" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" cserver "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/helper/metricsutil" @@ -33,9 +33,10 @@ import ( "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/diagnose" "github.com/hashicorp/vault/vault/hcp_link" + "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" "github.com/posener/complete" + "golang.org/x/term" ) const CoreConfigUninitializedErr = "Diagnose cannot attempt this step because core config could not be set." @@ -68,7 +69,7 @@ func (c *OperatorDiagnoseCommand) Synopsis() string { func (c *OperatorDiagnoseCommand) Help() string { helpText := ` -Usage: vault operator diagnose +Usage: vault operator diagnose This command troubleshoots Vault startup issues, such as TLS configuration or auto-unseal. It should be run using the same environment variables and configuration @@ -76,7 +77,7 @@ Usage: vault operator diagnose reproduced. 
Start diagnose with a configuration file: - + $ vault operator diagnose -config=/etc/vault/config.hcl Perform a diagnostic check while Vault is still running: @@ -159,7 +160,7 @@ func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int { if c.diagnose == nil { if c.flagFormat == "json" { - c.diagnose = diagnose.New(&ioutils.NopWriter{}) + c.diagnose = diagnose.New(io.Discard) } else { c.UI.Output(version.GetVersion().FullVersionNumber(true)) c.diagnose = diagnose.New(os.Stdout) @@ -430,31 +431,30 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error }) sealcontext, sealspan := diagnose.StartSpan(ctx, "Create Vault Server Configuration Seals") - var seals []vault.Seal - var sealConfigError error - barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(server, config, make([]string, 0), make(map[string]string)) - // Check error here + var setSealResponse *SetSealResponse + var err error + var existingSealGenerationInfo *seal.SealGenerationInfo + if config.IsMultisealEnabled() { + existingSealGenerationInfo, err = vault.PhysicalSealGenInfo(sealcontext, *backend) + if err != nil { + diagnose.Fail(sealcontext, fmt.Sprintf("Unable to get Seal generation information from storage: %s.", err.Error())) + goto SEALFAIL + } + } + + setSealResponse, err = setSeal(server, config, make([]string, 0), make(map[string]string), existingSealGenerationInfo, false /* unsealed vault has no partially wrapped paths */) if err != nil { diagnose.Advise(ctx, "For assistance with the seal stanza, see the Vault configuration documentation.") diagnose.Fail(sealcontext, fmt.Sprintf("Seal creation resulted in the following error: %s.", err.Error())) goto SEALFAIL } - if sealConfigError != nil { - diagnose.Fail(sealcontext, "Seal could not be configured: seals may already be initialized.") - goto SEALFAIL - } - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. - if seal == nil { - continue - } + for _, seal := range setSealResponse.getCreatedSeals() { seal := seal // capture range variable // Ensure that the seal finalizer is called, even if using verify-only defer func(seal *vault.Seal) { - sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) + sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierSealConfigType().String()) finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") err = (*seal).Finalize(finalizeSealContext) if err != nil { @@ -463,16 +463,26 @@ func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error finalizeSealSpan.End() } finalizeSealSpan.End() - }(&seal) + }(seal) } - if barrierSeal == nil { + if setSealResponse.sealConfigError != nil { + diagnose.Fail(sealcontext, "Seal could not be configured: seals may already be initialized.") + } else if setSealResponse.barrierSeal == nil { diagnose.Fail(sealcontext, "Could not create barrier seal. No error was generated, but it is likely that the seal stanza is misconfigured. 
For guidance, see Vault's configuration documentation on the seal stanza.") } SEALFAIL: sealspan.End() + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + if setSealResponse != nil { + barrierSeal = setSealResponse.barrierSeal + unwrapSeal = setSealResponse.unwrapSeal + } + diagnose.Test(ctx, "Check Transit Seal TLS", func(ctx context.Context) error { var checkSealTransit bool for _, seal := range config.Seals { @@ -529,9 +539,20 @@ SEALFAIL: var secureRandomReader io.Reader // prepare a secure random reader for core randReaderTestName := "Initialize Randomness for Core" - secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + var sources []*configutil.EntropySourcerInfo + if barrierSeal != nil { + for _, sealWrapper := range barrierSeal.GetAccess().GetEnabledSealWrappersByPriority() { + if s, ok := sealWrapper.Wrapper.(entropy.Sourcer); ok { + sources = append(sources, &configutil.EntropySourcerInfo{ + Sourcer: s, + Name: sealWrapper.Name, + }) + } + } + } + secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, sources, server.logger) if err != nil { - return diagnose.SpotError(ctx, randReaderTestName, fmt.Errorf("Could not initialize randomness for core: %w.", err)) + return diagnose.SpotError(ctx, randReaderTestName, fmt.Errorf("could not initialize randomness for core: %w", err)) } diagnose.SpotOk(ctx, randReaderTestName, "") coreConfig = createCoreConfig(server, config, *backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) @@ -673,7 +694,7 @@ SEALFAIL: if barrierSeal == nil { return fmt.Errorf("Diagnose could not create a barrier seal object.") } - if barrierSeal.BarrierType() == wrapping.WrapperTypeShamir { + if barrierSeal.BarrierSealConfigType() == vault.SealConfigTypeShamir { diagnose.Skipped(ctx, "Skipping barrier encryption test. Only supported for auto-unseal.") return nil } @@ -682,11 +703,25 @@ SEALFAIL: return fmt.Errorf("Diagnose could not create unique UUID for unsealing.") } barrierEncValue := "diagnose-" + barrierUUID - ciphertext, err := barrierWrapper.Encrypt(ctx, []byte(barrierEncValue), nil) - if err != nil { - return fmt.Errorf("Error encrypting with seal barrier: %w.", err) + ciphertext, errMap := barrierSeal.GetAccess().Encrypt(ctx, []byte(barrierEncValue), nil) + if len(errMap) > 0 { + var sealErrors []error + for name, err := range errMap { + sealErrors = append(sealErrors, fmt.Errorf("error encrypting with seal %q: %w", name, err)) + } + if ciphertext == nil { + // Full failure + if len(sealErrors) == 1 { + return sealErrors[0] + } else { + return fmt.Errorf("complete seal encryption failure: %w", errors.Join(sealErrors...)) + } + } else { + // Partial failure + return fmt.Errorf("partial seal encryption failure: %w", errors.Join(sealErrors...)) + } } - plaintext, err := barrierWrapper.Decrypt(ctx, ciphertext, nil) + plaintext, _, err := barrierSeal.GetAccess().Decrypt(ctx, ciphertext, nil) if err != nil { return fmt.Errorf("Error decrypting with seal barrier: %w", err) } diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go index 99834b9d48bc..8528637dc2e4 100644 --- a/command/operator_diagnose_test.go +++ b/command/operator_diagnose_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + //go:build !race package command @@ -10,8 +13,9 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/vault/diagnose" - "github.com/mitchellh/cli" ) func testOperatorDiagnoseCommand(tb testing.TB) *OperatorDiagnoseCommand { @@ -37,7 +41,7 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { { "diagnose_ok", []string{ - "-config", "./server/test-fixtures/config_diagnose_ok.hcl", + "-config", "./server/test-fixtures/config_diagnose_ok_singleseal.hcl", }, []*diagnose.Result{ { @@ -169,6 +173,138 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { }, }, }, + { + "diagnose_ok_multiseal", + []string{ + "-config", "./server/test-fixtures/config_diagnose_ok.hcl", + }, + []*diagnose.Result{ + { + Name: "Parse Configuration", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Check Service Discovery", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Check Consul Service Discovery TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Service Discovery", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Create Vault Server Configuration Seals", + // We can't load from storage the existing seal generation info during the test, so we expect an error. 
+ Status: diagnose.ErrorStatus, + }, + { + Name: "Create Core Configuration", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Initialize Randomness for Core", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "HA Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create HA Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check HA Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + }, + }, + { + Name: "Determine Redirect Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Cluster Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Core Creation", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Autounseal Encryption", + Status: diagnose.ErrorStatus, + Message: "Diagnose could not create a barrier seal object.", + }, + { + Name: "Check Server Before Runtime", + Status: diagnose.OkStatus, + }, + }, + }, { "diagnose_raft_problems", []string{ @@ -475,17 +611,23 @@ func TestOperatorDiagnoseCommand_Run(t *testing.T) { for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - client, closer := testVaultServer(t) - defer closer() - cmd := testOperatorDiagnoseCommand(t) - cmd.client = client + if tc.name == "diagnose_ok" && constants.IsEnterprise { + t.Skip("Test not valid in ENT") + } else if tc.name == "diagnose_ok_multiseal" && !constants.IsEnterprise { + t.Skip("Test not valid in community edition") + } else { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + cmd := testOperatorDiagnoseCommand(t) + cmd.client = client - cmd.Run(tc.args) - result := cmd.diagnose.Finalize(context.Background()) + cmd.Run(tc.args) + result := cmd.diagnose.Finalize(context.Background()) - if err := compareResults(tc.expected, result.Children); err != nil { - t.Fatalf("Did not find expected test results: %v", err) + if err := compareResults(tc.expected, result.Children); err != nil { + t.Fatalf("Did not find expected test results: %v", err) + } } }) } diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go index ece541683be1..6a4d7bc9e4d5 100644 --- a/command/operator_generate_root.go +++ b/command/operator_generate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,11 +10,11 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/password" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/sdk/helper/roottoken" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -20,6 +23,7 @@ var ( _ cli.CommandAutocomplete = (*OperatorGenerateRootCommand)(nil) ) +//go:generate enumer -type=generateRootKind -trimprefix=generateRoot type generateRootKind int const ( @@ -46,40 +50,98 @@ type OperatorGenerateRootCommand struct { } func (c *OperatorGenerateRootCommand) Synopsis() string { - return "Generates a new root token" + return "Generates a new root, DR operation, or recovery token" } func (c *OperatorGenerateRootCommand) Help() string { helpText := ` -Usage: vault operator generate-root [options] [KEY] +Usage: vault operator generate-root [options] -init [-otp=...] [-pgp-key=...] + vault operator generate-root [options] [-nonce=... KEY] + vault operator generate-root [options] -decode=... -otp=... + vault operator generate-root [options] -generate-otp + vault operator generate-root [options] -status + vault operator generate-root [options] -cancel + + Generates a new root token by combining a quorum of share holders. + + This command is unusual, as it is effectively six separate subcommands, + selected via the options -init, -decode, -generate-otp, -status, -cancel, + or the absence of any of the previous five options (which selects the + "provide a key share" form). + + With the -dr-token or -recovery-token options, a DR operation token or a + recovery token is generated instead of a root token - the relevant option + must be included in every form of the generate-root command. + + Form 1 (-init) - Start a token generation: + + When starting a root or privileged operation token generation, you must + choose one of the following protection methods for how the token will be + returned: + + - A base64-encoded one-time-password (OTP). The resulting token is XORed + with this value when it is returned. Use the "-decode" form of this + command to output the final value (a sketch of this XOR decoding + appears after Form 2 below). + + The Vault server will generate a suitable OTP for you, and return it: + + $ vault operator generate-root -init + + Vault versions before 0.11.2, released in 2018, required you to + generate your own OTP (see the "-generate-otp" form) and pass it in, + but this is no longer necessary. The command is still supported for + compatibility, though: + + $ vault operator generate-root -init -otp="..." + + - A PGP key. The resulting token is encrypted with this public key. + The key may be specified as a path to a file, or a string of the + form "keybase:" to fetch the key from the keybase.io API. + + $ vault operator generate-root -init -pgp-key="..." + + Form 2 (no option) - Enter an unseal key to progress root token generation: + + In the sub-form intended for interactive use, the command will + automatically look up the nonce of the currently active generation + operation, and will prompt for the key to be entered: + + $ vault operator generate-root + + In the sub-form intended for automation, the operation nonce must be + explicitly provided, and the key is provided directly on the command line: + + $ vault operator generate-root -nonce=... KEY + + If key is specified as "-", the command will read from stdin.
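Aside: the "-decode" form (Form 3, next) reverses the OTP protection from Form 1. Conceptually it is just a base64 decode followed by a byte-wise XOR against the OTP. A minimal stdlib-only sketch; the RawStdEncoding alphabet and the equal-length check are illustrative assumptions, not a statement of the CLI's exact wire format:

```
package example

import (
	"encoding/base64"
	"fmt"
)

// decodeRootToken recovers a token from its OTP-protected encoding by
// base64-decoding it and XORing each byte with the corresponding OTP byte.
func decodeRootToken(encoded, otp string) (string, error) {
	tokenBytes, err := base64.RawStdEncoding.DecodeString(encoded)
	if err != nil {
		return "", fmt.Errorf("error decoding base64 token: %w", err)
	}
	if len(tokenBytes) != len(otp) {
		return "", fmt.Errorf("OTP length (%d) does not match token length (%d)", len(otp), len(tokenBytes))
	}
	for i := range tokenBytes {
		tokenBytes[i] ^= otp[i]
	}
	return string(tokenBytes), nil
}
```

Because XOR is its own inverse, the same routine also describes how the encoded token was produced from the real token in the first place.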
+ + Form 3 (-decode) - Decode a generated token protected with an OTP: + + $ vault operator generate-root -decode=ENCODED_TOKEN -otp=OTP - Generates a new root token by combining a quorum of share holders. One of - the following must be provided to start the root token generation: + If encoded token is specified as "-", the command will read from stdin. - - A base64-encoded one-time-password (OTP) provided via the "-otp" flag. - Use the "-generate-otp" flag to generate a usable value. The resulting - token is XORed with this value when it is returned. Use the "-decode" - flag to output the final value. + Form 4 (-generate-otp) - Generate an OTP code for the final token: - - A file containing a PGP key or a keybase username in the "-pgp-key" - flag. The resulting token is encrypted with this public key. + $ vault operator generate-root -generate-otp - An unseal key may be provided directly on the command line as an argument to - the command. If key is specified as "-", the command will read from stdin. If - a TTY is available, the command will prompt for text. + Since changes in Vault 0.11.2 in 2018, there is no longer any reason to + use this form, as a suitable OTP will be returned as part of the "-init" + command. - Generate an OTP code for the final token: + Form 5 (-status) - Get the status of a token generation that is in progress: - $ vault operator generate-root -generate-otp + $ vault operator generate-root -status - Start a root token generation: + This form also returns the length of a correct OTP, for the running + version and configuration of Vault. - $ vault operator generate-root -init -otp="..." - $ vault operator generate-root -init -pgp-key="..." + Form 6 (-cancel) - Cancel a token generation that is in progress: - Enter an unseal key to progress root token generation: + This would be used to remove an in-progress generation operation, so that + a new one can be started with different parameters. - $ vault operator generate-root -otp="..." + $ vault operator generate-root -cancel ` + c.Flags().Help() return strings.TrimSpace(helpText) @@ -146,7 +208,7 @@ func (c *OperatorGenerateRootCommand) Flags() *FlagSets { Default: false, EnvVar: "", Completion: complete.PredictNothing, - Usage: "Set this flag to do generate root operations on DR Operational " + + Usage: "Set this flag to do generate root operations on DR operation " + "tokens.", }) @@ -156,7 +218,7 @@ Default: false, EnvVar: "", Completion: complete.PredictNothing, - Usage: "Set this flag to do generate root operations on Recovery Operational " + + Usage: "Set this flag to do generate root operations on recovery " + "tokens.", }) @@ -179,7 +241,7 @@ "public PGP key. This can also be specified as a Keybase username " + "using the format \"keybase:\". When supplied, the generated " + "root token will be encrypted and base64-encoded with the given public " + - "key.", + "key. Must be used with \"-init\".", }) f.StringVar(&StringVar{ @@ -188,8 +250,9 @@ Default: "", EnvVar: "", Completion: complete.PredictAnything, - Usage: "Nonce value provided at initialization. The same nonce value " + - "must be provided with each unseal key.", + Usage: "Nonce value returned at initialization. The same nonce value " + + "must be provided with each unseal or recovery key.
Only needed " + + "when providing an unseal or recovery key.", }) return set diff --git a/command/operator_generate_root_test.go b/command/operator_generate_root_test.go index b4489718efbe..e27592d856a7 100644 --- a/command/operator_generate_root_test.go +++ b/command/operator_generate_root_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !race package command @@ -10,9 +13,9 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/sdk/helper/xor" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testOperatorGenerateRootCommand(tb testing.TB) (*cli.MockUi, *OperatorGenerateRootCommand) { diff --git a/command/operator_init.go b/command/operator_init.go index 3b0dfe3de2b5..576f7fe2dc5d 100644 --- a/command/operator_init.go +++ b/command/operator_init.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,9 +9,9 @@ import ( "runtime" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" - "github.com/mitchellh/cli" "github.com/posener/complete" consulapi "github.com/hashicorp/consul/api" diff --git a/command/operator_init_test.go b/command/operator_init_test.go index ec02873587df..73fe4ff59e93 100644 --- a/command/operator_init_test.go +++ b/command/operator_init_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !race package command @@ -10,10 +13,10 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/pgpkeys" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testOperatorInitCommand(tb testing.TB) (*cli.MockUi, *OperatorInitCommand) { diff --git a/command/operator_key_status.go b/command/operator_key_status.go index e015fb0e32f7..015bd891f75f 100644 --- a/command/operator_key_status.go +++ b/command/operator_key_status.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_key_status_test.go b/command/operator_key_status_test.go index 01cb9136286b..ccaac3883081 100644 --- a/command/operator_key_status_test.go +++ b/command/operator_key_status_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorKeyStatusCommand(tb testing.TB) (*cli.MockUi, *OperatorKeyStatusCommand) { diff --git a/command/operator_members.go b/command/operator_members.go index d4bd1fbe4389..83d7a2d4301e 100644 --- a/command/operator_members.go +++ b/command/operator_members.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_migrate.go b/command/operator_migrate.go index a974f58d6cb6..3af73e696382 100644 --- a/command/operator_migrate.go +++ b/command/operator_migrate.go @@ -1,15 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "context" "fmt" "io/ioutil" + "math" "net/url" "os" "sort" "strings" "time" + "github.com/hashicorp/cli" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/hcl" @@ -19,9 +24,9 @@ import ( "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" "github.com/pkg/errors" "github.com/posener/complete" + "golang.org/x/sync/errgroup" ) var ( @@ -39,6 +44,7 @@ type OperatorMigrateCommand struct { flagLogLevel string flagStart string flagReset bool + flagMaxParallel int logger log.Logger ShutdownCh chan struct{} } @@ -98,6 +104,14 @@ func (c *OperatorMigrateCommand) Flags() *FlagSets { Usage: "Reset the migration lock. No migration will occur.", }) + f.IntVar(&IntVar{ + Name: "max-parallel", + Default: 10, + Target: &c.flagMaxParallel, + Usage: "Specifies the maximum number of parallel migration threads (goroutines) that may be used when migrating. " + + "This can speed up the migration process on slow backends but uses more resources.", + }) + f.StringVar(&StringVar{ Name: "log-level", Target: &c.flagLogLevel, @@ -126,7 +140,6 @@ func (c *OperatorMigrateCommand) Run(args []string) int { c.UI.Error(err.Error()) return 1 } - c.flagLogLevel = strings.ToLower(c.flagLogLevel) validLevels := []string{"trace", "debug", "info", "warn", "error"} if !strutil.StrListContains(validLevels, c.flagLogLevel) { @@ -135,6 +148,11 @@ } c.logger = logging.NewVaultLogger(log.LevelFromString(c.flagLogLevel)) + if c.flagMaxParallel < 1 { + c.UI.Error(fmt.Sprintf("Argument to flag -max-parallel must be between 1 and %d", math.MaxInt)) + return 1 + } + if c.flagConfig == "" { c.UI.Error("Must specify exactly one config path using -config") return 1 @@ -164,7 +182,7 @@ } // migrate attempts to instantiate the source and destinations backends, -// and then invoke the migration the the root of the keyspace. +// and then invoke the migration at the root of the keyspace. func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error { from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config) if err != nil { @@ -209,7 +227,7 @@ doneCh := make(chan error) go func() { - doneCh <- c.migrateAll(ctx, from, to) + doneCh <- c.migrateAll(ctx, from, to, c.flagMaxParallel) }() select { @@ -225,8 +243,8 @@ } // migrateAll copies all keys in lexicographic order. -func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error { - return dfsScan(ctx, from, func(ctx context.Context, path string) error { +func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend, maxParallel int) error { + return dfsScan(ctx, from, maxParallel, func(ctx context.Context, path string) error { if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath { return nil } @@ -365,10 +383,20 @@ func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) err // dfsScan will invoke cb with every key from source. // Keys will be traversed in lexicographic, depth-first order.
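Aside: the new -max-parallel flag above feeds dfsScan (whose updated body follows), which bounds concurrency with golang.org/x/sync/errgroup. The core pattern as a standalone sketch; the limit and the fake work items are illustrative:

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	// At most 10 callbacks in flight at once, mirroring -max-parallel's default.
	// eg.Go blocks when the limit is reached, providing natural backpressure.
	eg.SetLimit(10)

	for i := 0; i < 100; i++ {
		i := i // capture the loop variable for the closure
		eg.Go(func() error {
			// A failed sibling cancels ctx for everyone else.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			fmt.Println("migrating key", i)
			return nil
		})
	}
	// Wait returns the first non-nil error from any goroutine, if any.
	if err := eg.Wait(); err != nil {
		fmt.Println("migration failed:", err)
	}
}
```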
-func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.Context, path string) error) error { +func dfsScan(ctx context.Context, source physical.Backend, maxParallel int, cb func(ctx context.Context, path string) error) error { dfs := []string{""} + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(maxParallel) + for l := len(dfs); l > 0; l = len(dfs) { + // Check for cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + key := dfs[len(dfs)-1] if key == "" || strings.HasSuffix(key, "/") { children, err := source.List(ctx, key) @@ -385,19 +413,14 @@ func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.C } } } else { - err := cb(ctx, key) - if err != nil { - return err - } + // Pooling + eg.Go(func() error { + return cb(ctx, key) + }) dfs = dfs[:len(dfs)-1] } - - select { - case <-ctx.Done(): - return nil - default: - } } - return nil + + return eg.Wait() } diff --git a/command/operator_migrate_test.go b/command/operator_migrate_test.go index 5db53ebbfcb9..9a6c27196ebb 100644 --- a/command/operator_migrate_test.go +++ b/command/operator_migrate_test.go @@ -1,16 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "bytes" "context" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" "reflect" "sort" "strings" + "sync" "testing" "time" @@ -18,7 +21,6 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/base62" "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/helper/testhelpers" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" ) @@ -35,8 +37,8 @@ func TestMigration(t *testing.T) { fromFactory := physicalBackends["file"] - folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) - defer os.RemoveAll(folder) + folder := t.TempDir() + confFrom := map[string]string{ "path": folder, } @@ -55,11 +57,10 @@ func TestMigration(t *testing.T) { if err != nil { t.Fatal(err) } - cmd := OperatorMigrateCommand{ logger: log.NewNullLogger(), } - if err := cmd.migrateAll(context.Background(), from, to); err != nil { + if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil { t.Fatal(err) } @@ -68,6 +69,44 @@ func TestMigration(t *testing.T) { } }) + t.Run("Concurrent migration", func(t *testing.T) { + data := generateData() + + fromFactory := physicalBackends["file"] + + folder := t.TempDir() + + confFrom := map[string]string{ + "path": folder, + } + + from, err := fromFactory(confFrom, nil) + if err != nil { + t.Fatal(err) + } + if err := storeData(from, data); err != nil { + t.Fatal(err) + } + + toFactory := physicalBackends["inmem"] + confTo := map[string]string{} + to, err := toFactory(confTo, nil) + if err != nil { + t.Fatal(err) + } + + cmd := OperatorMigrateCommand{ + logger: log.NewNullLogger(), + } + + if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil { + t.Fatal(err) + } + if err := compareStoredData(to, data, ""); err != nil { + t.Fatal(err) + } + }) + t.Run("Start option", func(t *testing.T) { data := generateData() @@ -82,8 +121,46 @@ func TestMigration(t *testing.T) { } toFactory := physicalBackends["file"] - folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) - defer os.RemoveAll(folder) + folder := t.TempDir() + confTo := map[string]string{ + "path": folder, + } + + to, err := toFactory(confTo, nil) + if err != nil { + t.Fatal(err) + } + + const start = "m" + + cmd := OperatorMigrateCommand{ 
+ logger: log.NewNullLogger(), + flagStart: start, + } + if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil { + t.Fatal(err) + } + + if err := compareStoredData(to, data, start); err != nil { + t.Fatal(err) + } + }) + + t.Run("Start option (parallel)", func(t *testing.T) { + data := generateData() + + fromFactory := physicalBackends["inmem"] + confFrom := map[string]string{} + from, err := fromFactory(confFrom, nil) + if err != nil { + t.Fatal(err) + } + if err := storeData(from, data); err != nil { + t.Fatal(err) + } + + toFactory := physicalBackends["file"] + folder := t.TempDir() confTo := map[string]string{ "path": folder, } @@ -99,7 +176,7 @@ func TestMigration(t *testing.T) { logger: log.NewNullLogger(), flagStart: start, } - if err := cmd.migrateAll(context.Background(), from, to); err != nil { + if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil { t.Fatal(err) } @@ -110,9 +187,8 @@ func TestMigration(t *testing.T) { t.Run("Config parsing", func(t *testing.T) { cmd := new(OperatorMigrateCommand) - - cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator")) - ioutil.WriteFile(cfgName, []byte(` + cfgName := filepath.Join(t.TempDir(), "migrator") + os.WriteFile(cfgName, []byte(` storage_source "src_type" { path = "src_path" } @@ -120,7 +196,6 @@ storage_source "src_type" { storage_destination "dest_type" { path = "dest_path" }`), 0o644) - defer os.Remove(cfgName) expCfg := &migratorConfig{ StorageSource: &server.Storage{ @@ -145,7 +220,7 @@ storage_destination "dest_type" { } verifyBad := func(cfg string) { - ioutil.WriteFile(cfgName, []byte(cfg), 0o644) + os.WriteFile(cfgName, []byte(cfg), 0o644) _, err := cmd.loadMigratorConfig(cfgName) if err == nil { t.Fatalf("expected error but none received from: %v", cfg) @@ -192,6 +267,7 @@ storage_destination "dest_type2" { path = "dest_path" }`) }) + t.Run("DFS Scan", func(t *testing.T) { s, _ := physicalBackends["inmem"](map[string]string{}, nil) @@ -204,9 +280,16 @@ storage_destination "dest_type2" { l := randomLister{s} - var out []string - dfsScan(context.Background(), l, func(ctx context.Context, path string) error { - out = append(out, path) + type SafeAppend struct { + out []string + lock sync.Mutex + } + outKeys := SafeAppend{} + dfsScan(context.Background(), l, 10, func(ctx context.Context, path string) error { + outKeys.lock.Lock() + defer outKeys.lock.Unlock() + + outKeys.out = append(outKeys.out, path) return nil }) @@ -218,8 +301,11 @@ storage_destination "dest_type2" { keys = append(keys, key) } sort.Strings(keys) - if !reflect.DeepEqual(keys, out) { - t.Fatalf("expected equal: %v, %v", keys, out) + outKeys.lock.Lock() + sort.Strings(outKeys.out) + outKeys.lock.Unlock() + if !reflect.DeepEqual(keys, outKeys.out) { + t.Fatalf("expected equal: %v, %v", keys, outKeys.out) } }) } diff --git a/command/operator_raft.go b/command/operator_raft.go index 34107dbb5953..deaff14cdf2d 100644 --- a/command/operator_raft.go +++ b/command/operator_raft.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorRaftCommand)(nil) diff --git a/command/operator_raft_autopilot_get_config.go b/command/operator_raft_autopilot_get_config.go index 1462e354c559..bdeeed6f8c08 100644 --- a/command/operator_raft_autopilot_get_config.go +++ b/command/operator_raft_autopilot_get_config.go @@ -1,10 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" "github.com/posener/complete" ) @@ -15,6 +19,7 @@ var ( type OperatorRaftAutopilotGetConfigCommand struct { *BaseCommand + flagDRToken string } func (c *OperatorRaftAutopilotGetConfigCommand) Synopsis() string { @@ -34,6 +39,17 @@ Usage: vault operator raft autopilot get-config func (c *OperatorRaftAutopilotGetConfigCommand) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + return set } @@ -67,10 +83,12 @@ func (c *OperatorRaftAutopilotGetConfigCommand) Run(args []string) int { return 2 } - config, err := client.Sys().RaftAutopilotConfiguration() - if err != nil { - c.UI.Error(err.Error()) - return 2 + var config *api.AutopilotConfig + switch { + case c.flagDRToken != "": + config, err = client.Sys().RaftAutopilotConfigurationWithDRToken(c.flagDRToken) + default: + config, err = client.Sys().RaftAutopilotConfiguration() } if config == nil { diff --git a/command/operator_raft_autopilot_set_config.go b/command/operator_raft_autopilot_set_config.go index 4a839c5fae3a..39b9b2ddcb6d 100644 --- a/command/operator_raft_autopilot_set_config.go +++ b/command/operator_raft_autopilot_set_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -23,6 +26,7 @@ type OperatorRaftAutopilotSetConfigCommand struct { flagMinQuorum uint flagServerStabilizationTime time.Duration flagDisableUpgradeMigration BoolPtr + flagDRToken string } func (c *OperatorRaftAutopilotSetConfigCommand) Synopsis() string { @@ -47,36 +51,52 @@ func (c *OperatorRaftAutopilotSetConfigCommand) Flags() *FlagSets { f.BoolPtrVar(&BoolPtrVar{ Name: "cleanup-dead-servers", Target: &c.flagCleanupDeadServers, + Usage: "Controls whether to remove dead servers from the Raft peer list periodically or when a new server joins.", }) f.DurationVar(&DurationVar{ Name: "last-contact-threshold", Target: &c.flagLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered unhealthy.", }) f.DurationVar(&DurationVar{ Name: "dead-server-last-contact-threshold", Target: &c.flagDeadServerLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered failed. 
This takes effect only when cleanup_dead_servers is set.", }) f.Uint64Var(&Uint64Var{ Name: "max-trailing-logs", Target: &c.flagMaxTrailingLogs, + Usage: "Amount of entries in the Raft Log that a server can be behind before being considered unhealthy.", }) f.UintVar(&UintVar{ Name: "min-quorum", Target: &c.flagMinQuorum, + Usage: "Minimum number of servers allowed in a cluster before autopilot can prune dead servers. This should at least be 3.", }) f.DurationVar(&DurationVar{ Name: "server-stabilization-time", Target: &c.flagServerStabilizationTime, + Usage: "Minimum amount of time a server must be in a stable, healthy state before it can be added to the cluster.", }) f.BoolPtrVar(&BoolPtrVar{ Name: "disable-upgrade-migration", Target: &c.flagDisableUpgradeMigration, + Usage: "Whether or not to perform automated version upgrades.", + }) + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", }) return set @@ -134,6 +154,9 @@ func (c *OperatorRaftAutopilotSetConfigCommand) Run(args []string) int { if c.flagDisableUpgradeMigration.IsSet() { data["disable_upgrade_migration"] = c.flagDisableUpgradeMigration.Get() } + if c.flagDRToken != "" { + data["dr_operation_token"] = c.flagDRToken + } secret, err := client.Logical().Write("sys/storage/raft/autopilot/configuration", data) if err != nil { diff --git a/command/operator_raft_autopilot_state.go b/command/operator_raft_autopilot_state.go index 8a530dc75e0f..4fc4ca4445e8 100644 --- a/command/operator_raft_autopilot_state.go +++ b/command/operator_raft_autopilot_state.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,8 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" "github.com/posener/complete" ) @@ -16,6 +20,7 @@ var ( type OperatorRaftAutopilotStateCommand struct { *BaseCommand + flagDRToken string } func (c *OperatorRaftAutopilotStateCommand) Synopsis() string { @@ -35,6 +40,17 @@ Usage: vault operator raft autopilot state func (c *OperatorRaftAutopilotStateCommand) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + // The output of the state endpoint contains nested values and is not fit for // the default "table" display format. Override the default display format to // "pretty", both in the flag and in the UI. @@ -80,7 +96,14 @@ func (c *OperatorRaftAutopilotStateCommand) Run(args []string) int { return 2 } - state, err := client.Sys().RaftAutopilotState() + var state *api.AutopilotState + switch { + case c.flagDRToken != "": + state, err = client.Sys().RaftAutopilotStateWithDRToken(c.flagDRToken) + default: + state, err = client.Sys().RaftAutopilotState() + } + if err != nil { c.UI.Error(fmt.Sprintf("Error checking autopilot state: %s", err)) return 2 diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index 466ab84142b4..aaaaf2891e10 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
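The autopilot get-config and state commands above share one dispatch shape: when the new -dr-token flag is set, the *WithDRToken API variant authorizes the request on a DR secondary. Condensed into an illustrative helper (using only the api.Sys methods named in the diff):

```go
package example

import "github.com/hashicorp/vault/api"

// fetchAutopilotState is an illustrative restatement of the dispatch:
// a DR operation token selects the DR-token variant of the endpoint.
func fetchAutopilotState(client *api.Client, drToken string) (*api.AutopilotState, error) {
	if drToken != "" {
		return client.Sys().RaftAutopilotStateWithDRToken(drToken)
	}
	return client.Sys().RaftAutopilotState()
}
```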
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_listpeers.go b/command/operator_raft_listpeers.go index 2c80112ec3fa..b92ab8f0b517 100644 --- a/command/operator_raft_listpeers.go +++ b/command/operator_raft_listpeers.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go index 6f7e837474fa..fabd9ce30d39 100644 --- a/command/operator_raft_remove_peer.go +++ b/command/operator_raft_remove_peer.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_raft_snapshot.go b/command/operator_raft_snapshot.go index 5e3b04287736..02fc4c2b8a43 100644 --- a/command/operator_raft_snapshot.go +++ b/command/operator_raft_snapshot.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*OperatorRaftSnapshotCommand)(nil) @@ -32,6 +35,10 @@ Usage: vault operator raft snapshot [options] [args] $ vault operator raft snapshot save raft.snap + Inspects a snapshot based on a file: + + $ vault operator raft snapshot inspect raft.snap + Please see the individual subcommand help for detailed usage information. ` diff --git a/command/operator_raft_snapshot_inspect.go b/command/operator_raft_snapshot_inspect.go new file mode 100644 index 000000000000..43c3fb0e4a13 --- /dev/null +++ b/command/operator_raft_snapshot_inspect.go @@ -0,0 +1,568 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + protoio "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftSnapshotInspectCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftSnapshotInspectCommand)(nil) +) + +type OperatorRaftSnapshotInspectCommand struct { + *BaseCommand + details bool + depth int + filter string +} + +func (c *OperatorRaftSnapshotInspectCommand) Synopsis() string { + return "Inspects raft snapshot" +} + +func (c *OperatorRaftSnapshotInspectCommand) Help() string { + helpText := ` + Usage: vault operator raft snapshot inspect + + Inspects a snapshot file. 
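Aside: the -details, -depth, and -filter flags defined just below drive the KV breakdown by grouping keys on their first -depth path segments after applying the prefix filter. An illustrative sketch of that grouping with sample keys (not the command's literal code):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	keys := []string{"sys/counters/a", "sys/counters/b", "logical/uuid1/x"}
	depth, filter := 2, "sys/"

	counts := map[string]int{}
	for _, k := range keys {
		if filter != "" && !strings.HasPrefix(k, filter) {
			continue // -filter limits the breakdown to matching keys
		}
		split := strings.Split(k, "/")
		d := depth
		if d == 0 || d > len(split) {
			d = len(split) // clamp the depth to the key's actual length
		}
		counts[strings.Join(split[:d], "/")]++
	}
	fmt.Println(counts) // map[sys/counters:2]
}
```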
+ + $ vault operator raft snapshot inspect raft.snap + + ` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotInspectCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "details", + Target: &c.details, + Default: true, + Usage: "Provides information about usage for data stored in the snapshot.", + }) + + f.IntVar(&IntVar{ + Name: "depth", + Target: &c.depth, + Default: 2, + Usage: "Can only be used with -details. The key prefix depth used to breakdown KV store data. If set to 0, all keys will be returned. Defaults to 2.", + }) + + f.StringVar(&StringVar{ + Name: "filter", + Target: &c.filter, + Default: "", + Usage: "Can only be used with -details. Limits the key breakdown using this prefix filter.", + }) + + return set +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +type OutputFormat struct { + Meta *MetadataInfo + StatsKV []typeStats + TotalCountKV int + TotalSizeKV int +} + +// SnapshotInfo is used for passing snapshot stat +// information between functions +type SnapshotInfo struct { + Meta MetadataInfo + StatsKV map[string]typeStats + TotalCountKV int + TotalSizeKV int +} + +type MetadataInfo struct { + ID string + Size int64 + Index uint64 + Term uint64 + Version raft.SnapshotVersion +} + +type typeStats struct { + Name string + Count int + Size int +} + +func (c *OperatorRaftSnapshotInspectCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Validate flags + if c.depth < 0 { + c.UI.Error("Depth must be equal to or greater than 0") + return 1 + } + + var file string + args = c.flags.Args() + + switch len(args) { + case 0: + c.UI.Error("Missing FILE argument") + return 1 + case 1: + file = args[0] + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + // Open the file. + f, err := os.Open(file) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening snapshot file: %s", err)) + return 1 + } + defer f.Close() + + // Extract metadata and snapshot info from snapshot file + var info *SnapshotInfo + var meta *raft.SnapshotMeta + info, meta, err = c.Read(hclog.New(nil), f) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading snapshot: %s", err)) + return 1 + } + + if info == nil { + c.UI.Error(fmt.Sprintf("Error calculating snapshot info: %s", err)) + return 1 + } + + // Generate structs for the formatter with information we read in + metaformat := &MetadataInfo{ + ID: meta.ID, + Size: meta.Size, + Index: meta.Index, + Term: meta.Term, + Version: meta.Version, + } + + formattedStatsKV := generateKVStats(*info) + + data := &OutputFormat{ + Meta: metaformat, + StatsKV: formattedStatsKV, + TotalCountKV: info.TotalCountKV, + TotalSizeKV: info.TotalSizeKV, + } + + if Format(c.UI) != "table" { + return OutputData(c.UI, data) + } + + tableData, err := formatTable(data) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + c.UI.Output(tableData) + + return 0 +} + +func (c *OperatorRaftSnapshotInspectCommand) kvEnhance(val *pb.StorageEntry, info *SnapshotInfo, read int) { + if !c.details { + return + } + + if val.Key == "" { + return + } + + // check for whether a filter is specified. 
if it is, skip + // any keys that don't match. + if len(c.filter) > 0 && !strings.HasPrefix(val.Key, c.filter) { + return + } + + split := strings.Split(val.Key, "/") + + // handle the situation where the key is shorter than + // the specified depth. + actualDepth := c.depth + if c.depth == 0 || c.depth > len(split) { + actualDepth = len(split) + } + + prefix := strings.Join(split[0:actualDepth], "/") + kvs := info.StatsKV[prefix] + if kvs.Name == "" { + kvs.Name = prefix + } + + kvs.Count++ + kvs.Size += read + info.TotalCountKV++ + info.TotalSizeKV += read + info.StatsKV[prefix] = kvs +} + +// Read from snapshot's state.bin and update the SnapshotInfo struct +func (c *OperatorRaftSnapshotInspectCommand) parseState(r io.Reader) (SnapshotInfo, error) { + info := SnapshotInfo{ + StatsKV: make(map[string]typeStats), + } + + protoReader := protoio.NewDelimitedReader(r, math.MaxInt32) + + for { + s := new(pb.StorageEntry) + if err := protoReader.ReadMsg(s); err != nil { + if err == io.EOF { + break + } + return info, err + } + size := protoReader.GetLastReadSize() + c.kvEnhance(s, &info, size) + } + + return info, nil +} + +// Read contents of snapshot. Parse metadata and snapshot info +// Also, verify validity of snapshot +func (c *OperatorRaftSnapshotInspectCommand) Read(logger hclog.Logger, in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) { + // Wrap the reader in a gzip decompressor. + decomp, err := gzip.NewReader(in) + if err != nil { + return nil, nil, fmt.Errorf("failed to decompress snapshot: %v", err) + } + + defer func() { + if decomp == nil { + return + } + + if err := decomp.Close(); err != nil { + logger.Error("Failed to close snapshot decompressor", "error", err) + } + }() + + // Read the archive. + snapshotInfo, metadata, err := c.read(decomp) + if err != nil { + return nil, nil, fmt.Errorf("failed to read snapshot file: %v", err) + } + + if err := concludeGzipRead(decomp); err != nil { + return nil, nil, err + } + + if err := decomp.Close(); err != nil { + return nil, nil, err + } + decomp = nil + return snapshotInfo, metadata, nil +} + +func formatTable(info *OutputFormat) (string, error) { + var b bytes.Buffer + tw := tabwriter.NewWriter(&b, 8, 8, 6, ' ', 0) + + fmt.Fprintf(tw, " ID\t%s", info.Meta.ID) + fmt.Fprintf(tw, "\n Size\t%d", info.Meta.Size) + fmt.Fprintf(tw, "\n Index\t%d", info.Meta.Index) + fmt.Fprintf(tw, "\n Term\t%d", info.Meta.Term) + fmt.Fprintf(tw, "\n Version\t%d", info.Meta.Version) + fmt.Fprintf(tw, "\n") + + if info.StatsKV != nil { + fmt.Fprintf(tw, "\n") + fmt.Fprintln(tw, "\n Key Name\tCount\tSize") + fmt.Fprintf(tw, " %s\t%s\t%s", "----", "----", "----") + + for _, s := range info.StatsKV { + fmt.Fprintf(tw, "\n %s\t%d\t%s", s.Name, s.Count, ByteSize(uint64(s.Size))) + } + + fmt.Fprintf(tw, "\n %s\t%s", "----", "----") + fmt.Fprintf(tw, "\n Total Size\t\t%s", ByteSize(uint64(info.TotalSizeKV))) + } + + if err := tw.Flush(); err != nil { + return b.String(), err + } + + return b.String(), nil +} + +const ( + BYTE = 1 << (10 * iota) + KILOBYTE + MEGABYTE + GIGABYTE + TERABYTE +) + +func ByteSize(bytes uint64) string { + unit := "" + value := float64(bytes) + + switch { + case bytes >= TERABYTE: + unit = "TB" + value = value / TERABYTE + case bytes >= GIGABYTE: + unit = "GB" + value = value / GIGABYTE + case bytes >= MEGABYTE: + unit = "MB" + value = value / MEGABYTE + case bytes >= KILOBYTE: + unit = "KB" + value = value / KILOBYTE + case bytes >= BYTE: + unit = "B" + case bytes == 0: + return "0" + } + + result := strconv.FormatFloat(value, 
'f', 1, 64) + result = strings.TrimSuffix(result, ".0") + return result + unit +} + +// sortTypeStats sorts the stat slice by count and then +// alphabetically in the case the counts are equal +func sortTypeStats(stats []typeStats) []typeStats { + // sort alphabetically if size is equal + sort.Slice(stats, func(i, j int) bool { + // Sort alphabetically if count is equal + if stats[i].Count == stats[j].Count { + return stats[i].Name < stats[j].Name + } + return stats[i].Count > stats[j].Count + }) + + return stats +} + +// generateKVStats reformats the KV stats to work with +// the output struct that's used to produce the printed +// output the user sees. +func generateKVStats(info SnapshotInfo) []typeStats { + kvLen := len(info.StatsKV) + if kvLen > 0 { + ks := make([]typeStats, 0, kvLen) + + for _, s := range info.StatsKV { + ks = append(ks, s) + } + + ks = sortTypeStats(ks) + + return ks + } + + return nil +} + +// hashList manages a list of filenames and their hashes. +type hashList struct { + hashes map[string]hash.Hash +} + +// newHashList returns a new hashList. +func newHashList() *hashList { + return &hashList{ + hashes: make(map[string]hash.Hash), + } +} + +// Add creates a new hash for the given file. +func (hl *hashList) Add(file string) hash.Hash { + if existing, ok := hl.hashes[file]; ok { + return existing + } + + h := sha256.New() + hl.hashes[file] = h + return h +} + +// Encode takes the current sum of all the hashes and saves the hash list as a +// SHA256SUMS-style text file. +func (hl *hashList) Encode(w io.Writer) error { + for file, h := range hl.hashes { + if _, err := fmt.Fprintf(w, "%x %s\n", h.Sum([]byte{}), file); err != nil { + return err + } + } + return nil +} + +// DecodeAndVerify reads a SHA256SUMS-style text file and checks the results +// against the current sums for all the hashes. +func (hl *hashList) DecodeAndVerify(r io.Reader) error { + // Read the file and make sure everything in there has a matching hash. + seen := make(map[string]struct{}) + s := bufio.NewScanner(r) + for s.Scan() { + sha := make([]byte, sha256.Size) + var file string + if _, err := fmt.Sscanf(s.Text(), "%x %s", &sha, &file); err != nil { + return err + } + + h, ok := hl.hashes[file] + if !ok { + return fmt.Errorf("list missing hash for %q", file) + } + if !bytes.Equal(sha, h.Sum([]byte{})) { + return fmt.Errorf("hash check failed for %q", file) + } + seen[file] = struct{}{} + } + if err := s.Err(); err != nil { + return err + } + + // Make sure everything we had a hash for was seen. + for file := range hl.hashes { + if _, ok := seen[file]; !ok { + return fmt.Errorf("file missing for %q", file) + } + } + + return nil +} + +// read takes a reader and extracts the snapshot metadata and snapshot +// info. It also checks the integrity of the snapshot data. +func (c *OperatorRaftSnapshotInspectCommand) read(in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) { + // Start a new tar reader. + archive := tar.NewReader(in) + + // Create a hash list that we will use to compare with the SHA256SUMS + // file in the archive. + hl := newHashList() + + // Populate the hashes for all the files we expect to see. The check at + // the end will make sure these are all present in the SHA256SUMS file + // and that the hashes match. + metaHash := hl.Add("meta.json") + snapHash := hl.Add("state.bin") + + // Look through the archive for the pieces we care about. 
+	var shaBuffer bytes.Buffer
+	var snapshotInfo SnapshotInfo
+	var metadata raft.SnapshotMeta
+	for {
+		hdr, err := archive.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed reading snapshot: %v", err)
+		}
+
+		switch hdr.Name {
+		case "meta.json":
+			// Previously we used json.Decode to decode the archive stream. There are
+			// edge cases in which it doesn't read all the bytes from the stream, even
+			// though the JSON object is still being parsed properly. Since we
+			// simultaneously fed everything to metaHash, our hash ended up being
+			// different from what we calculated when creating the snapshot, which in
+			// turn made the snapshot verification fail. By explicitly reading the
+			// whole thing first we ensure that we calculate the correct hash
+			// independent of how json.Decode works internally.
+			buf, err := io.ReadAll(io.TeeReader(archive, metaHash))
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to read snapshot metadata: %v", err)
+			}
+			if err := json.Unmarshal(buf, &metadata); err != nil {
+				return nil, nil, fmt.Errorf("failed to decode snapshot metadata: %v", err)
+			}
+		case "state.bin":
+			// Create a reader that writes to snapHash what it reads from archive.
+			wrappedReader := io.TeeReader(archive, snapHash)
+			var err error
+			snapshotInfo, err = c.parseState(wrappedReader)
+			if err != nil {
+				return nil, nil, fmt.Errorf("error parsing snapshot state: %v", err)
+			}
+
+		case "SHA256SUMS":
+			if _, err := io.CopyN(&shaBuffer, archive, 10000); err != nil && err != io.EOF {
+				return nil, nil, fmt.Errorf("failed to read snapshot hashes: %v", err)
+			}
+
+		case "SHA256SUMS.sealed":
+			// Verification of the sealed sum may be added in the future.
+			continue
+
+		default:
+			return nil, nil, fmt.Errorf("unexpected file %q in snapshot", hdr.Name)
+		}
+	}
+
+	// Verify all the hashes.
+	if err := hl.DecodeAndVerify(&shaBuffer); err != nil {
+		return nil, nil, fmt.Errorf("failed checking integrity of snapshot: %v", err)
+	}
+
+	return &snapshotInfo, &metadata, nil
+}
+
+// concludeGzipRead should be invoked after you think you've consumed all of
+// the data from the gzip stream. It will error if the stream was corrupt.
+//
+// The docs for gzip.Reader say: "Clients should treat data returned by Read as
+// tentative until they receive the io.EOF marking the end of the data."
+func concludeGzipRead(decomp *gzip.Reader) error {
+	extra, err := io.ReadAll(decomp) // ReadAll consumes the EOF
+	if err != nil {
+		return err
+	}
+	if len(extra) != 0 {
+		return fmt.Errorf("%d unread uncompressed bytes remain", len(extra))
+	}
+	return nil
+}
diff --git a/command/operator_raft_snapshot_inspect_test.go b/command/operator_raft_snapshot_inspect_test.go
new file mode 100644
index 000000000000..d70037695606
--- /dev/null
+++ b/command/operator_raft_snapshot_inspect_test.go
@@ -0,0 +1,141 @@
+// Copyright (c) HashiCorp, Inc.
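Aside on the read loop above: it leans on io.TeeReader so that every byte a parser consumes from a tar entry is also written to the matching SHA-256 hash, which is what makes the SHA256SUMS comparison sound regardless of how the decoder buffers. A minimal standalone demonstration of that trick (sample payload invented):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	payload := []byte(`{"ID":"bolt-snapshot"}`)
	h := sha256.New()

	// Draining the TeeReader hashes exactly the bytes that were read,
	// independent of how a downstream decoder would consume them.
	buf, err := io.ReadAll(io.TeeReader(bytes.NewReader(payload), h))
	if err != nil {
		panic(err)
	}

	// One SHA256SUMS-style line for the payload, as hashList.Encode writes.
	fmt.Printf("%x meta.json (%d bytes read)\n", h.Sum(nil), len(buf))
}
```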
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/physical" +) + +func testOperatorRaftSnapshotInspectCommand(tb testing.TB) (*cli.MockUi, *OperatorRaftSnapshotInspectCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorRaftSnapshotInspectCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func createSnapshot(tb testing.TB) (*os.File, func(), error) { + // Create new raft backend + r, raftDir := raft.GetRaft(tb, true, false) + defer os.RemoveAll(raftDir) + + // Write some data + for i := 0; i < 100; i++ { + err := r.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + return nil, nil, fmt.Errorf("Error adding data to snapshot %s", err) + } + } + + // Create temporary file to save snapshot to + snap, err := os.CreateTemp("", "temp_snapshot.snap") + if err != nil { + return nil, nil, fmt.Errorf("Error creating temporary file %s", err) + } + + cleanup := func() { + err := os.RemoveAll(snap.Name()) + if err != nil { + tb.Errorf("Error deleting temporary snapshot %s", err) + } + } + + // Save snapshot + err = r.Snapshot(snap, nil) + if err != nil { + return nil, nil, fmt.Errorf("Error saving raft snapshot %s", err) + } + + return snap, cleanup, nil +} + +func TestOperatorRaftSnapshotInspectCommand_Run(t *testing.T) { + t.Parallel() + + file1, cleanup1, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + file2, cleanup2, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + cases := []struct { + name string + args []string + out string + code int + cleanup func() + }{ + { + "too_many_args", + []string{"test.snap", "test"}, + "Too many arguments", + 1, + nil, + }, + { + "default", + []string{file1.Name()}, + "ID bolt-snapshot", + 0, + cleanup1, + }, + { + "all_flags", + []string{"-details", "-depth", "10", "-filter", "key", file2.Name()}, + "Key Name", + 0, + cleanup2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRaftSnapshotInspectCommand(t) + + cmd.client = client + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + + if tc.cleanup != nil { + tc.cleanup() + } + }) + } + }) +} diff --git a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go index 3755d6cbfd0d..516fba522fe8 100644 --- a/command/operator_raft_snapshot_restore.go +++ b/command/operator_raft_snapshot_restore.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
 package command
 
 import (
@@ -5,7 +8,7 @@ import (
 	"os"
 	"strings"
 
-	"github.com/mitchellh/cli"
+	"github.com/hashicorp/cli"
 	"github.com/posener/complete"
 )
diff --git a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go
index 496b0a7b52c4..38580ed5e1f5 100644
--- a/command/operator_raft_snapshot_save.go
+++ b/command/operator_raft_snapshot_save.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package command
 
 import (
@@ -6,7 +9,7 @@ import (
 	"os"
 	"strings"
 
-	"github.com/mitchellh/cli"
+	"github.com/hashicorp/cli"
 	"github.com/posener/complete"
 )
diff --git a/command/operator_rekey.go b/command/operator_rekey.go
index b73349405daa..9b4841568281 100644
--- a/command/operator_rekey.go
+++ b/command/operator_rekey.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package command
 
 import (
@@ -8,10 +11,10 @@ import (
 	"strings"
 
 	"github.com/fatih/structs"
+	"github.com/hashicorp/cli"
 	"github.com/hashicorp/go-secure-stdlib/password"
 	"github.com/hashicorp/vault/api"
 	"github.com/hashicorp/vault/helper/pgpkeys"
-	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 )
 
@@ -20,6 +23,11 @@ var (
 	_ cli.CommandAutocomplete = (*OperatorRekeyCommand)(nil)
 )
 
+const (
+	keyTypeRecovery = "Recovery"
+	keyTypeUnseal   = "Unseal"
+)
+
 type OperatorRekeyCommand struct {
 	*BaseCommand
 
@@ -58,6 +66,9 @@ Usage: vault operator rekey [options] [KEY]
       the command. If key is specified as "-", the command will read from stdin.
       If a TTY is available, the command will prompt for text.
 
+      If the flag -target=recovery is supplied, then this operation will require a
+      quorum of recovery keys in order to generate a new set of recovery keys.
+
       Initialize a rekey:
 
           $ vault operator rekey \
@@ -112,7 +123,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
 		Target:  &c.flagCancel,
 		Default: false,
 		Usage: "Reset the rekeying progress. This will discard any submitted " +
-			"unseal keys or configuration.",
+			"unseal keys, recovery keys, or configuration.",
 	})
 
 	f.BoolVar(&BoolVar{
@@ -120,7 +131,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
 		Target:  &c.flagStatus,
 		Default: false,
 		Usage: "Print the status of the current attempt without providing an " +
-			"unseal key.",
+			"unseal or recovery key.",
 	})
 
 	f.IntVar(&IntVar{
@@ -130,7 +141,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
 		Default:    5,
 		Completion: complete.PredictAnything,
 		Usage: "Number of key shares to split the generated root key into. " +
-			"This is the number of \"unseal keys\" to generate.",
+			"This is the number of \"unseal keys\" or \"recovery keys\" to generate.",
 	})
 
 	f.IntVar(&IntVar{
@@ -150,7 +161,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
 		EnvVar:     "",
 		Completion: complete.PredictAnything,
 		Usage: "Nonce value provided at initialization. The same nonce value " +
-			"must be provided with each unseal key.",
+			"must be provided with each unseal or recovery key.",
 	})
 
 	f.StringVar(&StringVar{
@@ -179,7 +190,7 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets {
 		Usage: "Comma-separated list of paths to files on disk containing " +
 			"public PGP keys OR a comma-separated list of Keybase usernames using " +
 			"the format \"keybase:<username>\".
When supplied, the generated " + - "unseal keys will be encrypted and base64-encoded in the order " + + "unseal or recovery keys will be encrypted and base64-encoded in the order " + "specified in this list.", }) @@ -189,25 +200,25 @@ func (c *OperatorRekeyCommand) Flags() *FlagSets { Name: "backup", Target: &c.flagBackup, Default: false, - Usage: "Store a backup of the current PGP encrypted unseal keys in " + + Usage: "Store a backup of the current PGP encrypted unseal or recovery keys in " + "Vault's core. The encrypted values can be recovered in the event of " + "failure or discarded after success. See the -backup-delete and " + "-backup-retrieve options for more information. This option only " + - "applies when the existing unseal keys were PGP encrypted.", + "applies when the existing unseal or recovery keys were PGP encrypted.", }) f.BoolVar(&BoolVar{ Name: "backup-delete", Target: &c.flagBackupDelete, Default: false, - Usage: "Delete any stored backup unseal keys.", + Usage: "Delete any stored backup unseal or recovery keys.", }) f.BoolVar(&BoolVar{ Name: "backup-retrieve", Target: &c.flagBackupRetrieve, Default: false, - Usage: "Retrieve the backed-up unseal keys. This option is only available " + + Usage: "Retrieve the backed-up unseal or recovery keys. This option is only available " + "if the PGP keys were provided and the backup has not been deleted.", }) @@ -268,10 +279,12 @@ func (c *OperatorRekeyCommand) Run(args []string) int { func (c *OperatorRekeyCommand) init(client *api.Client) int { // Handle the different API requests var fn func(*api.RekeyInitRequest) (*api.RekeyStatusResponse, error) + keyTypeRequired := keyTypeUnseal switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { case "barrier": fn = client.Sys().RekeyInit case "recovery", "hsm": + keyTypeRequired = keyTypeRecovery fn = client.Sys().RekeyRecoveryKeyInit default: c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) @@ -295,25 +308,25 @@ func (c *OperatorRekeyCommand) init(client *api.Client) int { if len(c.flagPGPKeys) == 0 { if Format(c.UI) == "table" { c.UI.Warn(wrapAtLength( - "WARNING! If you lose the keys after they are returned, there is no " + - "recovery. Consider canceling this operation and re-initializing " + - "with the -pgp-keys flag to protect the returned unseal keys along " + - "with -backup to allow recovery of the encrypted keys in case of " + - "emergency. You can delete the stored keys later using the -delete " + - "flag.")) + fmt.Sprintf("WARNING! If you lose the keys after they are returned, there is no "+ + "recovery. Consider canceling this operation and re-initializing "+ + "with the -pgp-keys flag to protect the returned %s keys along "+ + "with -backup to allow recovery of the encrypted keys in case of "+ + "emergency. You can delete the stored keys later using the -delete "+ + "flag.", strings.ToLower(keyTypeRequired)))) c.UI.Output("") } } if len(c.flagPGPKeys) > 0 && !c.flagBackup { if Format(c.UI) == "table" { c.UI.Warn(wrapAtLength( - "WARNING! You are using PGP keys for encrypted the resulting unseal " + - "keys, but you did not enable the option to backup the keys to " + - "Vault's core. If you lose the encrypted keys after they are " + - "returned, you will not be able to recover them. Consider canceling " + - "this operation and re-running with -backup to allow recovery of the " + - "encrypted unseal keys in case of emergency. You can delete the " + - "stored keys later using the -delete flag.")) + fmt.Sprintf("WARNING! 
You are using PGP keys to encrypt the resulting %s "+
+				"keys, but you did not enable the option to back up the keys to "+
+				"Vault's core. If you lose the encrypted keys after they are "+
+				"returned, you will not be able to recover them. Consider canceling "+
+				"this operation and re-running with -backup to allow recovery of the "+
+				"encrypted unseal keys in case of emergency. You can delete the "+
+				"stored keys later using the -delete flag.", strings.ToLower(keyTypeRequired))))
 			c.UI.Output("")
 		}
 	}
@@ -358,7 +371,7 @@ func (c *OperatorRekeyCommand) cancel(client *api.Client) int {
 func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int {
 	var statusFn func() (interface{}, error)
 	var updateFn func(string, string) (interface{}, error)
-
+	keyTypeRequired := keyTypeUnseal
 	switch strings.ToLower(strings.TrimSpace(c.flagTarget)) {
 	case "barrier":
 		statusFn = func() (interface{}, error) {
@@ -376,6 +389,7 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int {
 			}
 		}
 	case "recovery", "hsm":
+		keyTypeRequired = keyTypeRecovery
 		statusFn = func() (interface{}, error) {
 			return client.Sys().RekeyRecoveryKeyStatus()
 		}
@@ -448,7 +462,7 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int {
 		// Nonce value is not required if we are prompting via the terminal
 		w := getWriterFromUI(c.UI)
 		fmt.Fprintf(w, "Rekey operation nonce: %s\n", nonce)
-		fmt.Fprintf(w, "Unseal Key (will be hidden): ")
+		fmt.Fprintf(w, "%s Key (will be hidden): ", keyTypeRequired)
 		key, err = password.Read(os.Stdin)
 		fmt.Fprintf(w, "\n")
 		if err != nil {
@@ -458,11 +472,11 @@ func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int {
 			}
 
 			c.UI.Error(wrapAtLength(fmt.Sprintf("An error occurred attempting to "+
-				"ask for the unseal key. The raw error message is shown below, but "+
+				"ask for the %s key. The raw error message is shown below, but "+
 				"usually this is because you attempted to pipe a value into the "+
 				"command or you are executing outside of a terminal (tty). If you "+
 				"want to pipe the value, pass \"-\" as the argument to read from "+
-				"stdin. The raw error was: %s", err)))
+				"stdin. The raw error was: %s", strings.ToLower(keyTypeRequired), err)))
 			return 1
 		}
 	default: // Supplied directly as an arg
@@ -697,7 +711,7 @@ func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.R
 		)))
 	case "recovery", "hsm":
 		c.UI.Output(wrapAtLength(fmt.Sprintf(
-			"The encrypted unseal keys are backed up to \"core/recovery-keys-backup\" " +
+			"The encrypted recovery keys are backed up to \"core/recovery-keys-backup\" " +
 				"in the storage backend. Remove these keys at any time using " +
 				"\"vault operator rekey -backup-delete -target=recovery\". Vault does not automatically " +
 				"remove these keys.",
@@ -708,33 +722,56 @@ func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.R
 	switch status.VerificationRequired {
 	case false:
 		c.UI.Output("")
-		c.UI.Output(wrapAtLength(fmt.Sprintf(
-			"Vault rekeyed with %d key shares and a key threshold of %d. Please "+
-				"securely distribute the key shares printed above. When Vault is "+
-				"re-sealed, restarted, or stopped, you must supply at least %d of "+
-				"these keys to unseal it before it can start servicing requests.",
-			status.N,
-			status.T,
-			status.T)))
+		switch strings.ToLower(strings.TrimSpace(c.flagTarget)) {
+		case "barrier":
+			c.UI.Output(wrapAtLength(fmt.Sprintf(
+				"Vault unseal keys rekeyed with %d key shares and a key threshold of %d.
Please "+ + "securely distribute the key shares printed above. When Vault is "+ + "re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault recovery keys rekeyed with %d key shares and a key threshold of %d. Please "+ + "securely distribute the key shares printed above.", + status.N, + status.T))) + } + default: c.UI.Output("") - c.UI.Output(wrapAtLength(fmt.Sprintf( - "Vault has created a new key, split into %d key shares and a key threshold "+ - "of %d. These will not be active until after verification is complete. "+ - "Please securely distribute the key shares printed above. When Vault "+ - "is re-sealed, restarted, or stopped, you must supply at least %d of "+ - "these keys to unseal it before it can start servicing requests.", - status.N, - status.T, - status.T))) + var warningText string + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault has created a new unseal key, split into %d key shares and a key threshold "+ + "of %d. These will not be active until after verification is complete. "+ + "Please securely distribute the key shares printed above. When Vault "+ + "is re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) + warningText = "unseal" + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault has created a new recovery key, split into %d key shares and a key threshold "+ + "of %d. These will not be active until after verification is complete. "+ + "Please securely distribute the key shares printed above.", + status.N, + status.T))) + warningText = "authenticate with" + + } c.UI.Output("") - c.UI.Warn(wrapAtLength( - "Again, these key shares are _not_ valid until verification is performed. " + - "Do not lose or discard your current key shares until after verification " + - "is complete or you will be unable to unseal Vault. If you cancel the " + - "rekey process or seal Vault before verification is complete the new " + - "shares will be discarded and the current shares will remain valid.", - )) + c.UI.Warn(wrapAtLength(fmt.Sprintf( + "Again, these key shares are _not_ valid until verification is performed. "+ + "Do not lose or discard your current key shares until after verification "+ + "is complete or you will be unable to %s Vault. If you cancel the "+ + "rekey process or seal Vault before verification is complete the new "+ + "shares will be discarded and the current shares will remain valid.", warningText))) c.UI.Output("") c.UI.Warn(wrapAtLength( "The current verification status, including initial nonce, is shown below.", diff --git a/command/operator_rekey_test.go b/command/operator_rekey_test.go index 31617e5ac4bc..b21d9c367b32 100644 --- a/command/operator_rekey_test.go +++ b/command/operator_rekey_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + //go:build !race package command @@ -9,8 +12,10 @@ import ( "strings" "testing" + "github.com/hashicorp/vault/sdk/helper/roottoken" + + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testOperatorRekeyCommand(tb testing.TB) (*cli.MockUi, *OperatorRekeyCommand) { @@ -254,6 +259,83 @@ func TestOperatorRekeyCommand_Run(t *testing.T) { } }) + t.Run("provide_arg_recovery_keys", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerAutoUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + + // Supply the first n-1 recovery keys + for _, key := range keys[:len(keys)-1] { + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + keys[len(keys)-1], // the last recovery key + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + recoveryKey := match[0][1] + + if strings.Contains(strings.ToLower(output), "unseal key") { + t.Fatalf(`output %s shouldn't contain "unseal key"`, output) + } + + // verify that we can perform operations with the recovery key + // below we generate a root token using the recovery key + rootStatus, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) + if err != nil { + t.Fatal(err) + } + genRoot, err := client.Sys().GenerateRootInit(otp, "") + if err != nil { + t.Fatal(err) + } + r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) + if err != nil { + t.Fatal(err) + } + if !r.Complete { + t.Fatal("expected root update to be complete") + } + }) t.Run("provide_arg", func(t *testing.T) { t.Parallel() @@ -392,6 +474,94 @@ func TestOperatorRekeyCommand_Run(t *testing.T) { } }) + t.Run("provide_stdin_recovery_keys", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerAutoUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + for _, key := range keys[:len(keys)-1] { + stdinR, stdinW := io.Pipe() + go func() { + _, _ = stdinW.Write([]byte(key)) + _ = stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-target", "recovery", + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + stdinR, stdinW := io.Pipe() + go func() { + _, _ = stdinW.Write([]byte(keys[len(keys)-1])) // the last recovery key + _ = stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + 
+ code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + recoveryKey := match[0][1] + + if strings.Contains(strings.ToLower(output), "unseal key") { + t.Fatalf(`output %s shouldn't contain "unseal key"`, output) + } + // verify that we can perform operations with the recovery key + // below we generate a root token using the recovery key + rootStatus, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) + if err != nil { + t.Fatal(err) + } + genRoot, err := client.Sys().GenerateRootInit(otp, "") + if err != nil { + t.Fatal(err) + } + r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) + if err != nil { + t.Fatal(err) + } + if !r.Complete { + t.Fatal("expected root update to be complete") + } + }) t.Run("backup", func(t *testing.T) { t.Parallel() diff --git a/command/operator_seal.go b/command/operator_seal.go index 369ec3215d66..f390972f2045 100644 --- a/command/operator_seal.go +++ b/command/operator_seal.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_seal_test.go b/command/operator_seal_test.go index 86722d2e84dd..6208d6396328 100644 --- a/command/operator_seal_test.go +++ b/command/operator_seal_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorSealCommand(tb testing.TB) (*cli.MockUi, *OperatorSealCommand) { diff --git a/command/operator_step_down.go b/command/operator_step_down.go index dea2c97178da..e8b93acf0759 100644 --- a/command/operator_step_down.go +++ b/command/operator_step_down.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/operator_step_down_test.go b/command/operator_step_down_test.go index 93117a856b66..8cb108be98c9 100644 --- a/command/operator_step_down_test.go +++ b/command/operator_step_down_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorStepDownCommand(tb testing.TB) (*cli.MockUi, *OperatorStepDownCommand) { diff --git a/command/operator_unseal.go b/command/operator_unseal.go index 8cdd06d38408..a667f209dcba 100644 --- a/command/operator_unseal.go +++ b/command/operator_unseal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
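The recovery-key tests above all feed stdin through the same io.Pipe pattern: a goroutine writes the key share and closes the writer so the command's blocking read can finish. Reduced to its essentials, outside the test harness:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
)

func main() {
	stdinR, stdinW := io.Pipe()
	go func() {
		_, _ = stdinW.Write([]byte("recovery-key-share\n"))
		_ = stdinW.Close() // EOF lets the reader side finish
	}()

	// In the tests, cmd.testStdin = stdinR plays this reader's role.
	line, _ := bufio.NewReader(stdinR).ReadString('\n')
	fmt.Printf("read from piped stdin: %q\n", line)
}
```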
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,9 +9,9 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/password" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go index 867b17a03c01..42f603e4882c 100644 --- a/command/operator_unseal_test.go +++ b/command/operator_unseal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -8,7 +11,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorUnsealCommand(tb testing.TB) (*cli.MockUi, *OperatorUnsealCommand) { diff --git a/command/operator_usage.go b/command/operator_usage.go index 1df42091ccc3..d6468d67ddbf 100644 --- a/command/operator_usage.go +++ b/command/operator_usage.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -8,8 +11,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/ryanuber/columnize" ) @@ -129,7 +132,7 @@ func (c *OperatorUsageCommand) Run(args []string) int { c.outputTimestamps(resp.Data) out := []string{ - "Namespace path | Distinct entities | Non-Entity tokens | Active clients", + "Namespace path | Distinct entities | Non-Entity tokens | Secret syncs | Active clients", } out = append(out, c.namespacesOutput(resp.Data)...) @@ -193,8 +196,8 @@ type UsageResponse struct { entityCount int64 // As per 1.9, the tokenCount field will contain the distinct non-entity // token clients instead of each individual token. 
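Context for the usage-table changes here and below: the command builds pipe-delimited rows and ryanuber/columnize aligns them, so adding the Secret syncs column means widening every row, including the blank separator. A small sketch of the resulting shape (sample numbers invented):

```go
package main

import (
	"fmt"

	"github.com/ryanuber/columnize"
)

func main() {
	rows := []string{
		"Namespace path | Distinct entities | Non-Entity tokens | Secret syncs | Active clients",
		"root | 10 | 3 | 2 | 15",
		" | | | | ", // blank separator row, as in totalOutput
		"Total | 10 | 3 | 2 | 15",
	}
	fmt.Println(columnize.SimpleFormat(rows))
}
```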
- tokenCount int64 - + tokenCount int64 + secretSyncs int64 clientCount int64 } @@ -239,6 +242,9 @@ func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageRes return ret, errors.New("missing non_entity_tokens") } + // don't error if the secret syncs key is missing + ret.secretSyncs, _ = jsonNumberOK(counts, "secret_syncs") + ret.clientCount, ok = jsonNumberOK(counts, "clients") if !ok { return ret, errors.New("missing clients") @@ -271,8 +277,8 @@ func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []s sortOrder = "2" + val.namespacePath } - formattedLine := fmt.Sprintf("%s | %d | %d | %d", - val.namespacePath, val.entityCount, val.tokenCount, val.clientCount) + formattedLine := fmt.Sprintf("%s | %d | %d | %d | %d", + val.namespacePath, val.entityCount, val.tokenCount, val.secretSyncs, val.clientCount) nsOut = append(nsOut, UsageCommandNamespace{ formattedLine: formattedLine, sortOrder: sortOrder, @@ -293,7 +299,7 @@ func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []s func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string { // blank line separating it from namespaces - out := []string{" | | | "} + out := []string{" | | | | "} total, ok := data["total"].(map[string]interface{}) if !ok { @@ -312,13 +318,16 @@ func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string c.UI.Error("missing non_entity_tokens in total") return out } + // don't error if secret syncs key is missing + secretSyncs, _ := jsonNumberOK(total, "secret_syncs") + clientCount, ok := jsonNumberOK(total, "clients") if !ok { c.UI.Error("missing clients in total") return out } - out = append(out, fmt.Sprintf("Total | %d | %d | %d", - entityCount, tokenCount, clientCount)) + out = append(out, fmt.Sprintf("Total | %d | %d | %d | %d", + entityCount, tokenCount, secretSyncs, clientCount)) return out } diff --git a/command/operator_utilization.go b/command/operator_utilization.go new file mode 100644 index 000000000000..71291cca73cd --- /dev/null +++ b/command/operator_utilization.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/base64" + "errors" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/mapstructure" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorUtilizationCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUtilizationCommand)(nil) +) + +type OperatorUtilizationCommand struct { + *BaseCommand + + flagMessage string + flagTodayOnly BoolPtr + flagOutput string +} + +func (c *OperatorUtilizationCommand) Synopsis() string { + return "Generates license utilization reporting bundle" +} + +func (c *OperatorUtilizationCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *OperatorUtilizationCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorUtilizationCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "message", + Target: &c.flagMessage, + Completion: complete.PredictAnything, + Usage: "Provide context about the conditions under which the report was generated and submitted. 
This message is not included in the license utilization bundle but will be included in the vault server logs.", + }) + + f.BoolPtrVar(&BoolPtrVar{ + Name: "today-only", + Target: &c.flagTodayOnly, + Usage: "To include only today’s snapshot, no historical snapshots. If no snapshots were persisted in the last 24 hrs, it takes a snapshot and exports it to a bundle.", + }) + + f.StringVar(&StringVar{ + Name: "output", + Target: &c.flagOutput, + Completion: complete.PredictAnything, + Usage: "Specifies the output path for the bundle. Defaults to a time-based generated file name.", + }) + + return set +} + +func (c *OperatorUtilizationCommand) Help() string { + helpText := ` +Usage: vault operator utilization [options] + +Produces a bundle of snapshots that contains license utilization data. If no snapshots were persisted in the last 24 hrs, it takes a snapshot and includes it in the bundle to prevent stale data. + + To create a license utilization bundle that includes all persisted historical snapshots and has the default bundle name: + + $ vault operator utilization + + To create a license utilization bundle with a message about the bundle (Note: this message is not included in the bundle but only included in server logs): + + $ vault operator utilization -message="Change Control 654987" + + To create a license utilization bundle with only today's snapshot: + + $ vault operator utilization -today-only + + To create a license utilization bundle with a specific name: + + $ vault operator utilization -output="/utilization/reports/latest.json" + +` + c.Flags().Help() + + return helpText +} + +func (c *OperatorUtilizationCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + parsedArgs := f.Args() + if len(parsedArgs) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs))) + return 1 + } + + outputBundleFile, err := getOutputFileName(time.Now().UTC(), c.flagOutput) + if err != nil { + c.UI.Error(fmt.Sprintf("Error during validation: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Capture license utilization reporting data + bundleDataBytes, err := c.getManualReportingCensusData(client) + if err != nil { + c.UI.Error(fmt.Sprintf("Error capturing license utilization reporting data: %s", err)) + return 1 + } + + err = os.WriteFile(outputBundleFile, bundleDataBytes, 0o400) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing license utilization reporting data to bundle %q: %s", outputBundleFile, err)) + return 1 + } + + c.UI.Info(fmt.Sprintf("Success! 
License utilization reporting bundle written to: %s", outputBundleFile)) + return 0 +} + +// getOutputFileName returns the file name of the license utilization reporting bundle ending with .json +// If filename is a path with non-existing parent directory, it creates a new directory to which the file with returned filename is added +func getOutputFileName(inputTime time.Time, flagOutput string) (string, error) { + formattedTime := inputTime.Format(fileFriendlyTimeFormat) + switch len(flagOutput) { + case 0: + flagOutput = fmt.Sprintf("vault-utilization-%s.json", formattedTime) + default: + flagOutput = filepath.Clean(flagOutput) + ext := filepath.Ext(flagOutput) + switch ext { + case "": // it's a directory + flagOutput = filepath.Join(flagOutput, fmt.Sprintf("vault-utilization-%s.json", formattedTime)) + case ".json": + default: + return "", fmt.Errorf("invalid file extension %s, must be .json", ext) + } + } + + // Stat the file to ensure we don't override any existing data. + _, err := os.Stat(flagOutput) + switch { + case os.IsNotExist(err): + case err != nil: + return "", fmt.Errorf("unable to stat file: %s", err) + default: + return "", fmt.Errorf("output file already exists: %s", flagOutput) + } + + // output file does not exist, create the parent directory if it doesn't exist + _, err = os.Stat(filepath.Dir(flagOutput)) + switch { + case os.IsNotExist(err): + err := os.MkdirAll(filepath.Dir(flagOutput), 0o700) + if err != nil { + return "", fmt.Errorf("unable to create output directory: %s", err) + } + case err != nil: + return "", fmt.Errorf("unable to stat directory: %s", err) + } + return flagOutput, nil +} + +func (c *OperatorUtilizationCommand) getManualReportingCensusData(client *api.Client) ([]byte, error) { + data := make(map[string]interface{}) + if c.flagTodayOnly.IsSet() { + data["today_only"] = c.flagTodayOnly.Get() + } + if c.flagMessage != "" { + data["message"] = c.flagMessage + } + secret, err := client.Logical().Write("sys/utilization", data) + if err != nil { + return nil, fmt.Errorf("error getting license utilization reporting data: %w", err) + } + if secret == nil { + return nil, errors.New("no license utilization reporting data available") + } + + var bundleBase64Str string + err = mapstructure.Decode(secret.Data["utilization_bundle"], &bundleBase64Str) + if err != nil { + return nil, err + } + + bundleByteArray, err := base64.StdEncoding.DecodeString(bundleBase64Str) + if err != nil { + return nil, err + } + return bundleByteArray, nil +} diff --git a/command/patch.go b/command/patch.go index 8edb77ea86f7..f7b006f0f125 100644 --- a/command/patch.go +++ b/command/patch.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/patch_test.go b/command/patch_test.go index b4bdd7a6243d..357257937309 100644 --- a/command/patch_test.go +++ b/command/patch_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
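Aside: getOutputFileName above resolves the -output flag in three steps (default name, directory join, extension check). Illustrative in-package calls follow; the function is unexported, so these would live in package command, and the exact default name depends on fileFriendlyTimeFormat:

```go
func exampleOutputNames() {
	now := time.Date(2024, 1, 2, 15, 4, 5, 0, time.UTC)

	// Empty -output: a time-based default such as
	// "vault-utilization-<timestamp>.json" in the working directory.
	name, _ := getOutputFileName(now, "")
	fmt.Println(name)

	// Any extension other than .json is rejected up front.
	if _, err := getOutputFileName(now, "report.txt"); err != nil {
		fmt.Println(err) // invalid file extension .txt, must be .json
	}
}
```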
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testPatchCommand(tb testing.TB) (*cli.MockUi, *PatchCommand) { diff --git a/command/path_help.go b/command/path_help.go index 1f540a5c6ab0..335de684008f 100644 --- a/command/path_help.go +++ b/command/path_help.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/path_help_test.go b/command/path_help_test.go index 688bcf09cef3..33c06b4fe553 100644 --- a/command/path_help_test.go +++ b/command/path_help_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPathHelpCommand(tb testing.TB) (*cli.MockUi, *PathHelpCommand) { diff --git a/command/path_map_upgrade_api_test.go b/command/path_map_upgrade_api_test.go deleted file mode 100644 index b9e573b2797e..000000000000 --- a/command/path_map_upgrade_api_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package command - -import ( - "testing" - - "github.com/hashicorp/vault/api" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - - credAppId "github.com/hashicorp/vault/builtin/credential/app-id" -) - -func TestPathMap_Upgrade_API(t *testing.T) { - var err error - coreConfig := &vault.CoreConfig{ - DisableMlock: true, - DisableCache: true, - CredentialBackends: map[string]logical.Factory{ - "app-id": credAppId.Factory, - }, - PendingRemovalMountsAllowed: true, - } - - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - - cluster.Start() - defer cluster.Cleanup() - - cores := cluster.Cores - - vault.TestWaitActive(t, cores[0].Core) - - client := cores[0].Client - - // Enable the app-id method - err = client.Sys().EnableAuthWithOptions("app-id", &api.EnableAuthOptions{ - Type: "app-id", - }) - if err != nil { - t.Fatal(err) - } - - // Create an app-id - _, err = client.Logical().Write("auth/app-id/map/app-id/test-app-id", map[string]interface{}{ - "policy": "test-policy", - }) - if err != nil { - t.Fatal(err) - } - - // Create a user-id - _, err = client.Logical().Write("auth/app-id/map/user-id/test-user-id", map[string]interface{}{ - "value": "test-app-id", - }) - if err != nil { - t.Fatal(err) - } - - // Perform a login. It should succeed. - _, err = client.Logical().Write("auth/app-id/login", map[string]interface{}{ - "app_id": "test-app-id", - "user_id": "test-user-id", - }) - if err != nil { - t.Fatal(err) - } - - // List the hashed app-ids in the storage - secret, err := client.Logical().List("auth/app-id/map/app-id") - if err != nil { - t.Fatal(err) - } - hashedAppID := secret.Data["keys"].([]interface{})[0].(string) - - // Try reading it. This used to cause an issue which is fixed in [GH-3806]. 
- _, err = client.Logical().Read("auth/app-id/map/app-id/" + hashedAppID) - if err != nil { - t.Fatal(err) - } - - // Ensure that there was no issue by performing another login - _, err = client.Logical().Write("auth/app-id/login", map[string]interface{}{ - "app_id": "test-app-id", - "user_id": "test-user-id", - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/command/pgp_test.go b/command/pgp_test.go index b9f3ee2a91ac..2211cbed56b6 100644 --- a/command/pgp_test.go +++ b/command/pgp_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( diff --git a/command/pki.go b/command/pki.go index 4212ee6f86ab..8b90a6aa7b38 100644 --- a/command/pki.go +++ b/command/pki.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*PKICommand)(nil) @@ -13,7 +16,7 @@ type PKICommand struct { } func (c *PKICommand) Synopsis() string { - return "Interact with Vault's Key-Value storage" + return "Interact with Vault's PKI Secrets Engine" } func (c *PKICommand) Help() string { diff --git a/command/pki_health_check.go b/command/pki_health_check.go index df89dc342b78..0ae44499fb96 100644 --- a/command/pki_health_check.go +++ b/command/pki_health_check.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -9,7 +12,7 @@ import ( "github.com/hashicorp/vault/command/healthcheck" "github.com/ghodss/yaml" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" "github.com/ryanuber/columnize" ) @@ -47,7 +50,7 @@ type PKIHealthCheckCommand struct { } func (c *PKIHealthCheckCommand) Synopsis() string { - return "Check PKI Secrets Engine health and operational status" + return "Check a PKI Secrets Engine mount's health and operational status" } func (c *PKIHealthCheckCommand) Help() string { @@ -79,6 +82,9 @@ Usage: vault pki health-check [options] MOUNT 6 - A permission denied message was returned from Vault Server for one or more health checks. +For more detailed information, refer to the online documentation about the +vault pki health-check command. + ` + c.Flags().Help() return strings.TrimSpace(helpText) @@ -132,7 +138,7 @@ default unless enabled by the configuration file explicitly.`, Default: false, EnvVar: "", Usage: `When specified, no health checks are run, but all known health -checks are printed. Still requires a positional mount argument.`, +checks are printed.`, }) return set @@ -167,10 +173,10 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { } args = f.Args() - if len(args) < 1 { + if !c.flagList && len(args) < 1 { c.UI.Error("Not enough arguments (expected mount path, got nothing)") return pkiRetUsage - } else if len(args) > 1 { + } else if !c.flagList && len(args) > 1 { c.UI.Error(fmt.Sprintf("Too many arguments (expected only mount path, got %d arguments)", len(args))) for _, arg := range args { if strings.HasPrefix(arg, "-") { @@ -193,7 +199,14 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { return pkiRetUsage } - mount := sanitizePath(args[0]) + // When listing is enabled, we lack an argument here, but do not contact + // the server at all, so we're safe to use a hard-coded default here. 
+ pkiPath := "" + if len(args) == 1 { + pkiPath = args[0] + } + + mount := sanitizePath(pkiPath) executor := healthcheck.NewExecutor(client, mount) executor.AddCheck(healthcheck.NewCAValidityPeriodCheck()) executor.AddCheck(healthcheck.NewCRLValidityPeriodCheck()) @@ -207,39 +220,51 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { executor.AddCheck(healthcheck.NewEnableAutoTidyCheck()) executor.AddCheck(healthcheck.NewTidyLastRunCheck()) executor.AddCheck(healthcheck.NewTooManyCertsCheck()) + executor.AddCheck(healthcheck.NewEnableAcmeIssuance()) + executor.AddCheck(healthcheck.NewAllowAcmeHeaders()) if c.flagDefaultDisabled { executor.DefaultEnabled = false } // Handle listing, if necessary. if c.flagList { - c.UI.Output("Health Checks:") + uiFormat := Format(c.UI) + if uiFormat == "yaml" { + c.UI.Error("YAML output format is not supported by the --list command") + return pkiRetUsage + } + + if uiFormat != "json" { + c.UI.Output("Default health check config:") + } + config := map[string]map[string]interface{}{} for _, checker := range executor.Checkers { - c.UI.Output(" - " + checker.Name()) - - prefix := " " - cfg := checker.DefaultConfig() - marshaled, err := json.MarshalIndent(cfg, prefix, " ") - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) - return pkiRetUsage - } - c.UI.Output(prefix + string(marshaled)) + config[checker.Name()] = checker.DefaultConfig() } + marshaled, err := json.MarshalIndent(config, "", " ") + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) + return pkiRetUsage + } + + c.UI.Output(string(marshaled)) return pkiRetOK } // Handle config merging. external_config := map[string]interface{}{} if c.flagConfig != "" { - contents, err := os.ReadFile(c.flagConfig) + contents, err := os.Open(c.flagConfig) if err != nil { c.UI.Error(fmt.Sprintf("Failed to read configuration file %v: %v", c.flagConfig, err)) return pkiRetUsage } - if err := json.Unmarshal(contents, &external_config); err != nil { + decoder := json.NewDecoder(contents) + decoder.UseNumber() // Use json.Number instead of float64 values as we are decoding to an interface{}. + + if err := decoder.Decode(&external_config); err != nil { c.UI.Error(fmt.Sprintf("Failed to parse configuration file %v: %v", c.flagConfig, err)) return pkiRetUsage } diff --git a/command/pki_health_check_test.go b/command/pki_health_check_test.go index bdd491a0497d..5f86b3b97f7e 100644 --- a/command/pki_health_check_test.go +++ b/command/pki_health_check_test.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "bytes" "encoding/json" "fmt" + "net/url" "strings" "testing" "time" @@ -11,7 +15,7 @@ import ( "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/healthcheck" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/stretchr/testify/require" ) @@ -27,7 +31,7 @@ func TestPKIHC_AllGood(t *testing.T) { AuditNonHMACRequestKeys: healthcheck.VisibleReqParams, AuditNonHMACResponseKeys: healthcheck.VisibleRespParams, PassthroughRequestHeaders: []string{"If-Modified-Since"}, - AllowedResponseHeaders: []string{"Last-Modified"}, + AllowedResponseHeaders: []string{"Last-Modified", "Replay-Nonce", "Link", "Location"}, MaxLeaseTTL: "36500d", }, }); err != nil { @@ -66,6 +70,21 @@ func TestPKIHC_AllGood(t *testing.T) { t.Fatalf("failed to run tidy: %v", err) } + path, err := url.Parse(client.Address()) + require.NoError(t, err, "failed parsing client address") + + if _, err := client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": path.JoinPath("/v1/", "pki/").String(), + }); err != nil { + t.Fatalf("failed to update local cluster: %v", err) + } + + if _, err := client.Logical().Write("pki/config/acme", map[string]interface{}{ + "enabled": "true", + }); err != nil { + t.Fatalf("failed to update acme config: %v", err) + } + _, _, results := execPKIHC(t, client, true) validateExpectedPKIHC(t, expectedAllGood, results) @@ -271,6 +290,8 @@ func testPKIHealthCheckCommand(tb testing.TB) (*cli.MockUi, *PKIHealthCheckComma } func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[string][]map[string]interface{}) { + t.Helper() + stdout := bytes.NewBuffer(nil) stderr := bytes.NewBuffer(nil) runOpts := &RunOptions{ @@ -295,6 +316,8 @@ func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[stri } func validateExpectedPKIHC(t *testing.T, expected, results map[string][]map[string]interface{}) { + t.Helper() + for test, subtest := range expected { actual, ok := results[test] require.True(t, ok, fmt.Sprintf("expected top-level test %v to be present", test)) @@ -338,6 +361,11 @@ var expectedAllGood = map[string][]map[string]interface{}{ "status": "ok", }, }, + "allow_acme_headers": { + { + "status": "ok", + }, + }, "allow_if_modified_since": { { "status": "ok", @@ -348,6 +376,11 @@ var expectedAllGood = map[string][]map[string]interface{}{ "status": "ok", }, }, + "enable_acme_issuance": { + { + "status": "ok", + }, + }, "enable_auto_tidy": { { "status": "ok", @@ -399,6 +432,11 @@ var expectedAllBad = map[string][]map[string]interface{}{ "status": "critical", }, }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, "allow_if_modified_since": { { "status": "informational", @@ -496,6 +534,11 @@ var expectedAllBad = map[string][]map[string]interface{}{ "status": "informational", }, }, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, "enable_auto_tidy": { { "status": "informational", @@ -547,8 +590,18 @@ var expectedEmptyWithIssuer = map[string][]map[string]interface{}{ "status": "ok", }, }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, "allow_if_modified_since": nil, "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, "enable_auto_tidy": { { "status": "informational", @@ -591,8 +644,18 @@ var expectedNoPerm = map[string][]map[string]interface{}{ "status": "critical", }, }, + "allow_acme_headers": { + { + "status": "insufficient_permissions", + }, + }, 
"allow_if_modified_since": nil, "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "insufficient_permissions", + }, + }, "enable_auto_tidy": { { "status": "insufficient_permissions", @@ -615,7 +678,7 @@ var expectedNoPerm = map[string][]map[string]interface{}{ }, "root_issued_leaves": { { - "status": "ok", + "status": "insufficient_permissions", }, }, "tidy_last_run": { @@ -625,7 +688,7 @@ var expectedNoPerm = map[string][]map[string]interface{}{ }, "too_many_certs": { { - "status": "ok", + "status": "insufficient_permissions", }, }, } diff --git a/command/pki_issue_intermediate.go b/command/pki_issue_intermediate.go new file mode 100644 index 000000000000..7545b22cc7dc --- /dev/null +++ b/command/pki_issue_intermediate.go @@ -0,0 +1,367 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "io" + "os" + paths "path" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +type PKIIssueCACommand struct { + *BaseCommand + + flagConfig string + flagReturnIndicator string + flagDefaultDisabled bool + flagList bool + + flagKeyStorageSource string + flagNewIssuerName string +} + +func (c *PKIIssueCACommand) Synopsis() string { + return "Given a parent certificate, and a list of generation parameters, creates an issuer on a specified mount" +} + +func (c *PKIIssueCACommand) Help() string { + helpText := ` +Usage: vault pki issue PARENT CHILD_MOUNT options + +PARENT is the fully qualified path of the Certificate Authority in vault which will issue the new intermediate certificate. + +CHILD_MOUNT is the path of the mount in vault where the new issuer is saved. + +options are the superset of the options passed to generate/intermediate and sign-intermediate commands. At least one option must be set. + +This command creates a intermediate certificate authority certificate signed by the parent in the CHILD_MOUNT. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *PKIIssueCACommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagKeyStorageSource, + Default: "internal", + EnvVar: "", + Usage: `Options are "existing" - to use an existing key inside vault, "internal" - to generate a new key inside vault, or "kms" - to link to an external key. 
Exported keys are not available through this API.`, + Completion: complete.PredictSet("internal", "existing", "kms"), + }) + + f.StringVar(&StringVar{ + Name: "issuer_name", + Target: &c.flagNewIssuerName, + Default: "", + EnvVar: "", + Usage: `If present, the newly created issuer will be given this name.`, + }) + + return set +} + +func (c *PKIIssueCACommand) Run(args []string) int { + // Parse Args + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + args = f.Args() + + if len(args) < 3 { + c.UI.Error("Not enough arguments: expected parent issuer, child mount location, and at least one key=value argument") + return 1 + } + + stdin := (io.Reader)(os.Stdin) + data, err := parseArgsData(stdin, args[2:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + parentMountIssuer := sanitizePath(args[0]) // /pki/issuer/default + + intermediateMount := sanitizePath(args[1]) + + return pkiIssue(c.BaseCommand, parentMountIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) +} + +func pkiIssue(c *BaseCommand, parentMountIssuer string, intermediateMount string, flagNewIssuerName string, flagKeyStorageSource string, data map[string]interface{}) int { + // Check We Have a Client + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err)) + return 1 + } + + // Sanity Check the Parent Issuer + if !strings.Contains(parentMountIssuer, "/issuer/") { + c.UI.Error(fmt.Sprintf("Parent Issuer %v is Not a PKI Issuer Path of the format /mount/issuer/issuer-ref", parentMountIssuer)) + return 1 + } + _, err = readIssuer(client, parentMountIssuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Unable to access parent issuer %v: %v", parentMountIssuer, err)) + return 1 + } + + // Set-up Failure State (Immediately Before First Write Call) + failureState := inCaseOfFailure{ + intermediateMount: intermediateMount, + parentMount: strings.Split(parentMountIssuer, "/issuer/")[0], + parentIssuer: parentMountIssuer, + newName: flagNewIssuerName, + } + + // Generate Certificate Signing Request + csrResp, err := client.Logical().Write(intermediateMount+"/intermediate/generate/"+flagKeyStorageSource, data) + if err != nil { + if strings.Contains(err.Error(), "no handler for route") { // Mount Given Does Not Exist + c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Does Not Exist: %v", intermediateMount, err)) + } else if strings.Contains(err.Error(), "unsupported path") { // Expected if Not a PKI Mount + c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Is Not a PKI Mount: %v", intermediateMount, err)) + } else { + c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v: %v", intermediateMount, err)) + } + return 1 + } + // Parse CSR Response, Also Verifies that this is a PKI Mount + // (e.g. calling the above call on cubbyhole/ won't return an error response) + csrPemRaw, present := csrResp.Data["csr"] + if !present { + c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v, got response: %v", intermediateMount, csrResp)) + return 1 + } + keyIdRaw, present := csrResp.Data["key_id"] + if !present && flagKeyStorageSource == "internal" { + c.UI.Error(fmt.Sprintf("Failed to Generate Key on %v, got response: %v", intermediateMount, csrResp)) + return 1 + } + + // If that all Parses, then we've successfully generated a CSR!
Save It (and the Key-ID) + failureState.csrGenerated = true + if flagKeyStorageSource == "internal" { + failureState.createdKeyId = keyIdRaw.(string) + } + csr := csrPemRaw.(string) + failureState.csr = csr + data["csr"] = csr + + // Next, Sign the CSR + rootResp, err := client.Logical().Write(parentMountIssuer+"/sign-intermediate", data) + if err != nil { + c.UI.Error(failureState.generateFailureMessage()) + c.UI.Error(fmt.Sprintf("Error Signing Intermediate: %v", err)) + return 1 + } + // Success! Save Our Progress (and Parse the Response) + failureState.csrSigned = true + serialNumber := rootResp.Data["serial_number"].(string) + failureState.certSerialNumber = serialNumber + + caChain := rootResp.Data["ca_chain"].([]interface{}) + caChainPemBundle := "" + for _, cert := range caChain { + caChainPemBundle += cert.(string) + "\n" + } + failureState.caChain = caChainPemBundle + + // Next Import Certificate + certificate := rootResp.Data["certificate"].(string) + issuerId, err := importIssuerWithName(client, intermediateMount, certificate, flagNewIssuerName) + failureState.certIssuerId = issuerId + if err != nil { + if strings.Contains(err.Error(), "error naming issuer") { + failureState.certImported = true + c.UI.Error(failureState.generateFailureMessage()) + c.UI.Error(fmt.Sprintf("Error Naming Newly Imported Issuer: %v", err)) + return 1 + } else { + c.UI.Error(failureState.generateFailureMessage()) + c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err)) + return 1 + } + } + failureState.certImported = true + + // Then Import Issuing Certificate + issuingCa := rootResp.Data["issuing_ca"].(string) + _, parentIssuerName := paths.Split(parentMountIssuer) + _, err = importIssuerWithName(client, intermediateMount, issuingCa, parentIssuerName) + if err != nil { + if strings.Contains(err.Error(), "error naming issuer") { + c.UI.Warn(fmt.Sprintf("Unable to Set Name on Parent Cert from %v Imported Into %v with serial %v, err: %v", parentIssuerName, intermediateMount, serialNumber, err)) + } else { + c.UI.Error(failureState.generateFailureMessage()) + c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err)) + return 1 + } + } + + // Finally Import CA_Chain (just in case there's more information) + if len(caChain) > 2 { // We've already imported parent cert and newly issued cert above + importData := map[string]interface{}{ + "pem_bundle": caChainPemBundle, + } + _, err := client.Logical().Write(intermediateMount+"/issuers/import/cert", importData) + if err != nil { + c.UI.Error(failureState.generateFailureMessage()) + c.UI.Error(fmt.Sprintf("Error Importing CA Chain into %v: %v", intermediateMount, err)) + return 1 + } + } + failureState.caChainImported = true + + // Finally we read our newly issued certificate in order to tell our caller about it + readAndOutputNewCertificate(client, intermediateMount, issuerId, c) + + return 0 +} + +func readAndOutputNewCertificate(client *api.Client, intermediateMount string, issuerId string, c *BaseCommand) { + resp, err := client.Logical().Read(sanitizePath(intermediateMount + "/issuer/" + issuerId)) + if err != nil || resp == nil { + c.UI.Error(fmt.Sprintf("Error Reading Fully Imported Certificate from %v : %v", + intermediateMount+"/issuer/"+issuerId, err)) + return + } + + OutputSecret(c.UI, resp) +} + +func importIssuerWithName(client *api.Client, mount string, bundle string, name string) (issuerUUID string, err error) { + importData :=
map[string]interface{}{ + "pem_bundle": bundle, + } + writeResp, err := client.Logical().Write(mount+"/issuers/import/cert", importData) + if err != nil { + return "", err + } + mapping := writeResp.Data["mapping"].(map[string]interface{}) + if len(mapping) > 1 { + return "", fmt.Errorf("multiple issuers returned while expecting one, got %v", writeResp) + } + for issuerId := range mapping { + issuerUUID = issuerId + } + if name != "" && name != "default" { + nameReq := map[string]interface{}{ + "issuer_name": name, + } + ctx := context.Background() + _, err = client.Logical().JSONMergePatch(ctx, mount+"/issuer/"+issuerUUID, nameReq) + if err != nil { + return issuerUUID, fmt.Errorf("error naming issuer %v to %v: %v", issuerUUID, name, err) + } + } + return issuerUUID, nil +} + +type inCaseOfFailure struct { + csrGenerated bool + csrSigned bool + certImported bool + certNamed bool + caChainImported bool + + intermediateMount string + createdKeyId string + csr string + caChain string + parentMount string + parentIssuer string + certSerialNumber string + certIssuerId string + newName string +} + +func (state inCaseOfFailure) generateFailureMessage() string { + message := "A failure has occurred" + + if state.csrGenerated { + message += fmt.Sprintf(" after \n a Certificate Signing Request was successfully generated on mount %v", state.intermediateMount) + } + if state.csrSigned { + message += fmt.Sprintf(" and after \n that Certificate Signing Request was successfully signed by mount %v", state.parentMount) + } + if state.certImported { + message += fmt.Sprintf(" and after \n the signed certificate was reimported into mount %v , with issuerID %v", state.intermediateMount, state.certIssuerId) + } + + if state.csrGenerated { + message += "\n\nTO CONTINUE: \n" + state.toContinue() + } + if state.csrGenerated && !state.certImported { + message += "\n\nTO ABORT: \n" + state.toAbort() + } + + message += "\n" + + return message +} + +func (state inCaseOfFailure) toContinue() string { + message := "" + if !state.csrSigned { + message += fmt.Sprintf("You can continue to work with this Certificate Signing Request (CSR) PEM by saving"+ + " it as `pki_int.csr`: %v \n Then call `vault write %v/sign-intermediate csr=@pki_int.csr ...` adding the "+ + "same key-value arguments as to `pki issue` (except key_type and issuer_name) to generate the certificate "+ + "and ca_chain", state.csr, state.parentIssuer) + } + if !state.certImported { + if state.caChain != "" { + message += fmt.Sprintf("The certificate chain, signed by %v, for this new certificate is: %v", state.parentIssuer, state.caChain) + } + message += fmt.Sprintf("You can continue to work with this Certificate (and chain) by saving it as "+ + "chain.pem and importing it as `vault write %v/issuers/import/cert pem_bundle=@chain.pem`", + state.intermediateMount) + } + if !state.certNamed { + issuerId := state.certIssuerId + if issuerId == "" { + message += fmt.Sprintf("The issuer_id is returned as the key in a key_value map from importing the " + + "certificate chain.") + issuerId = "" + } + message += fmt.Sprintf("You can name the newly imported issuer by calling `vault patch %v/issuer/%v "+ + "issuer_name=%v`", state.intermediateMount, issuerId, state.newName) + } + return message +} + +func (state inCaseOfFailure) toAbort() string { + if !state.csrGenerated || (!state.csrSigned && state.createdKeyId == "") { + return "No state was created by running this command. Try rerunning this command after resolving the error."
+ } + message := "" + if state.csrGenerated && state.createdKeyId != "" { + message += fmt.Sprintf(" A key with key ID %v was created on mount %v as part of this command."+ + " If you do not wish to use this key and corresponding CSR/cert, you can delete that information by calling"+ + " `vault delete %v/key/%v`", state.createdKeyId, state.intermediateMount, state.intermediateMount, state.createdKeyId) + } + if state.csrSigned { + message += fmt.Sprintf("A certificate with serial number %v was signed by mount %v as part of this command."+ + " If you do not want to use this certificate, consider revoking it by calling `vault write %v/revoke/%v`", + state.certSerialNumber, state.parentMount, state.parentMount, state.certSerialNumber) + } + //if state.certImported { + // message += fmt.Sprintf("An issuer with UUID %v was created on mount %v as part of this command. " + + // "If you do not wish to use this issuer, consider deleting it by calling `vault delete %v/issuer/%v`", + // state.certIssuerId, state.intermediateMount, state.intermediateMount, state.certIssuerId) + //} + + return message +} diff --git a/command/pki_issue_intermediate_test.go b/command/pki_issue_intermediate_test.go new file mode 100644 index 000000000000..cb66d45e7c5c --- /dev/null +++ b/command/pki_issue_intermediate_test.go @@ -0,0 +1,208 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/hashicorp/vault/api" +) + +func TestPKIIssueIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUpWithIssueIntermediate(t, client) + + runPkiVerifySignTests(t, client) + + runPkiListIntermediateTests(t, client) +} + +func createComplicatedIssuerSetUpWithIssueIntermediate(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not
Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + // Next we create the Intermediates Using the Issue Intermediate Command + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Intermediate X1 + intX1CallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX1", + "pki-root/issuer/rootX1", + "pki-int/", + "key_type=rsa", + "common_name=Int X1", + "ttl=3650d", + } + codeOut := RunCustom(intX1CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X2 + intX2CallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX2", + "pki-newroot/issuer/rootX3", + "pki-int/", + "key_type=ec", + "common_name=Int X2", + "ttl=3650d", + } + codeOut = RunCustom(intX2CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X3 + // Clear Buffers so that we can unmarshall json of just this call + stdout = bytes.NewBuffer(nil) + stderr = bytes.NewBuffer(nil) + runOpts = &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + intX3OriginalCallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX3", + "pki-int/issuer/intX1", + "pki-int/", + "key_type=rsa", + "common_name=Int X3", + "ttl=3650d", + } + codeOut = RunCustom(intX3OriginalCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + var intX3Resp map[string]interface{} + json.Unmarshal(stdout.Bytes(), &intX3Resp) + intX3Data := intX3Resp["data"].(map[string]interface{}) + keyId := intX3Data["key_id"].(string) + + intX3AdaptedCallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX3also", "-type=existing", + "pki-int/issuer/intX2", + "pki-int/", + "key_ref=" + keyId, + "common_name=Int X3", + "ttl=3650d", + } + codeOut = RunCustom(intX3AdaptedCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } +} 
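As an aside on the import flow exercised above: the import-then-name sequence that `importIssuerWithName` performs can also be driven directly against the Vault API. A minimal sketch, assuming a configured `api.Client`; the mount, PEM bundle, and name values are caller-supplied and hypothetical, and error handling is kept deliberately thin:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/api"
)

// importAndName mirrors importIssuerWithName above: it imports a PEM CA
// bundle into a PKI mount, then names the resulting issuer with a JSON
// merge patch on the issuer endpoint.
func importAndName(client *api.Client, mount, pemBundle, name string) (string, error) {
	resp, err := client.Logical().Write(mount+"/issuers/import/cert",
		map[string]interface{}{"pem_bundle": pemBundle})
	if err != nil {
		return "", err
	}
	if resp == nil {
		return "", fmt.Errorf("no response from import on %s", mount)
	}
	// "mapping" relates issuer UUIDs to key UUIDs; a one-certificate
	// bundle yields exactly one entry.
	mapping := resp.Data["mapping"].(map[string]interface{})
	if len(mapping) != 1 {
		return "", fmt.Errorf("expected one issuer, got %d", len(mapping))
	}
	var issuerUUID string
	for id := range mapping {
		issuerUUID = id
	}
	_, err = client.Logical().JSONMergePatch(context.Background(),
		mount+"/issuer/"+issuerUUID,
		map[string]interface{}{"issuer_name": name})
	return issuerUUID, err
}
```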
diff --git a/command/pki_list_intermediate.go b/command/pki_list_intermediate.go new file mode 100644 index 000000000000..d756d610f1aa --- /dev/null +++ b/command/pki_list_intermediate.go @@ -0,0 +1,304 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/api" + + "github.com/ghodss/yaml" + "github.com/ryanuber/columnize" +) + +type PKIListIntermediateCommand struct { + *BaseCommand + + flagConfig string + flagReturnIndicator string + flagDefaultDisabled bool + flagList bool + + flagUseNames bool + + flagSignatureMatch bool + flagIndirectSignMatch bool + flagKeyIdMatch bool + flagSubjectMatch bool + flagPathMatch bool +} + +func (c *PKIListIntermediateCommand) Synopsis() string { + return "Determine which of a list of certificates were issued by a given parent certificate" +} + +func (c *PKIListIntermediateCommand) Help() string { + helpText := ` +Usage: vault pki list-intermediates PARENT [CHILD] [CHILD] [CHILD] ... + + Lists the set of intermediate CAs issued by this parent issuer. + + PARENT is the candidate issuer certificate against which each CHILD is + verified. + + CHILD is an optional list of paths to certificates to be compared to the + PARENT, or pki mounts to look for certificates on. If CHILD is omitted + entirely, the list will be constructed from all accessible pki mounts. + + This returns a list of issuer certificates and whether each is a match. + By default, the type of match required is whether the PARENT has the + expected subject, key_id, and could have (directly) signed this issuer. + The match criteria can be updated by changing the corresponding flags. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *PKIListIntermediateCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "subject_match", + Target: &c.flagSubjectMatch, + Default: true, + EnvVar: "", + Usage: `Whether the subject name of the potential parent cert matches the issuer name of the child cert.`, + }) + + f.BoolVar(&BoolVar{ + Name: "key_id_match", + Target: &c.flagKeyIdMatch, + Default: true, + EnvVar: "", + Usage: `Whether the subject key id (SKID) of the potential parent cert matches the authority key id (AKID) of the child cert.`, + }) + + f.BoolVar(&BoolVar{ + Name: "path_match", + Target: &c.flagPathMatch, + Default: false, + EnvVar: "", + Usage: `Whether the potential parent appears in the certificate chain field (ca_chain) of the issued cert.`, + }) + + f.BoolVar(&BoolVar{ + Name: "direct_sign", + Target: &c.flagSignatureMatch, + Default: true, + EnvVar: "", + Usage: `Whether the key of the potential parent directly signed this issued certificate.`, + }) + + f.BoolVar(&BoolVar{ + Name: "indirect_sign", + Target: &c.flagIndirectSignMatch, + Default: true, + EnvVar: "", + Usage: `Whether trusting the parent certificate is sufficient to trust the child certificate.`, + }) + + f.BoolVar(&BoolVar{ + Name: "use_names", + Target: &c.flagUseNames, + Default: false, + EnvVar: "", + Usage: `Whether the list of issuers returned is referred to by name (when it exists) rather than by uuid.`, + }) + + return set +} + +func (c *PKIListIntermediateCommand) Run(args []string) int { + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + + if len(args) < 1 { + c.UI.Error("Not
enough arguments (expected potential parent, got nothing)") + return 1 + } else if len(args) > 2 { + for _, arg := range args { + if strings.HasPrefix(arg, "-") { + c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0])) + break + } + } + } + + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err)) + return 1 + } + + issuer := sanitizePath(args[0]) + var issued []string + if len(args) > 1 { + for _, arg := range args[1:] { + cleanPath := sanitizePath(arg) + // Arg Might be a Fully Qualified Path + if strings.Contains(cleanPath, "/issuer/") || + strings.Contains(cleanPath, "/certs/") || + strings.Contains(cleanPath, "/revoked/") { + issued = append(issued, cleanPath) + } else { // Or Arg Might be a Mount + mountCaList, err := c.getIssuerListFromMount(client, arg) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + issued = append(issued, mountCaList...) + } + } + } else { + mountListRaw, err := client.Logical().Read("/sys/mounts/") + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to Read List of Mounts With Potential Issuers: %v", err)) + return 1 + } + for path, rawValueMap := range mountListRaw.Data { + valueMap := rawValueMap.(map[string]interface{}) + if valueMap["type"].(string) == "pki" { + mountCaList, err := c.getIssuerListFromMount(client, sanitizePath(path)) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + issued = append(issued, mountCaList...) + } + } + } + + childrenMatches := make(map[string]bool) + + constraintMap := map[string]bool{ + // This comparison isn't strictly correct, despite a standard ordering these are sets + "subject_match": c.flagSubjectMatch, + "path_match": c.flagPathMatch, + "trust_match": c.flagIndirectSignMatch, + "key_id_match": c.flagKeyIdMatch, + "signature_match": c.flagSignatureMatch, + } + + issuerResp, err := readIssuer(client, issuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to read parent issuer on path %s: %s", issuer, err.Error())) + return 1 + } + + for _, child := range issued { + path := sanitizePath(child) + if path != "" { + verifyResults, err := verifySignBetween(client, issuerResp, path) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to run verification on path %v: %v", path, err)) + return 1 + } + childrenMatches[path] = checkIfResultsMatchFilters(verifyResults, constraintMap) + } + } + + err = c.outputResults(childrenMatches) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + return 0 +} + +func (c *PKIListIntermediateCommand) getIssuerListFromMount(client *api.Client, mountString string) ([]string, error) { + var issuerList []string + issuerListEndpoint := sanitizePath(mountString) + "/issuers" + rawIssuersResp, err := client.Logical().List(issuerListEndpoint) + if err != nil { + return issuerList, fmt.Errorf("failed to read list of issuers within mount %v: %v", mountString, err) + } + if rawIssuersResp == nil { // No Issuers (Empty Mount) + return issuerList, nil + } + issuersMap := rawIssuersResp.Data["keys"] + certList := issuersMap.([]interface{}) + for _, certId := range certList { + identifier := certId.(string) + if c.flagUseNames { + issuerReadResp, err := client.Logical().Read(sanitizePath(mountString) + "/issuer/" + identifier) + if err != nil { + c.UI.Warn(fmt.Sprintf("Unable to Fetch Issuer to Recover Name at: %v", sanitizePath(mountString)+"/issuer/"+identifier)) + } + if issuerReadResp != nil { + issuerName := issuerReadResp.Data["issuer_name"].(string) + if issuerName != "" { 
+ identifier = issuerName + } + } + } + issuerList = append(issuerList, sanitizePath(mountString)+"/issuer/"+identifier) + } + return issuerList, nil +} + +func checkIfResultsMatchFilters(verifyResults, constraintMap map[string]bool) bool { + for key, required := range constraintMap { + if required && !verifyResults[key] { + return false + } + } + return true +} + +func (c *PKIListIntermediateCommand) outputResults(results map[string]bool) error { + switch Format(c.UI) { + case "", "table": + return c.outputResultsTable(results) + case "json": + return c.outputResultsJSON(results) + case "yaml": + return c.outputResultsYAML(results) + default: + return fmt.Errorf("unknown output format: %v", Format(c.UI)) + } +} + +func (c *PKIListIntermediateCommand) outputResultsTable(results map[string]bool) error { + data := []string{"intermediate" + hopeDelim + "match?"} + for field, finding := range results { + row := field + hopeDelim + strconv.FormatBool(finding) + data = append(data, row) + } + c.UI.Output(tableOutput(data, &columnize.Config{ + Delim: hopeDelim, + })) + c.UI.Output("\n") + + return nil +} + +func (c *PKIListIntermediateCommand) outputResultsJSON(results map[string]bool) error { + bytes, err := json.MarshalIndent(results, "", " ") + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIListIntermediateCommand) outputResultsYAML(results map[string]bool) error { + bytes, err := yaml.Marshal(results) + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} diff --git a/command/pki_list_intermediate_test.go b/command/pki_list_intermediate_test.go new file mode 100644 index 000000000000..5abfabd55994 --- /dev/null +++ b/command/pki_list_intermediate_test.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +func TestPKIListIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3(also) + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUp(t, client) + + runPkiListIntermediateTests(t, client) +} + +func runPkiListIntermediateTests(t *testing.T, client *api.Client) { + cases := []struct { + name string + args []string + expectedMatches map[string]bool + jsonOut bool + shouldError bool + expectErrorCont string + expectErrorNotCont string + nonJsonOutputCont string + }{ + { + "rootX1-match-everything-no-constraints", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=false", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": true, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": true, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": true, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-default-children", + []string{"pki", "list-intermediates", "-format=json", "-use_names=true", "pki-root/issuer/rootX1"}, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": false, + "pki-newroot/issuer/rootX3": false, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": false, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": true, + "pki-int/issuer/intX3": false, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-in-path", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": false, + "pki-newroot/issuer/rootX3": false, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + 
"pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-only-int-mount", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", + "pki-root/issuer/rootX1", "pki-int/", + }, + map[string]bool{ + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-root-mounts-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", "pki-root/", "pki-newroot", "pki-empty", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-these-certs-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", "pki-root/issuer/rootX2", "pki-newroot/issuer/rootX3", "pki-root/issuer/rootX4", + }, + map[string]bool{ + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + }, + true, + false, + "", + "", + "", + }, + } + for _, testCase := range cases { + var errString string + var results map[string]interface{} + var stdOut string + + if testCase.jsonOut { + results, errString = execPKIVerifyJson(t, client, false, testCase.shouldError, testCase.args) + } else { + stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) + } + + // Verify Error Behavior + if testCase.shouldError { + if errString == "" { + t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) + } + if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { + t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) + } + if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { + t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) + } + } else { + if errString != "" { + t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) + } + } + + // Verify Output + if testCase.jsonOut { + isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) + if !isMatch { + t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, errString) + } + } else { + if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { + t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) + } + } + + } +} diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go new file mode 100644 index 000000000000..cbaa657e7b96 --- /dev/null +++ b/command/pki_reissue_intermediate.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "crypto/x509" + "encoding/hex" + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/posener/complete" +) + +type PKIReIssueCACommand struct { + *BaseCommand + + flagConfig string + flagReturnIndicator string + flagDefaultDisabled bool + flagList bool + + flagKeyStorageSource string + flagNewIssuerName string +} + +func (c *PKIReIssueCACommand) Synopsis() string { + return "Uses a parent certificate and a template certificate to create a new issuer on a child mount" +} + +func (c *PKIReIssueCACommand) Help() string { + helpText := ` +Usage: vault pki reissue PARENT TEMPLATE CHILD_MOUNT options + +PARENT is the fully qualified path of the issuer in vault which will sign the new intermediate certificate. + +TEMPLATE is the fully qualified path of an existing issuer whose certificate fields are used as defaults for the new issuer. + +CHILD_MOUNT is the path of the mount in vault where the new issuer is saved. + +options are the superset of the options passed to generate/intermediate and sign-intermediate commands; options supplied here override the values taken from TEMPLATE. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *PKIReIssueCACommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagKeyStorageSource, + Default: "internal", + EnvVar: "", + Usage: `Options are "existing" - to use an existing key inside vault, "internal" - to generate a new key inside vault, or "kms" - to link to an external key. Exported keys are not available through this API.`, + Completion: complete.PredictSet("internal", "existing", "kms"), + }) + + f.StringVar(&StringVar{ + Name: "issuer_name", + Target: &c.flagNewIssuerName, + Default: "", + EnvVar: "", + Usage: `If present, the newly created issuer will be given this name.`, + }) + + return set +} + +func (c *PKIReIssueCACommand) Run(args []string) int { + // Parse Args + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + args = f.Args() + + if len(args) < 3 { + c.UI.Error("Not enough arguments: expected parent issuer, template issuer, and child mount location") + return 1 + } + + stdin := (io.Reader)(os.Stdin) + userData, err := parseArgsData(stdin, args[3:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + // Check We Have a Client + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err)) + return 1 + } + + parentIssuer := sanitizePath(args[0]) // /pki/issuer/default + templateIssuer := sanitizePath(args[1]) + intermediateMount := sanitizePath(args[2]) + + templateIssuerBundle, err := readIssuer(client, templateIssuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Error fetching template certificate %v : %v", templateIssuer, err)) + return 1 + } + certificate := templateIssuerBundle.certificate + + useExistingKey := c.flagKeyStorageSource == "existing" + keyRef := "" + if useExistingKey { + keyRef = templateIssuerBundle.keyId + + if keyRef == "" { + c.UI.Error(fmt.Sprintf("Template issuer %s did not have a key id field set in response which is required", templateIssuer)) + return 1 + } + } + + templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing template certificate %v: %v", templateIssuer, err)) + return 1 + } + data := updateTemplateWithData(templateData, userData) + + return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) +} + +func updateTemplateWithData(template map[string]interface{}, changes map[string]interface{}) map[string]interface{} { + data := map[string]interface{}{} + + for key, value := range template { + data[key] = value + } + + // ttl and not_after set the same thing.
Delete template ttl if using not_after: + if _, ok := changes["not_after"]; ok { + delete(data, "ttl") + } + + // If we are updating the key_type, do not set key_bits + if _, ok := changes["key_type"]; ok && changes["key_type"] != template["key_type"] { + delete(data, "key_bits") + } + + for key, value := range changes { + data[key] = value + } + + return data +} + +func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, keyRef string) (templateData map[string]interface{}, err error) { + // Generate Certificate Signing Parameters + templateData = map[string]interface{}{ + "common_name": certificate.Subject.CommonName, + "alt_names": certutil.MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": certutil.MakeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": certutil.MakeUriCommaSeparatedString(certificate.URIs), + // other_sans (string: "") - Specifies custom OID/UTF8-string SANs. These must match values specified on the role in allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same as OpenSSL: <oid>;<type>:<value> where the only current valid type is UTF8. This can be a comma-delimited list or a JSON string slice. + // Punting on Other_SANs, shouldn't really be on CAs + "signature_bits": certutil.FindSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": certutil.DetermineExcludeCnFromCertSans(certificate), + "ou": certificate.Subject.OrganizationalUnit, + "organization": certificate.Subject.Organization, + "country": certificate.Subject.Country, + "locality": certificate.Subject.Locality, + "province": certificate.Subject.Province, + "street_address": certificate.Subject.StreetAddress, + "postal_code": certificate.Subject.PostalCode, + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), + "use_pss": certutil.IsPSS(certificate.SignatureAlgorithm), + } + + if useExistingKey { + templateData["skid"] = hex.EncodeToString(certificate.SubjectKeyId) // TODO: Double Check this with someone + if keyRef == "" { + return nil, fmt.Errorf("unable to create certificate template for existing key without a key_id") + } + templateData["key_ref"] = keyRef + } else { + templateData["key_type"] = certutil.GetKeyType(certificate.PublicKeyAlgorithm.String()) + templateData["key_bits"] = certutil.FindBitLength(certificate.PublicKey) + } + + return templateData, nil +} diff --git a/command/pki_reissue_intermediate_test.go b/command/pki_reissue_intermediate_test.go new file mode 100644 index 000000000000..45657fe11990 --- /dev/null +++ b/command/pki_reissue_intermediate_test.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bytes" + "testing" + + "github.com/hashicorp/vault/api" +) + +// TestPKIReIssueIntermediate tests that the pki reissue command line tool accurately copies information from the +// template certificate to the newly issued certificate, by issuing and reissuing several certificates and seeing how +// they relate to each other.
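+// As an illustration of updateTemplateWithData's merge rules above (values hypothetical): reissuing with key_type=ec against an RSA template also drops the template's key_bits, and supplying not_after drops the template's ttl, since each pair configures the same property.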
+func TestPKIReIssueIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUpWithReIssueIntermediate(t, client) + + runPkiVerifySignTests(t, client) + + runPkiListIntermediateTests(t, client) +} + +func createComplicatedIssuerSetUpWithReIssueIntermediate(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Intermediate X1 
+	intX1CallArgs := []string{
+		"pki", "issue", "-format=json", "-issuer_name=intX1",
+		"pki-root/issuer/rootX1",
+		"pki-int/",
+		"key_type=rsa",
+		"common_name=Int X1",
+		"ou=thing",
+		"ttl=3650d",
+	}
+	codeOut := RunCustom(intX1CallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	// Intermediate X2 - using ReIssue
+	intX2CallArgs := []string{
+		"pki", "reissue", "-format=json", "-issuer_name=intX2",
+		"pki-newroot/issuer/rootX3",
+		"pki-int/issuer/intX1",
+		"pki-int/",
+		"key_type=ec",
+		"common_name=Int X2",
+	}
+	codeOut = RunCustom(intX2CallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	// Intermediate X3
+	intX3OriginalCallArgs := []string{
+		"pki", "issue", "-format=json", "-issuer_name=intX3",
+		"pki-int/issuer/intX1",
+		"pki-int/",
+		"key_type=ec",
+		"use_pss=true", // This is meaningful because intX1 is an RSA key
+		"signature_bits=512",
+		"common_name=Int X3",
+		"ttl=3650d",
+	}
+	codeOut = RunCustom(intX3OriginalCallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	intX3AdaptedCallArgs := []string{
+		"pki", "reissue", "-format=json", "-issuer_name=intX3also", "-type=existing",
+		"pki-int/issuer/intX2", // This is an EC key
+		"pki-int/issuer/intX3", // This template includes use_pss=true, which can't be accommodated
+		"pki-int/",
+	}
+	codeOut = RunCustom(intX3AdaptedCallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+}
diff --git a/command/pki_verify_sign.go b/command/pki_verify_sign.go
new file mode 100644
index 000000000000..31d693e5ae72
--- /dev/null
+++ b/command/pki_verify_sign.go
@@ -0,0 +1,309 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package command
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/vault/command/healthcheck"
+
+	"github.com/ghodss/yaml"
+	"github.com/hashicorp/vault/api"
+	"github.com/ryanuber/columnize"
+)
+
+type PKIVerifySignCommand struct {
+	*BaseCommand
+
+	flagConfig          string
+	flagReturnIndicator string
+	flagDefaultDisabled bool
+	flagList            bool
+}
+
+func (c *PKIVerifySignCommand) Synopsis() string {
+	return "Check whether one certificate validates another specified certificate"
+}
+
+func (c *PKIVerifySignCommand) Help() string {
+	helpText := `
+Usage: vault pki verify-sign POSSIBLE-ISSUER POSSIBLE-ISSUED
+
+  Verifies whether the listed issuer has signed the listed issued certificate.
+
+  POSSIBLE-ISSUER and POSSIBLE-ISSUED are the fully name-spaced paths to
+  issuer certificates, for instance: 'ns1/mount1/issuer/issuerName/json'.
+
+  Returns five fields of information:
+
+  - signature_match: was the key of the issuer used to sign the issued
+    certificate.
+  - path_match: does the possible issuer appear in the issued certificate's
+    valid certificate chain.
+  - key_id_match: does the key_id of the issuer match the key_id of the
+    subject.
+  - subject_match: does the subject name of the issuer match the issuer
+    subject of the issued certificate.
+  - trust_match: if someone trusted the parent issuer, is the chain
+    provided sufficient to trust the child issued certificate.
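+
+  For example, to check whether an intermediate stored at
+  'pki-int/issuer/intX1' was signed by a root stored at
+  'pki-root/issuer/rootX1' (illustrative paths):
+
+      $ vault pki verify-sign pki-root/issuer/rootX1 pki-int/issuer/intX1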
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *PKIVerifySignCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+	return set
+}
+
+func (c *PKIVerifySignCommand) Run(args []string) int {
+	f := c.Flags()
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+
+	if len(args) < 2 {
+		if len(args) == 0 {
+			c.UI.Error("Not enough arguments (expected potential issuer and issued, got nothing)")
+		} else {
+			c.UI.Error("Not enough arguments (expected both potential issuer and issued, got only one)")
+		}
+		return 1
+	} else if len(args) > 2 {
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected only potential issuer and issued, got %d arguments)", len(args)))
+		for _, arg := range args {
+			if strings.HasPrefix(arg, "-") {
+				c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0]))
+				break
+			}
+		}
+		return 1
+	}
+
+	issuer := sanitizePath(args[0])
+	issued := sanitizePath(args[1])
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err))
+		return 1
+	}
+
+	issuerResp, err := readIssuer(client, issuer)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Failed to read issuer: %s: %s", issuer, err.Error()))
+		return 1
+	}
+
+	results, err := verifySignBetween(client, issuerResp, issued)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Failed to run verification: %v", err))
+		return pkiRetUsage
+	}
+
+	c.outputResults(results, issuer, issued)
+
+	return 0
+}
+
+func verifySignBetween(client *api.Client, issuerResp *issuerResponse, issuedPath string) (map[string]bool, error) {
+	// Note that this eats warnings
+
+	issuerCert := issuerResp.certificate
+	issuerKeyId := issuerCert.SubjectKeyId
+
+	// Fetch and Parse the Potential Issued Cert
+	issuedCertBundle, err := readIssuer(client, issuedPath)
+	if err != nil {
+		return nil, fmt.Errorf("error: unable to fetch issued certificate %v: %w", issuedPath, err)
+	}
+	parentKeyId := issuedCertBundle.certificate.AuthorityKeyId
+
+	// Check the Chain-Match
+	rootCertPool := x509.NewCertPool()
+	rootCertPool.AddCert(issuerCert)
+	checkTrustPathOptions := x509.VerifyOptions{
+		Roots: rootCertPool,
+	}
+	trust := false
+	trusts, err := issuedCertBundle.certificate.Verify(checkTrustPathOptions)
+	if err != nil && !strings.Contains(err.Error(), "certificate signed by unknown authority") {
+		return nil, err
+	} else if err == nil {
+		for _, chain := range trusts {
+			// Output of this Should Only Have One Trust with Chain of Length Two (Child followed by Parent)
+			for _, cert := range chain {
+				if issuedCertBundle.certificate.Equal(cert) {
+					trust = true
+					break
+				}
+			}
+		}
+	}
+
+	pathMatch := false
+	for _, cert := range issuedCertBundle.caChain {
+		if bytes.Equal(cert.Raw, issuerCert.Raw) {
+			pathMatch = true
+			break
+		}
+	}
+
+	signatureMatch := false
+	err = issuedCertBundle.certificate.CheckSignatureFrom(issuerCert)
+	if err == nil {
+		signatureMatch = true
+	}
+
+	result := map[string]bool{
+		// This comparison isn't strictly correct: despite a standard ordering, these are sets
+		"subject_match":   bytes.Equal(issuerCert.RawSubject, issuedCertBundle.certificate.RawIssuer),
+		"path_match":      pathMatch,
+		"trust_match":     trust, // TODO: Refactor into a reasonable function
+		"key_id_match":    bytes.Equal(parentKeyId, issuerKeyId),
+		"signature_match": signatureMatch,
+	}
+
+	return result, nil
+}
+
+type issuerResponse struct {
+	keyId       string
+	certificate *x509.Certificate
+	caChain
[]*x509.Certificate +} + +func readIssuer(client *api.Client, issuerPath string) (*issuerResponse, error) { + issuerResp, err := client.Logical().Read(issuerPath) + if err != nil { + return nil, err + } + issuerCertPem, err := requireStrRespField(issuerResp, "certificate") + if err != nil { + return nil, err + } + issuerCert, err := healthcheck.ParsePEMCert(issuerCertPem) + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuerPath, err) + } + + caChainPem, err := requireStrListRespField(issuerResp, "ca_chain") + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's CA chain: %w", issuerPath, err) + } + + var caChain []*x509.Certificate + for _, pem := range caChainPem { + trimmedPem := strings.TrimSpace(pem) + if trimmedPem == "" { + continue + } + cert, err := healthcheck.ParsePEMCert(trimmedPem) + if err != nil { + return nil, err + } + caChain = append(caChain, cert) + } + + keyId := optStrRespField(issuerResp, "key_id") + + return &issuerResponse{ + keyId: keyId, + certificate: issuerCert, + caChain: caChain, + }, nil +} + +func optStrRespField(resp *api.Secret, reqField string) string { + if resp == nil || resp.Data == nil { + return "" + } + if val, present := resp.Data[reqField]; !present { + return "" + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "" + } else { + return strVal + } +} + +func requireStrRespField(resp *api.Secret, reqField string) (string, error) { + if resp == nil || resp.Data == nil { + return "", fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return "", fmt.Errorf("response did not contain field: %s", reqField) + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "", fmt.Errorf("field %s value was blank or not a string: %v", reqField, val) + } else { + return strVal, nil + } +} + +func requireStrListRespField(resp *api.Secret, reqField string) ([]string, error) { + if resp == nil || resp.Data == nil { + return nil, fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return nil, fmt.Errorf("response did not contain field: %s", reqField) + } else { + return healthcheck.StringList(val) + } +} + +func (c *PKIVerifySignCommand) outputResults(results map[string]bool, potentialParent, potentialChild string) error { + switch Format(c.UI) { + case "", "table": + return c.outputResultsTable(results, potentialParent, potentialChild) + case "json": + return c.outputResultsJSON(results) + case "yaml": + return c.outputResultsYAML(results) + default: + return fmt.Errorf("unknown output format: %v", Format(c.UI)) + } +} + +func (c *PKIVerifySignCommand) outputResultsTable(results map[string]bool, potentialParent, potentialChild string) error { + c.UI.Output("issuer:" + potentialParent) + c.UI.Output("issued:" + potentialChild + "\n") + data := []string{"field" + hopeDelim + "value"} + for field, finding := range results { + row := field + hopeDelim + strconv.FormatBool(finding) + data = append(data, row) + } + c.UI.Output(tableOutput(data, &columnize.Config{ + Delim: hopeDelim, + })) + c.UI.Output("\n") + + return nil +} + +func (c *PKIVerifySignCommand) outputResultsJSON(results map[string]bool) error { + bytes, err := json.MarshalIndent(results, "", " ") + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIVerifySignCommand) outputResultsYAML(results map[string]bool) 
error { + bytes, err := yaml.Marshal(results) + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} diff --git a/command/pki_verify_sign_test.go b/command/pki_verify_sign_test.go new file mode 100644 index 000000000000..4001aadbc92d --- /dev/null +++ b/command/pki_verify_sign_test.go @@ -0,0 +1,468 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +func TestPKIVerifySign(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUp(t, client) + + runPkiVerifySignTests(t, client) +} + +func runPkiVerifySignTests(t *testing.T, client *api.Client) { + cases := []struct { + name string + args []string + expectedMatches map[string]bool + jsonOut bool + shouldError bool + expectErrorCont string + expectErrorNotCont string + nonJsonOutputCont string + }{ + { + "rootX1-matches-rootX1", + []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX1"}, + map[string]bool{ + "key_id_match": true, + "path_match": true, + "signature_match": true, + "subject_match": true, + "trust_match": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-on-rootX2-onlySameName", + []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX2"}, + map[string]bool{ + "key_id_match": false, + "path_match": false, + "signature_match": false, + "subject_match": true, + "trust_match": false, + }, + true, + false, + "", + "", + "", + }, + } + for _, testCase := range cases { + var errString string + var results map[string]interface{} + var stdOut string + + if testCase.jsonOut { + results, errString = execPKIVerifyJson(t, client, false, testCase.shouldError, testCase.args) + } else { + stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) + } + + // Verify Error Behavior + if testCase.shouldError { + if errString == "" { + t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) + } + if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { + t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) + } + if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { + t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) + } + } else { + if errString != "" { + t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) + } + } + + // Verify Output + if testCase.jsonOut { + isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) + if !isMatch { + t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, 
errString) + } + } else { + if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { + t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) + } + } + + } +} + +func execPKIVerifyJson(t *testing.T, client *api.Client, expectErrorUnmarshalling bool, expectErrorOut bool, callArgs []string) (map[string]interface{}, string) { + stdout, stderr := execPKIVerifyNonJson(t, client, expectErrorOut, callArgs) + + var results map[string]interface{} + if err := json.Unmarshal([]byte(stdout), &results); err != nil && !expectErrorUnmarshalling { + t.Fatalf("failed to decode json response : %v \n json: \n%v", err, stdout) + } + + return results, stderr +} + +func execPKIVerifyNonJson(t *testing.T, client *api.Client, expectErrorOut bool, callArgs []string) (string, string) { + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom(callArgs, runOpts) + if !expectErrorOut && code != 0 { + t.Fatalf("running command `%v` unsuccessful (ret %v)\nerr: %v", strings.Join(callArgs, " "), code, stderr.String()) + } + + t.Log(stdout.String() + stderr.String()) + + return stdout.String(), stderr.String() +} + +func convertListOfInterfaceToString(list []interface{}, sep string) string { + newList := make([]string, len(list)) + for i, interfa := range list { + newList[i] = interfa.(string) + } + return strings.Join(newList, sep) +} + +func createComplicatedIssuerSetUp(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, 
err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + // Intermediate X1 + int1CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ + "key_type": "rsa", + "common_name": "Int X1", + "ttl": "3650d", + }) + if err != nil || int1CsrResp == nil { + t.Fatalf("failed to generate CSR: %v", err) + } + int1KeyId, ok := int1CsrResp.Data["key_id"] + if !ok { + t.Fatalf("no key_id produced when generating csr, response %v", int1CsrResp.Data) + } + int1CsrRaw, ok := int1CsrResp.Data["csr"] + if !ok { + t.Fatalf("no csr produced when generating intermediate, resp: %v", int1CsrResp) + } + int1Csr := int1CsrRaw.(string) + int1CertResp, err := client.Logical().Write("pki-root/issuer/rootX1/sign-intermediate", map[string]interface{}{ + "csr": int1Csr, + }) + if err != nil || int1CertResp == nil { + t.Fatalf("failed to sign CSR: %v", err) + } + int1CertChainRaw, ok := int1CertResp.Data["ca_chain"] + if !ok { + t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int1CertResp) + } + int1CertChain := convertListOfInterfaceToString(int1CertChainRaw.([]interface{}), "\n") + importInt1Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ + "pem_bundle": int1CertChain, + }) + if err != nil || importInt1Resp == nil { + t.Fatalf("failed to import certificate: %v", err) + } + importIssuerIdMap, ok := importInt1Resp.Data["mapping"] + if !ok { + t.Fatalf("no mapping data returned on issuer import: %v", importInt1Resp) + } + for key, value := range importIssuerIdMap.(map[string]interface{}) { + if value != nil && len(value.(string)) > 0 { + if value != int1KeyId { + t.Fatalf("Expected exactly one key_match to %v, got multiple: %v", int1KeyId, importIssuerIdMap) + } + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "intX1", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer %v", err) + } + } else { + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer parent %v", err) + } + } + } + + // Intermediate X2 + int2CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Int X2", + "ttl": "3650d", + }) + if err != nil || int2CsrResp == nil { + t.Fatalf("failed to generate CSR: %v", err) + } + int2KeyId, ok := int2CsrResp.Data["key_id"] + if !ok { + t.Fatalf("no key material returned from producing csr, resp: %v", int2CsrResp) + } + int2CsrRaw, ok := int2CsrResp.Data["csr"] + if !ok { + t.Fatalf("no csr produced when generating intermediate, resp: %v", int2CsrResp) + } + int2Csr := int2CsrRaw.(string) + int2CertResp, err := client.Logical().Write("pki-newroot/issuer/rootX3/sign-intermediate", map[string]interface{}{ + "csr": int2Csr, + }) + if err != nil || int2CertResp == nil { + t.Fatalf("failed to sign 
CSR: %v", err)
+	}
+	int2CertChainRaw, ok := int2CertResp.Data["ca_chain"]
+	if !ok {
+		t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int2CertResp)
+	}
+	int2CertChain := convertListOfInterfaceToString(int2CertChainRaw.([]interface{}), "\n")
+	importInt2Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{
+		"pem_bundle": int2CertChain,
+	})
+	if err != nil || importInt2Resp == nil {
+		t.Fatalf("failed to import certificate: %v", err)
+	}
+	importIssuer2IdMap, ok := importInt2Resp.Data["mapping"]
+	if !ok {
+		t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp)
+	}
+	for key, value := range importIssuer2IdMap.(map[string]interface{}) {
+		if value != nil && len(value.(string)) > 0 {
+			if value != int2KeyId {
+				t.Fatalf("unexpected key_match with ca_chain, expected only %v, got %v", int2KeyId, importIssuer2IdMap)
+			}
+			if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{
+				"issuer_name": "intX2",
+			}); err != nil || resp == nil {
+				t.Fatalf("error naming issuer %v", err)
+			}
+		} else {
+			if resp, err := client.Logical().Write("pki-int/issuer/"+key, map[string]interface{}{
+				"issuer_name": "rootX3",
+			}); err != nil || resp == nil {
+				t.Fatalf("error naming parent issuer %v", err)
+			}
+		}
+	}
+
+	// Intermediate X3
+	int3CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{
+		"key_type":    "rsa",
+		"common_name": "Int X3",
+		"ttl":         "3650d",
+	})
+	if err != nil || int3CsrResp == nil {
+		t.Fatalf("failed to generate CSR: %v", err)
+	}
+	int3KeyId, ok := int3CsrResp.Data["key_id"]
+	if !ok {
+		t.Fatalf("no key_id produced when generating csr, response %v", int3CsrResp.Data)
+	}
+	int3CsrRaw, ok := int3CsrResp.Data["csr"]
+	if !ok {
+		t.Fatalf("no csr produced when generating intermediate, resp: %v", int3CsrResp)
+	}
+	int3Csr := int3CsrRaw.(string)
+	// sign by intX1 and import
+	int3CertResp1, err := client.Logical().Write("pki-int/issuer/intX1/sign-intermediate", map[string]interface{}{
+		"csr": int3Csr,
+	})
+	if err != nil || int3CertResp1 == nil {
+		t.Fatalf("failed to sign CSR: %v", err)
+	}
+	int3CertChainRaw1, ok := int3CertResp1.Data["ca_chain"]
+	if !ok {
+		t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int3CertResp1)
+	}
+	int3CertChain1 := convertListOfInterfaceToString(int3CertChainRaw1.([]interface{}), "\n")
+	importInt3Resp1, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{
+		"pem_bundle": int3CertChain1,
+	})
+	if err != nil || importInt3Resp1 == nil {
+		t.Fatalf("failed to import certificate: %v", err)
+	}
+	importIssuer3IdMap1, ok := importInt3Resp1.Data["mapping"]
+	if !ok {
+		t.Fatalf("no mapping data returned on issuer import: %v", importInt3Resp1)
+	}
+	for key, value := range importIssuer3IdMap1.(map[string]interface{}) {
+		if value != nil && len(value.(string)) > 0 && value == int3KeyId {
+			if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{
+				"issuer_name": "intX3",
+			}); err != nil || resp == nil {
+				t.Fatalf("error naming issuer %v", err)
+			}
+			break
+		}
+	}
+
+	// sign by intX2 and import
+	int3CertResp2, err := client.Logical().Write("pki-int/issuer/intX2/sign-intermediate", map[string]interface{}{
+		"csr": int3Csr,
+	})
+	if err != nil || int3CertResp2 == nil {
+		t.Fatalf("failed to sign CSR: %v", err)
+	}
+	int3CertChainRaw2, ok := int3CertResp2.Data["ca_chain"]
+	if !ok {
+		t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int3CertResp2)
+	}
+	int3CertChain2 := convertListOfInterfaceToString(int3CertChainRaw2.([]interface{}), "\n")
+	importInt3Resp2, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{
+		"pem_bundle": int3CertChain2,
+	})
+	if err != nil || importInt3Resp2 == nil {
+		t.Fatalf("failed to import certificate: %v", err)
+	}
+	importIssuer3IdMap2, ok := importInt3Resp2.Data["mapping"]
+	if !ok {
+		t.Fatalf("no mapping data returned on issuer import: %v", importInt3Resp2)
+	}
+	for key, value := range importIssuer3IdMap2.(map[string]interface{}) {
+		if value != nil && len(value.(string)) > 0 && value == int3KeyId {
+			if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{
+				"issuer_name": "intX3also",
+			}); err != nil || resp == nil {
+				t.Fatalf("error naming issuer %v", err)
+			}
+			break // Parent Certs Already Named
+		}
+	}
+}
+
+func verifyExpectedJson(expectedResults map[string]bool, results map[string]interface{}) (isMatch bool, errMsg string) {
+	if len(expectedResults) != len(results) {
+		return false, fmt.Sprintf("different number of keys in expected results (%d) than in results (%d)",
+			len(expectedResults), len(results))
+	}
+	for key, value := range expectedResults {
+		if results[key].(bool) != value {
+			return false, fmt.Sprintf("different value for key %s: expected %t, got %v", key, value, results[key])
+		}
+	}
+	return true, ""
+}
diff --git a/command/plugin.go b/command/plugin.go
index cf0a5009f626..862b55bb046b 100644
--- a/command/plugin.go
+++ b/command/plugin.go
@@ -1,9 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package command
 
 import (
 	"strings"
 
-	"github.com/mitchellh/cli"
+	"github.com/hashicorp/cli"
 )
 
 var _ cli.Command = (*PluginCommand)(nil)
diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go
index a65bf6702a96..1f1e4360acd5 100644
--- a/command/plugin_deregister.go
+++ b/command/plugin_deregister.go
@@ -1,13 +1,17 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "context" "fmt" + "net/http" "strings" + "github.com/hashicorp/cli" semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -64,7 +68,7 @@ func (c *PluginDeregisterCommand) Flags() *FlagSets { } func (c *PluginDeregisterCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(consts.PluginTypeUnknown) + return c.PredictVaultPlugins(api.PluginTypeUnknown) } func (c *PluginDeregisterCommand) AutocompleteFlags() complete.Flags { @@ -81,18 +85,16 @@ func (c *PluginDeregisterCommand) Run(args []string) int { var pluginNameRaw, pluginTypeRaw string args = f.Args() - switch len(args) { - case 0: - c.UI.Error("Not enough arguments (expected 1, or 2, got 0)") + positionalArgsCount := len(args) + switch positionalArgsCount { + case 0, 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", positionalArgsCount)) return 1 - case 1: - pluginTypeRaw = "unknown" - pluginNameRaw = args[0] case 2: pluginTypeRaw = args[0] pluginNameRaw = args[1] default: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, or 2, got %d)", len(args))) + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", positionalArgsCount)) return 1 } @@ -102,7 +104,7 @@ func (c *PluginDeregisterCommand) Run(args []string) int { return 2 } - pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 @@ -116,7 +118,33 @@ func (c *PluginDeregisterCommand) Run(args []string) int { } } - if err := client.Sys().DeregisterPlugin(&api.DeregisterPluginInput{ + // The deregister endpoint returns 200 if the plugin doesn't exist, so first + // try fetching the plugin to help improve info printed to the user. + // 404 => Return early with a descriptive message. + // Other error => Continue attempting to deregister the plugin anyway. + // Plugin exists but is builtin => Error early. + // Otherwise => If deregister succeeds, we can report that the plugin really + // was deregistered (and not just already absent). + var pluginExists bool + if info, err := client.Sys().GetPluginWithContext(context.Background(), &api.GetPluginInput{ + Name: pluginName, + Type: pluginType, + Version: c.flagPluginVersion, + }); err != nil { + if respErr, ok := err.(*api.ResponseError); ok && respErr.StatusCode == http.StatusNotFound { + c.UI.Output(fmt.Sprintf("Plugin %q (type: %q, version %q) does not exist in the catalog", pluginName, pluginType, c.flagPluginVersion)) + return 0 + } + // Best-effort check, continue trying to deregister. + } else if info != nil { + if info.Builtin { + c.UI.Error(fmt.Sprintf("Plugin %q (type: %q) is a builtin plugin and cannot be deregistered", pluginName, pluginType)) + return 2 + } + pluginExists = true + } + + if err := client.Sys().DeregisterPluginWithContext(context.Background(), &api.DeregisterPluginInput{ Name: pluginName, Type: pluginType, Version: c.flagPluginVersion, @@ -125,6 +153,10 @@ func (c *PluginDeregisterCommand) Run(args []string) int { return 2 } - c.UI.Output(fmt.Sprintf("Success! Deregistered plugin (if it was registered): %s", pluginName)) + if pluginExists { + c.UI.Output(fmt.Sprintf("Success! Deregistered %s plugin: %s", pluginType, pluginName)) + } else { + c.UI.Output(fmt.Sprintf("Success! 
Deregistered %s plugin (if it was registered): %s", pluginType, pluginName)) + } return 0 } diff --git a/command/plugin_deregister_test.go b/command/plugin_deregister_test.go index fc3bc5801ebc..46e52df7979a 100644 --- a/command/plugin_deregister_test.go +++ b/command/plugin_deregister_test.go @@ -1,13 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testPluginDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginDeregisterCommand) { @@ -32,7 +35,7 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { }{ { "not_enough_args", - nil, + []string{"foo"}, "Not enough arguments", 1, }, @@ -77,21 +80,20 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") ui, cmd := testPluginDeregisterCommand(t) cmd.client = client if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ Name: pluginName, - Type: consts.PluginTypeCredential, + Type: api.PluginTypeCredential, Command: pluginName, SHA256: sha256Sum, }); err != nil { @@ -106,14 +108,14 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "Success! Deregistered auth plugin: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeCredential, + Type: api.PluginTypeCredential, }) if err != nil { t.Fatal(err) @@ -135,14 +137,13 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) ui, cmd := testPluginDeregisterCommand(t) cmd.client = client @@ -156,14 +157,14 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "Success! 
Deregistered auth plugin: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeUnknown, + Type: api.PluginTypeUnknown, }) if err != nil { t.Fatal(err) @@ -183,14 +184,13 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Run("integration with missing version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, consts.PluginTypeCredential) + testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) ui, cmd := testPluginDeregisterCommand(t) cmd.client = client @@ -203,14 +203,14 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Deregistered plugin (if it was registered): " + expected := "does not exist in the catalog" combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeUnknown, + Type: api.PluginTypeUnknown, }) if err != nil { t.Fatal(err) @@ -227,6 +227,28 @@ func TestPluginDeregisterCommand_Run(t *testing.T) { } }) + t.Run("deregister builtin", func(t *testing.T) { + t.Parallel() + + pluginDir := corehelpers.MakeTestPluginDir(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + expected := "is a builtin plugin" + if code := cmd.Run([]string{ + consts.PluginTypeCredential.String(), + "github", + }); code != 2 { + t.Errorf("expected %d to be %d", code, 2) + } else if !strings.Contains(ui.ErrorWriter.String(), expected) { + t.Errorf("expected %q to contain %q", ui.ErrorWriter.String(), expected) + } + }) + t.Run("communication_failure", func(t *testing.T) { t.Parallel() diff --git a/command/plugin_info.go b/command/plugin_info.go index 8fedb9831535..e47a23c66519 100644 --- a/command/plugin_info.go +++ b/command/plugin_info.go @@ -1,12 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -58,7 +60,7 @@ func (c *PluginInfoCommand) Flags() *FlagSets { } func (c *PluginInfoCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(consts.PluginTypeUnknown) + return c.PredictVaultPlugins(api.PluginTypeUnknown) } func (c *PluginInfoCommand) AutocompleteFlags() complete.Flags { @@ -75,30 +77,26 @@ func (c *PluginInfoCommand) Run(args []string) int { var pluginNameRaw, pluginTypeRaw string args = f.Args() + positionalArgsCount := len(args) switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or 2, got %d)", len(args))) + case positionalArgsCount < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", positionalArgsCount)) return 1 - case len(args) > 2: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or 2, got %d)", len(args))) + case positionalArgsCount > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", positionalArgsCount)) return 1 - - // These cases should come after invalid cases have been checked - case len(args) == 1: - pluginTypeRaw = "unknown" - pluginNameRaw = args[0] - case len(args) == 2: - pluginTypeRaw = args[0] - pluginNameRaw = args[1] } + pluginTypeRaw = args[0] + pluginNameRaw = args[1] + client, err := c.Client() if err != nil { c.UI.Error(err.Error()) return 2 } - pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 @@ -124,6 +122,8 @@ func (c *PluginInfoCommand) Run(args []string) int { "args": resp.Args, "builtin": resp.Builtin, "command": resp.Command, + "oci_image": resp.OCIImage, + "runtime": resp.Runtime, "name": resp.Name, "sha256": resp.SHA256, "deprecation_status": resp.DeprecationStatus, diff --git a/command/plugin_info_test.go b/command/plugin_info_test.go index 714ac1e59b81..58525312d71b 100644 --- a/command/plugin_info_test.go +++ b/command/plugin_info_test.go @@ -1,13 +1,17 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testPluginInfoCommand(tb testing.TB) (*cli.MockUi, *PluginInfoCommand) { @@ -30,6 +34,12 @@ func TestPluginInfoCommand_Run(t *testing.T) { out string code int }{ + { + "not_enough_args", + []string{"foo"}, + "Not enough arguments", + 1, + }, { "too_many_args", []string{"foo", "bar", "fizz"}, @@ -38,7 +48,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { }, { "no_plugin_exist", - []string{consts.PluginTypeCredential.String(), "not-a-real-plugin-like-ever"}, + []string{api.PluginTypeCredential.String(), "not-a-real-plugin-like-ever"}, "Error reading plugin", 2, }, @@ -75,20 +85,19 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("default", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") ui, cmd := testPluginInfoCommand(t) cmd.client = client code := cmd.Run([]string{ - consts.PluginTypeCredential.String(), pluginName, + api.PluginTypeCredential.String(), pluginName, }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) @@ -106,14 +115,13 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("version flag", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() const pluginName = "azure" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "v1.0.0") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "v1.0.0") for name, tc := range map[string]struct { version string @@ -128,7 +136,7 @@ func TestPluginInfoCommand_Run(t *testing.T) { code := cmd.Run([]string{ "-version=" + tc.version, - consts.PluginTypeCredential.String(), pluginName, + api.PluginTypeCredential.String(), pluginName, }) combined := ui.OutputWriter.String() + ui.ErrorWriter.String() @@ -152,21 +160,20 @@ func TestPluginInfoCommand_Run(t *testing.T) { t.Run("field", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") + testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") ui, cmd := testPluginInfoCommand(t) cmd.client = client code := cmd.Run([]string{ "-field", "builtin", - consts.PluginTypeCredential.String(), pluginName, + api.PluginTypeCredential.String(), pluginName, }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) @@ -188,7 +195,7 @@ func TestPluginInfoCommand_Run(t 
*testing.T) { cmd.client = client code := cmd.Run([]string{ - consts.PluginTypeCredential.String(), "my-plugin", + api.PluginTypeCredential.String(), "my-plugin", }) if exp := 2; code != exp { t.Errorf("expected %d to be %d", code, exp) diff --git a/command/plugin_list.go b/command/plugin_list.go index 641c5e2bae98..28714adf4f5b 100644 --- a/command/plugin_list.go +++ b/command/plugin_list.go @@ -1,12 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -90,12 +92,12 @@ func (c *PluginListCommand) Run(args []string) int { return 1 } - pluginType := consts.PluginTypeUnknown + pluginType := api.PluginTypeUnknown if len(args) > 0 { pluginTypeStr := strings.TrimSpace(args[0]) if pluginTypeStr != "" { var err error - pluginType, err = consts.ParsePluginType(pluginTypeStr) + pluginType, err = api.ParsePluginType(pluginTypeStr) if err != nil { c.UI.Error(fmt.Sprintf("Error parsing type: %s", err)) return 2 @@ -139,10 +141,10 @@ func (c *PluginListCommand) Run(args []string) int { } } -func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, pluginType consts.PluginType) []string { +func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, pluginType api.PluginType) []string { var out []string switch pluginType { - case consts.PluginTypeUnknown: + case api.PluginTypeUnknown: out = []string{"Name | Type | Version"} for _, plugin := range plugins.Details { out = append(out, fmt.Sprintf("%s | %s | %s", plugin.Name, plugin.Type, plugin.Version)) @@ -158,9 +160,9 @@ func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, plu } func (c *PluginListCommand) detailedResponse(plugins *api.ListPluginsResponse) []string { - out := []string{"Name | Type | Version | Deprecation Status"} + out := []string{"Name | Type | Version | Container | Deprecation Status"} for _, plugin := range plugins.Details { - out = append(out, fmt.Sprintf("%s | %s | %s | %s", plugin.Name, plugin.Type, plugin.Version, plugin.DeprecationStatus)) + out = append(out, fmt.Sprintf("%s | %s | %s | %v | %s", plugin.Name, plugin.Type, plugin.Version, plugin.OCIImage != "", plugin.DeprecationStatus)) } return out diff --git a/command/plugin_list_test.go b/command/plugin_list_test.go index 8e4bbbff83e6..4ad37868b2c9 100644 --- a/command/plugin_list_test.go +++ b/command/plugin_list_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPluginListCommand(tb testing.TB) (*cli.MockUi, *PluginListCommand) { diff --git a/command/plugin_register.go b/command/plugin_register.go index 0c4510e3b99b..d124b38b8917 100644 --- a/command/plugin_register.go +++ b/command/plugin_register.go @@ -1,12 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -18,10 +20,13 @@ var ( type PluginRegisterCommand struct { *BaseCommand - flagArgs []string - flagCommand string - flagSHA256 string - flagVersion string + flagArgs []string + flagCommand string + flagSHA256 string + flagVersion string + flagOCIImage string + flagRuntime string + flagEnv []string } func (c *PluginRegisterCommand) Synopsis() string { @@ -62,8 +67,8 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { Name: "args", Target: &c.flagArgs, Completion: complete.PredictAnything, - Usage: "Arguments to pass to the plugin when starting. Separate " + - "multiple arguments with a comma.", + Usage: "Argument to pass to the plugin when starting. This " + + "flag can be specified multiple times to specify multiple args.", }) f.StringVar(&StringVar{ @@ -71,28 +76,51 @@ func (c *PluginRegisterCommand) Flags() *FlagSets { Target: &c.flagCommand, Completion: complete.PredictAnything, Usage: "Command to spawn the plugin. This defaults to the name of the " + - "plugin if unspecified.", + "plugin if both oci_image and command are unspecified.", }) f.StringVar(&StringVar{ Name: "sha256", Target: &c.flagSHA256, Completion: complete.PredictAnything, - Usage: "SHA256 of the plugin binary. This is required for all plugins.", + Usage: "SHA256 of the plugin binary or the oci_image provided. This is required for all plugins.", }) f.StringVar(&StringVar{ Name: "version", Target: &c.flagVersion, Completion: complete.PredictAnything, - Usage: "Semantic version of the plugin. Optional.", + Usage: "Semantic version of the plugin. Used as the tag when specifying oci_image, but with any leading 'v' trimmed. Optional.", + }) + + f.StringVar(&StringVar{ + Name: "oci_image", + Target: &c.flagOCIImage, + Completion: complete.PredictAnything, + Usage: "OCI image to run. If specified, setting command, args, and env will update the " + + "container's entrypoint, args, and environment variables (append-only) respectively.", + }) + + f.StringVar(&StringVar{ + Name: "runtime", + Target: &c.flagRuntime, + Completion: complete.PredictAnything, + Usage: "Vault plugin runtime to use if oci_image is specified.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "env", + Target: &c.flagEnv, + Completion: complete.PredictAnything, + Usage: "Environment variables to set for the plugin when starting. 
This " + + "flag can be specified multiple times to specify multiple environment variables.", }) return set } func (c *PluginRegisterCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultPlugins(consts.PluginTypeUnknown) + return c.PredictVaultPlugins(api.PluginTypeUnknown) } func (c *PluginRegisterCommand) AutocompleteFlags() complete.Flags { @@ -135,7 +163,7 @@ func (c *PluginRegisterCommand) Run(args []string) int { return 2 } - pluginType, err := consts.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) if err != nil { c.UI.Error(err.Error()) return 2 @@ -143,17 +171,20 @@ func (c *PluginRegisterCommand) Run(args []string) int { pluginName := strings.TrimSpace(pluginNameRaw) command := c.flagCommand - if command == "" { + if command == "" && c.flagOCIImage == "" { command = pluginName } if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ - Name: pluginName, - Type: pluginType, - Args: c.flagArgs, - Command: command, - SHA256: c.flagSHA256, - Version: c.flagVersion, + Name: pluginName, + Type: pluginType, + Args: c.flagArgs, + Command: command, + SHA256: c.flagSHA256, + Version: c.flagVersion, + OCIImage: c.flagOCIImage, + Runtime: c.flagRuntime, + Env: c.flagEnv, }); err != nil { c.UI.Error(fmt.Sprintf("Error registering plugin %s: %s", pluginName, err)) return 2 diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go index c2047d070f55..8d04b7733e48 100644 --- a/command/plugin_register_test.go +++ b/command/plugin_register_test.go @@ -1,15 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "encoding/json" + "fmt" "reflect" "sort" "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" ) func testPluginRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRegisterCommand) { @@ -80,8 +85,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -107,7 +111,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { } resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeCredential, + Type: api.PluginTypeCredential, }) if err != nil { t.Fatal(err) @@ -129,8 +133,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { t.Run("integration with version", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -138,7 +141,7 @@ func TestPluginRegisterCommand_Run(t *testing.T) { const pluginName = "my-plugin" versions := []string{"v1.0.0", "v2.0.1"} _, sha256Sum := testPluginCreate(t, pluginDir, pluginName) - types := []consts.PluginType{consts.PluginTypeCredential, consts.PluginTypeDatabase, consts.PluginTypeSecrets} + types := []api.PluginType{api.PluginTypeCredential, api.PluginTypeDatabase, api.PluginTypeSecrets} for _, typ := range types { for _, version := range versions { @@ -164,17 +167,17 @@ func TestPluginRegisterCommand_Run(t *testing.T) { } resp, err := 
client.Sys().ListPlugins(&api.ListPluginsInput{ - Type: consts.PluginTypeUnknown, + Type: api.PluginTypeUnknown, }) if err != nil { t.Fatal(err) } - found := make(map[consts.PluginType]int) - versionsFound := make(map[consts.PluginType][]string) + found := make(map[api.PluginType]int) + versionsFound := make(map[api.PluginType][]string) for _, p := range resp.Details { if p.Name == pluginName { - typ, err := consts.ParsePluginType(p.Type) + typ, err := api.ParsePluginType(p.Type) if err != nil { t.Fatal(err) } @@ -226,3 +229,107 @@ func TestPluginRegisterCommand_Run(t *testing.T) { assertNoTabs(t, cmd) }) } + +// TestFlagParsing ensures that flags passed to vault plugin register correctly +// translate into the expected JSON body and request path. +func TestFlagParsing(t *testing.T) { + for name, tc := range map[string]struct { + pluginType api.PluginType + name string + command string + ociImage string + runtime string + version string + sha256 string + args []string + env []string + expectedPayload string + }{ + "minimal": { + pluginType: api.PluginTypeUnknown, + name: "foo", + sha256: "abc123", + expectedPayload: `{"type":"unknown","command":"foo","sha256":"abc123"}`, + }, + "full": { + pluginType: api.PluginTypeCredential, + name: "name", + command: "cmd", + ociImage: "image", + runtime: "runtime", + version: "v1.0.0", + sha256: "abc123", + args: []string{"--a=b", "--b=c", "positional"}, + env: []string{"x=1", "y=2"}, + expectedPayload: `{"type":"auth","args":["--a=b","--b=c","positional"],"command":"cmd","sha256":"abc123","version":"v1.0.0","oci_image":"image","runtime":"runtime","env":["x=1","y=2"]}`, + }, + "command remains empty if oci_image specified": { + pluginType: api.PluginTypeCredential, + name: "name", + ociImage: "image", + sha256: "abc123", + expectedPayload: `{"type":"auth","sha256":"abc123","oci_image":"image"}`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginRegisterCommand(t) + var requestLogger *recordingRoundTripper + cmd.client, requestLogger = mockClient(t) + + var args []string + if tc.command != "" { + args = append(args, "-command="+tc.command) + } + if tc.ociImage != "" { + args = append(args, "-oci_image="+tc.ociImage) + } + if tc.runtime != "" { + args = append(args, "-runtime="+tc.runtime) + } + if tc.sha256 != "" { + args = append(args, "-sha256="+tc.sha256) + } + if tc.version != "" { + args = append(args, "-version="+tc.version) + } + for _, arg := range tc.args { + args = append(args, "-args="+arg) + } + for _, env := range tc.env { + args = append(args, "-env="+env) + } + if tc.pluginType != api.PluginTypeUnknown { + args = append(args, tc.pluginType.String()) + } + args = append(args, tc.name) + t.Log(args) + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d\nstdout: %s\nstderr: %s", code, exp, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + actual := &api.RegisterPluginInput{} + expected := &api.RegisterPluginInput{} + err := json.Unmarshal(requestLogger.body, actual) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(tc.expectedPayload), expected) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected: %s\ngot: %s", tc.expectedPayload, requestLogger.body) + } + expectedPath := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", tc.pluginType.String(), tc.name) + if tc.pluginType == api.PluginTypeUnknown { + expectedPath = fmt.Sprintf("/v1/sys/plugins/catalog/%s", tc.name) + } + if requestLogger.path != expectedPath 
{ + t.Errorf("Expected path %s, got %s", expectedPath, requestLogger.path) + } + }) + } +} diff --git a/command/plugin_reload.go b/command/plugin_reload.go index ae3c663869fe..bdcd6f696efb 100644 --- a/command/plugin_reload.go +++ b/command/plugin_reload.go @@ -1,11 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "context" "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -16,9 +20,10 @@ var ( type PluginReloadCommand struct { *BaseCommand - plugin string - mounts []string - scope string + plugin string + mounts []string + scope string + pluginType string } func (c *PluginReloadCommand) Synopsis() string { @@ -33,9 +38,16 @@ Usage: vault plugin reload [options] mount(s) must be provided, but not both. In case the plugin name is provided, all of its corresponding mounted paths that use the plugin backend will be reloaded. - Reload the plugin named "my-custom-plugin": + If run with a Vault namespace other than the root namespace, only plugins + running in the same namespace will be reloaded. + + Reload the secret plugin named "my-custom-plugin" on the current node: + + $ vault plugin reload -type=secret -plugin=my-custom-plugin - $ vault plugin reload -plugin=my-custom-plugin + Reload the secret plugin named "my-custom-plugin" across all nodes and replicated clusters: + + $ vault plugin reload -type=secret -plugin=my-custom-plugin -scope=global ` + c.Flags().Help() @@ -65,7 +77,15 @@ func (c *PluginReloadCommand) Flags() *FlagSets { Name: "scope", Target: &c.scope, Completion: complete.PredictAnything, - Usage: "The scope of the reload, omitted for local, 'global', for replicated reloads", + Usage: "The scope of the reload, omitted for local, 'global', for replicated reloads.", + }) + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.pluginType, + Completion: complete.PredictAnything, + Usage: "The type of plugin to reload, one of auth, secret, or database. Mutually " + + "exclusive with -mounts. 
If not provided, all plugins with a matching name will be reloaded.", }) return set @@ -87,15 +107,23 @@ func (c *PluginReloadCommand) Run(args []string) int { return 1 } + positionalArgs := len(f.Args()) switch { + case positionalArgs != 0: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", positionalArgs)) + return 1 case c.plugin == "" && len(c.mounts) == 0: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + c.UI.Error("No plugins specified, must specify exactly one of -plugin or -mounts") return 1 case c.plugin != "" && len(c.mounts) > 0: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + c.UI.Error("Must specify exactly one of -plugin or -mounts") return 1 case c.scope != "" && c.scope != "global": c.UI.Error(fmt.Sprintf("Invalid reload scope: %s", c.scope)) + return 1 + case len(c.mounts) > 0 && c.pluginType != "": + c.UI.Error("Cannot specify -type with -mounts") + return 1 } client, err := c.Client() @@ -104,25 +132,46 @@ func (c *PluginReloadCommand) Run(args []string) int { return 2 } - rid, err := client.Sys().ReloadPlugin(&api.ReloadPluginInput{ - Plugin: c.plugin, - Mounts: c.mounts, - Scope: c.scope, - }) + var reloadID string + if client.Namespace() == "" { + pluginType := api.PluginTypeUnknown + pluginTypeStr := strings.TrimSpace(c.pluginType) + if pluginTypeStr != "" { + var err error + pluginType, err = api.ParsePluginType(pluginTypeStr) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing -type as a plugin type, must be unset or one of auth, secret, or database: %s", err)) + return 1 + } + } + + reloadID, err = client.Sys().RootReloadPlugin(context.Background(), &api.RootReloadPluginInput{ + Plugin: c.plugin, + Type: pluginType, + Scope: c.scope, + }) + } else { + reloadID, err = client.Sys().ReloadPlugin(&api.ReloadPluginInput{ + Plugin: c.plugin, + Mounts: c.mounts, + Scope: c.scope, + }) + } + if err != nil { c.UI.Error(fmt.Sprintf("Error reloading plugin/mounts: %s", err)) return 2 } if len(c.mounts) > 0 { - if rid != "" { - c.UI.Output(fmt.Sprintf("Success! Reloading mounts: %s, reload_id: %s", c.mounts, rid)) + if reloadID != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading mounts: %s, reload_id: %s", c.mounts, reloadID)) } else { c.UI.Output(fmt.Sprintf("Success! Reloaded mounts: %s", c.mounts)) } } else { - if rid != "" { - c.UI.Output(fmt.Sprintf("Success! Reloading plugin: %s, reload_id: %s", c.plugin, rid)) + if reloadID != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading plugin: %s, reload_id: %s", c.plugin, reloadID)) } else { c.UI.Output(fmt.Sprintf("Success! Reloaded plugin: %s", c.plugin)) } diff --git a/command/plugin_reload_status.go b/command/plugin_reload_status.go index 319d539c1546..c653955978aa 100644 --- a/command/plugin_reload_status.go +++ b/command/plugin_reload_status.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go index 5713d1a1507b..d84062d8d251 100644 --- a/command/plugin_reload_test.go +++ b/command/plugin_reload_test.go @@ -1,13 +1,15 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" ) func testPluginReloadCommand(tb testing.TB) (*cli.MockUi, *PluginReloadCommand) { @@ -44,13 +46,25 @@ func TestPluginReloadCommand_Run(t *testing.T) { { "not_enough_args", nil, - "Not enough arguments", + "No plugins specified, must specify exactly one of -plugin or -mounts", 1, }, { "too_many_args", []string{"-plugin", "foo", "-mounts", "bar"}, - "Too many arguments", + "Must specify exactly one of -plugin or -mounts", + 1, + }, + { + "type_and_mounts_mutually_exclusive", + []string{"-mounts", "bar", "-type", "secret"}, + "Cannot specify -type with -mounts", + 1, + }, + { + "invalid_type", + []string{"-plugin", "bar", "-type", "unsupported"}, + "Error parsing -type as a plugin type", 1, }, } @@ -83,21 +97,20 @@ func TestPluginReloadCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() pluginName := "my-plugin" - _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, consts.PluginTypeCredential, "") + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") ui, cmd := testPluginReloadCommand(t) cmd.client = client if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ Name: pluginName, - Type: consts.PluginTypeCredential, + Type: api.PluginTypeCredential, Command: pluginName, SHA256: sha256Sum, }); err != nil { @@ -145,7 +158,7 @@ func TestPluginReloadStatusCommand_Run(t *testing.T) { client, closer := testVaultServer(t) defer closer() - ui, cmd := testPluginReloadCommand(t) + ui, cmd := testPluginReloadStatusCommand(t) cmd.client = client args := append([]string{}, tc.args...) diff --git a/command/plugin_runtime.go b/command/plugin_runtime.go new file mode 100644 index 000000000000..ce15bb31fdbf --- /dev/null +++ b/command/plugin_runtime.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*PluginRuntimeCommand)(nil) + +type PluginRuntimeCommand struct { + *BaseCommand +} + +func (c *PluginRuntimeCommand) Synopsis() string { + return "Interact with Vault plugin runtimes catalog." +} + +func (c *PluginRuntimeCommand) Help() string { + helpText := ` +Usage: vault plugin runtime [options] [args] + + This command groups subcommands for interacting with Vault's plugin runtimes and the + plugin runtime catalog. The plugin runtime catalog is divided into types. Currently, + Vault only supports "container" plugin runtimes. A plugin runtime allows users to + fine-tune the parameters with which a plugin is executed. For example, you can select + a different OCI-compatible runtime, or set resource limits. A plugin runtime can + optionally be referenced during plugin registration. A type must be specified on each call. + Here are a few examples of the plugin runtime commands. 
+ + List all available plugin runtimes in the catalog of a particular type: + + $ vault plugin runtime list -type=container + + Register a new plugin runtime to the catalog as a particular type: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + + Get information about a plugin runtime in the catalog listed under a particular type: + + $ vault plugin runtime info -type=container my-custom-plugin-runtime + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_runtime_deregister.go b/command/plugin_runtime_deregister.go new file mode 100644 index 000000000000..47b790f2cc03 --- /dev/null +++ b/command/plugin_runtime_deregister.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeDeregisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeDeregisterCommand)(nil) +) + +type PluginRuntimeDeregisterCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeDeregisterCommand) Synopsis() string { + return "Deregister an existing plugin runtime in the catalog" +} + +func (c *PluginRuntimeDeregisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime deregister [options] NAME + + Deregister an existing plugin runtime in the catalog with the given name. If + any registered plugin references the plugin runtime, an error is returned. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container". + + Deregister a plugin runtime: + + $ vault plugin runtime deregister -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeDeregisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. 
Vault currently only supports \"container\" runtime type.", }) + + return set +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeDeregisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime deregistration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + if err = client.Sys().DeregisterPluginRuntime(context.Background(), &api.DeregisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error deregistering plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Deregistered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_deregister_test.go b/command/plugin_runtime_deregister_test.go new file mode 100644 index 000000000000..1569fceb3f11 --- /dev/null +++ b/command/plugin_runtime_deregister_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeDeregisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeDeregisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeDeregisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "foo", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "info_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error deregistering plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deregistering plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeDeregisterCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_info.go b/command/plugin_runtime_info.go new file mode 100644 index 000000000000..22c95a233570 --- /dev/null +++ b/command/plugin_runtime_info.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeInfoCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeInfoCommand)(nil) +) + +type PluginRuntimeInfoCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeInfoCommand) Synopsis() string { + return "Read information about a plugin runtime in the catalog" +} + +func (c *PluginRuntimeInfoCommand) Help() string { + helpText := ` +Usage: vault plugin runtime info [options] NAME + + Displays information about a plugin runtime in the catalog with the given name. If + the plugin runtime does not exist, an error is returned. The -type flag + currently only accepts "container". 
+ + Get info about a plugin runtime: + + $ vault plugin runtime info -type=container my-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeInfoCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeInfoCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeInfoCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeInfoCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime info retrieval") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + resp, err := client.Sys().GetPluginRuntime(context.Background(), &api.GetPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading plugin runtime named %s: %s", runtimeName, err)) + return 2 + } + + if resp == nil { + c.UI.Error(fmt.Sprintf("No value found for plugin runtime %q", runtimeName)) + return 2 + } + + data := map[string]interface{}{ + "name": resp.Name, + "type": resp.Type, + "oci_runtime": resp.OCIRuntime, + "cgroup_parent": resp.CgroupParent, + "cpu_nanos": resp.CPU, + "memory_bytes": resp.Memory, + } + + if c.flagField != "" { + return PrintRawField(c.UI, data, c.flagField) + } + return OutputData(c.UI, data) +} diff --git a/command/plugin_runtime_info_test.go b/command/plugin_runtime_info_test.go new file mode 100644 index 000000000000..40166b094bf9 --- /dev/null +++ b/command/plugin_runtime_info_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeInfoCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeInfoCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeInfoCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeInfoCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{"-type=container"}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type=container", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo", "bar"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "info_container_on_empty_plugin_runtime_catalog", + []string{"-type=container", "my-plugin-runtime"}, + "Error reading plugin runtime named my-plugin-runtime", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container", "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading plugin runtime named my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeInfoCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_list.go b/command/plugin_runtime_list.go new file mode 100644 index 000000000000..64cca1805f63 --- /dev/null +++ b/command/plugin_runtime_list.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeListCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeListCommand)(nil) +) + +type PluginRuntimeListCommand struct { + *BaseCommand + + flagType string +} + +func (c *PluginRuntimeListCommand) Synopsis() string { + return "Lists available plugin runtimes" +} + +func (c *PluginRuntimeListCommand) Help() string { + helpText := ` +Usage: vault plugin runtime list [options] + + Lists available plugin runtimes registered in the catalog. This does not list whether + plugin runtimes are in use, but rather just their availability. 
+ + List all available plugin runtimes in the catalog: + + $ vault plugin runtime list + + List all available container plugin runtimes in the catalog: + + $ vault plugin runtime list -type=container + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + return set +} + +func (c *PluginRuntimeListCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + if len(f.Args()) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(f.Args()))) + return 1 + } + + var input *api.ListPluginRuntimesInput + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) > 0 { + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + input = &api.ListPluginRuntimesInput{Type: runtimeType} + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().ListPluginRuntimes(context.Background(), input) + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing available plugin runtimes: %s", err)) + return 2 + } + if resp == nil { + c.UI.Error("No response from server when listing plugin runtimes") + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(tableOutput(c.tableResponse(resp), nil)) + return 0 + default: + return OutputData(c.UI, resp.Runtimes) + } +} + +func (c *PluginRuntimeListCommand) tableResponse(response *api.ListPluginRuntimesResponse) []string { + out := []string{"Name | Type | OCI Runtime | Parent Cgroup | CPU Nanos | Memory Bytes"} + for _, runtime := range response.Runtimes { + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %d | %d", + runtime.Name, runtime.Type, runtime.OCIRuntime, runtime.CgroupParent, runtime.CPU, runtime.Memory)) + } + + return out +} diff --git a/command/plugin_runtime_list_test.go b/command/plugin_runtime_list_test.go new file mode 100644 index 000000000000..8f8d209ca413 --- /dev/null +++ b/command/plugin_runtime_list_test.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testPluginRuntimeListCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "invalid_runtime_type", + []string{"-type=foo"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "list container on empty plugin runtime catalog", + []string{"-type=container"}, + "OCI Runtime", + 0, + }, + { + "list on empty plugin runtime catalog", + nil, + "OCI Runtime", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeListCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type=container"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing available plugin runtimes: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_runtime_register.go b/command/plugin_runtime_register.go new file mode 100644 index 000000000000..175be302f519 --- /dev/null +++ b/command/plugin_runtime_register.go @@ -0,0 +1,172 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRuntimeRegisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRuntimeRegisterCommand)(nil) +) + +type PluginRuntimeRegisterCommand struct { + *BaseCommand + + flagType string + flagOCIRuntime string + flagCgroupParent string + flagCPUNanos int64 + flagMemoryBytes int64 + flagRootless bool +} + +func (c *PluginRuntimeRegisterCommand) Synopsis() string { + return "Registers a new plugin runtime in the catalog" +} + +func (c *PluginRuntimeRegisterCommand) Help() string { + helpText := ` +Usage: vault plugin runtime register [options] NAME + + Registers a new plugin runtime in the catalog. Currently, Vault only supports registering runtimes of type "container". +The OCI runtime must be available on Vault's host. If no OCI runtime is specified, Vault will use "runsc", gVisor's OCI runtime. 
+ + Register the plugin runtime named my-custom-plugin-runtime: + + $ vault plugin runtime register -type=container -oci_runtime=my-oci-runtime my-custom-plugin-runtime + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRuntimeRegisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Completion: complete.PredictAnything, + Usage: "Plugin runtime type. Vault currently only supports \"container\" runtime type.", + }) + + f.StringVar(&StringVar{ + Name: "oci_runtime", + Target: &c.flagOCIRuntime, + Completion: complete.PredictAnything, + Usage: "OCI runtime. Default is \"runsc\", gVisor's OCI runtime.", + }) + + f.StringVar(&StringVar{ + Name: "cgroup_parent", + Target: &c.flagCgroupParent, + Completion: complete.PredictAnything, + Usage: "Parent cgroup to set for each container. This can be used to control the total resource usage for a group of plugins.", + }) + + f.Int64Var(&Int64Var{ + Name: "cpu_nanos", + Target: &c.flagCPUNanos, + Completion: complete.PredictAnything, + Usage: "CPU limit to set per container in nanos. Defaults to no limit.", + }) + + f.Int64Var(&Int64Var{ + Name: "memory_bytes", + Target: &c.flagMemoryBytes, + Completion: complete.PredictAnything, + Usage: "Memory limit to set per container in bytes. Defaults to no limit.", + }) + + f.BoolVar(&BoolVar{ + Name: "rootless", + Target: &c.flagRootless, + Completion: complete.PredictAnything, + Usage: "Whether the container runtime is configured to run as a " + + "non-privileged (non-root) user. Required if the plugin container " + + "image is also configured to run as a non-root user.", + }) + + return set +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginRuntimeRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRuntimeRegisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + runtimeTypeRaw := strings.TrimSpace(c.flagType) + if len(runtimeTypeRaw) == 0 { + c.UI.Error("-type is required for plugin runtime registration") + return 1 + } + + runtimeType, err := api.ParsePluginRuntimeType(runtimeTypeRaw) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var runtimeNameRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + + // This case should come after invalid cases have been checked + case len(args) == 1: + runtimeNameRaw = args[0] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + runtimeName := strings.TrimSpace(runtimeNameRaw) + ociRuntime := strings.TrimSpace(c.flagOCIRuntime) + cgroupParent := strings.TrimSpace(c.flagCgroupParent) + + if err := client.Sys().RegisterPluginRuntime(context.Background(), &api.RegisterPluginRuntimeInput{ + Name: runtimeName, + Type: runtimeType, + OCIRuntime: ociRuntime, + CgroupParent: cgroupParent, + CPU: c.flagCPUNanos, + Memory: c.flagMemoryBytes, + Rootless: c.flagRootless, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error registering plugin runtime %s: %s", runtimeName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Registered plugin runtime: %s", runtimeName)) + return 0 +} diff --git a/command/plugin_runtime_register_test.go b/command/plugin_runtime_register_test.go new file mode 100644 index 000000000000..3b28587cf62a --- /dev/null +++ b/command/plugin_runtime_register_test.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +func testPluginRuntimeRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRuntimeRegisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRuntimeRegisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRuntimeRegisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + flags []string + args []string + out string + code int + }{ + { + "no type specified", + []string{}, + []string{"foo"}, + "-type is required for plugin runtime registration", + 1, + }, + { + "invalid type", + []string{"-type", "foo"}, + []string{"not"}, + "\"foo\" is not a supported plugin runtime type", + 2, + }, + { + "not_enough_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-type", consts.PluginRuntimeTypeContainer.String()}, + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + args := append(tc.flags, tc.args...) + code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRuntimeRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{"-type", consts.PluginRuntimeTypeContainer.String(), "my-plugin-runtime"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error registering plugin runtime my-plugin-runtime" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRuntimeRegisterCommand(t) + assertNoTabs(t, cmd) + }) +} + +// TestPluginRuntimeFlagParsing ensures that flags passed to vault plugin runtime register correctly +// translate into the expected JSON body and request path. 
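+// The command is exercised against a stubbed client whose round tripper records each request (the mockClient and recordingRoundTripper helpers used below), so no live Vault server is needed; assertions run against the captured request body and path, e.g. a path of the form /v1/sys/plugins/runtimes/catalog/container/foo.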
+func TestPluginRuntimeFlagParsing(t *testing.T) { + for name, tc := range map[string]struct { + runtimeType api.PluginRuntimeType + name string + ociRuntime string + cgroupParent string + cpu int64 + memory int64 + rootless bool + expectedPayload string + }{ + "minimal": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + expectedPayload: `{"type":1,"name":"foo"}`, + }, + "full": { + runtimeType: api.PluginRuntimeTypeContainer, + name: "foo", + cgroupParent: "/cpulimit/", + ociRuntime: "runtime", + cpu: 5678, + memory: 1234, + rootless: true, + expectedPayload: `{"type":1,"cgroup_parent":"/cpulimit/","memory_bytes":1234,"cpu_nanos":5678,"oci_runtime":"runtime","rootless":true}`, + }, + } { + tc := tc + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginRuntimeRegisterCommand(t) + var requestLogger *recordingRoundTripper + cmd.client, requestLogger = mockClient(t) + + var args []string + if tc.cgroupParent != "" { + args = append(args, "-cgroup_parent="+tc.cgroupParent) + } + if tc.ociRuntime != "" { + args = append(args, "-oci_runtime="+tc.ociRuntime) + } + if tc.memory != 0 { + args = append(args, fmt.Sprintf("-memory_bytes=%d", tc.memory)) + } + if tc.cpu != 0 { + args = append(args, fmt.Sprintf("-cpu_nanos=%d", tc.cpu)) + } + if tc.rootless { + args = append(args, "-rootless=true") + } + + if tc.runtimeType != api.PluginRuntimeTypeUnsupported { + args = append(args, "-type="+tc.runtimeType.String()) + } + args = append(args, tc.name) + t.Log(args) + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d\nstdout: %s\nstderr: %s", code, exp, ui.OutputWriter.String(), ui.ErrorWriter.String()) + } + + actual := &api.RegisterPluginRuntimeInput{} + expected := &api.RegisterPluginRuntimeInput{} + err := json.Unmarshal(requestLogger.body, actual) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal([]byte(tc.expectedPayload), expected) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected: %s\ngot: %s", tc.expectedPayload, requestLogger.body) + } + expectedPath := fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", tc.runtimeType.String(), tc.name) + + if requestLogger.path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, requestLogger.path) + } + }) + } +} diff --git a/command/plugin_test.go b/command/plugin_test.go index cc83efc772a9..2e72bb7c1898 100644 --- a/command/plugin_test.go +++ b/command/plugin_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -9,7 +12,6 @@ import ( "testing" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" ) // testPluginCreate creates a sample plugin in a tempdir and returns the shasum @@ -38,7 +40,7 @@ func testPluginCreate(tb testing.TB, dir, name string) (string, string) { } // testPluginCreateAndRegister creates a plugin and registers it in the catalog. -func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name string, pluginType consts.PluginType, version string) (string, string) { +func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType, version string) (string, string) { tb.Helper() pth, sha256Sum := testPluginCreate(tb, dir, name) @@ -57,7 +59,7 @@ func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name st } // testPluginCreateAndRegisterVersioned creates a versioned plugin and registers it in the catalog. 
-func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType consts.PluginType) (string, string, string) { +func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType) (string, string, string) { tb.Helper() pth, sha256Sum := testPluginCreate(tb, dir, name) diff --git a/command/policy.go b/command/policy.go index 59ffdf0bfb3d..5e5f61bb2d72 100644 --- a/command/policy.go +++ b/command/policy.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*PolicyCommand)(nil) diff --git a/command/policy_delete.go b/command/policy_delete.go index 76fa9a21d0f8..d5c3b8aabc11 100644 --- a/command/policy_delete.go +++ b/command/policy_delete.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_delete_test.go b/command/policy_delete_test.go index 2c822de9d4fd..6b3bd01e3f32 100644 --- a/command/policy_delete_test.go +++ b/command/policy_delete_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyDeleteCommand(tb testing.TB) (*cli.MockUi, *PolicyDeleteCommand) { diff --git a/command/policy_fmt.go b/command/policy_fmt.go index 7912c10643d9..ea3dd2ab9958 100644 --- a/command/policy_fmt.go +++ b/command/policy_fmt.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,10 +8,10 @@ import ( "io/ioutil" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/hcl/hcl/printer" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" homedir "github.com/mitchellh/go-homedir" "github.com/posener/complete" ) diff --git a/command/policy_fmt_test.go b/command/policy_fmt_test.go index 2ae92ff6947e..41de53c9e6c8 100644 --- a/command/policy_fmt_test.go +++ b/command/policy_fmt_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyFmtCommand(tb testing.TB) (*cli.MockUi, *PolicyFmtCommand) { @@ -86,7 +89,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -129,7 +132,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -164,7 +167,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() @@ -199,7 +202,7 @@ path "secret" { t.Fatal(err) } defer os.Remove(f.Name()) - if _, err := f.Write([]byte(policy)); err != nil { + if _, err := f.WriteString(policy); err != nil { t.Fatal(err) } f.Close() diff --git a/command/policy_list.go b/command/policy_list.go index 53e85df0fdb9..147efb971672 100644 --- a/command/policy_list.go +++ b/command/policy_list.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_list_test.go b/command/policy_list_test.go index 70defe54ead7..c603d310fcd2 100644 --- a/command/policy_list_test.go +++ b/command/policy_list_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyListCommand(tb testing.TB) (*cli.MockUi, *PolicyListCommand) { diff --git a/command/policy_read.go b/command/policy_read.go index 31777c5d5ae9..dd7a698de65c 100644 --- a/command/policy_read.go +++ b/command/policy_read.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_read_test.go b/command/policy_read_test.go index 8cd7c066b8ce..e18298e5115e 100644 --- a/command/policy_read_test.go +++ b/command/policy_read_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyReadCommand(tb testing.TB) (*cli.MockUi, *PolicyReadCommand) { diff --git a/command/policy_write.go b/command/policy_write.go index 538414bc50fa..193c94968809 100644 --- a/command/policy_write.go +++ b/command/policy_write.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,7 +10,7 @@ import ( "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/policy_write_test.go b/command/policy_write_test.go index c8db7dc9ddc2..64f67eb2a8b4 100644 --- a/command/policy_write_test.go +++ b/command/policy_write_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -9,7 +12,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testPolicyWriteCommand(tb testing.TB) (*cli.MockUi, *PolicyWriteCommand) { diff --git a/command/print.go b/command/print.go index dace6ac951d6..d5e3b2a5529b 100644 --- a/command/print.go +++ b/command/print.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/print_token.go b/command/print_token.go index efe5aeedd3ef..9402e8a15238 100644 --- a/command/print_token.go +++ b/command/print_token.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/proxy.go b/command/proxy.go new file mode 100644 index 000000000000..82b0dce67a91 --- /dev/null +++ b/command/proxy.go @@ -0,0 +1,1205 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "sort" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/hashicorp/cli" + ctconfig "github.com/hashicorp/consul-template/config" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" + "github.com/kr/pretty" + "github.com/oklog/run" + "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "google.golang.org/grpc/test/bufconn" +) + +var ( + _ cli.Command = (*ProxyCommand)(nil) + _ cli.CommandAutocomplete = (*ProxyCommand)(nil) +) + +const ( + // flagNameProxyExitAfterAuth is used as a Proxy specific flag to indicate + // that proxy should exit after a single successful auth + flagNameProxyExitAfterAuth = "exit-after-auth" + nameProxy = "proxy" +) + +type ProxyCommand struct { + *BaseCommand + logFlags logFlags + + config *proxyConfig.Config + + ShutdownCh chan struct{} + SighupCh chan struct{} + + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + + logWriter io.Writer + logGate *gatedwriter.Writer + logger log.Logger + + // Telemetry object + metricsHelper *metricsutil.MetricsHelper + + cleanupGuard 
sync.Once + + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + + flagConfigs []string + flagExitAfterAuth bool + flagTestVerifyOnly bool +} + +func (c *ProxyCommand) Synopsis() string { + return "Start a Vault Proxy" +} + +func (c *ProxyCommand) Help() string { + helpText := ` +Usage: vault proxy [options] + + This command starts a Vault Proxy that can perform automatic authentication + in certain environments. + + Start a proxy with a configuration file: + + $ vault proxy -config=/etc/vault/config.hcl + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *ProxyCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + ), + Usage: "Path to a configuration file. This configuration file should " + + "contain only proxy directives.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameProxyExitAfterAuth, + Target: &c.flagExitAfterAuth, + Default: false, + Usage: "If set to true, the proxy will exit with code 0 after a single " + + "successful auth, where success means that a token was retrieved and " + + "all sinks successfully wrote it", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *ProxyCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ProxyCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *ProxyCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Create a logger. We wrap it in a gated writer so that it doesn't + // start logging too early. 
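+ // Output is buffered by the gated writer until it is flushed once logging is fully configured (or immediately below when gated logging is disabled via the corresponding log flag).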
+ c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + // Validation + if len(c.flagConfigs) < 1 { + c.UI.Error("Must specify at least one config path using -config") + return 1 + } + + config, err := c.loadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + return 1 + } + + if config.AutoAuth == nil { + c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") + } + + c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars + c.config = config + + l, err := c.newLogger() + if err != nil { + c.outputErrors(err) + return 1 + } + c.logger = l + + // release log gate if the disable-gated-logs flag is set + if c.logFlags.flagDisableGatedLogs { + c.logGate.Flush() + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { + c.UI.Output(fmt.Sprintf( + "\nConfiguration:\n%s\n", + pretty.Sprint(*c.config))) + } + return 0 + } + + // Ignore any setting of Agent/Proxy's address. This client is used by the Proxy + // to reach out to Vault. This should never loop back to the proxy. + c.flagAgentProxyAddress = "" + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error fetching client: %v", + err)) + return 1 + } + + serverHealth, err := client.Sys().Health() + // We don't have any special behaviour if the error != nil, as this + // is not worth stopping the Proxy process over. + if err == nil { + // Note that we don't exit if the versions don't match, as this is a valid + // configuration, but we should still let the user know. + serverVersion := serverHealth.Version + proxyVersion := version.GetVersion().VersionNumber() + if serverVersion != proxyVersion { + c.UI.Info("==> Note: Vault Proxy version does not match Vault server version. " + + fmt.Sprintf("Vault Proxy version: %s, Vault server version: %s", proxyVersion, serverVersion)) + } + } + + // telemetry configuration + inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.ProxyString(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + // This indicates whether the namespace for the client has been set by environment variable. 
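+ // (The api client picks the namespace up from VAULT_NAMESPACE when it is constructed.)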
+ // If it has, we don't touch it + namespaceSetByEnvironmentVariable := client.Namespace() != "" + + if !namespaceSetByEnvironmentVariable && config.Vault != nil && config.Vault.Namespace != "" { + client.SetNamespace(config.Vault.Namespace) + } + + var method auth.AuthMethod + var sinks []*sink.SinkConfig + if config.AutoAuth != nil { + // Note: This will only set namespace header to the value in config.AutoAuth.Method.Namespace + // only if it hasn't been set by config.Vault.Namespace above. In that case, the config value + // present at config.AutoAuth.Method.Namespace will still be used for auto-auth. + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + client.SetNamespace(config.AutoAuth.Method.Namespace) + } + + sinkClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + sinkClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + sinkClient.SetDisableKeepAlives(true) + } + + for _, sc := range config.AutoAuth.Sinks { + switch sc.Type { + case "file": + config := &sink.SinkConfig{ + Logger: c.logger.Named("sink.file"), + Config: sc.Config, + Client: sinkClient, + WrapTTL: sc.WrapTTL, + DHType: sc.DHType, + DeriveKey: sc.DeriveKey, + DHPath: sc.DHPath, + AAD: sc.AAD, + } + s, err := file.NewFileSink(config) + if err != nil { + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + return 1 + } + config.Sink = s + sinks = append(sinks, config) + default: + c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) + return 1 + } + } + + authConfig := &auth.AuthConfig{ + Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), + MountPath: config.AutoAuth.Method.MountPath, + Config: config.AutoAuth.Method.Config, + } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + return 1 + } + } + + // We do this after auto-auth has been configured, because we don't want to + // confuse the issue of retries for auth failures which have their own + // config and are handled a bit differently. + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } + } + + enforceConsistency := cache.EnforceConsistencyNever + whenInconsistent := cache.WhenInconsistentFail + if config.APIProxy != nil { + switch config.APIProxy.EnforceConsistency { + case "always": + enforceConsistency = cache.EnforceConsistencyAlways + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) + return 1 + } + + switch config.APIProxy.WhenInconsistent { + case "retry": + whenInconsistent = cache.WhenInconsistentRetry + case "forward": + whenInconsistent = cache.WhenInconsistentForward + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) + return 1 + } + } + + // Warn if cache _and_ cert auto-auth is enabled but certificates were not + // provided in the auto_auth.method["cert"].config stanza. 
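+ // A minimal sketch of the stanza being checked for, with illustrative paths: + // + // auto_auth { + // method "cert" { + // config = { + // client_cert = "/etc/vault/client.crt" + // client_key = "/etc/vault/client.key" + // } + // } + // }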
+ if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { + _, okCertFile := config.AutoAuth.Method.Config["client_cert"] + _, okCertKey := config.AutoAuth.Method.Config["client_key"] + + // If neither of these exists in the cert stanza, proxy will use the + // certs from the vault stanza. + if !okCertFile && !okCertKey { + c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + + "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + + "specifying certificate information in the 'cert' auto-auth's config stanza.")) + } + + } + + // Output the header that the proxy has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault Proxy started! Log data will stream in below:\n") + } + + var leaseCache *cache.LeaseCache + var previousToken string + + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) + return 1 + } + + if config.DisableIdleConnsAPIProxy { + proxyClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAPIProxy { + proxyClient.SetDisableKeepAlives(true) + } + + apiProxyLogger := c.logger.Named("apiproxy") + + // The API proxy to be used, if listeners are configured + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + PrependConfiguredNamespace: config.APIProxy != nil && config.APIProxy.PrependConfiguredNamespace, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, + // and other subsystems, so that they can listen for ctx.Done() to + // fire and shut down accordingly. + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + var updater *cache.StaticSecretCacheUpdater + + // Parse proxy cache configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. 
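+ // Proxied requests then flow listener -> lease cache -> API proxy -> Vault, with the lease cache answering directly when it already holds a matching entry.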
+ leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheStaticSecrets: config.Cache.CacheStaticSecrets, + // dynamic secrets are configured as default-on to preserve backwards compatibility + CacheDynamicSecrets: !config.Cache.DisableCachingDynamicSecrets, + UserAgentToUse: useragent.AgentProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) + return 1 + } + + cacheLogger.Info("cache configured", "cache_static_secrets", config.Cache.CacheStaticSecrets, "disable_caching_dynamic_secrets", config.Cache.DisableCachingDynamicSecrets) + + // Configure persistent storage and add to LeaseCache + if config.Cache.Persist != nil { + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + + // If we're caching static secrets, we need to start the updater, too + if config.Cache.CacheStaticSecrets { + staticSecretCacheUpdaterLogger := c.logger.Named("cache.staticsecretcacheupdater") + inmemSink, err := inmem.New(&sink.SinkConfig{ + Logger: staticSecretCacheUpdaterLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for static secret updater subsystem: %v", err)) + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: staticSecretCacheUpdaterLogger, + Sink: inmemSink, + }) + + updater, err = cache.NewStaticSecretCacheUpdater(&cache.StaticSecretCacheUpdaterConfig{ + Client: client, + LeaseCache: leaseCache, + Logger: staticSecretCacheUpdaterLogger, + TokenSink: inmemSink, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating static secret cache updater: %v", err)) + return 1 + } + + capabilityManager, err := cache.NewStaticSecretCapabilityManager(&cache.StaticSecretCapabilityManagerConfig{ + LeaseCache: leaseCache, + Logger: c.logger.Named("cache.staticsecretcapabilitymanager"), + Client: client, + StaticSecretTokenCapabilityRefreshInterval: config.Cache.StaticSecretTokenCapabilityRefreshInterval, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating static secret capability manager: %v", err)) + return 1 + } + leaseCache.SetCapabilityManager(capabilityManager) + } + } + + var listeners []net.Listener + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. + c.tlsReloadFuncsLock.Lock() + + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsCfg *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + if config.Cache != nil { + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + } + ln = inProcListener + } else { + lnBundle, err := cache.StartListener(lnConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. 
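+ // (These funcs are re-run on SIGHUP-driven config reloads so listeners can pick up reissued TLS certificates without a restart.)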
+ c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) + } + + listeners = append(listeners, ln) + + proxyVaultToken := true + var inmemSink sink.Sink + if config.APIProxy != nil { + if config.APIProxy.UseAutoAuthToken { + apiProxyLogger.Debug("configuring inmem auto-auth sink") + inmemSink, err = inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + c.tlsReloadFuncsLock.Unlock() + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + } + proxyVaultToken = !config.APIProxy.ForceAutoAuthToken + } + + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + } + + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { + muxHandler = verifyRequestHeader(muxHandler) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.ProxyAPI != nil && lnConfig.ProxyAPI.EnableQuit + + mux.Handle(consts.ProxyPathMetrics, c.handleMetrics()) + if "metrics_only" != lnConfig.Role { + mux.Handle(consts.ProxyPathCacheClear, leaseCache.HandleCacheClear(ctx)) + mux.Handle(consts.ProxyPathQuit, c.handleQuit(quitEnabled)) + mux.Handle("/", muxHandler) + } + + scheme := "https://" + if tlsCfg == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } + + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsCfg, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiProxyLogger.StandardLogger(nil), + } + + go server.Serve(ln) + } + + c.tlsReloadFuncsLock.Unlock() + + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } + } + defer c.cleanupGuard.Do(listenerCloseFunc) + + // Inform any tests that the server is ready + if c.startedCh != nil { + close(c.startedCh) + } + + var g run.Group + + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Proxy config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + + // This run group watches for signal termination + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault Proxy shutdown triggered") + // Notify systemd that the server is shutting down + // Let the lease cache know this is a shutdown; no need to evict everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + return nil + case <-ctx.Done(): + return nil + case <-winsvc.ShutdownChannel(): + return nil + } + } + }, func(error) {}) + + // Start auto-auth and sink servers + if method != nil { + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the 
client to not affect other subsystems. + ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + // Override the set namespace with the auto-auth specific namespace + if !namespaceSetByEnvironmentVariable && config.AutoAuth.Method.Namespace != "" { + ahClient.SetNamespace(config.AutoAuth.Method.Namespace) + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.ProxyAutoAuthString(), + MetricsSignifier: "proxy", + }) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + + g.Add(func() error { + return ah.Run(ctx, method) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + err := ss.Run(ctx, ah.OutputCh, sinks) + c.logger.Info("sinks finished, exiting") + + // Start goroutine to drain from ah.OutputCh from this point onward + // to prevent ah.Run from being blocked. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ah.OutputCh: + } + } + }() + + return err + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + } + + // Add the static secret cache updater, if appropriate + if updater != nil { + g.Add(func() error { + err := updater.Run(ctx) + return err + }, func(error) { + cancelFunc() + }) + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + caser := cases.Title(language.English) + c.UI.Output("==> Vault Proxy configuration:\n") + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + caser.String(k), + info[k])) + } + c.UI.Output("") + + // Release the log gate. + c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var exitCode int + if err := g.Run(); err != nil { + c.logger.Error("runtime error encountered", "error", err) + c.UI.Error("Error encountered during run, refer to logs for more details.") + exitCode = 1 + } + c.notifySystemd(systemd.SdNotifyStopping) + return exitCode +} + +// applyConfigOverrides ensures that the config object accurately reflects the desired +// settings as configured by the user. It applies the relevant config setting based +// on the precedence (env var overrides file config, cli overrides env var). 
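+// For example, if vault { address = ... } is set in the config file, VAULT_ADDR
+// is set in the environment, and -address is passed on the command line, the
+// flag value wins; without the flag, the env var wins; without either, the
+// file value is used, and the built-in default applies last.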
+// It mutates the config object supplied. +func (c *ProxyCommand) applyConfigOverrides(f *FlagSets, config *proxyConfig.Config) { + if config.Vault == nil { + config.Vault = &proxyConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) + + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameProxyExitAfterAuth { + config.ExitAfterAuth = c.flagExitAfterAuth + } + }) + + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName +} + +func (c *ProxyCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *ProxyCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue + case configVal != "": + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +func (c *ProxyCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + val, err := parseutil.ParseBool(flagEnvValue) + if err != nil { + c.logger.Error("error parsing bool from environment variable, using default instead", "environment variable", fVar.EnvVar, "provided value", flagEnvValue, "default", fVar.Default, "err", err) + val = fVar.Default + } + *fVar.Target = val + 
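// Note: a 'false' value from config is indistinguishable from an unset one
+ // here; it falls through to the default case below, which yields the same
+ // result whenever the flag default is also false. +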
case configVal:
+ // Use value from config
+ *fVar.Target = configVal
+ default:
+ // Use the default value
+ *fVar.Target = fVar.Default
+ }
+}
+
+// storePidFile is used to write out our PID to a file if necessary
+func (c *ProxyCommand) storePidFile(pidPath string) error {
+ // Quit fast if no pidfile
+ if pidPath == "" {
+ return nil
+ }
+
+ // Open the PID file
+ pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600)
+ if err != nil {
+ return fmt.Errorf("could not open pid file: %w", err)
+ }
+ defer pidFile.Close()
+
+ // Write out the PID
+ pid := os.Getpid()
+ _, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
+ if err != nil {
+ return fmt.Errorf("could not write to pid file: %w", err)
+ }
+ return nil
+}
+
+// removePidFile is used to clean up the PID file if necessary
+func (c *ProxyCommand) removePidFile(pidPath string) error {
+ if pidPath == "" {
+ return nil
+ }
+ return os.Remove(pidPath)
+}
+
+func (c *ProxyCommand) handleMetrics() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ logical.RespondError(w, http.StatusMethodNotAllowed, nil)
+ return
+ }
+
+ if err := r.ParseForm(); err != nil {
+ logical.RespondError(w, http.StatusBadRequest, err)
+ return
+ }
+
+ format := r.Form.Get("format")
+ if format == "" {
+ format = metricsutil.FormatFromRequest(&logical.Request{
+ Headers: r.Header,
+ })
+ }
+
+ resp := c.metricsHelper.ResponseForFormat(format)
+
+ status := resp.Data[logical.HTTPStatusCode].(int)
+ w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string))
+ switch v := resp.Data[logical.HTTPRawBody].(type) {
+ case string:
+ w.WriteHeader(status)
+ w.Write([]byte(v))
+ case []byte:
+ w.WriteHeader(status)
+ w.Write(v)
+ default:
+ logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned"))
+ }
+ })
+}
+
+func (c *ProxyCommand) handleQuit(enabled bool) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if !enabled {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+
+ switch r.Method {
+ case http.MethodPost:
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ return
+ }
+
+ c.logger.Debug("received quit request")
+ close(c.ShutdownCh)
+ })
+}
+
+// newLogger creates a logger based on the parsed config fields on the ProxyCommand struct.
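+// It consumes the LogLevel, LogFormat, LogFile, and LogRotate* fields from
+// the merged config.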
+func (c *ProxyCommand) newLogger() (log.InterceptLogger, error) {
+ if c.config == nil {
+ return nil, fmt.Errorf("cannot create logger, no config")
+ }
+
+ var errors error
+
+ // Parse all the log related config
+ logLevel, err := logging.ParseLogLevel(c.config.LogLevel)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ logFormat, err := logging.ParseLogFormat(c.config.LogFormat)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration)
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ if errors != nil {
+ return nil, errors
+ }
+
+ logCfg, err := logging.NewLogConfig(nameProxy)
+ if err != nil {
+ return nil, err
+ }
+ logCfg.Name = nameProxy
+ logCfg.LogLevel = logLevel
+ logCfg.LogFormat = logFormat
+ logCfg.LogFilePath = c.config.LogFile
+ logCfg.LogRotateDuration = logRotateDuration
+ logCfg.LogRotateBytes = c.config.LogRotateBytes
+ logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles
+
+ l, err := logging.Setup(logCfg, c.logWriter)
+ if err != nil {
+ return nil, err
+ }
+
+ return l, nil
+}
+
+// loadConfig attempts to generate a Proxy config from the file(s) specified.
+func (c *ProxyCommand) loadConfig(paths []string) (*proxyConfig.Config, error) {
+ var errors error
+ cfg := proxyConfig.NewConfig()
+
+ for _, configPath := range paths {
+ configFromPath, err := proxyConfig.LoadConfig(configPath)
+ if err != nil {
+ errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err))
+ } else {
+ cfg = cfg.Merge(configFromPath)
+ }
+ }
+
+ if errors != nil {
+ return nil, errors
+ }
+
+ if err := cfg.ValidateConfig(); err != nil {
+ return nil, fmt.Errorf("error validating configuration: %w", err)
+ }
+
+ return cfg, nil
+}
+
+// reloadConfig will attempt to reload the config from file(s) and adjust certain
+// config values without requiring a restart of the Vault Proxy.
+// If the config is retrieved without error, it is stored in the config field of the ProxyCommand.
+// This operation is not atomic and could result in an updated config with only partially applied settings.
+// The error returned from this func may be a multierror.
+// This function will most likely be called due to Vault Proxy receiving a SIGHUP signal.
+// Currently, only reloading the following is supported:
+// * log level
+// * TLS certs for listeners
+func (c *ProxyCommand) reloadConfig(paths []string) error {
+ // Notify systemd that the server is reloading
+ c.notifySystemd(systemd.SdNotifyReloading)
+ defer c.notifySystemd(systemd.SdNotifyReady)
+
+ var errors error
+
+ // Reload the config
+ cfg, err := c.loadConfig(paths)
+ if err != nil {
+ // Returning a single error as we won't continue with bad config and won't 'commit' it.
+ return err
+ }
+ c.config = cfg
+
+ // Update the log level
+ err = c.reloadLogLevel()
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ // Update certs
+ err = c.reloadCerts()
+ if err != nil {
+ errors = multierror.Append(errors, err)
+ }
+
+ return errors
+}
+
+// reloadLogLevel will attempt to update the log level for the logger attached
+// to the ProxyCommand struct using the value currently set in config.
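+// The new level is applied in place via SetLevel, so the logger itself does
+// not need to be recreated.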
+func (c *ProxyCommand) reloadLogLevel() error { + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + return err + } + + c.logger.SetLevel(logLevel) + + return nil +} + +// reloadCerts will attempt to reload certificates using a reload func which +// was provided when the listeners were configured, only funcs that were appended +// to the ProxyCommand slice will be invoked. +// This function returns a multierror type so that every func can report an error +// if it encounters one. +func (c *ProxyCommand) reloadCerts() error { + var errors error + + c.tlsReloadFuncsLock.RLock() + defer c.tlsReloadFuncsLock.RUnlock() + + for _, reloadFunc := range c.tlsReloadFuncs { + // Non-TLS listeners will have a nil reload func. + if reloadFunc != nil { + err := reloadFunc() + if err != nil { + errors = multierror.Append(errors, err) + } + } + } + + return errors +} + +// outputErrors will take an error or multierror and handle outputting each to the UI +func (c *ProxyCommand) outputErrors(err error) { + if err != nil { + if me, ok := err.(*multierror.Error); ok { + for _, err := range me.Errors { + c.UI.Error(err.Error()) + } + } else { + c.UI.Error(err.Error()) + } + } +} diff --git a/command/proxy/config/config.go b/command/proxy/config/config.go new file mode 100644 index 000000000000..d22b74fad83f --- /dev/null +++ b/command/proxy/config/config.go @@ -0,0 +1,855 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// Config is the configuration for Vault Proxy. 
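+// An illustrative, non-exhaustive HCL layout, modeled on the test fixtures
+// in this package (all values are placeholders):
+//
+//	auto_auth {
+//	  method "aws" {
+//	    config = { role = "foobar" }
+//	  }
+//	  sink "file" {
+//	    config = { path = "/tmp/token" }
+//	  }
+//	}
+//
+//	api_proxy { use_auto_auth_token = true }
+//
+//	cache { cache_static_secrets = true }
+//
+//	listener "tcp" {
+//	  address     = "127.0.0.1:8300"
+//	  tls_disable = true
+//	}
+//
+//	vault {
+//	  address = "https://127.0.0.1:8200"
+//	}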
+type Config struct {
+ *configutil.SharedConfig `hcl:"-"`
+
+ AutoAuth *AutoAuth `hcl:"auto_auth"`
+ ExitAfterAuth bool `hcl:"exit_after_auth"`
+ Cache *Cache `hcl:"cache"`
+ APIProxy *APIProxy `hcl:"api_proxy"`
+ Vault *Vault `hcl:"vault"`
+ DisableIdleConns []string `hcl:"disable_idle_connections"`
+ DisableIdleConnsAPIProxy bool `hcl:"-"`
+ DisableIdleConnsAutoAuth bool `hcl:"-"`
+ DisableKeepAlives []string `hcl:"disable_keep_alives"`
+ DisableKeepAlivesAPIProxy bool `hcl:"-"`
+ DisableKeepAlivesAutoAuth bool `hcl:"-"`
+}
+
+const (
+ DisableIdleConnsEnv = "VAULT_PROXY_DISABLE_IDLE_CONNECTIONS"
+ DisableKeepAlivesEnv = "VAULT_PROXY_DISABLE_KEEP_ALIVES"
+)
+
+func (c *Config) Prune() {
+ for _, l := range c.Listeners {
+ l.RawConfig = nil
+ l.Profiling.UnusedKeys = nil
+ l.Telemetry.UnusedKeys = nil
+ l.CustomResponseHeaders = nil
+ }
+ c.FoundKeys = nil
+ c.UnusedKeys = nil
+ c.SharedConfig.FoundKeys = nil
+ c.SharedConfig.UnusedKeys = nil
+ if c.Telemetry != nil {
+ c.Telemetry.FoundKeys = nil
+ c.Telemetry.UnusedKeys = nil
+ }
+}
+
+type Retry struct {
+ NumRetries int `hcl:"num_retries"`
+}
+
+// Vault contains configuration for connecting to Vault servers
+type Vault struct {
+ Address string `hcl:"address"`
+ CACert string `hcl:"ca_cert"`
+ CAPath string `hcl:"ca_path"`
+ TLSSkipVerify bool `hcl:"-"`
+ TLSSkipVerifyRaw interface{} `hcl:"tls_skip_verify"`
+ ClientCert string `hcl:"client_cert"`
+ ClientKey string `hcl:"client_key"`
+ TLSServerName string `hcl:"tls_server_name"`
+ Namespace string `hcl:"namespace"`
+ Retry *Retry `hcl:"retry"`
+}
+
+// transportDialer is an interface that allows passing a custom dialer function
+// to an HTTP client's transport config
+type transportDialer interface {
+ // Dial is intended to match https://pkg.go.dev/net#Dialer.Dial
+ Dial(network, address string) (net.Conn, error)
+
+ // DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// APIProxy contains any configuration needed for proxy mode
+type APIProxy struct {
+ UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"`
+ UseAutoAuthToken bool `hcl:"-"`
+ ForceAutoAuthToken bool `hcl:"-"`
+ EnforceConsistency string `hcl:"enforce_consistency"`
+ WhenInconsistent string `hcl:"when_inconsistent"`
+ PrependConfiguredNamespace bool `hcl:"prepend_configured_namespace"`
+}
+
+// Cache contains any configuration needed for Cache mode
+type Cache struct {
+ Persist *agentproxyshared.PersistConfig `hcl:"persist"`
+ InProcDialer transportDialer `hcl:"-"`
+ CacheStaticSecrets bool `hcl:"cache_static_secrets"`
+ DisableCachingDynamicSecrets bool `hcl:"disable_caching_dynamic_secrets"`
+ StaticSecretTokenCapabilityRefreshIntervalRaw interface{} `hcl:"static_secret_token_capability_refresh_interval"`
+ StaticSecretTokenCapabilityRefreshInterval time.Duration `hcl:"-"`
+}
+
+// AutoAuth is the configured authentication method and sinks
+type AutoAuth struct {
+ Method *Method `hcl:"-"`
+ Sinks []*Sink `hcl:"sinks"`
+
+ // NOTE: This is unsupported outside of testing and may disappear at any
+ // time.
+ EnableReauthOnNewCredentials bool `hcl:"enable_reauth_on_new_credentials"` +} + +// Method represents the configuration for the authentication backend +type Method struct { + Type string + MountPath string `hcl:"mount_path"` + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + MinBackoffRaw interface{} `hcl:"min_backoff"` + MinBackoff time.Duration `hcl:"-"` + MaxBackoffRaw interface{} `hcl:"max_backoff"` + MaxBackoff time.Duration `hcl:"-"` + Namespace string `hcl:"namespace"` + ExitOnError bool `hcl:"exit_on_err"` + Config map[string]interface{} +} + +// Sink defines a location to write the authenticated token +type Sink struct { + Type string + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + DHType string `hcl:"dh_type"` + DeriveKey bool `hcl:"derive_key"` + DHPath string `hcl:"dh_path"` + AAD string `hcl:"aad"` + AADEnvVar string `hcl:"aad_env_var"` + Config map[string]interface{} +} + +func NewConfig() *Config { + return &Config{ + SharedConfig: new(configutil.SharedConfig), + } +} + +// Merge merges two Proxy configurations. +func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.AutoAuth = c.AutoAuth + if c2.AutoAuth != nil { + result.AutoAuth = c2.AutoAuth + } + + result.Cache = c.Cache + if c2.Cache != nil { + result.Cache = c2.Cache + } + + result.APIProxy = c.APIProxy + if c2.APIProxy != nil { + result.APIProxy = c2.APIProxy + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + // For these, ignore the non-specific one and overwrite them all + result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth + if c2.DisableIdleConnsAutoAuth { + result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth + } + + result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy + if c2.DisableIdleConnsAPIProxy { + result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy + } + + result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth + if c2.DisableKeepAlivesAutoAuth { + result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth + } + + result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy + if c2.DisableKeepAlivesAPIProxy { + result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy + } + + result.ExitAfterAuth = c.ExitAfterAuth + if c2.ExitAfterAuth { + result.ExitAfterAuth = c2.ExitAfterAuth + } + + result.Vault = c.Vault + if c2.Vault != nil { + result.Vault = c2.Vault + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + return result +} + +// ValidateConfig validates a Vault configuration after it has been fully merged together, to +// ensure that required combinations of configs are there +func (c *Config) ValidateConfig() error { + if c.Cache != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("enabling the cache requires at least 1 listener to be defined") + } + } + + if c.APIProxy != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") + } + + if c.APIProxy.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return 
fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.AutoAuth != nil { + cacheStaticSecrets := c.Cache != nil && c.Cache.CacheStaticSecrets + if len(c.AutoAuth.Sinks) == 0 && + (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && !cacheStaticSecrets { + return fmt.Errorf("auto_auth requires at least one sink, api_proxy.use_auto_auth_token=true, or cache.cache_static_secrets=true") + } + } + + if c.Cache != nil && c.Cache.CacheStaticSecrets && c.AutoAuth == nil { + return fmt.Errorf("cache.cache_static_secrets=true requires an auto-auth block configured, to use the token to connect with Vault's event system") + } + + if c.Cache != nil && !c.Cache.CacheStaticSecrets && c.Cache.DisableCachingDynamicSecrets { + return fmt.Errorf("to enable the cache, the cache must be configured to either cache static secrets or dynamic secrets") + } + + if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { + return fmt.Errorf("no auto_auth, cache, or listener block found in config") + } + + return nil +} + +// LoadConfig loads the configuration at the given path, regardless if +// it's a file or directory. +func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigDir loads the configuration at the given path if it's a directory +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. +func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +// LoadConfigFile loads the configuration at the given path if it's a file +func LoadConfigFile(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return nil, fmt.Errorf("location is a directory, not a file") + } + + // Read the file + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + // Parse! 
+ obj, err := hcl.Parse(string(d))
+ if err != nil {
+ return nil, err
+ }
+
+ // Attribute
+ ast.Walk(obj, func(n ast.Node) (ast.Node, bool) {
+ if k, ok := n.(*ast.ObjectKey); ok {
+ k.Token.Pos.Filename = path
+ }
+ return n, true
+ })
+
+ // Start building the result
+ result := NewConfig()
+ if err := hcl.DecodeObject(result, obj); err != nil {
+ return nil, err
+ }
+
+ sharedConfig, err := configutil.ParseConfig(string(d))
+ if err != nil {
+ return nil, err
+ }
+
+ // Pruning custom headers for Vault for now
+ for _, ln := range sharedConfig.Listeners {
+ ln.CustomResponseHeaders = nil
+ }
+
+ result.SharedConfig = sharedConfig
+
+ list, ok := obj.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+ }
+
+ if err := parseAutoAuth(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'auto_auth': %w", err)
+ }
+
+ if err := parseCache(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'cache': %w", err)
+ }
+
+ if err := parseAPIProxy(result, list); err != nil {
+ return nil, fmt.Errorf("error parsing 'api_proxy': %w", err)
+ }
+
+ err = parseVault(result, list)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing 'vault': %w", err)
+ }
+
+ if result.Vault != nil {
+ // Set defaults
+ if result.Vault.Retry == nil {
+ result.Vault.Retry = &Retry{}
+ }
+ switch result.Vault.Retry.NumRetries {
+ case 0:
+ result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts
+ case -1:
+ result.Vault.Retry.NumRetries = 0
+ }
+ }
+
+ if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" {
+ result.DisableIdleConns, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableIdleConnsEnv))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableIdleConnsEnv, err)
+ }
+ }
+
+ for _, subsystem := range result.DisableIdleConns {
+ switch subsystem {
+ case "auto-auth":
+ result.DisableIdleConnsAutoAuth = true
+ case "caching", "proxying":
+ result.DisableIdleConnsAPIProxy = true
+ case "":
+ continue
+ default:
+ return nil, fmt.Errorf("unknown disable_idle_connections value: %s", subsystem)
+ }
+ }
+
+ if disableKeepAlivesEnv := os.Getenv(DisableKeepAlivesEnv); disableKeepAlivesEnv != "" {
+ result.DisableKeepAlives, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableKeepAlivesEnv))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableKeepAlivesEnv, err)
+ }
+ }
+
+ for _, subsystem := range result.DisableKeepAlives {
+ switch subsystem {
+ case "auto-auth":
+ result.DisableKeepAlivesAutoAuth = true
+ case "caching", "proxying":
+ result.DisableKeepAlivesAPIProxy = true
+ case "":
+ continue
+ default:
+ return nil, fmt.Errorf("unknown disable_keep_alives value: %s", subsystem)
+ }
+ }
+
+ return result, nil
+}
+
+func parseVault(result *Config, list *ast.ObjectList) error {
+ name := "vault"
+
+ vaultList := list.Filter(name)
+ if len(vaultList.Items) == 0 {
+ return nil
+ }
+
+ if len(vaultList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := vaultList.Items[0]
+
+ var v Vault
+ err := hcl.DecodeObject(&v, item.Val)
+ if err != nil {
+ return err
+ }
+
+ if v.TLSSkipVerifyRaw != nil {
+ v.TLSSkipVerify, err = parseutil.ParseBool(v.TLSSkipVerifyRaw)
+ if err != nil {
+ return err
+ }
+ }
+
+ result.Vault = &v
+
+ subs, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ return fmt.Errorf("could not parse %q as an object", name)
+ }
+
+ if err :=
parseRetry(result, subs.List); err != nil {
+ return fmt.Errorf("error parsing 'retry': %w", err)
+ }
+
+ return nil
+}
+
+func parseRetry(result *Config, list *ast.ObjectList) error {
+ name := "retry"
+
+ retryList := list.Filter(name)
+ if len(retryList.Items) == 0 {
+ return nil
+ }
+
+ if len(retryList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := retryList.Items[0]
+
+ var r Retry
+ err := hcl.DecodeObject(&r, item.Val)
+ if err != nil {
+ return err
+ }
+
+ result.Vault.Retry = &r
+
+ return nil
+}
+
+func parseAPIProxy(result *Config, list *ast.ObjectList) error {
+ name := "api_proxy"
+
+ apiProxyList := list.Filter(name)
+ if len(apiProxyList.Items) == 0 {
+ return nil
+ }
+
+ if len(apiProxyList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := apiProxyList.Items[0]
+
+ var apiProxy APIProxy
+ err := hcl.DecodeObject(&apiProxy, item.Val)
+ if err != nil {
+ return err
+ }
+
+ if apiProxy.UseAutoAuthTokenRaw != nil {
+ apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw)
+ if err != nil {
+ // Could be a value of "force" instead of "true"/"false"
+ switch apiProxy.UseAutoAuthTokenRaw.(type) {
+ case string:
+ v := apiProxy.UseAutoAuthTokenRaw.(string)
+
+ if !strings.EqualFold(v, "force") {
+ return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw)
+ }
+ apiProxy.UseAutoAuthToken = true
+ apiProxy.ForceAutoAuthToken = true
+
+ default:
+ return err
+ }
+ }
+ }
+ result.APIProxy = &apiProxy
+
+ return nil
+}
+
+func parseCache(result *Config, list *ast.ObjectList) error {
+ name := "cache"
+
+ cacheList := list.Filter(name)
+ if len(cacheList.Items) == 0 {
+ return nil
+ }
+
+ if len(cacheList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := cacheList.Items[0]
+
+ var c Cache
+ err := hcl.DecodeObject(&c, item.Val)
+ if err != nil {
+ return err
+ }
+
+ result.Cache = &c
+
+ subs, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ return fmt.Errorf("could not parse %q as an object", name)
+ }
+ subList := subs.List
+ if err := parsePersist(result, subList); err != nil {
+ return fmt.Errorf("error parsing persist: %w", err)
+ }
+
+ if result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw != nil {
+ var err error
+ if result.Cache.StaticSecretTokenCapabilityRefreshInterval, err = parseutil.ParseDurationSecond(result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw); err != nil {
+ return fmt.Errorf("error parsing static_secret_token_capability_refresh_interval, must be provided as a duration string: %w", err)
+ }
+ result.Cache.StaticSecretTokenCapabilityRefreshIntervalRaw = nil
+ }
+
+ return nil
+}
+
+func parsePersist(result *Config, list *ast.ObjectList) error {
+ name := "persist"
+
+ persistList := list.Filter(name)
+ if len(persistList.Items) == 0 {
+ return nil
+ }
+
+ if len(persistList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ item := persistList.Items[0]
+
+ var p agentproxyshared.PersistConfig
+ err := hcl.DecodeObject(&p, item.Val)
+ if err != nil {
+ return err
+ }
+
+ if p.Type == "" {
+ if len(item.Keys) == 1 {
+ p.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
+ }
+ if p.Type == "" {
+ return errors.New("persist type must be specified")
+ }
+ }
+
+ result.Cache.Persist = &p
+
+ return nil
+}
+
+func parseAutoAuth(result *Config, list *ast.ObjectList) error {
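+ // auto_auth allows at most one block; it must contain exactly one method
+ // (enforced by parseMethod below) and may contain zero or more sinks. If
+ // wrapping is enabled on the method, exactly one sink without wrapping
+ // must be defined (validated below). +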
name := "auto_auth"
+
+ autoAuthList := list.Filter(name)
+ if len(autoAuthList.Items) == 0 {
+ return nil
+ }
+ if len(autoAuthList.Items) > 1 {
+ return fmt.Errorf("at most one %q block is allowed", name)
+ }
+
+ // Get our item
+ item := autoAuthList.Items[0]
+
+ var a AutoAuth
+ if err := hcl.DecodeObject(&a, item.Val); err != nil {
+ return err
+ }
+
+ result.AutoAuth = &a
+
+ subs, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ return fmt.Errorf("could not parse %q as an object", name)
+ }
+ subList := subs.List
+
+ if err := parseMethod(result, subList); err != nil {
+ return fmt.Errorf("error parsing 'method': %w", err)
+ }
+ if a.Method == nil {
+ return fmt.Errorf("no 'method' block found")
+ }
+
+ if err := parseSinks(result, subList); err != nil {
+ return fmt.Errorf("error parsing 'sink' stanzas: %w", err)
+ }
+
+ if result.AutoAuth.Method.WrapTTL > 0 {
+ if len(result.AutoAuth.Sinks) != 1 {
+ return fmt.Errorf("error parsing auto_auth: wrapping is enabled on the auth method, which requires exactly one sink to be defined")
+ }
+
+ if result.AutoAuth.Sinks[0].WrapTTL > 0 {
+ return fmt.Errorf("error parsing auto_auth: wrapping enabled both on auth method and sink")
+ }
+ }
+
+ if result.AutoAuth.Method.MaxBackoffRaw != nil {
+ var err error
+ if result.AutoAuth.Method.MaxBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MaxBackoffRaw); err != nil {
+ return err
+ }
+ result.AutoAuth.Method.MaxBackoffRaw = nil
+ }
+
+ if result.AutoAuth.Method.MinBackoffRaw != nil {
+ var err error
+ if result.AutoAuth.Method.MinBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MinBackoffRaw); err != nil {
+ return err
+ }
+ result.AutoAuth.Method.MinBackoffRaw = nil
+ }
+
+ return nil
+}
+
+func parseMethod(result *Config, list *ast.ObjectList) error {
+ name := "method"
+
+ methodList := list.Filter(name)
+ if len(methodList.Items) != 1 {
+ return fmt.Errorf("one and only one %q block is required", name)
+ }
+
+ // Get our item
+ item := methodList.Items[0]
+
+ var m Method
+ if err := hcl.DecodeObject(&m, item.Val); err != nil {
+ return err
+ }
+
+ if m.Type == "" {
+ if len(item.Keys) == 1 {
+ m.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
+ }
+ if m.Type == "" {
+ return errors.New("method type must be specified")
+ }
+ }
+
+ // Default to Vault's default
+ if m.MountPath == "" {
+ m.MountPath = fmt.Sprintf("auth/%s", m.Type)
+ }
+ // Standardize on no trailing slash
+ m.MountPath = strings.TrimSuffix(m.MountPath, "/")
+
+ if m.WrapTTLRaw != nil {
+ var err error
+ if m.WrapTTL, err = parseutil.ParseDurationSecond(m.WrapTTLRaw); err != nil {
+ return err
+ }
+ m.WrapTTLRaw = nil
+ }
+
+ // Canonicalize namespace path if provided
+ m.Namespace = namespace.Canonicalize(m.Namespace)
+
+ result.AutoAuth.Method = &m
+ return nil
+}
+
+func parseSinks(result *Config, list *ast.ObjectList) error {
+ name := "sink"
+
+ sinkList := list.Filter(name)
+ if len(sinkList.Items) < 1 {
+ return nil
+ }
+
+ var ts []*Sink
+
+ for _, item := range sinkList.Items {
+ var s Sink
+ if err := hcl.DecodeObject(&s, item.Val); err != nil {
+ return err
+ }
+
+ if s.Type == "" {
+ if len(item.Keys) == 1 {
+ s.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
+ }
+ if s.Type == "" {
+ return errors.New("sink type must be specified")
+ }
+ }
+
+ if s.WrapTTLRaw != nil {
+ var err error
+ if s.WrapTTL, err = parseutil.ParseDurationSecond(s.WrapTTLRaw); err != nil {
+ return multierror.Prefix(err, fmt.Sprintf("sink.%s", s.Type))
+ }
+ s.WrapTTLRaw = nil
+ }
+
+ switch s.DHType {
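+ // curve25519 is the only supported Diffie-Hellman key type for encrypting
+ // the token written to the sink; an empty dh_type leaves the output
+ // unencrypted (dh_type and dh_path must be set together, validated below). +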
case "": + case "curve25519": + default: + return multierror.Prefix(errors.New("invalid value for 'dh_type'"), fmt.Sprintf("sink.%s", s.Type)) + } + + if s.AADEnvVar != "" { + s.AAD = os.Getenv(s.AADEnvVar) + s.AADEnvVar = "" + } + + switch { + case s.DHPath == "" && s.DHType == "": + if s.AAD != "" { + return multierror.Prefix(errors.New("specifying AAD data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + if s.DeriveKey { + return multierror.Prefix(errors.New("specifying 'derive_key' data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + case s.DHPath != "" && s.DHType != "": + default: + return multierror.Prefix(errors.New("'dh_type' and 'dh_path' must be specified together"), fmt.Sprintf("sink.%s", s.Type)) + } + + ts = append(ts, &s) + } + + result.AutoAuth.Sinks = ts + return nil +} diff --git a/command/proxy/config/config_test.go b/command/proxy/config/config_test.go new file mode 100644 index 000000000000..e0afc50de54a --- /dev/null +++ b/command/proxy/config/config_test.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package config + +import ( + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// TestLoadConfigFile_ProxyCache tests loading a config file containing a cache +// as well as a valid proxy config. +func TestLoadConfigFile_ProxyCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + EnforceConsistency: "always", + WhenInconsistent: "retry", + UseAutoAuthTokenRaw: true, + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") + if err != nil { + t.Fatal(err) + } + expected.Vault.TLSSkipVerifyRaw = interface{}(true) + + config.Prune() + 
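// Prune (above) strips raw-config and unused-key bookkeeping so that
+ // deep.Equal compares only the meaningful fields. +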
if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +// TestLoadConfigFile_NoCachingEnabled tests that you cannot enable a cache +// without either of the options to enable caching secrets +func TestLoadConfigFile_NoCachingEnabled(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-cache-but-no-secrets.hcl") + if err != nil { + t.Fatal(err) + } + + if err := cfg.ValidateConfig(); err == nil { + t.Fatalf("expected error, as you cannot configure a cache without caching secrets") + } +} + +// TestLoadConfigFile_StaticSecretCachingWithoutAutoAuth tests that loading +// a config file with static secret caching enabled but no auto auth will fail. +func TestLoadConfigFile_StaticSecretCachingWithoutAutoAuth(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-cache-static-no-auto-auth.hcl") + if err != nil { + t.Fatal(err) + } + + if err := cfg.ValidateConfig(); err == nil { + t.Fatalf("expected error, as static secret caching requires auto-auth") + } +} + +// TestLoadConfigFile_ProxyCacheStaticSecrets tests loading a config file containing a cache +// as well as a valid proxy config with static secret caching enabled +func TestLoadConfigFile_ProxyCacheStaticSecrets(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-static-secret-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Cache: &Cache{ + CacheStaticSecrets: true, + StaticSecretTokenCapabilityRefreshInterval: 1 * time.Hour, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + TLSSkipVerify: true, + TLSSkipVerifyRaw: interface{}("true"), + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl b/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl new file mode 100644 index 000000000000..edd8e6a2a584 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-but-no-secrets.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +cache { + cache_static_secrets = false + disable_caching_dynamic_secrets = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl new file mode 100644 index 000000000000..cd953e7f3d0a --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist "kubernetes" { + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener { + type = "unix" + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener { + type = "tcp" + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener { + type = "tcp" + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = true + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl b/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl new file mode 100644 index 000000000000..815d7fd8e615 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-static-no-auto-auth.hcl @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +cache { + cache_static_secrets = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl b/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl new file mode 100644 index 000000000000..fa395bd8bdc5 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-static-secret-cache.hcl @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + cache_static_secrets = true + static_secret_token_capability_refresh_interval = "1h" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +vault { + address = "http://127.0.0.1:1111" + tls_skip_verify = "true" +} diff --git a/command/proxy/config/test-fixtures/config-cache.hcl b/command/proxy/config/test-fixtures/config-cache.hcl new file mode 100644 index 000000000000..caf153479560 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache.hcl @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/test-fixtures/reload/reload_bar.key b/command/proxy/test-fixtures/reload/reload_bar.key new file mode 100644 index 000000000000..10849fbe1d7f --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 +XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_bar.pem b/command/proxy/test-fixtures/reload/reload_bar.pem new file mode 100644 index 000000000000..a8217be5c7df --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- 
+MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_ca.pem b/command/proxy/test-fixtures/reload/reload_ca.pem new file mode 100644 index 000000000000..72a74440c482 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ +9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.key b/command/proxy/test-fixtures/reload/reload_foo.key new file mode 100644 index 000000000000..86e6cce63e64 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey 
+szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO +FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.pem b/command/proxy/test-fixtures/reload/reload_foo.pem new file mode 100644 index 000000000000..c8b868bcd0f0 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/proxy_test.go b/command/proxy_test.go new file mode 100644 index 000000000000..cffc93b7507a --- /dev/null +++ b/command/proxy_test.go @@ -0,0 +1,1282 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agent" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func testProxyCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *ProxyCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + } +} + +// TestProxy_ExitAfterAuth tests the exit_after_auth flag, provided both +// as config and via -exit-after-auth. +func TestProxy_ExitAfterAuth(t *testing.T) { + t.Run("via_config", func(t *testing.T) { + testProxyExitAfterAuth(t, false) + }) + + t.Run("via_flag", func(t *testing.T) { + testProxyExitAfterAuth(t, true) + }) +} + +func testProxyExitAfterAuth(t *testing.T, viaFlag bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + dir := t.TempDir() + inf, err := os.CreateTemp(dir, "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + // We remove these files in this test since we don't need the files, we just need + // a non-conflicting file name for the config. 
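The CreateTemp/Close/Remove sequence that follows is a small idiom for reserving a unique, non-conflicting file name without leaving the file itself on disk. Reduced to a standalone sketch (the helper name is illustrative, not part of this change):

```go
// tempFileName reserves a unique file name by creating a temporary file
// and immediately deleting it, keeping only the name. Hypothetical helper
// shown for illustration; the approach is inherently racy, which is
// acceptable for test code that only needs a non-colliding path.
package main

import (
	"fmt"
	"os"
)

func tempFileName(dir, pattern string) (string, error) {
	f, err := os.CreateTemp(dir, pattern)
	if err != nil {
		return "", err
	}
	name := f.Name()
	f.Close()
	os.Remove(name) // keep only the name; the config or sink recreates the file later
	return name, nil
}

func main() {
	name, err := tempFileName(os.TempDir(), "sink.jwt.test.")
	if err != nil {
		panic(err)
	}
	fmt.Println("reserved:", name)
}
```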
+ os.Remove(in) + t.Logf("input: %s", in) + + sink1f, err := os.CreateTemp(dir, "sink1.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink1 := sink1f.Name() + sink1f.Close() + os.Remove(sink1) + t.Logf("sink1: %s", sink1) + + sink2f, err := os.CreateTemp(dir, "sink2.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink2 := sink2f.Name() + sink2f.Close() + os.Remove(sink2) + t.Logf("sink2: %s", sink2) + + conff, err := os.CreateTemp(dir, "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + exitAfterAuthTemplText := "exit_after_auth = true" + if viaFlag { + exitAfterAuthTemplText = "" + } + + config := ` +%s + +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} +` + + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test config", "path", conf) + } + + doneCh := make(chan struct{}) + go func() { + ui, cmd := testProxyCommand(t, logger) + cmd.client = client + + args := []string{"-config", conf} + if viaFlag { + args = append(args, "-exit-after-auth") + } + + code := cmd.Run(args) + if code != 0 { + t.Errorf("expected %d to be %d", code, 0) + t.Logf("output from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("error from proxy:\n%s", ui.ErrorWriter.String()) + } + close(doneCh) + }() + + select { + case <-doneCh: + break + case <-time.After(1 * time.Minute): + t.Fatal("timeout reached while waiting for proxy to exit") + } + + sink1Bytes, err := os.ReadFile(sink1) + if err != nil { + t.Fatal(err) + } + if len(sink1Bytes) == 0 { + t.Fatal("got no output from sink 1") + } + + sink2Bytes, err := os.ReadFile(sink2) + if err != nil { + t.Fatal(err) + } + if len(sink2Bytes) == 0 { + t.Fatal("got no output from sink 2") + } + + if string(sink1Bytes) != string(sink2Bytes) { + t.Fatal("sink 1/2 values don't match") + } +} + +// TestProxy_AutoAuth_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct when performing Auto-Auth. +// Uses the custom handler userAgentHandler (defined above) so +// that Vault validates the User-Agent on requests sent by Proxy. 
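The userAgentHandler referenced here is not shown in this diff; it lives elsewhere in the same test package. A minimal sketch of what such a User-Agent-checking handler can look like, with field names assumed for illustration:

```go
// Sketch of a test handler that validates the User-Agent header on one
// specific request before delegating to the real handler. This is an
// assumed shape, not the package's actual userAgentHandler.
package sketch

import (
	"net/http"
	"strings"
	"testing"
)

type checkUserAgentHandler struct {
	t                    *testing.T
	userAgentToCheckFor  string
	requestMethodToCheck string
	pathToCheck          string
	next                 http.Handler
}

func (h *checkUserAgentHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Only validate the one request the test cares about.
	if r.Method == h.requestMethodToCheck && strings.Contains(r.URL.Path, h.pathToCheck) {
		if got := r.UserAgent(); got != h.userAgentToCheckFor {
			h.t.Errorf("unexpected User-Agent: got %q, want %q", got, h.userAgentToCheckFor)
		}
	}
	h.next.ServeHTTP(w, r)
}
```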
+func TestProxy_AutoAuth_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyAutoAuthString() + h.requestMethodToCheck = "PUT" + h.pathToCheck = "auth/approle/login" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "1m", + "token_num_uses": "10", + "token_ttl": "1m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + defer os.Remove(roleIDPath) + defer os.Remove(secretIDPath) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + 
t.Fatalf("err: %s", err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the token to be sent to syncs and be available to be used + time.Sleep(5 * time.Second) + + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithoutCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy without +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy. +func TestProxy_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy. 
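Each of these proxy tests points an ordinary Vault API client at the proxy's TCP listener instead of at Vault itself, so the proxy is exercised transparently. The recurring client setup, reduced to a standalone sketch (address and token are illustrative):

```go
// Sketch: a standard Vault API client aimed at a local proxy listener.
// The proxy forwards requests to the real Vault cluster, so the client
// is configured exactly as if it were talking to Vault directly.
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func main() {
	conf := api.DefaultConfig()
	conf.Address = "http://127.0.0.1:8100" // illustrative proxy listener address

	client, err := api.NewClient(conf)
	if err != nil {
		panic(err)
	}
	client.SetToken("s.example") // or empty when use_auto_auth_token is set
	client.SetMaxRetries(0)      // fail fast client-side; retrying is the proxy's job here

	secret, err := client.Auth().Token().LookupSelf()
	if err != nil {
		panic(err)
	}
	fmt.Println("token accessor:", secret.Data["accessor"])
}
```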
+func TestProxy_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_Cache_DynamicSecret tests that the cache successfully caches a dynamic secret +// going through the Proxy, and that a subsequent request will be served from the cache. 
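The test below uses an orphan token because it is about the most concise request that yields a cacheable response: the reply carries Auth, is renewable, and the token is not managed by any other fixture. The repeated client call, sketched on its own:

```go
// Sketch: create a renewable orphan token through the API client. Two
// identical calls routed through a caching proxy should return the same
// client token when the second response is served from the cache.
package sketch

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func createOrphan(client *api.Client) (string, error) {
	renewable := true
	secret, err := client.Auth().Token().CreateOrphan(&api.TokenCreateRequest{
		Policies:  []string{"default"},
		TTL:       "30m",
		Renewable: &renewable,
	})
	if err != nil {
		return "", err
	}
	if secret == nil || secret.Auth == nil {
		return "", fmt.Errorf("no auth data in response: %v", secret)
	}
	return secret.Auth.ClientToken, nil
}
```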
+func TestProxy_Cache_DynamicSecret(t *testing.T) {
+	logger := logging.NewVaultLogger(hclog.Trace)
+	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	serverClient := cluster.Cores[0].Client
+
+	// Unset the environment variable so that proxy picks up the right test
+	// cluster address
+	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
+	os.Unsetenv(api.EnvVaultAddress)
+
+	cacheConfig := `
+cache {
+}
+`
+	listenAddr := generateListenerAddress(t)
+	listenConfig := fmt.Sprintf(`
+listener "tcp" {
+  address = "%s"
+  tls_disable = true
+}
+`, listenAddr)
+
+	config := fmt.Sprintf(`
+vault {
+  address = "%s"
+  tls_skip_verify = true
+}
+%s
+%s
+`, serverClient.Address(), cacheConfig, listenConfig)
+	configPath := makeTempFile(t, "config.hcl", config)
+	defer os.Remove(configPath)
+
+	// Start proxy
+	_, cmd := testProxyCommand(t, logger)
+	cmd.startedCh = make(chan struct{})
+
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	go func() {
+		cmd.Run([]string{"-config", configPath})
+		wg.Done()
+	}()
+
+	select {
+	case <-cmd.startedCh:
+	case <-time.After(5 * time.Second):
+		t.Errorf("timeout")
+	}
+
+	proxyClient, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		t.Fatal(err)
+	}
+	proxyClient.SetToken(serverClient.Token())
+	proxyClient.SetMaxRetries(0)
+	err = proxyClient.SetAddress("http://" + listenAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	renewable := true
+	tokenCreateRequest := &api.TokenCreateRequest{
+		Policies:  []string{"default"},
+		TTL:       "30m",
+		Renewable: &renewable,
+	}
+
+	// Creating an orphan token is the simplest and most concise way to
+	// trigger the caching behaviour: the response contains Auth, is
+	// renewable, and the token isn't managed elsewhere (since it's an
+	// orphan).
+	secret, err := proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.Auth == nil {
+		t.Fatalf("secret not as expected: %v", secret)
+	}
+
+	token := secret.Auth.ClientToken
+
+	secret, err = proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.Auth == nil {
+		t.Fatalf("secret not as expected: %v", secret)
+	}
+
+	token2 := secret.Auth.ClientToken
+
+	if token != token2 {
+		t.Fatalf("token create response not cached when it should have been, as tokens differ")
+	}
+
+	close(cmd.ShutdownCh)
+	wg.Wait()
+}
+
+// TestProxy_ApiProxy_Retry tests the retry functionality of Vault Proxy's API Proxy.
+func TestProxy_ApiProxy_Retry(t *testing.T) {
+	//----------------------------------------------------
+	// Start the server and proxy
+	//----------------------------------------------------
+	logger := logging.NewVaultLogger(hclog.Trace)
+	var h handler
+	cluster := vault.NewTestCluster(t,
+		&vault.CoreConfig{
+			CredentialBackends: map[string]logical.Factory{
+				"approle": credAppRole.Factory,
+			},
+			LogicalBackends: map[string]logical.Factory{
+				"kv": logicalKv.Factory,
+			},
+		},
+		&vault.TestClusterOptions{
+			NumCores: 1,
+			HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler {
+				h.props = properties
+				h.t = t
+				return &h
+			}),
+		})
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	vault.TestWaitActive(t, cluster.Cores[0].Core)
+	serverClient := cluster.Cores[0].Client
+
+	// Unset the environment variable so that proxy picks up the right test
+ // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ + "bar": "baz", + }) + if err != nil { + t.Fatal(err) + } + + intRef := func(i int) *int { + return &i + } + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + h.failCount = 2 + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read("secret/foo") + switch { + case (err != nil || secret == nil) && tc.expectError: + case (err == nil || secret != nil) && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) + } + if secret != nil && secret.Data["foo"] != nil { + val := secret.Data["foo"].(map[string]interface{}) + if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { + t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) + } + } + time.Sleep(time.Second) + + close(cmd.ShutdownCh) + wg.Wait() + }) + } +} + +// TestProxy_Metrics tests that metrics are being properly reported. 
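The assertion in this test pins the top-level keys of the metrics payload (Counters, Samples, Gauges, Points, Timestamp), which matches the summary shape of an in-memory telemetry sink. Fetching the same endpoint outside the test harness might look like this (listener address is illustrative; the path is the one the test uses):

```go
// Sketch: query Vault Proxy's metrics endpoint and print the top-level
// keys of the JSON summary it returns.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:8100/proxy/v1/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	for k := range body {
		fmt.Println(k) // expect Counters, Samples, Gauges, Points, Timestamp
	}
}
```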
+func TestProxy_Metrics(t *testing.T) { + // Start a vault server + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Create a config file + listenAddr := generateListenerAddress(t) + config := fmt.Sprintf(` +cache {} + +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + ui, cmd := testProxyCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running proxy: %d", code) + t.Logf("STDOUT from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from proxy:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // defer proxy shutdown + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + req := proxyClient.NewRequest("GET", "/proxy/v1/metrics") + body := request(t, proxyClient, req, 200) + keys := []string{} + for k := range body { + keys = append(keys, k) + } + require.ElementsMatch(t, keys, []string{ + "Counters", + "Samples", + "Timestamp", + "Gauges", + "Points", + }) +} + +// TestProxy_QuitAPI Tests the /proxy/v1/quit API that can be enabled for the proxy. +func TestProxy_QuitAPI(t *testing.T) { + cluster := minimal.NewTestSoloCluster(t, nil) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + err := os.Unsetenv(api.EnvVaultAddress) + if err != nil { + t.Fatal(err) + } + + listenAddr := generateListenerAddress(t) + listenAddr2 := generateListenerAddress(t) + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} + +listener "tcp" { + address = "%s" + tls_disable = true +} + +listener "tcp" { + address = "%s" + tls_disable = true + proxy_api { + enable_quit = true + } +} + +cache {} +`, serverClient.Address(), listenAddr, listenAddr2) + + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + _, cmd := testProxyCommand(t, nil) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // First try on listener 1 where the API should be disabled. 
+ resp, err := client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err == nil { + t.Fatalf("expected error") + } + if resp != nil && resp.StatusCode != http.StatusNotFound { + t.Fatalf("expected %d but got: %d", http.StatusNotFound, resp.StatusCode) + } + + // Now try on listener 2 where the quit API should be enabled. + err = client.SetAddress("http://" + listenAddr2) + if err != nil { + t.Fatal(err) + } + + _, err = client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-cmd.ShutdownCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + wg.Wait() +} + +// TestProxy_LogFile_CliOverridesConfig tests that the CLI values +// override the config for log files +func TestProxy_LogFile_CliOverridesConfig(t *testing.T) { + // Create basic config + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile) + + // Initialize the command and parse any flags + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + // Simulate the flag being specified + err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) + if err != nil { + t.Fatal(err) + } + + // Update the config based on the inputs. + cmd.applyConfigOverrides(f, cfg) + + assert.NotEqual(t, "TMPDIR/juan.log", cfg.LogFile) + assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) + assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) +} + +// TestProxy_LogFile_Config tests log file config when loaded from config +func TestProxy_LogFile_Config(t *testing.T) { + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "sanity check on log config failed") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) + + // Parse the cli flags (but we pass in an empty slice) + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + // Should change nothing... + cmd.applyConfigOverrides(f, cfg) + + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "actual config check") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) +} + +// TestProxy_EnvVar_Overrides tests that environment variables are properly +// parsed and override defaults. 
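Taken together, the override tests assert a simple layering: an explicit CLI flag or environment variable wins over the value loaded from the config file, and an unset flag leaves the config value alone. The env-over-config case for TLS verification, sketched with an illustrative helper (not the actual applyConfigOverrides implementation):

```go
// Sketch: an environment variable overriding a config-file value,
// mirroring what the VAULT_SKIP_VERIFY test below asserts.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func effectiveTLSSkipVerify(configVal bool) bool {
	if raw := os.Getenv("VAULT_SKIP_VERIFY"); raw != "" {
		if v, err := strconv.ParseBool(raw); err == nil {
			return v // the environment wins when set and parseable
		}
	}
	return configVal // otherwise fall back to the config file
}

func main() {
	os.Setenv("VAULT_SKIP_VERIFY", "true")
	fmt.Println(effectiveTLSSkipVerify(false)) // true: env overrides config
	os.Setenv("VAULT_SKIP_VERIFY", "false")
	fmt.Println(effectiveTLSSkipVerify(true)) // false: env still wins
}
```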
+func TestProxy_EnvVar_Overrides(t *testing.T) { + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "true") + // Parse the cli flags (but we pass in an empty slice) + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, true, cfg.Vault.TLSSkipVerify) + + t.Setenv("VAULT_SKIP_VERIFY", "false") + + cmd.applyConfigOverrides(f, cfg) + assert.Equal(t, false, cfg.Vault.TLSSkipVerify) +} + +// TestProxy_Config_NewLogger_Default Tests defaults for log level and +// specifically cmd.newLogger() +func TestProxy_Config_NewLogger_Default(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + cmd.config = proxyConfig.NewConfig() + logger, err := cmd.newLogger() + + assert.NoError(t, err) + assert.NotNil(t, logger) + assert.Equal(t, hclog.Info.String(), logger.GetLevel().String()) +} + +// TestProxy_Config_ReloadLogLevel Tests reloading updates the log +// level as expected. +func TestProxy_Config_ReloadLogLevel(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + var err error + tempDir := t.TempDir() + + // Load an initial config + hcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", hcl) + cmd.config, err = proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Tweak the loaded config to make sure we can put log files into a temp dir + // and systemd log attempts work fine, this would usually happen during Run. + cmd.logWriter = os.Stdout + cmd.logger, err = cmd.newLogger() + if err != nil { + t.Fatal("logger required for systemd log messages", err) + } + + // Sanity check + assert.Equal(t, "warn", cmd.config.LogLevel) + + // Load a new config + hcl = strings.ReplaceAll(BasicHclConfig2, "TMPDIR", tempDir) + configFile = populateTempFile(t, "proxy-config.hcl", hcl) + err = cmd.reloadConfig([]string{configFile.Name()}) + assert.NoError(t, err) + assert.Equal(t, "debug", cmd.config.LogLevel) +} + +// TestProxy_Config_ReloadTls Tests that the TLS certs for the listener are +// correctly reloaded. 
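The reload test below decides which certificate the listener is serving by dialing it with a pinned CA pool and reading the leaf certificate's CommonName. That check, extracted into a standalone sketch:

```go
// Sketch: connect to a TLS listener and report the CommonName of the leaf
// certificate it presents. tls.Dial completes the handshake, so
// PeerCertificates is populated (leaf first) once it returns without error.
package sketch

import (
	"crypto/tls"
	"crypto/x509"
)

func leafCommonName(addr string, pool *x509.CertPool) (string, error) {
	conn, err := tls.Dial("tcp", addr, &tls.Config{RootCAs: pool})
	if err != nil {
		return "", err
	}
	defer conn.Close()
	return conn.ConnectionState().PeerCertificates[0].Subject.CommonName, nil
}
```

Swapping the certificate and key files on disk and sending SIGHUP should change the name this returns, which is exactly what the test verifies.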
+func TestProxy_Config_ReloadTls(t *testing.T) { + var wg sync.WaitGroup + wd, err := os.Getwd() + if err != nil { + t.Fatal("unable to get current working directory") + } + workingDir := filepath.Join(wd, "/proxy/test-fixtures/reload") + fooCert := "reload_foo.pem" + fooKey := "reload_foo.key" + + barCert := "reload_bar.pem" + barKey := "reload_bar.key" + + reloadCert := "reload_cert.pem" + reloadKey := "reload_key.pem" + caPem := "reload_ca.pem" + + tempDir := t.TempDir() + + // Set up initial 'foo' certs + inBytes, err := os.ReadFile(filepath.Join(workingDir, fooCert)) + if err != nil { + t.Fatal("unable to read cert required for test", fooCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, fooKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", fooKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, caPem)) + if err != nil { + t.Fatal("unable to read CA pem required for test", caPem, err) + } + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + replacedHcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", replacedHcl) + + // Set up Proxy + logger := logging.NewVaultLogger(hclog.Trace) + ui, cmd := testProxyCommand(t, logger) + + var output string + var code int + wg.Add(1) + args := []string{"-config", configFile.Name()} + go func() { + if code = cmd.Run(args); code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() + } + wg.Done() + }() + + testCertificateName := func(cn string) error { + conn, err := tls.Dial("tcp", "127.0.0.1:8100", &tls.Config{ + RootCAs: certPool, + }) + if err != nil { + return err + } + defer conn.Close() + if err = conn.Handshake(); err != nil { + return err + } + servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName + if servName != cn { + return fmt.Errorf("expected %s, got %s", cn, servName) + } + return nil + } + + // Start + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("foo.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // Swap out certs + inBytes, err = os.ReadFile(filepath.Join(workingDir, barCert)) + if err != nil { + t.Fatal("unable to read cert required for test", barCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, barKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", barKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + // Reload + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("bar.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // 
Shut down + cmd.ShutdownCh <- struct{}{} + wg.Wait() + + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } +} diff --git a/command/read.go b/command/read.go index 91e50c519de9..742e03676381 100644 --- a/command/read.go +++ b/command/read.go @@ -1,12 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "context" "fmt" "io" "os" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -77,6 +81,10 @@ func (c *ReadCommand) Run(args []string) int { return 2 } + // client.ReadRaw* methods require a manual timeout override + ctx, cancel := context.WithTimeout(context.Background(), client.ClientTimeout()) + defer cancel() + // Pull our fake stdin if needed stdin := (io.Reader)(os.Stdin) if c.testStdin != nil { @@ -92,7 +100,7 @@ func (c *ReadCommand) Run(args []string) int { } if Format(c.UI) != "raw" { - secret, err := client.Logical().ReadWithData(path, data) + secret, err := client.Logical().ReadWithDataWithContext(ctx, path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error reading %s: %s", path, err)) return 2 @@ -109,7 +117,7 @@ func (c *ReadCommand) Run(args []string) int { return OutputSecret(c.UI, secret) } - resp, err := client.Logical().ReadRawWithData(path, data) + resp, err := client.Logical().ReadRawWithDataWithContext(ctx, path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error reading: %s: %s", path, err)) return 2 diff --git a/command/read_test.go b/command/read_test.go index 4a8ec877aa0f..fe8961afb669 100644 --- a/command/read_test.go +++ b/command/read_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testReadCommand(tb testing.TB) (*cli.MockUi, *ReadCommand) { diff --git a/command/rotate.go b/command/rotate.go index f366a6133b10..2a17e41f9b5e 100644 --- a/command/rotate.go +++ b/command/rotate.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/rotate_test.go b/command/rotate_test.go index 37ac32340590..927812934c3f 100644 --- a/command/rotate_test.go +++ b/command/rotate_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testOperatorRotateCommand(tb testing.TB) (*cli.MockUi, *OperatorRotateCommand) { diff --git a/command/secrets.go b/command/secrets.go index 06e63bec281f..a205aae17443 100644 --- a/command/secrets.go +++ b/command/secrets.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*SecretsCommand)(nil) diff --git a/command/secrets_disable.go b/command/secrets_disable.go index 47a61c5fe094..163af4a785c7 100644 --- a/command/secrets_disable.go +++ b/command/secrets_disable.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/secrets_disable_test.go b/command/secrets_disable_test.go index 567c8956d630..253107136430 100644 --- a/command/secrets_disable_test.go +++ b/command/secrets_disable_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testSecretsDisableCommand(tb testing.TB) (*cli.MockUi, *SecretsDisableCommand) { diff --git a/command/secrets_enable.go b/command/secrets_enable.go index 8be62953dca9..a73a5e49ef87 100644 --- a/command/secrets_enable.go +++ b/command/secrets_enable.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,9 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -39,6 +41,8 @@ type SecretsEnableCommand struct { flagExternalEntropyAccess bool flagVersion int flagAllowedManagedKeys []string + flagDelegatedAuthAccessors []string + flagIdentityTokenKey string } func (c *SecretsEnableCommand) Synopsis() string { @@ -169,7 +173,7 @@ func (c *SecretsEnableCommand) Flags() *FlagSets { f.StringVar(&StringVar{ Name: "plugin-name", Target: &c.flagPluginName, - Completion: c.PredictVaultPlugins(consts.PluginTypeSecrets, consts.PluginTypeDatabase), + Completion: c.PredictVaultPlugins(api.PluginTypeSecrets, api.PluginTypeDatabase), Usage: "Name of the secrets engine plugin. This plugin name must already " + "exist in Vault's plugin catalog.", }) @@ -226,6 +230,21 @@ func (c *SecretsEnableCommand) Flags() *FlagSets { "each time with 1 key.", }) + f.StringSliceVar(&StringSliceVar{ + Name: flagNameDelegatedAuthAccessors, + Target: &c.flagDelegatedAuthAccessors, + Usage: "A list of permitted authentication accessors this backend can delegate authentication to. " + + "Note that multiple values may be specified by providing this option multiple times, " + + "each time with 1 accessor.", + }) + + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -329,9 +348,17 @@ func (c *SecretsEnableCommand) Run(args []string) int { mountInput.Config.AllowedManagedKeys = c.flagAllowedManagedKeys } + if fl.Name == flagNameDelegatedAuthAccessors { + mountInput.Config.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors + } + if fl.Name == flagNamePluginVersion { mountInput.Config.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameIdentityTokenKey { + mountInput.Config.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().Mount(mountPath, mountInput); err != nil { diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go index 127e54a6ac67..3efc171a7be1 100644 --- a/command/secrets_enable_test.go +++ b/command/secrets_enable_test.go @@ -1,23 +1,24 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "errors" "io/ioutil" "os" + "sort" "strings" "testing" "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" + "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/mitchellh/cli" ) -// logicalBackendAdjustmentFactor is set to plus 1 for the database backend -// which is a plugin but not found in go.mod files, and minus 1 for the ldap -// and openldap secret backends which have the same underlying plugin. -var logicalBackendAdjustmentFactor = 1 - 1 - func testSecretsEnableCommand(tb testing.TB) (*cli.MockUi, *SecretsEnableCommand) { tb.Helper() @@ -117,6 +118,8 @@ func TestSecretsEnableCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization", "-allowed-managed-keys", "key1,key2", + "-identity-token-key", "default", + "-delegated-auth-accessors", "authAcc1,authAcc2", "-force-no-cache", "pki", }) @@ -169,6 +172,12 @@ func TestSecretsEnableCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) } + if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 { + t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff) + } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. Difference is: %v", diff) + } }) t.Run("communication_failure", func(t *testing.T) { @@ -215,7 +224,7 @@ func TestSecretsEnableCommand_Run(t *testing.T) { var backends []string for _, f := range files { if f.IsDir() { - if f.Name() == "plugin" { + if f.Name() == "plugin" || f.Name() == "database" { continue } if _, err := os.Stat("../builtin/logical/" + f.Name() + "/backend.go"); errors.Is(err, os.ErrNotExist) { @@ -242,10 +251,12 @@ func TestSecretsEnableCommand_Run(t *testing.T) { } } - // backends are found by walking the directory, which includes the database backend, - // however, the plugins registry omits that one - if len(backends) != len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor { - t.Fatalf("expected %d logical backends, got %d", len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor, len(backends)) + regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeSecrets), "ldap") + sort.Strings(regkeys) + sort.Strings(backends) + + if d := cmp.Diff(regkeys, backends); len(d) > 0 { + t.Fatalf("found logical registry mismatch: %v", d) } for _, b := range backends { diff --git a/command/secrets_list.go b/command/secrets_list.go index 998620f0964a..2819e2a1d390 100644 --- a/command/secrets_list.go +++ b/command/secrets_list.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/secrets_list_test.go b/command/secrets_list_test.go index 1aeee5bf6729..dcc51eb01892 100644 --- a/command/secrets_list_test.go +++ b/command/secrets_list_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSecretsListCommand(tb testing.TB) (*cli.MockUi, *SecretsListCommand) { diff --git a/command/secrets_move.go b/command/secrets_move.go index 458e3bbece7a..bd4062969a49 100644 --- a/command/secrets_move.go +++ b/command/secrets_move.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/secrets_move_test.go b/command/secrets_move_test.go index 153fbeb2cdc0..ed7a5a5c629c 100644 --- a/command/secrets_move_test.go +++ b/command/secrets_move_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSecretsMoveCommand(tb testing.TB) (*cli.MockUi, *SecretsMoveCommand) { diff --git a/command/secrets_tune.go b/command/secrets_tune.go index bf8fa3d59378..b853aec2711b 100644 --- a/command/secrets_tune.go +++ b/command/secrets_tune.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -7,8 +10,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -32,6 +35,8 @@ type SecretsTuneCommand struct { flagVersion int flagPluginVersion string flagAllowedManagedKeys []string + flagDelegatedAuthAccessors []string + flagIdentityTokenKey string } func (c *SecretsTuneCommand) Synopsis() string { @@ -155,6 +160,21 @@ func (c *SecretsTuneCommand) Flags() *FlagSets { "the plugin catalog, and will not start running until the plugin is reloaded.", }) + f.StringSliceVar(&StringSliceVar{ + Name: flagNameDelegatedAuthAccessors, + Target: &c.flagDelegatedAuthAccessors, + Usage: "A list of permitted authentication accessors this backend can delegate authentication to. 
" + + "Note that multiple values may be specified by providing this option multiple times, " + + "each time with 1 accessor.", + }) + + f.StringVar(&StringVar{ + Name: flagNameIdentityTokenKey, + Target: &c.flagIdentityTokenKey, + Default: "default", + Usage: "Select the key used to sign plugin identity tokens.", + }) + return set } @@ -239,6 +259,14 @@ func (c *SecretsTuneCommand) Run(args []string) int { if fl.Name == flagNamePluginVersion { mountConfigInput.PluginVersion = c.flagPluginVersion } + + if fl.Name == flagNameDelegatedAuthAccessors { + mountConfigInput.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors + } + + if fl.Name == flagNameIdentityTokenKey { + mountConfigInput.IdentityTokenKey = c.flagIdentityTokenKey + } }) if err := client.Sys().TuneMount(mountPath, mountConfigInput); err != nil { diff --git a/command/secrets_tune_test.go b/command/secrets_tune_test.go index 41c6bd2f6fd1..b2d932779fd8 100644 --- a/command/secrets_tune_test.go +++ b/command/secrets_tune_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,10 +8,9 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" ) func testSecretsTuneCommand(tb testing.TB) (*cli.MockUi, *SecretsTuneCommand) { @@ -150,8 +152,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) { t.Run("integration", func(t *testing.T) { t.Run("flags_all", func(t *testing.T) { t.Parallel() - pluginDir, cleanup := vault.MakeTestPluginDir(t) - defer cleanup(t) + pluginDir := corehelpers.MakeTestPluginDir(t) client, _, closer := testVaultServerPluginDir(t, pluginDir) defer closer() @@ -179,7 +180,7 @@ func TestSecretsTuneCommand_Run(t *testing.T) { t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) } - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", consts.PluginTypeSecrets) + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", api.PluginTypeSecrets) code := cmd.Run([]string{ "-description", "new description", @@ -191,8 +192,10 @@ func TestSecretsTuneCommand_Run(t *testing.T) { "-passthrough-request-headers", "www-authentication", "-allowed-response-headers", "authorization,www-authentication", "-allowed-managed-keys", "key1,key2", + "-identity-token-key", "default", "-listing-visibility", "unauth", "-plugin-version", version, + "-delegated-auth-accessors", "authAcc1,authAcc2", "mount_tune_integration/", }) if exp := 0; code != exp { @@ -244,6 +247,12 @@ func TestSecretsTuneCommand_Run(t *testing.T) { if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) } + if diff := deep.Equal([]string{"authAcc1,authAcc2"}, mountInfo.Config.DelegatedAuthAccessors); len(diff) > 0 { + t.Errorf("Failed to find expected values in DelegatedAuthAccessors. Difference is: %v", diff) + } + if diff := deep.Equal("default", mountInfo.Config.IdentityTokenKey); len(diff) > 0 { + t.Errorf("Failed to find expected values in IdentityTokenKey. 
Difference is: %v", diff) + } }) t.Run("flags_description", func(t *testing.T) { diff --git a/command/server.go b/command/server.go index f3f7db537b45..9c754b490df5 100644 --- a/command/server.go +++ b/command/server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,6 +8,7 @@ import ( "crypto/sha256" "encoding/base64" "encoding/hex" + "errors" "fmt" "io" "io/ioutil" @@ -22,8 +26,11 @@ import ( "time" systemd "github.com/coreos/go-systemd/daemon" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" wrapping "github.com/hashicorp/go-kms-wrapping/v2" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" "github.com/hashicorp/go-multierror" @@ -36,27 +43,31 @@ import ( "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/builtinplugins" "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/experiments" loghelper "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" "github.com/hashicorp/vault/helper/useragent" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/plugins/event" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" sr "github.com/hashicorp/vault/serviceregistration" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/hcp_link" + "github.com/hashicorp/vault/vault/plugincatalog" vaultseal "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" "github.com/mitchellh/go-testing-interface" - "github.com/pkg/errors" "github.com/posener/complete" + "github.com/sasha-s/go-deadlock" "go.uber.org/atomic" "golang.org/x/net/http/httpproxy" "google.golang.org/grpc/grpclog" @@ -69,11 +80,6 @@ var ( var memProfilerEnabled = false -var enableFourClusterDev = func(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { - c.logger.Error("-dev-four-cluster only supported in enterprise Vault") - return 1 -} - const ( storageMigrationLock = "core/migration" @@ -91,6 +97,7 @@ type ServerCommand struct { CredentialBackends map[string]logical.Factory LogicalBackends map[string]logical.Factory PhysicalBackends map[string]physical.Factory + EventBackends map[string]event.Factory ServiceRegistrations map[string]sr.Factory @@ -116,9 +123,11 @@ type ServerCommand struct { flagConfigs []string flagRecovery bool + flagExperiments []string flagDev bool flagDevTLS bool flagDevTLSCertDir string + flagDevTLSSANs []string flagDevRootTokenID string flagDevListenAddr string flagDevNoStoreToken bool @@ -134,10 +143,13 @@ type ServerCommand struct { flagDevFourCluster bool flagDevTransactional bool flagDevAutoSeal bool + flagDevClusterJson string flagTestVerifyOnly bool flagTestServerConfig bool flagDevConsul bool flagExitOnCoreShutdown bool + + sealsToFinalize []*vault.Seal } func (c *ServerCommand) Synopsis() string { @@ 
-200,8 +212,19 @@ func (c *ServerCommand) Flags() *FlagSets { f.BoolVar(&BoolVar{ Name: "recovery", Target: &c.flagRecovery, - Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions." + - "Using a recovery operation token, \"sys/raw\" API can be used to manipulate the storage.", + Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions. " + + "Using a recovery token, \"sys/raw\" API can be used to manipulate the storage.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "experiment", + Target: &c.flagExperiments, + Completion: complete.PredictSet(experiments.ValidExperiments()...), + Usage: "Name of an experiment to enable. Experiments should NOT be used in production, and " + + "the associated APIs may have backwards incompatible changes between releases. This " + + "flag can be specified multiple times to specify multiple experiments. This can also be " + + fmt.Sprintf("specified via the %s environment variable as a comma-separated list. ", EnvVaultExperiments) + + "Valid experiments are: " + strings.Join(experiments.ValidExperiments(), ", "), }) f = set.NewFlagSet("Dev Options") @@ -231,6 +254,18 @@ func (c *ServerCommand) Flags() *FlagSets { "specified. If left unset, files are generated in a temporary directory.", }) + f.StringSliceVar(&StringSliceVar{ + Name: "dev-tls-san", + Target: &c.flagDevTLSSANs, + Default: nil, + Usage: "Additional Subject Alternative Name (as a DNS name or IP address) " + + "to generate the certificate with if `-dev-tls` is specified. The " + + "certificate will always use localhost, localhost4, localhost6, " + + "localhost.localdomain, and the host name as alternate DNS names, " + + "and 127.0.0.1 as an alternate IP address. This flag can be specified " + + "multiple times to specify multiple SANs.", + }) + f.StringVar(&StringVar{ Name: "dev-root-token-id", Target: &c.flagDevRootTokenID, @@ -354,6 +389,12 @@ func (c *ServerCommand) Flags() *FlagSets { Hidden: true, }) + f.StringVar(&StringVar{ + Name: "dev-cluster-json", + Target: &c.flagDevClusterJson, + Usage: "File to write cluster definition to", + }) + // TODO: should the below flags be public? 
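Returning to the -experiment flag above: its help text says experiments may also be supplied through an environment variable as a comma-separated list. One plausible way to merge the two sources, sketched under the assumptions that the variable is VAULT_EXPERIMENTS (the value behind the EnvVaultExperiments constant) and that duplicates are dropped:

```go
// Sketch: combine experiment names from an env var and repeated CLI flags.
// The merge rules (env first, duplicates removed) are assumptions for
// illustration, not necessarily the server's exact behaviour.
package sketch

import (
	"os"
	"strings"
)

func mergeExperiments(flagVals []string) []string {
	seen := map[string]bool{}
	var out []string
	add := func(name string) {
		name = strings.TrimSpace(name)
		if name != "" && !seen[name] {
			seen[name] = true
			out = append(out, name)
		}
	}
	for _, v := range strings.Split(os.Getenv("VAULT_EXPERIMENTS"), ",") {
		add(v)
	}
	for _, v := range flagVals {
		add(v)
	}
	return out
}
```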
f.BoolVar(&BoolVar{ Name: "test-verify-only", @@ -433,7 +474,7 @@ func (c *ServerCommand) runRecoveryMode() int { } // Update the 'log' related aspects of shared config based on config/env var/cli - c.Flags().updateLogConfig(config.SharedConfig) + c.flags.applyLogConfigOverrides(config.SharedConfig) l, err := c.configureLogging(config) if err != nil { c.UI.Error(err.Error()) @@ -499,57 +540,37 @@ func (c *ServerCommand) runRecoveryMode() int { var barrierSeal vault.Seal var sealConfigError error - var wrapper wrapping.Wrapper if len(config.Seals) == 0 { config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) } - if len(config.Seals) > 1 { - c.UI.Error("Only one seal block is accepted in recovery mode") + ctx := context.Background() + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting seal generation info: %v", err)) return 1 } - configSeal := config.Seals[0] - sealType := wrapping.WrapperTypeShamir.String() - if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { - sealType = os.Getenv("VAULT_SEAL_TYPE") - configSeal.Type = sealType - } else { - sealType = configSeal.Type + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Cannot determine if there are partially seal wrapped entries in storage: %v", err)) + return 1 } - - infoKeys = append(infoKeys, "Seal Type") - info["Seal Type"] = sealType - - var seal vault.Seal - defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), - }) - sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) - wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger) - if sealConfigError != nil { - if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { - c.UI.Error(fmt.Sprintf( - "Error parsing Seal configuration: %s", sealConfigError)) - return 1 - } + setSealResponse, err := setSeal(c, config, infoKeys, info, existingSealGenerationInfo, hasPartialPaths) + if err != nil { + c.UI.Error(err.Error()) + return 1 } - if wrapper == nil { - seal = defaultSeal - } else { - seal, err = vault.NewAutoSeal(&vaultseal.Access{ - Wrapper: wrapper, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("error creating auto seal: %v", err)) - } + if setSealResponse.barrierSeal == nil { + c.UI.Error(fmt.Sprintf("Error setting up seal: %v", setSealResponse.sealConfigError)) + return 1 } - barrierSeal = seal + barrierSeal = setSealResponse.barrierSeal // Ensure that the seal finalizer is called, even if using verify-only defer func() { - err = seal.Finalize(context.Background()) + err = barrierSeal.Finalize(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) } @@ -559,6 +580,7 @@ func (c *ServerCommand) runRecoveryMode() int { Physical: backend, StorageType: config.Storage.Type, Seal: barrierSeal, + UnwrapSeal: setSealResponse.unwrapSeal, LogLevel: config.LogLevel, Logger: c.logger, DisableMlock: config.DisableMlock, @@ -574,7 +596,7 @@ func (c *ServerCommand) runRecoveryMode() int { } } - if err := core.InitializeRecovery(context.Background()); err != nil { + if err := core.InitializeRecovery(ctx); err != nil { c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) return 1 } @@ -626,7 +648,7 @@ func (c *ServerCommand) runRecoveryMode() int { infoKeys = append(infoKeys, "go version") info["go version"] = runtime.Version() - fipsStatus := 
getFIPSInfoKey() + fipsStatus := entGetFIPSInfoKey() if fipsStatus != "" { infoKeys = append(infoKeys, "fips") info["fips"] = fipsStatus @@ -648,6 +670,12 @@ func (c *ServerCommand) runRecoveryMode() int { c.UI.Output("") + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + return 0 + } + for _, ln := range lns { handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ Core: core, @@ -669,7 +697,7 @@ func (c *ServerCommand) runRecoveryMode() int { } if sealConfigError != nil { - init, err := core.InitializedLocally(context.Background()) + init, err := core.InitializedLocally(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) return 1 @@ -837,9 +865,9 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b } if reloadFunc != nil { - relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type] + relSlice := (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] relSlice = append(relSlice, reloadFunc) - (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice + (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] = relSlice } if !disableClustering && lnConfig.Type == "tcp" { @@ -877,6 +905,12 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b } props["max_request_duration"] = lnConfig.MaxRequestDuration.String() + props["disable_request_limiter"] = strconv.FormatBool(lnConfig.DisableRequestLimiter) + + if lnConfig.ChrootNamespace != "" { + props["chroot_namespace"] = lnConfig.ChrootNamespace + } + lns = append(lns, listenerutil.Listener{ Listener: ln, Config: lnConfig, @@ -903,6 +937,79 @@ func (c *ServerCommand) InitListeners(config *server.Config, disableClustering b return 0, lns, clusterAddrs, nil } +func configureDevTLS(c *ServerCommand) (func(), *server.Config, string, error) { + var devStorageType string + + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + + var certDir string + var err error + var config *server.Config + var f func() + + if c.flagDevTLS { + if c.flagDevTLSCertDir != "" { + if _, err = os.Stat(c.flagDevTLSCertDir); err != nil { + return nil, nil, "", err + } + + certDir = c.flagDevTLSCertDir + } else { + if certDir, err = os.MkdirTemp("", "vault-tls"); err != nil { + return nil, nil, certDir, err + } + } + extraSANs := c.flagDevTLSSANs + host, _, err := net.SplitHostPort(c.flagDevListenAddr) + if err == nil { + // 127.0.0.1 is the default, and already included in the SANs. + // Empty host means listen on all interfaces, but users should use the + // -dev-tls-san flag to get the right SANs in that case. 
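+ // Illustrative examples (assuming net.SplitHostPort semantics):
+ //   -dev-listen-addr=192.0.2.10:8200 -> host "192.0.2.10", appended as an extra SAN
+ //   -dev-listen-addr=127.0.0.1:8200  -> host "127.0.0.1", skipped (already a default SAN)
+ //   -dev-listen-addr=:8200           -> host "", nothing appended; pass -dev-tls-san explicitly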
+ if host != "" && host != "127.0.0.1" { + extraSANs = append(extraSANs, host) + } + } + config, err = server.DevTLSConfig(devStorageType, certDir, extraSANs) + + f = func() { + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)); err != nil { + c.UI.Error(err.Error()) + } + + // Only delete temp directories we made. + if c.flagDevTLSCertDir == "" { + if err := os.Remove(certDir); err != nil { + c.UI.Error(err.Error()) + } + } + } + + } else { + config, err = server.DevConfig(devStorageType) + } + + return f, config, certDir, err +} + func (c *ServerCommand) Run(args []string) int { f := c.Flags() @@ -911,6 +1018,9 @@ func (c *ServerCommand) Run(args []string) int { return 1 } + // Don't exit just because we saw a potential deadlock. + deadlock.Opts.OnPotentialDeadlock = func() {} + c.logGate = gatedwriter.NewWriter(os.Stderr) c.logWriter = c.logGate @@ -943,68 +1053,11 @@ func (c *ServerCommand) Run(args []string) int { // Load the configuration var config *server.Config - var err error var certDir string if c.flagDev { - var devStorageType string - switch { - case c.flagDevConsul: - devStorageType = "consul" - case c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional_ha" - case !c.flagDevHA && c.flagDevTransactional: - devStorageType = "inmem_transactional" - case c.flagDevHA && !c.flagDevTransactional: - devStorageType = "inmem_ha" - default: - devStorageType = "inmem" - } - - if c.flagDevTLS { - if c.flagDevTLSCertDir != "" { - _, err := os.Stat(c.flagDevTLSCertDir) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - certDir = c.flagDevTLSCertDir - } else { - certDir, err = os.MkdirTemp("", "vault-tls") - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - } - config, err = server.DevTLSConfig(devStorageType, certDir) - - defer func() { - err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - err = os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)) - if err != nil { - c.UI.Error(err.Error()) - } - - // Only delete temp directories we made. 
- if c.flagDevTLSCertDir == "" { - err = os.Remove(certDir) - if err != nil { - c.UI.Error(err.Error()) - } - } - }() - - } else { - config, err = server.DevConfig(devStorageType) + df, cfg, dir, err := configureDevTLS(c) + if df != nil { + defer df() } if err != nil { @@ -1012,6 +1065,9 @@ func (c *ServerCommand) Run(args []string) int { return 1 } + config = cfg + certDir = dir + if c.flagDevListenAddr != "" { config.Listeners[0].Address = c.flagDevListenAddr } @@ -1039,7 +1095,7 @@ func (c *ServerCommand) Run(args []string) int { return 1 } - f.updateLogConfig(config.SharedConfig) + f.applyLogConfigOverrides(config.SharedConfig) // Set 'trace' log level for the following 'dev' clusters if c.flagDevThreeNode || c.flagDevFourCluster { @@ -1054,6 +1110,11 @@ func (c *ServerCommand) Run(args []string) int { c.logger = l c.allLoggers = append(c.allLoggers, l) + // flush logs right away if the server is started with the disable-gated-logs flag + if c.logFlags.flagDisableGatedLogs { + c.flushLog() + } + // reporting Errors found in the config for _, cErr := range configErrors { c.logger.Warn(cErr.String()) @@ -1095,13 +1156,19 @@ func (c *ServerCommand) Run(args []string) int { if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { config.License = envLicense } - if disableSSC := os.Getenv(DisableSSCTokens); disableSSC != "" { - var err error - config.DisableSSCTokens, err = strconv.ParseBool(disableSSC) - if err != nil { - c.UI.Warn(wrapAtLength("WARNING! failed to parse " + - "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS env var: " + - "setting to default value false")) + + if envPluginTmpdir := os.Getenv(EnvVaultPluginTmpdir); envPluginTmpdir != "" { + config.PluginTmpdir = envPluginTmpdir + } + + if err := server.ExperimentsFromEnvAndCLI(config, EnvVaultExperiments, c.flagExperiments); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + for _, experiment := range config.Experiments { + if experiments.IsUnused(experiment) { + c.UI.Warn(fmt.Sprintf("WARNING! Experiment %s is no longer used", experiment)) } } @@ -1132,16 +1199,18 @@ func (c *ServerCommand) Run(args []string) int { metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) // Initialize the storage backend - backend, err := c.setupStorage(config) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - - // Prevent server startup if migration is active - // TODO: Use OpenTelemetry to integrate this into Diagnose - if c.storageMigrationActive(backend) { - return 1 + var backend physical.Backend + if !c.flagDev || config.Storage != nil { + backend, err = c.setupStorage(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + // Prevent server startup if migration is active + // TODO: Use OpenTelemetry to integrate this into Diagnose + if c.storageMigrationActive(backend) { + return 1 + } } // Initialize the Service Discovery, if there is one @@ -1173,48 +1242,32 @@ func (c *ServerCommand) Run(args []string) int { info[key] = strings.Join(envVarKeys, ", ") infoKeys = append(infoKeys, key) - barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(c, config, infoKeys, info) - // Check error here - if err != nil { - c.UI.Error(err.Error()) - return 1 + if len(config.Experiments) != 0 { + expKey := "experiments" + info[expKey] = strings.Join(config.Experiments, ", ") + infoKeys = append(infoKeys, expKey) } - for _, seal := range seals { - // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir - // section. 
- if seal == nil { - continue - } - seal := seal // capture range variable - // Ensure that the seal finalizer is called, even if using verify-only - defer func(seal *vault.Seal) { - err = (*seal).Finalize(context.Background()) - if err != nil { - c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) - } - }(&seal) - } - - if barrierSeal == nil { - c.UI.Error("Could not create barrier seal! Most likely proper Seal configuration information was not set, but no error was generated.") - return 1 - } + ctx := context.Background() - // prepare a secure random reader for core - secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, config, backend, infoKeys, info) if err != nil { c.UI.Error(err.Error()) return 1 } - coreConfig := createCoreConfig(c, config, backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) + c.setSealsToFinalize(setSealResponse.getCreatedSeals()) + defer func() { + c.finalizeSeals(ctx, c.sealsToFinalize) + }() + + coreConfig := createCoreConfig(c, config, backend, configSR, setSealResponse.barrierSeal, setSealResponse.unwrapSeal, metricsHelper, metricSink, secureRandomReader) if c.flagDevThreeNode { return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) } if c.flagDevFourCluster { - return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) + return entEnableFourClusterDev(c, &coreConfig, info, infoKeys, os.Getenv("VAULT_DEV_TEMP_DIR")) } if allowPendingRemoval := os.Getenv(consts.EnvVaultAllowPendingRemovalMounts); allowPendingRemoval != "" { @@ -1266,9 +1319,9 @@ func (c *ServerCommand) Run(args []string) int { } // Apply any enterprise configuration onto the coreConfig. - adjustCoreConfigForEnt(config, &coreConfig) + entAdjustCoreConfig(config, &coreConfig) - if !storageSupportedForEnt(&coreConfig) { + if !entCheckStorageType(&coreConfig) { c.UI.Warn("") c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which is not supported for Vault Enterprise, must be \"raft\" or \"consul\"", coreConfig.StorageType))) c.UI.Warn("") @@ -1371,7 +1424,7 @@ func (c *ServerCommand) Run(args []string) int { infoKeys = append(infoKeys, "go version") info["go version"] = runtime.Version() - fipsStatus := getFIPSInfoKey() + fipsStatus := entGetFIPSInfoKey() if fipsStatus != "" { infoKeys = append(infoKeys, "fips") info["fips"] = fipsStatus @@ -1388,6 +1441,15 @@ func (c *ServerCommand) Run(args []string) int { info["HCP resource ID"] = config.HCPLinkConf.Resource.ID } + requestLimiterStatus := entGetRequestLimiterStatus(coreConfig) + if requestLimiterStatus != "" { + infoKeys = append(infoKeys, "request limiter") + info["request limiter"] = requestLimiterStatus + } + + infoKeys = append(infoKeys, "administrative namespace") + info["administrative namespace"] = config.AdministrativeNamespacePath + sort.Strings(infoKeys) c.UI.Output("==> Vault server configuration:\n") @@ -1410,22 +1472,23 @@ func (c *ServerCommand) Run(args []string) int { // mode if it's set core.SetClusterListenerAddrs(clusterAddrs) core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{ - Core: core, + Core: core, + ListenerConfig: &configutil.Listener{}, })) // Attempt unsealing in a background goroutine. 
This is needed for when a // Vault cluster with multiple servers is configured with auto-unseal but is // uninitialized. Once one server initializes the storage backend, this // goroutine will pick up the unseal keys and unseal this instance. - if !core.IsInSealMigrationMode() { - go runUnseal(c, core, context.Background()) + if !core.IsInSealMigrationMode(true) { + go runUnseal(c, core, ctx) } // When the underlying storage is raft, kick off retry join if it was specified // in the configuration // TODO: Should we also support retry_join for ha_storage? if config.Storage.Type == storageTypeRaft { - if err := core.InitiateRetryJoin(context.Background()); err != nil { + if err := core.InitiateRetryJoin(ctx); err != nil { c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error())) return 1 } @@ -1444,7 +1507,8 @@ func (c *ServerCommand) Run(args []string) int { } // If we're in Dev mode, then initialize the core - err = initDevCore(c, &coreConfig, config, core, certDir) + clusterJson := &testcluster.ClusterJson{} + err = initDevCore(c, &coreConfig, config, core, certDir, clusterJson) if err != nil { c.UI.Error(err.Error()) return 1 @@ -1469,8 +1533,8 @@ func (c *ServerCommand) Run(args []string) int { return 0 } - if sealConfigError != nil { - init, err := core.InitializedLocally(context.Background()) + if setSealResponse.sealConfigError != nil { + init, err := core.InitializedLocally(ctx) if err != nil { c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) return 1 @@ -1481,6 +1545,10 @@ func (c *ServerCommand) Run(args []string) int { } } + core.SetSealReloadFunc(func(ctx context.Context) error { + return c.reloadSealsOnLeaderActivation(ctx, core) + }) + // Output the header that the server has started if !c.logFlags.flagCombineLogs { c.UI.Output("==> Vault server started! Log data will stream in below:\n") @@ -1504,6 +1572,34 @@ func (c *ServerCommand) Run(args []string) int { // Notify systemd that the server is ready (if applicable) c.notifySystemd(systemd.SdNotifyReady) + if c.flagDev { + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + clusterJson.Nodes = []testcluster.ClusterNode{ + { + APIAddress: protocol + config.Listeners[0].Address, + }, + } + if c.flagDevTLS { + clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename) + } + + if c.flagDevClusterJson != "" && !c.flagDevThreeNode { + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + } + defer func() { if err := c.removePidFile(config.PidFile); err != nil { c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) @@ -1535,22 +1631,10 @@ func (c *ServerCommand) Run(args []string) int { c.notifySystemd(systemd.SdNotifyReloading) // Check for new log level - var config *server.Config - var configErrors []configutil.ConfigError - for _, path := range c.flagConfigs { - current, err := server.LoadConfig(path) - if err != nil { - c.logger.Error("could not reload config", "path", path, "error", err) - goto RUNRELOADFUNCS - } - - configErrors = append(configErrors, current.Validate(path)...) 
- - if config == nil { - config = current - } else { - config = config.Merge(current) - } + config, configErrors, err := c.reloadConfigFiles() + if err != nil { + c.logger.Error("could not reload config", "error", err) + goto RUNRELOADFUNCS } // Ensure at least one config was found. @@ -1564,6 +1648,16 @@ func (c *ServerCommand) Run(args []string) int { c.logger.Warn(cErr.String()) } + // Note that seal reloading can also be triggered via Core.TriggerSealReload. + // See the call to Core.SetSealReloadFunc above. + if reloaded, err := c.reloadSealsOnSigHup(ctx, core, config); err != nil { + c.UI.Error(fmt.Errorf("error reloading seal config: %s", err).Error()) + config.Seals = core.GetCoreConfigInternal().Seals + goto RUNRELOADFUNCS + } else if !reloaded { + config.Seals = core.GetCoreConfigInternal().Seals + } + core.SetConfig(config) // reloading custom response headers to make sure we have @@ -1575,6 +1669,8 @@ func (c *ServerCommand) Run(args []string) int { // Setting log request with the new value in the config after reload core.ReloadLogRequestsLevel() + core.ReloadRequestLimiter() + // reloading HCP link hcpLink, err = c.reloadHCPLink(hcpLink, config, core, hcpLogger) if err != nil { @@ -1597,10 +1693,13 @@ func (c *ServerCommand) Run(args []string) int { } // Reload license file - if err = vault.LicenseReload(core); err != nil { + if err = core.EntReloadLicense(); err != nil { c.UI.Error(err.Error()) } + if err := core.ReloadCensus(); err != nil { + c.UI.Error(err.Error()) + } select { case c.licenseReloadedCh <- err: default: @@ -1651,6 +1750,46 @@ func (c *ServerCommand) Run(args []string) int { c.logger.Info(fmt.Sprintf("Wrote stacktrace to: %s", f.Name())) f.Close() } + + // We can only get pprof outputs via the API but sometimes Vault can get + // into a state where it cannot process requests so we can get pprof outputs + // via SIGUSR2. + if os.Getenv("VAULT_PPROF_WRITE_TO_FILE") != "" { + dir := "" + path := os.Getenv("VAULT_PPROF_FILE_PATH") + if path != "" { + if _, err := os.Stat(path); err != nil { + c.logger.Error("Checking pprof path failed", "error", err) + continue + } + dir = path + } else { + dir, err = os.MkdirTemp("", "vault-pprof") + if err != nil { + c.logger.Error("Could not create temporary directory for pprof", "error", err) + continue + } + } + + dumps := []string{"goroutine", "heap", "allocs", "threadcreate"} + for _, dump := range dumps { + pFile, err := os.Create(filepath.Join(dir, dump)) + if err != nil { + c.logger.Error("error creating pprof file", "name", dump, "error", err) + break + } + + err = pprof.Lookup(dump).WriteTo(pFile, 0) + if err != nil { + c.logger.Error("error generating pprof data", "name", dump, "error", err) + pFile.Close() + break + } + pFile.Close() + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", dir)) + } } } // Notify systemd that the server is shutting down @@ -1677,6 +1816,85 @@ func (c *ServerCommand) Run(args []string) int { return retCode } +func (c *ServerCommand) reloadConfigFiles() (*server.Config, []configutil.ConfigError, error) { + var config *server.Config + var configErrors []configutil.ConfigError + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + return nil, nil, err + } + + configErrors = append(configErrors, current.Validate(path)...) 
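+ // Later files take precedence: Merge overlays any non-empty values from
+ // `current` onto the config accumulated so far.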
+ + if config == nil { + config = current + } else { + config = config.Merge(current) + } + } + + return config, configErrors, nil +} + +func (c *ServerCommand) configureSeals(ctx context.Context, config *server.Config, backend physical.Backend, infoKeys []string, info map[string]string) (*SetSealResponse, io.Reader, error) { + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) + if err != nil { + return nil, nil, fmt.Errorf("Error getting seal generation info: %v", err) + } + + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) + if err != nil { + return nil, nil, fmt.Errorf("Cannot determine if there are partially seal wrapped entries in storage: %v", err) + } + setSealResponse, err := setSeal(c, config, infoKeys, info, existingSealGenerationInfo, hasPartialPaths) + if err != nil { + return nil, nil, err + } + if setSealResponse.sealConfigWarning != nil { + c.UI.Warn(fmt.Sprintf("Warnings during seal configuration: %v", setSealResponse.sealConfigWarning)) + } + + if setSealResponse.barrierSeal == nil { + return nil, nil, errors.New("Could not create barrier seal! Most likely proper Seal configuration information was not set, but no error was generated.") + } + + // prepare a secure random reader for core + entropyAugLogger := c.logger.Named("entropy-augmentation") + var entropySources []*configutil.EntropySourcerInfo + for _, sealWrapper := range setSealResponse.barrierSeal.GetAccess().GetEnabledSealWrappersByPriority() { + if s, ok := sealWrapper.Wrapper.(entropy.Sourcer); ok { + entropySources = append(entropySources, &configutil.EntropySourcerInfo{ + Sourcer: s, + Name: sealWrapper.Name, + }) + } + } + secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, entropySources, entropyAugLogger) + if err != nil { + return nil, nil, err + } + + return setSealResponse, secureRandomReader, nil +} + +func (c *ServerCommand) setSealsToFinalize(seals []*vault.Seal) { + prev := c.sealsToFinalize + c.sealsToFinalize = seals + + c.finalizeSeals(context.Background(), prev) +} + +func (c *ServerCommand) finalizeSeals(ctx context.Context, seals []*vault.Seal) { + for _, seal := range seals { + // Ensure that the seal finalizer is called, even if using verify-only + err := (*seal).Finalize(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + } +} + // configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values // If all goes to plan, a logger is created and setup. 
func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { @@ -1696,25 +1914,16 @@ func (c *ServerCommand) configureLogging(config *server.Config) (hclog.Intercept return nil, err } - logRotateBytes, err := parseutil.ParseInt(config.LogRotateBytes) + logCfg, err := loghelper.NewLogConfig("vault") if err != nil { return nil, err } - - logRotateMaxFiles, err := parseutil.ParseInt(config.LogRotateMaxFiles) - if err != nil { - return nil, err - } - - logCfg := &loghelper.LogConfig{ - Name: "vault", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: int(logRotateBytes), - LogRotateMaxFiles: int(logRotateMaxFiles), - } + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = config.LogRotateBytes + logCfg.LogRotateMaxFiles = config.LogRotateMaxFiles return loghelper.Setup(logCfg, c.logWriter) } @@ -1764,6 +1973,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig barrierConfig := &vault.SealConfig{ SecretShares: 1, SecretThreshold: 1, + Name: "shamir", } if core.SealAccess().RecoveryKeySupported() { @@ -1894,24 +2104,43 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig } resp, err := core.HandleRequest(ctx, req) if err != nil { - return nil, fmt.Errorf("error creating default K/V store: %w", err) + return nil, fmt.Errorf("error creating default KV store: %w", err) } if resp.IsError() { - return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error()) + return nil, fmt.Errorf("failed to create default KV store: %w", resp.Error()) } return init, nil } func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { - testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{ + conf, opts := teststorage.ClusterSetup(base, &vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, BaseListenAddress: c.flagDevListenAddr, Logger: c.logger, TempDir: tempDir, - }) + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + Profiling: configutil.ListenerProfiling{ + UnauthenticatedPProfAccess: true, + }, + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, + }, + }, + }, + }, nil) + testCluster := vault.NewTestCluster(&testing.RuntimeT{}, conf, opts) defer c.cleanupGuard.Do(testCluster.Cleanup) + if constants.IsEnterprise { + err := testcluster.WaitForActiveNodeAndPerfStandbys(context.Background(), testCluster) + if err != nil { + c.UI.Error(fmt.Sprintf("perf standbys didn't become ready: %v", err)) + return 1 + } + } + info["cluster parameters path"] = testCluster.TempDir infoKeys = append(infoKeys, "cluster parameters path") @@ -1937,7 +2166,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m infoKeys = append(infoKeys, "go version") info["go version"] = runtime.Version() - fipsStatus := getFIPSInfoKey() + fipsStatus := entGetFIPSInfoKey() if fipsStatus != "" { infoKeys = append(infoKeys, "fips") info["fips"] = fipsStatus @@ -1961,7 +2190,8 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m for _, core := range testCluster.Cores { core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{ - Core: core.Core, + Core: core.Core, + 
ListenerConfig: &configutil.Listener{}, }) core.SetClusterHandler(core.Server.Handler) } @@ -2052,6 +2282,29 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m testCluster.TempDir, )) + if c.flagDevClusterJson != "" { + clusterJson := testcluster.ClusterJson{ + Nodes: []testcluster.ClusterNode{}, + CACertPath: filepath.Join(testCluster.TempDir, "ca_cert.pem"), + RootToken: testCluster.RootToken, + } + for _, core := range testCluster.Cores { + clusterJson.Nodes = append(clusterJson.Nodes, testcluster.ClusterNode{ + APIAddress: core.Client.Address(), + }) + } + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + // Output the header that the server has started c.UI.Output("==> Vault server started! Log data will stream in below:\n") @@ -2305,9 +2558,11 @@ func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool { } c.logger.Warn("storage migration check error", "error", err.Error()) + timer := time.NewTimer(2 * time.Second) select { - case <-time.After(2 * time.Second): + case <-timer.C: case <-c.ShutdownCh: + timer.Stop() return true } } @@ -2335,86 +2590,315 @@ func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) return &status, nil } -// setSeal return barrierSeal, barrierWrapper, unwrapSeal, and all the created seals from the configs so we can close them in Run -// The two errors are the sealConfigError and the regular error -func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string) (vault.Seal, wrapping.Wrapper, vault.Seal, []vault.Seal, error, error) { - var barrierSeal vault.Seal - var unwrapSeal vault.Seal +type SetSealResponse struct { + barrierSeal vault.Seal + unwrapSeal vault.Seal - var sealConfigError error - var wrapper wrapping.Wrapper - var barrierWrapper wrapping.Wrapper + // sealConfigError is present if there was an error configuring wrappers, other than KeyNotFound. 
+ sealConfigError error + sealConfigWarning error + hasPartiallyWrappedPaths bool +} + +func (r *SetSealResponse) getCreatedSeals() []*vault.Seal { + var ret []*vault.Seal + if r.barrierSeal != nil { + ret = append(ret, &r.barrierSeal) + } + if r.unwrapSeal != nil { + ret = append(ret, &r.unwrapSeal) + } + return ret +} + +// setSeal returns a SetSealResponse containing the barrier seal, the unwrap seal, and the seals created from the configs so we can close them in Run +// Seal configuration problems are reported via the response's sealConfigError and sealConfigWarning fields rather than the returned error +func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string, existingSealGenerationInfo *vaultseal.SealGenerationInfo, hasPartiallyWrappedPaths bool) (*SetSealResponse, error) { if c.flagDevAutoSeal { - var err error - barrierSeal, err = vault.NewAutoSeal(vaultseal.NewTestSeal(nil)) - if err != nil { - return nil, nil, nil, nil, nil, err - } - return barrierSeal, nil, nil, nil, nil, nil + access, _ := vaultseal.NewTestSeal(nil) + barrierSeal := vault.NewAutoSeal(access) + + return &SetSealResponse{barrierSeal: barrierSeal}, nil } // Handle the case where no seal is provided switch len(config.Seals) { case 0: - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) - case 1: - // If there's only one seal and it's disabled assume they want to + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) + default: + allSealsDisabled := true + for _, c := range config.Seals { + if !c.Disabled { + allSealsDisabled = false + } else if c.Type == vault.SealConfigTypeShamir.String() { + return nil, errors.New("shamir seals cannot be set disabled (they should simply not be set)") + } + } + // If all seals are disabled assume they want to // migrate to a shamir seal and simply didn't provide it - if config.Seals[0].Disabled { - config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + if allSealsDisabled { + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) } } - var createdSeals []vault.Seal = make([]vault.Seal, len(config.Seals)) + + var sealConfigError error + var sealConfigWarning error + recordSealConfigError := func(err error) { + sealConfigError = errors.Join(sealConfigError, err) + } + recordSealConfigWarning := func(err error) { + sealConfigWarning = errors.Join(sealConfigWarning, err) + } + enabledSealWrappers := make([]*vaultseal.SealWrapper, 0) + disabledSealWrappers := make([]*vaultseal.SealWrapper, 0) + allSealKmsConfigs := make([]*configutil.KMS, 0) + + type infoKeysAndMap struct { + keys []string + theMap map[string]string + } + sealWrapperInfoKeysMap := make(map[string]infoKeysAndMap) + + configuredSeals := 0 for _, configSeal := range config.Seals { - sealType := wrapping.WrapperTypeShamir.String() - if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { - sealType = os.Getenv("VAULT_SEAL_TYPE") + sealTypeEnvVarName := "VAULT_SEAL_TYPE" + if configSeal.Priority > 1 { + sealTypeEnvVarName = sealTypeEnvVarName + "_" + configSeal.Name + } + + if !configSeal.Disabled && os.Getenv(sealTypeEnvVarName) != "" { + sealType := os.Getenv(sealTypeEnvVarName) configSeal.Type = sealType - } else { - sealType = configSeal.Type } - var seal vault.Seal - sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s",
configSeal.Type)) c.allLoggers = append(c.allLoggers, sealLogger) - defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{ - Wrapper: aeadwrapper.NewShamirWrapper(), - }) - var sealInfoKeys []string - sealInfoMap := map[string]string{} - wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger) - if sealConfigError != nil { - if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { - return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, fmt.Errorf( - "Error parsing Seal configuration: %s", sealConfigError) + + allSealKmsConfigs = append(allSealKmsConfigs, configSeal) + var wrapperInfoKeys []string + wrapperInfoMap := map[string]string{} + wrapper, wrapperConfigError := configutil.ConfigureWrapper(configSeal, &wrapperInfoKeys, &wrapperInfoMap, sealLogger) + if wrapperConfigError == nil { + // for some reason configureWrapper in kms.go returns nil wrapper and nil error for wrapping.WrapperTypeShamir + if wrapper == nil { + wrapper = aeadwrapper.NewShamirWrapper() - } - if wrapper == nil { - seal = defaultSeal + } + configuredSeals++ + } else if config.IsMultisealEnabled() { + recordSealConfigWarning(fmt.Errorf("error configuring seal: %v", wrapperConfigError)) } else { - var err error - seal, err = vault.NewAutoSeal(&vaultseal.Access{ - Wrapper: wrapper, - }) - if err != nil { - return nil, nil, nil, nil, nil, err + // It seems that the reason we check for this particular error here is to distinguish between a + // misconfigured seal and one that fails for another reason. Apparently the only other reason is + // a key not found error. It seems the intention is for the key not found error to be returned + // as a seal specific error later + if !errwrap.ContainsType(wrapperConfigError, new(logical.KeyNotFoundError)) { + return nil, fmt.Errorf("error parsing Seal configuration: %s", wrapperConfigError) + } else { + sealLogger.Error("error configuring seal", "name", configSeal.Name, "err", wrapperConfigError) + recordSealConfigError(wrapperConfigError) } } - infoPrefix := "" + + sealWrapper := vaultseal.NewSealWrapper( + wrapper, + configSeal.Priority, + configSeal.Name, + configSeal.Type, + configSeal.Disabled, + wrapperConfigError == nil, + ) + + if configSeal.Disabled { - unwrapSeal = seal - infoPrefix = "Old " + disabledSealWrappers = append(disabledSealWrappers, sealWrapper) } else { - barrierSeal = seal - barrierWrapper = wrapper + enabledSealWrappers = append(enabledSealWrappers, sealWrapper) + } + + sealWrapperInfoKeysMap[sealWrapper.Name] = infoKeysAndMap{ + keys: wrapperInfoKeys, + theMap: wrapperInfoMap, + } + } + + if len(enabledSealWrappers) == 0 && len(disabledSealWrappers) == 0 && sealConfigWarning != nil { + // All of them errored out, so warnings are now errors + recordSealConfigError(sealConfigWarning) + sealConfigWarning = nil + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Set the info keys; this modifies the function arguments `info` and `infoKeys` + // TODO(SEALHA): Why are we doing this? What is its use?
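+ // (These entries surface in the "==> Vault server configuration:" banner at
+ // startup; the name prefix keeps per-seal settings distinguishable when more
+ // than one seal wrapper is configured.)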
+ appendWrapperInfoKeys := func(prefix string, sealWrappers []*vaultseal.SealWrapper) { + if len(sealWrappers) == 0 { + return + } + useName := false + if len(sealWrappers) > 1 { + useName = true + } + for _, sealWrapper := range sealWrappers { + if useName { + prefix = fmt.Sprintf("%s %s ", prefix, sealWrapper.Name) + } + for _, k := range sealWrapperInfoKeysMap[sealWrapper.Name].keys { + infoKeys = append(infoKeys, prefix+k) + info[prefix+k] = sealWrapperInfoKeysMap[sealWrapper.Name].theMap[k] + } + } + } + appendWrapperInfoKeys("", enabledSealWrappers) + appendWrapperInfoKeys("Old", disabledSealWrappers) + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Compute seal generation + sealGenerationInfo, err := c.computeSealGenerationInfo(existingSealGenerationInfo, allSealKmsConfigs, hasPartiallyWrappedPaths, config.IsMultisealEnabled()) + if err != nil { + return nil, err + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Create the Seals + + containsShamir := func(sealWrappers []*vaultseal.SealWrapper) bool { + for _, si := range sealWrappers { + if vault.SealConfigTypeShamir.IsSameAs(si.SealConfigType) { + return true + } + } + return false + } + + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + sealLogger := c.logger + switch { + case len(enabledSealWrappers) == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no enabled Seals in configuration")) + case configuredSeals == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no seals were successfully initialized")) + case len(enabledSealWrappers) == 1 && containsShamir(enabledSealWrappers): + // The barrier seal is Shamir. If there are any disabled seals, then we put them all in the same + // autoSeal. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewDefaultSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } else if sealGenerationInfo.Generation == 1 { + // First generation, and shamir, with no disabled wrappers, so there can be no wrapped values + sealGenerationInfo.SetRewrapped(true) } + case len(disabledSealWrappers) == 1 && containsShamir(disabledSealWrappers): + // The unwrap seal is Shamir; we are migrating to an autoSeal. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewDefaultSeal(a) + + case config.IsMultisealEnabled(): + // We know we are not using a Shamir seal, that we are not migrating away from one, and multi seal is supported, + // so just put enabled and disabled wrappers on the same seal Access + allSealWrappers := append(enabledSealWrappers, disabledSealWrappers...)
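+ // Assumed rationale: sharing one Access lets values wrapped by an old
+ // (disabled) seal still be unwrapped, while new writes go through the
+ // enabled, highest-priority wrapper.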
+ a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, allSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + if configuredSeals < len(enabledSealWrappers) { + c.UI.Warn("WARNING: running with fewer than all configured seals during unseal. Will not be fully highly available until errors are corrected and Vault restarted.") + } + case len(enabledSealWrappers) == 1: + // We may have multiple seals disabled, but we know Shamir is not one of them. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err } - createdSeals = append(createdSeals, seal) + barrierSeal = vault.NewAutoSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } + + default: + // We know there are multiple enabled seals but multi seal is not supported. + return nil, errors.Join(sealConfigWarning, errors.New("error: more than one enabled seal found")) + } + + return &SetSealResponse{ + barrierSeal: barrierSeal, + unwrapSeal: unwrapSeal, + sealConfigError: sealConfigError, + sealConfigWarning: sealConfigWarning, + hasPartiallyWrappedPaths: hasPartiallyWrappedPaths, + }, nil +} + +func (c *ServerCommand) computeSealGenerationInfo(existingSealGenInfo *vaultseal.SealGenerationInfo, sealConfigs []*configutil.KMS, hasPartiallyWrappedPaths, multisealEnabled bool) (*vaultseal.SealGenerationInfo, error) { + generation := uint64(1) + + if existingSealGenInfo != nil { + // This forces a seal re-wrap on all seal related config changes, as we can't + // be sure what effect the config change might do. This is purposefully different + // from within the Validate call below that just matches on seal configs based + // on name/type. + if cmp.Equal(existingSealGenInfo.Seals, sealConfigs) { + return existingSealGenInfo, nil + } + generation = existingSealGenInfo.Generation + 1 + } + c.logger.Info("incrementing seal generation", "generation", generation) + + // If the stored copy doesn't match the current configuration, we introduce a new generation + // which keeps track if a rewrap of all CSPs and seal wrapped values has completed (initially false). + newSealGenInfo := &vaultseal.SealGenerationInfo{ + Generation: generation, + Seals: sealConfigs, + Enabled: multisealEnabled, + } + + if multisealEnabled || (existingSealGenInfo != nil && existingSealGenInfo.Enabled) { + err := newSealGenInfo.Validate(existingSealGenInfo, hasPartiallyWrappedPaths) + if err != nil { + return nil, err + } + } + + return newSealGenInfo, nil +} + +func hasPartiallyWrappedPaths(ctx context.Context, backend physical.Backend) (bool, error) { + paths, err := vault.GetPartiallySealWrappedPaths(ctx, backend) + if err != nil { + return false, err } - return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, nil + + return len(paths) > 0, nil } func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) { @@ -2595,10 +3079,12 @@ func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) { } c.logger.Warn("failed to unseal core", "error", err) + timer := time.NewTimer(5 * time.Second) select { case <-c.ShutdownCh: + timer.Stop() return - case <-time.After(5 * time.Second): + case <-timer.C: } } } @@ -2618,7 +3104,11 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. 
AuditBackends: c.AuditBackends, CredentialBackends: c.CredentialBackends, LogicalBackends: c.LogicalBackends, + EventBackends: c.EventBackends, + LogLevel: config.LogLevel, Logger: c.logger, + DetectDeadlocks: config.DetectDeadlocks, + ImpreciseLeaseRoleTracking: config.ImpreciseLeaseRoleTracking, DisableSentinelTrace: config.DisableSentinelTrace, DisableCache: config.DisableCache, DisableMlock: config.DisableMlock, @@ -2627,6 +3117,7 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. ClusterName: config.ClusterName, CacheSize: config.CacheSize, PluginDirectory: config.PluginDirectory, + PluginTmpdir: config.PluginTmpdir, PluginFileUid: config.PluginFileUid, PluginFilePermissions: config.PluginFilePermissions, EnableUI: config.EnableUI, @@ -2646,6 +3137,8 @@ func createCoreConfig(c *ServerCommand, config *server.Config, backend physical. License: config.License, LicensePath: config.LicensePath, DisableSSCTokens: config.DisableSSCTokens, + Experiments: config.Experiments, + AdministrativeNamespacePath: config.AdministrativeNamespacePath, } if c.flagDev { @@ -2679,7 +3172,7 @@ func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server return nil } -func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string) error { +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string, clusterJSON *testcluster.ClusterJson) error { if c.flagDev && !c.flagDevSkipInit { init, err := c.enableDev(core, coreConfig) @@ -2687,6 +3180,10 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. return fmt.Errorf("Error initializing Dev mode: %s", err) } + if clusterJSON != nil { + clusterJSON.RootToken = init.RootToken + } + var plugins, pluginsNotLoaded []string if c.flagDevPluginDir != "" && c.flagDevPluginInit { @@ -2704,7 +3201,7 @@ func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server. for _, name := range list { path := filepath.Join(f.Name(), name) if err := c.addPlugin(path, init.RootToken, core); err != nil { - if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) { + if !errwrap.Contains(err, plugincatalog.ErrPluginBadType.Error()) { return fmt.Errorf("Error enabling plugin %s: %s", name, err) } pluginsNotLoaded = append(pluginsNotLoaded, name) @@ -2866,6 +3363,161 @@ func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, return nil } +// reloadSealsOnLeaderActivation checks to see if the in-memory seal generation info is stale, and if so, +// reloads the seal configuration. 
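+// Staleness is decided by comparing generations: only a generation in storage
+// strictly newer than the in-memory one triggers a reload.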
+func (c *ServerCommand) reloadSealsOnLeaderActivation(ctx context.Context, core *vault.Core) error { + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, core.PhysicalAccess()) + if err != nil { + return fmt.Errorf("error checking for stale seal generation info: %w", err) + } + if existingSealGenerationInfo == nil { + c.logger.Debug("not reloading seals config since there is no seal generation info in storage") + return nil + } + + currentSealGenerationInfo := core.SealAccess().GetAccess().GetSealGenerationInfo() + if currentSealGenerationInfo == nil { + c.logger.Debug("not reloading seal config since there is no current generation info (the seal has not been initialized)") + return nil + } + if currentSealGenerationInfo.Generation >= existingSealGenerationInfo.Generation { + c.logger.Debug("seal generation info is up to date, not reloading seal configuration") + return nil + } + + // Reload seal configuration + + config, _, err := c.reloadConfigFiles() + if err != nil { + return fmt.Errorf("error reading configuration files while reloading seal configuration: %w", err) + } + if config == nil { + return errors.New("no configuration files found while reloading seal configuration") + } + reloaded, err := c.reloadSeals(ctx, false, core, config) + if reloaded { + core.SetConfig(config) + } + return err +} + +// reloadSealsOnSigHup will reload seal configuration as a result of receiving a SIGHUP signal. +func (c *ServerCommand) reloadSealsOnSigHup(ctx context.Context, core *vault.Core, config *server.Config) (bool, error) { + return c.reloadSeals(ctx, true, core, config) +} + +// reloadSeals reloads configuration files and determines whether it needs to re-create the Seal.Access() objects. +// This function needs to detect whether core.SealAccess() is no longer using the seal Wrapper that is specified +// in the seal configuration files. +// This function returns true if the newConfig was used to re-create the Seal.Access() objects. In other words, +// if false is returned, there were no changes done to the seals. +func (c *ServerCommand) reloadSeals(ctx context.Context, grabStateLock bool, core *vault.Core, newConfig *server.Config) (bool, error) { + if core.IsInSealMigrationMode(grabStateLock) { + c.logger.Debug("not reloading seal configuration since Vault is in migration mode") + return false, nil + } + + currentConfig := core.GetCoreConfigInternal() + + // We only want to reload if multiseal is currently enabled, or it is being enabled + if !(currentConfig.IsMultisealEnabled() || newConfig.IsMultisealEnabled()) { + c.logger.Debug("not reloading seal configuration since enable_multiseal is not set, nor is it being enabled") + return false, nil + } + + if conf, err := core.PhysicalBarrierSealConfig(ctx); err != nil { + return false, fmt.Errorf("error reading barrier seal configuration from storage while reloading seals: %w", err) + } else if conf == nil { + c.logger.Debug("not reloading seal configuration since there is no barrier config in storage (the seal has not been initialized)") + return false, nil + } + + if core.SealAccess().BarrierSealConfigType() == vault.SealConfigTypeShamir { + switch { + case len(newConfig.Seals) == 0: + // We are fine, our ServerCommand.reloadConfigFiles() does not do the "automagic" creation + // of the Shamir seal configuration.
+ c.logger.Debug("not reloading seal configuration since the new one has no seal stanzas") + return false, nil + + case len(newConfig.Seals) == 1 && newConfig.Seals[0].Disabled: + // If we have only one seal and it is disabled, it means that the newConfig wants to migrate + // to Shamir, which is not supported by seal reloading. + c.logger.Debug("not reloading seal configuration since the new one specifies migration to Shamir") + return false, nil + + case len(newConfig.Seals) == 1 && newConfig.Seals[0].Type == vault.SealConfigTypeShamir.String(): + // Having a single Shamir seal in newConfig is not really possible, since a Shamir seal + // is specified in configuration by *not* having a seal stanza. If we were to hit this + // case, though, it is equivalent to trying to migrate to Shamir, which is not supported + // by seal reloading. + c.logger.Debug("not reloading seal configuration since the new one has single Shamir stanza") + return false, nil + } + } + + // Verify that the new config we picked up is not trying to migrate from autoseal to shamir + if len(newConfig.Seals) == 1 && newConfig.Seals[0].Disabled { + // If we get here, it means the node was not started in migration mode, but the new config says + // we should go into migration mode. This case should be caught by the core.IsInSealMigrationMode() + // above. + + return false, errors.New("not reloading seal configuration: moving from autoseal to shamir requires seal migration") + } + + // Verify that the new config we picked up is not trying to migrate shamir to autoseal + if core.SealAccess().BarrierSealConfigType() == vault.SealConfigTypeShamir { + return false, errors.New("not reloading seal configuration: moving from Shamir to autoseal requires seal migration") + } + + infoKeysReload := make([]string, 0) + infoReload := make(map[string]string) + + core.SetMultisealEnabled(newConfig.IsMultisealEnabled()) + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, newConfig, core.PhysicalAccess(), infoKeysReload, infoReload) + if err != nil { + return false, fmt.Errorf("error reloading seal configuration: %w", err) + } + if setSealResponse.sealConfigError != nil { + return false, fmt.Errorf("error reloading seal configuration: %w", setSealResponse.sealConfigError) + } + + newGen := setSealResponse.barrierSeal.GetAccess().GetSealGenerationInfo() + + var standby, perf bool + if grabStateLock { + // If grabStateLock is false we know we are on a leader activation + standby, perf = core.StandbyStates() + } + switch { + case !perf && !standby: + c.logger.Debug("persisting reloaded seals as we are the active node") + err = core.SetSeals(ctx, grabStateLock, setSealResponse.barrierSeal, secureRandomReader, !newGen.IsRewrapped() || setSealResponse.hasPartiallyWrappedPaths) + if err != nil { + return false, fmt.Errorf("error setting seal: %s", err) + } + + if err := core.SetPhysicalSealGenInfo(ctx, newGen); err != nil { + c.logger.Warn("could not update seal information in storage", "err", err) + } + case perf: + c.logger.Debug("updating reloaded seals in memory on perf standby") + err = core.SetSealsOnPerfStandby(ctx, grabStateLock, setSealResponse.barrierSeal, secureRandomReader) + if err != nil { + return false, fmt.Errorf("error setting seal on perf standby: %s", err) + } + default: + return false, errors.New("skipping seal reload as we are a standby") + } + + // finalize the old seals and set the new seals as the current ones + c.setSealsToFinalize(setSealResponse.getCreatedSeals()) + + c.logger.Debug("seal configuration 
reloaded successfully") + + return true, nil +} + func SetStorageMigration(b physical.Backend, active bool) error { if !active { return b.Delete(context.Background(), storageMigrationLock) diff --git a/command/server/config.go b/command/server/config.go index b83a9fe2f7da..9d31ed67b31e 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -5,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "os" "path/filepath" @@ -17,9 +19,13 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/osutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/mitchellh/mapstructure" ) const ( @@ -28,9 +34,8 @@ const ( VaultDevKeyFilename = "vault-key.pem" ) -var entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { - return nil -} +// Modified internally for testing. +var validExperiments = experiments.ValidExperiments() // Config is the configuration for the vault server. type Config struct { @@ -45,6 +50,8 @@ type Config struct { ServiceRegistration *ServiceRegistration `hcl:"-"` + Experiments []string `hcl:"experiments"` + CacheSize int `hcl:"cache_size"` DisableCache bool `hcl:"-"` DisableCacheRaw interface{} `hcl:"disable_cache"` @@ -62,6 +69,7 @@ type Config struct { ClusterCipherSuites string `hcl:"cluster_cipher_suites"` PluginDirectory string `hcl:"plugin_directory"` + PluginTmpdir string `hcl:"plugin_tmpdir"` PluginFileUid int `hcl:"plugin_file_uid"` @@ -97,6 +105,10 @@ type Config struct { LogRequestsLevel string `hcl:"-"` LogRequestsLevelRaw interface{} `hcl:"log_requests_level"` + DetectDeadlocks string `hcl:"detect_deadlocks"` + + ImpreciseLeaseRoleTracking bool `hcl:"imprecise_lease_role_tracking"` + EnableResponseHeaderRaftNodeID bool `hcl:"-"` EnableResponseHeaderRaftNodeIDRaw interface{} `hcl:"enable_response_header_raft_node_id"` @@ -120,14 +132,10 @@ func (c *Config) Validate(sourceFilePath string) []configutil.ConfigError { for _, l := range c.Listeners { results = append(results, l.Validate(sourceFilePath)...) } - results = append(results, c.validateEnt(sourceFilePath)...) + results = append(results, entValidateConfig(c, sourceFilePath)...) return results } -func (c *Config) validateEnt(sourceFilePath string) []configutil.ConfigError { - return entConfigValidate(c, sourceFilePath) -} - // DevConfig is a Config that is used for dev mode of Vault. func DevConfig(storageType string) (*Config, error) { hclStr := ` @@ -161,13 +169,13 @@ ui = true } // DevTLSConfig is a Config that is used for dev tls mode of Vault. 
-func DevTLSConfig(storageType, certDir string) (*Config, error) { +func DevTLSConfig(storageType, certDir string, extraSANs []string) (*Config, error) { ca, err := GenerateCA() if err != nil { return nil, err } - cert, key, err := GenerateCert(ca.Template, ca.Signer) + cert, key, err := generateCert(ca.Template, ca.Signer, extraSANs) if err != nil { return nil, err } @@ -183,7 +191,10 @@ func DevTLSConfig(storageType, certDir string) (*Config, error) { if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevKeyFilename), []byte(key), 0o400); err != nil { return nil, err } + return parseDevTLSConfig(storageType, certDir) +} +func parseDevTLSConfig(storageType, certDir string) (*Config, error) { hclStr := ` disable_mlock = true @@ -206,8 +217,8 @@ storage "%s" { ui = true ` - - hclStr = fmt.Sprintf(hclStr, certDir, certDir, storageType) + certDirEscaped := strings.Replace(certDir, "\\", "\\\\", -1) + hclStr = fmt.Sprintf(hclStr, certDirEscaped, certDirEscaped, storageType) parsed, err := ParseConfig(hclStr, "") if err != nil { return nil, err @@ -353,6 +364,11 @@ func (c *Config) Merge(c2 *Config) *Config { result.PluginDirectory = c2.PluginDirectory } + result.PluginTmpdir = c.PluginTmpdir + if c2.PluginTmpdir != "" { + result.PluginTmpdir = c2.PluginTmpdir + } + result.PluginFileUid = c.PluginFileUid if c2.PluginFileUid != 0 { result.PluginFileUid = c2.PluginFileUid @@ -389,6 +405,16 @@ func (c *Config) Merge(c2 *Config) *Config { result.LogRequestsLevel = c2.LogRequestsLevel } + result.DetectDeadlocks = c.DetectDeadlocks + if c2.DetectDeadlocks != "" { + result.DetectDeadlocks = c2.DetectDeadlocks + } + + result.ImpreciseLeaseRoleTracking = c.ImpreciseLeaseRoleTracking + if c2.ImpreciseLeaseRoleTracking { + result.ImpreciseLeaseRoleTracking = c2.ImpreciseLeaseRoleTracking + } + result.EnableResponseHeaderRaftNodeID = c.EnableResponseHeaderRaftNodeID if c2.EnableResponseHeaderRaftNodeID { result.EnableResponseHeaderRaftNodeID = c2.EnableResponseHeaderRaftNodeID @@ -424,8 +450,15 @@ func (c *Config) Merge(c2 *Config) *Config { } } + result.AdministrativeNamespacePath = c.AdministrativeNamespacePath + if c2.AdministrativeNamespacePath != "" { + result.AdministrativeNamespacePath = c2.AdministrativeNamespacePath + } + result.entConfig = c.entConfig.Merge(c2.entConfig) + result.Experiments = mergeExperiments(c.Experiments, c2.Experiments) + return result } @@ -447,9 +480,14 @@ func LoadConfig(path string) (*Config, error) { return nil, errors.New("Error parsing the environment variable VAULT_ENABLE_FILE_PERMISSIONS_CHECK") } } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() if enableFilePermissionsCheck { - err = osutil.OwnerPermissionsMatch(path, 0, 0) + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) if err != nil { return nil, err } @@ -464,13 +502,21 @@ func CheckConfig(c *Config, e error) (*Config, error) { return c, e } - if len(c.Seals) == 2 { - switch { - case c.Seals[0].Disabled && c.Seals[1].Disabled: - return nil, errors.New("seals: two seals provided but both are disabled") - case !c.Seals[0].Disabled && !c.Seals[1].Disabled: - return nil, errors.New("seals: two seals provided but neither is disabled") + if err := c.checkSealConfig(); err != nil { + return nil, err + } + + sealMap := make(map[string]*configutil.KMS) + for _, seal := range c.Seals { + if seal.Name == "" { + return nil, errors.New("seals: seal name is empty") } + + if _, ok := sealMap[seal.Name]; ok { + return nil, errors.New("seals: seal names must be unique") + } + + 
sealMap[seal.Name] = seal } return c, nil @@ -478,8 +524,14 @@ func CheckConfig(c *Config, e error) (*Config, error) { // LoadConfigFile loads the configuration from the given file. func LoadConfigFile(path string) (*Config, error) { + // Open the file + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() // Read the file - d, err := ioutil.ReadFile(path) + d, err := io.ReadAll(f) if err != nil { return nil, err } @@ -500,7 +552,7 @@ func LoadConfigFile(path string) (*Config, error) { if enableFilePermissionsCheck { // check permissions of the config file - err = osutil.OwnerPermissionsMatch(path, 0, 0) + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) if err != nil { return nil, err } @@ -692,7 +744,11 @@ func ParseConfig(d, source string) (*Config, error) { } } - if err := result.parseConfig(list); err != nil { + if err := validateExperiments(result.Experiments); err != nil { + return nil, fmt.Errorf("error validating experiment(s) from config: %w", err) + } + + if err := result.parseConfig(list, source); err != nil { return nil, fmt.Errorf("error parsing enterprise config: %w", err) } @@ -708,6 +764,69 @@ func ParseConfig(d, source string) (*Config, error) { return result, nil } +func ExperimentsFromEnvAndCLI(config *Config, envKey string, flagExperiments []string) error { + if envExperimentsRaw := os.Getenv(envKey); envExperimentsRaw != "" { + envExperiments := strings.Split(envExperimentsRaw, ",") + err := validateExperiments(envExperiments) + if err != nil { + return fmt.Errorf("error validating experiment(s) from environment variable %q: %w", envKey, err) + } + + config.Experiments = mergeExperiments(config.Experiments, envExperiments) + } + + if len(flagExperiments) != 0 { + err := validateExperiments(flagExperiments) + if err != nil { + return fmt.Errorf("error validating experiment(s) from command line flag: %w", err) + } + + config.Experiments = mergeExperiments(config.Experiments, flagExperiments) + } + + return nil +} + +// validateExperiments checks each experiment is a known experiment. +func validateExperiments(experiments []string) error { + var invalid []string + + for _, experiment := range experiments { + if !strutil.StrListContains(validExperiments, experiment) { + invalid = append(invalid, experiment) + } + } + + if len(invalid) != 0 { + return fmt.Errorf("valid experiment(s) are %s, but received the following invalid experiment(s): %s", + strings.Join(validExperiments, ", "), + strings.Join(invalid, ", ")) + } + + return nil +} + +// mergeExperiments returns the logical OR of the two sets. +func mergeExperiments(left, right []string) []string { + processed := map[string]struct{}{} + var result []string + for _, l := range left { + if _, seen := processed[l]; !seen { + result = append(result, l) + } + processed[l] = struct{}{} + } + + for _, r := range right { + if _, seen := processed[r]; !seen { + result = append(result, r) + processed[r] = struct{}{} + } + } + + return result +} + // LoadConfigDir loads all the configurations in the given directory // in alphabetical order. 
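// As a rough sketch (hypothetical paths), loading a directory behaves like
// loading each file individually and folding the results together with Merge:
//
//	a, _ := LoadConfigFile("conf.d/a.hcl")
//	b, _ := LoadConfigFile("conf.d/b.hcl")
//	merged := a.Merge(b)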
func LoadConfigDir(dir string) (*Config, error) { @@ -1001,6 +1120,7 @@ func (c *Config) Sanitized() map[string]interface{} { "cluster_cipher_suites": c.ClusterCipherSuites, "plugin_directory": c.PluginDirectory, + "plugin_tmpdir": c.PluginTmpdir, "plugin_file_uid": c.PluginFileUid, @@ -1025,6 +1145,11 @@ func (c *Config) Sanitized() map[string]interface{} { "enable_response_header_raft_node_id": c.EnableResponseHeaderRaftNodeID, "log_requests_level": c.LogRequestsLevel, + "experiments": c.Experiments, + + "detect_deadlocks": c.DetectDeadlocks, + + "imprecise_lease_role_tracking": c.ImpreciseLeaseRoleTracking, } for k, v := range sharedResult { result[k] = v @@ -1032,23 +1157,39 @@ func (c *Config) Sanitized() map[string]interface{} { // Sanitize storage stanza if c.Storage != nil { + storageType := c.Storage.Type sanitizedStorage := map[string]interface{}{ - "type": c.Storage.Type, + "type": storageType, "redirect_addr": c.Storage.RedirectAddr, "cluster_addr": c.Storage.ClusterAddr, "disable_clustering": c.Storage.DisableClustering, } + + if storageType == "raft" { + sanitizedStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.Storage.Config["max_entry_size"], + } + } + result["storage"] = sanitizedStorage } // Sanitize HA storage stanza if c.HAStorage != nil { + haStorageType := c.HAStorage.Type sanitizedHAStorage := map[string]interface{}{ - "type": c.HAStorage.Type, + "type": haStorageType, "redirect_addr": c.HAStorage.RedirectAddr, "cluster_addr": c.HAStorage.ClusterAddr, "disable_clustering": c.HAStorage.DisableClustering, } + + if haStorageType == "raft" { + sanitizedHAStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.HAStorage.Config["max_entry_size"], + } + } + result["ha_storage"] = sanitizedHAStorage } @@ -1087,3 +1228,12 @@ func (c *Config) found(s, k string) { delete(c.UnusedKeys, s) c.FoundKeys = append(c.FoundKeys, k) } + +func (c *Config) ToVaultNodeConfig() (*testcluster.VaultNodeConfig, error) { + var vnc testcluster.VaultNodeConfig + err := mapstructure.Decode(c, &vnc) + if err != nil { + return nil, err + } + return &vnc, nil +} diff --git a/command/server/config_custom_response_headers_test.go b/command/server/config_custom_response_headers_test.go index 5380568c2510..8db646bfd128 100644 --- a/command/server/config_custom_response_headers_test.go +++ b/command/server/config_custom_response_headers_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( diff --git a/command/server/config_oss.go b/command/server/config_oss.go new file mode 100644 index 000000000000..22abae3003f4 --- /dev/null +++ b/command/server/config_oss.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +func (c *Config) IsMultisealEnabled() bool { + return false +} diff --git a/command/server/config_oss_test.go b/command/server/config_oss_test.go index f64670e03a55..20e97c1cfe7e 100644 --- a/command/server/config_oss_test.go +++ b/command/server/config_oss_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !enterprise package server diff --git a/command/server/config_telemetry_test.go b/command/server/config_telemetry_test.go index 581710f565a7..1f29a3862e9e 100644 --- a/command/server/config_telemetry_test.go +++ b/command/server/config_telemetry_test.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package server import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMetricFilterConfigs(t *testing.T) { @@ -38,3 +42,35 @@ func TestMetricFilterConfigs(t *testing.T) { } }) } + +// TestRollbackMountPointMetricsConfig verifies that the add_mount_point_rollback_metrics +// config option is parsed correctly, when it is set to true. Also verifies that +// the default for this setting is false +func TestRollbackMountPointMetricsConfig(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + configFile string + wantMountPoint bool + }{ + { + name: "include mount point", + configFile: "./test-fixtures/telemetry/rollback_mount_point.hcl", + wantMountPoint: true, + }, + { + name: "exclude mount point", + configFile: "./test-fixtures/telemetry/valid_prefix_filter.hcl", + wantMountPoint: false, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + config, err := LoadConfigFile(tc.configFile) + require.NoError(t, err) + require.Equal(t, tc.wantMountPoint, config.Telemetry.RollbackMetricsIncludeMountPoint) + }) + } +} diff --git a/command/server/config_test.go b/command/server/config_test.go index 21ebd38b63c1..9fa20b182fd2 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -1,7 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( + "fmt" + "reflect" + "strings" "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" ) func TestLoadConfigFile(t *testing.T) { @@ -56,6 +65,12 @@ func TestParseStorage(t *testing.T) { testParseStorageTemplate(t) } +// TestConfigWithAdministrativeNamespace tests that .hcl and .json configurations are correctly parsed when the administrative_namespace_path is present. +func TestConfigWithAdministrativeNamespace(t *testing.T) { + testConfigWithAdministrativeNamespaceHcl(t) + testConfigWithAdministrativeNamespaceJson(t) +} + func TestUnknownFieldValidation(t *testing.T) { testUnknownFieldValidation(t) } @@ -71,3 +86,211 @@ func TestUnknownFieldValidationHcl(t *testing.T) { func TestUnknownFieldValidationListenerAndStorage(t *testing.T) { testUnknownFieldValidationStorageAndListener(t) } + +func TestExperimentsConfigParsing(t *testing.T) { + const envKey = "VAULT_EXPERIMENTS" + originalValue := validExperiments + validExperiments = []string{"foo", "bar", "baz"} + t.Cleanup(func() { + validExperiments = originalValue + }) + + for name, tc := range map[string]struct { + fromConfig []string + fromEnv []string + fromCLI []string + expected []string + expectedError string + }{ + // Multiple sources. + "duplication": {[]string{"foo"}, []string{"foo"}, []string{"foo"}, []string{"foo"}, ""}, + "disjoint set": {[]string{"foo"}, []string{"bar"}, []string{"baz"}, []string{"foo", "bar", "baz"}, ""}, + + // Single source. + "config only": {[]string{"foo"}, nil, nil, []string{"foo"}, ""}, + "env only": {nil, []string{"foo"}, nil, []string{"foo"}, ""}, + "CLI only": {nil, nil, []string{"foo"}, []string{"foo"}, ""}, + + // Validation errors. 
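// (Each expected substring below matches the error wrapping added in
// ParseConfig and ExperimentsFromEnvAndCLI: "from config", "from environment
// variable", and "from command line flag" respectively.)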
+ "config invalid": {[]string{"invalid"}, nil, nil, nil, "from config"}, + "env invalid": {nil, []string{"invalid"}, nil, nil, "from environment variable"}, + "CLI invalid": {nil, nil, []string{"invalid"}, nil, "from command line flag"}, + } { + t.Run(name, func(t *testing.T) { + var configString string + t.Setenv(envKey, strings.Join(tc.fromEnv, ",")) + if len(tc.fromConfig) != 0 { + configString = fmt.Sprintf("experiments = [\"%s\"]", strings.Join(tc.fromConfig, "\", \"")) + } + config, err := ParseConfig(configString, "") + if err == nil { + err = ExperimentsFromEnvAndCLI(config, envKey, tc.fromCLI) + } + + switch tc.expectedError { + case "": + if err != nil { + t.Fatal(err) + } + + default: + if err == nil || !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("Expected error to contain %q, but got: %s", tc.expectedError, err) + } + } + }) + } +} + +func TestValidate(t *testing.T) { + originalValue := validExperiments + for name, tc := range map[string]struct { + validSet []string + input []string + expectError bool + }{ + // Valid cases + "minimal valid": {[]string{"foo"}, []string{"foo"}, false}, + "valid subset": {[]string{"foo", "bar"}, []string{"bar"}, false}, + "repeated": {[]string{"foo"}, []string{"foo", "foo"}, false}, + + // Error cases + "partially valid": {[]string{"foo", "bar"}, []string{"foo", "baz"}, true}, + "empty": {[]string{"foo"}, []string{""}, true}, + "no valid experiments": {[]string{}, []string{"foo"}, true}, + } { + t.Run(name, func(t *testing.T) { + t.Cleanup(func() { + validExperiments = originalValue + }) + + validExperiments = tc.validSet + err := validateExperiments(tc.input) + if tc.expectError && err == nil { + t.Fatal("Expected error but got none") + } + if !tc.expectError && err != nil { + t.Fatal("Did not expect error but got", err) + } + }) + } +} + +func TestMerge(t *testing.T) { + for name, tc := range map[string]struct { + left []string + right []string + expected []string + }{ + "disjoint": {[]string{"foo"}, []string{"bar"}, []string{"foo", "bar"}}, + "empty left": {[]string{}, []string{"foo"}, []string{"foo"}}, + "empty right": {[]string{"foo"}, []string{}, []string{"foo"}}, + "overlapping": {[]string{"foo", "bar"}, []string{"foo", "baz"}, []string{"foo", "bar", "baz"}}, + } { + t.Run(name, func(t *testing.T) { + result := mergeExperiments(tc.left, tc.right) + if !reflect.DeepEqual(tc.expected, result) { + t.Fatalf("Expected %v but got %v", tc.expected, result) + } + }) + } +} + +// Test_parseDevTLSConfig verifies that both Windows and Unix directories are correctly escaped when creating a dev TLS +// configuration in HCL +func Test_parseDevTLSConfig(t *testing.T) { + tests := []struct { + name string + certDirectory string + }{ + { + name: "windows path", + certDirectory: `C:\Users\ADMINI~1\AppData\Local\Temp\2\vault-tls4169358130`, + }, + { + name: "unix path", + certDirectory: "/tmp/vault-tls4169358130", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := parseDevTLSConfig("file", tt.certDirectory) + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevCertFilename), cfg.Listeners[0].TLSCertFile) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevKeyFilename), cfg.Listeners[0].TLSKeyFile) + }) + } +} + +func TestCheckConfig(t *testing.T) { + testCases := []struct { + name string + config *Config + expectError bool + }{ + { + name: "no-seals-configured", + config: &Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{}}}, + 
expectError: false, + }, + { + name: "seal-with-empty-name", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + }, + }, + }}, + expectError: true, + }, + { + name: "seals-with-unique-names", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + Name: "enabled-awskms", + }, + { + Type: "awskms", + Disabled: true, + Name: "disabled-awskms", + }, + }, + }}, + expectError: false, + }, + { + name: "seals-with-same-names", + config: &Config{SharedConfig: &configutil.SharedConfig{ + Seals: []*configutil.KMS{ + { + Type: "awskms", + Disabled: false, + Name: "awskms", + }, + { + Type: "awskms", + Disabled: true, + Name: "awskms", + }, + }, + }}, + expectError: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + _, err := CheckConfig(tt.config, nil) + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index aac19b5df6dc..de51b559e0b0 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -99,18 +102,22 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) { Seals: []*configutil.KMS{ { Type: "nopurpose", + Name: "nopurpose", }, { Type: "stringpurpose", Purpose: []string{"foo"}, + Name: "stringpurpose", }, { Type: "commastringpurpose", Purpose: []string{"foo", "bar"}, + Name: "commastringpurpose", }, { Type: "slicepurpose", Purpose: []string{"zip", "zap"}, + Name: "slicepurpose", }, }, }, @@ -469,6 +476,9 @@ func testLoadConfigFile(t *testing.T) { EnableResponseHeaderRaftNodeIDRaw: true, LicensePath: "/path/to/license", + + PluginDirectory: "/path/to/plugins", + PluginTmpdir: "/tmp/plugins", } addExpectedEntConfig(expected, []string{}) @@ -500,8 +510,8 @@ func testUnknownFieldValidation(t *testing.T) { Problem: "unknown or unsupported field bad_value found in configuration", Position: token.Pos{ Filename: "./test-fixtures/config.hcl", - Offset: 583, - Line: 34, + Offset: 652, + Line: 37, Column: 5, }, }, @@ -569,6 +579,28 @@ func testUnknownFieldValidationHcl(t *testing.T) { } } +// testConfigWithAdministrativeNamespaceJson tests that a config with a valid administrative namespace path is correctly validated and loaded. +func testConfigWithAdministrativeNamespaceJson(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.json") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.json") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + +// testConfigWithAdministrativeNamespaceHcl tests that a config with a valid administrative namespace path is correctly validated and loaded. 
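// (Both fixtures set administrative_namespace_path = "admin/"; the files
// config_with_valid_admin_ns.hcl and config_with_valid_admin_ns.json appear
// later in this diff.)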
+func testConfigWithAdministrativeNamespaceHcl(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.hcl") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.hcl") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + func testLoadConfigFile_json(t *testing.T) { config, err := LoadConfigFile("./test-fixtures/config.hcl.json") if err != nil { @@ -738,6 +770,7 @@ func testConfig_Sanitized(t *testing.T) { "disable_indexing": false, "disable_mlock": true, "disable_performance_standby": false, + "experiments": []string(nil), "plugin_file_uid": 0, "plugin_file_permissions": 0, "disable_printable_check": false, @@ -745,6 +778,7 @@ func testConfig_Sanitized(t *testing.T) { "raw_storage_endpoint": true, "introspection_endpoint": false, "disable_sentinel_trace": true, + "detect_deadlocks": "", "enable_ui": true, "enable_response_header_hostname": false, "enable_response_header_raft_node_id": false, @@ -758,9 +792,11 @@ func testConfig_Sanitized(t *testing.T) { "listeners": []interface{}{ map[string]interface{}{ "config": map[string]interface{}{ - "address": "127.0.0.1:443", + "address": "127.0.0.1:443", + "chroot_namespace": "admin/", + "disable_request_limiter": false, }, - "type": "tcp", + "type": configutil.TCP, }, }, "log_format": "", @@ -768,10 +804,12 @@ func testConfig_Sanitized(t *testing.T) { "max_lease_ttl": (30 * 24 * time.Hour) / time.Second, "pid_file": "./pidfile", "plugin_directory": "", + "plugin_tmpdir": "", "seals": []interface{}{ map[string]interface{}{ "disabled": false, "type": "awskms", + "name": "awskms", }, }, "storage": map[string]interface{}{ @@ -813,7 +851,10 @@ func testConfig_Sanitized(t *testing.T) { "lease_metrics_epsilon": time.Hour, "num_lease_metrics_buckets": 168, "add_lease_metrics_namespace_labels": false, + "add_mount_point_rollback_metrics": false, }, + "administrative_namespace_path": "admin/", + "imprecise_lease_role_tracking": false, } addExpectedEntSanitizedConfig(expected, []string{"http"}) @@ -846,6 +887,24 @@ listener "tcp" { agent_api { enable_quit = true } + proxy_api { + enable_quit = true + } + chroot_namespace = "admin" + redact_addresses = true + redact_cluster_name = true + redact_version = true + disable_request_limiter = true +} +listener "unix" { + address = "/var/run/vault.sock" + socket_mode = "644" + socket_user = "1000" + socket_group = "1000" + redact_addresses = true + redact_cluster_name = true + redact_version = true + disable_request_limiter = true }`)) config := Config{ @@ -853,16 +912,21 @@ listener "tcp" { } list, _ := obj.Node.(*ast.ObjectList) objList := list.Filter("listener") - configutil.ParseListeners(config.SharedConfig, objList) - listeners := config.Listeners - if len(listeners) == 0 { - t.Fatalf("expected at least one listener in the config") - } - listener := listeners[0] - if listener.Type != "tcp" { - t.Fatalf("expected tcp listener in the config") + listeners, err := configutil.ParseListeners(objList) + require.NoError(t, err) + // Update the shared config + config.Listeners = listeners + // Track which types of listener were found. 
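// found() deletes the stanza key from UnusedKeys and appends it to FoundKeys,
// so these listener blocks are not later reported as unknown fields.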
+ for _, l := range config.Listeners { + config.found(l.Type.String(), l.Type.String()) } + require.Len(t, config.Listeners, 2) + tcpListener := config.Listeners[0] + require.Equal(t, configutil.TCP, tcpListener.Type) + unixListener := config.Listeners[1] + require.Equal(t, configutil.Unix, unixListener.Type) + expected := &Config{ SharedConfig: &configutil.SharedConfig{ Listeners: []*configutil.Listener{ @@ -886,7 +950,26 @@ listener "tcp" { AgentAPI: &configutil.AgentAPI{ EnableQuit: true, }, + ProxyAPI: &configutil.ProxyAPI{ + EnableQuit: true, + }, CustomResponseHeaders: DefaultCustomHeaders, + ChrootNamespace: "admin/", + RedactAddresses: true, + RedactClusterName: true, + RedactVersion: true, + DisableRequestLimiter: true, + }, + { + Type: "unix", + Address: "/var/run/vault.sock", + SocketMode: "644", + SocketUser: "1000", + SocketGroup: "1000", + RedactAddresses: false, + RedactClusterName: false, + RedactVersion: false, + DisableRequestLimiter: true, }, }, }, @@ -1075,6 +1158,7 @@ func testParseSeals(t *testing.T) { "default_hmac_key_label": "vault-hsm-hmac-key", "generate_key": "true", }, + Name: "pkcs11", }, { Type: "pkcs11", @@ -1091,10 +1175,12 @@ "default_hmac_key_label": "vault-hsm-hmac-key", "generate_key": "true", }, + Name: "pkcs11-disabled", }, }, }, } + addExpectedDefaultEntConfig(expected) config.Prune() require.Equal(t, config, expected) } diff --git a/command/server/config_test_helpers_stubs_oss.go b/command/server/config_test_helpers_stubs_oss.go new file mode 100644 index 000000000000..f7b6ef7c115e --- /dev/null +++ b/command/server/config_test_helpers_stubs_oss.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func addExpectedEntConfig(c *Config, sentinelModules []string) {} +func addExpectedDefaultEntConfig(c *Config) {} +func addExpectedEntSanitizedConfig(c map[string]interface{}, sentinelModules []string) {} diff --git a/command/server/config_test_helpers_util.go b/command/server/config_test_helpers_util.go deleted file mode 100644 index 63fa3cfe6a80..000000000000 --- a/command/server/config_test_helpers_util.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !enterprise - -package server - -func addExpectedEntConfig(c *Config, sentinelModules []string) {} -func addExpectedEntSanitizedConfig(c map[string]interface{}, sentinelModules []string) {} diff --git a/command/server/config_util.go b/command/server/config_util.go index feefc6092aac..9447ded65222 100644 --- a/command/server/config_util.go +++ b/command/server/config_util.go @@ -1,14 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + //go:build !enterprise package server import ( + "errors" + "fmt" + "github.com/hashicorp/hcl/hcl/ast" ) type entConfig struct{} -func (ec *entConfig) parseConfig(list *ast.ObjectList) error { +func (ec *entConfig) parseConfig(list *ast.ObjectList, source string) error { return nil } @@ -20,3 +26,30 @@ func (ec entConfig) Merge(ec2 entConfig) entConfig { func (ec entConfig) Sanitized() map[string]interface{} { return nil } + +func (c *Config) checkSealConfig() error { + if len(c.Seals) == 0 { + return nil + } + + if len(c.Seals) > 2 { + return fmt.Errorf("seals: at most 2 seals can be provided: received %d", len(c.Seals)) + } + + disabledSeals := 0 + for _, seal := range c.Seals { + if seal.Disabled { + disabledSeals++ + } + } + + if len(c.Seals) > 1 && disabledSeals == len(c.Seals) { + return errors.New("seals: seals provided but all are disabled") + } + + if disabledSeals < len(c.Seals)-1 { + return errors.New("seals: only one seal can be enabled") + } + + return nil +} diff --git a/command/server/config_util_test.go b/command/server/config_util_test.go new file mode 100644 index 000000000000..21e98a22f9be --- /dev/null +++ b/command/server/config_util_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +import ( + "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" +) + +func TestCheckSealConfig(t *testing.T) { + testCases := []struct { + name string + config Config + expectError bool + }{ + { + name: "no-seals", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{}}}, + }, + { + name: "one-seal", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + }}}, + }, + { + name: "one-disabled-seal", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: true, + }, + }}}, + }, + { + name: "two-seals-one-disabled", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + { + Disabled: true, + }, + }}}, + }, + { + name: "two-seals-enabled", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: false, + }, + { + Disabled: false, + }, + }}}, + expectError: true, + }, + { + name: "two-disabled-seals", + config: Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{ + { + Disabled: true, + }, + { + Disabled: true, + }, + }}}, + expectError: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.checkSealConfig() + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/command/server/hcp_link_config_test.go b/command/server/hcp_link_config_test.go index f71c96d76cc5..c038e9b99062 100644 --- a/command/server/hcp_link_config_test.go +++ b/command/server/hcp_link_config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -9,6 +12,10 @@ import ( ) func TestHCPLinkConfig(t *testing.T) { + t.Setenv("HCP_CLIENT_ID", "") + t.Setenv("HCP_CLIENT_SECRET", "") + t.Setenv("HCP_RESOURCE_ID", "") + config, err := LoadConfigFile("./test-fixtures/hcp_link_config.hcl") if err != nil { t.Fatalf("err: %s", err) diff --git a/command/server/listener.go b/command/server/listener.go index 78acbd3e7b92..c003a6289dc1 100644 --- a/command/server/listener.go +++ b/command/server/listener.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -9,17 +12,17 @@ import ( // We must import sha512 so that it registers with the runtime so that // certificates that use it can be parsed. + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/helper/proxyutil" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" ) // ListenerFactory is the factory function to create a listener. type ListenerFactory func(*configutil.Listener, io.Writer, cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) // BuiltinListeners is the list of built-in listener types. -var BuiltinListeners = map[string]ListenerFactory{ +var BuiltinListeners = map[configutil.ListenerType]ListenerFactory{ "tcp": tcpListenerFactory, "unix": unixListenerFactory, } diff --git a/command/server/listener_tcp.go b/command/server/listener_tcp.go index dbba4b40e88c..6c121ec403e2 100644 --- a/command/server/listener_tcp.go +++ b/command/server/listener_tcp.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -9,10 +12,10 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/mitchellh/cli" ) func tcpListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) { diff --git a/command/server/listener_tcp_test.go b/command/server/listener_tcp_test.go index 5ebf6111413e..42da6c0d21a6 100644 --- a/command/server/listener_tcp_test.go +++ b/command/server/listener_tcp_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -11,9 +14,9 @@ import ( "testing" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" "github.com/pires/go-proxyproto" ) diff --git a/command/server/listener_test.go b/command/server/listener_test.go index b1bf62ddfedf..b1c6be73f7f8 100644 --- a/command/server/listener_test.go +++ b/command/server/listener_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( diff --git a/command/server/listener_unix.go b/command/server/listener_unix.go index 3740b58f7c67..35306d166699 100644 --- a/command/server/listener_unix.go +++ b/command/server/listener_unix.go @@ -1,13 +1,16 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package server import ( "io" "net" + "github.com/hashicorp/cli" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/mitchellh/cli" ) func unixListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) { diff --git a/command/server/listener_unix_test.go b/command/server/listener_unix_test.go index e5254a294795..72f21bb471cd 100644 --- a/command/server/listener_unix_test.go +++ b/command/server/listener_unix_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -5,8 +8,8 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/mitchellh/cli" ) func TestUnixListener(t *testing.T) { diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go index 7f357b7f2a96..45e4e9b5165a 100644 --- a/command/server/server_seal_transit_acc_test.go +++ b/command/server/server_seal_transit_acc_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -11,8 +14,8 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/docker" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/docker" ) func TestTransitWrapper_Lifecycle(t *testing.T) { diff --git a/command/server/server_stubs_oss.go b/command/server/server_stubs_oss.go new file mode 100644 index 000000000000..6426318df2a6 --- /dev/null +++ b/command/server/server_stubs_oss.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package server + +import "github.com/hashicorp/vault/internalshared/configutil" + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entValidateConfig(_ *Config, _ string) []configutil.ConfigError { + return nil +} diff --git a/command/server/test-fixtures/config-dir/baz.hcl b/command/server/test-fixtures/config-dir/baz.hcl index 47146c717c17..3f2e01d58dd6 100644 --- a/command/server/test-fixtures/config-dir/baz.hcl +++ b/command/server/test-fixtures/config-dir/baz.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + telemetry { statsd_address = "baz" statsite_address = "qux" diff --git a/command/server/test-fixtures/config-dir/foo.hcl b/command/server/test-fixtures/config-dir/foo.hcl index f538ede1ba4a..2731eb55191f 100644 --- a/command/server/test-fixtures/config-dir/foo.hcl +++ b/command/server/test-fixtures/config-dir/foo.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config.hcl b/command/server/test-fixtures/config.hcl index 38ad47524169..7750e5e6565f 100644 --- a/command/server/test-fixtures/config.hcl +++ b/command/server/test-fixtures/config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true @@ -48,4 +51,6 @@ disable_sealwrap = true disable_printable_check = true enable_response_header_hostname = true enable_response_header_raft_node_id = true -license_path = "/path/to/license" \ No newline at end of file +license_path = "/path/to/license" +plugin_directory = "/path/to/plugins" +plugin_tmpdir = "/tmp/plugins" \ No newline at end of file diff --git a/command/server/test-fixtures/config2.hcl b/command/server/test-fixtures/config2.hcl index 7b1dbfd56faa..0e383fb25910 100644 --- a/command/server/test-fixtures/config2.hcl +++ b/command/server/test-fixtures/config2.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config3.hcl b/command/server/test-fixtures/config3.hcl index 1023284e09e5..587698b35e9e 100644 --- a/command/server/test-fixtures/config3.hcl +++ b/command/server/test-fixtures/config3.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true log_requests_level = "Basic" @@ -9,6 +12,8 @@ cluster_addr = "top_level_cluster_addr" listener "tcp" { address = "127.0.0.1:443" + chroot_namespace="admin/" + disable_request_limiter = false } backend "consul" { @@ -52,3 +57,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_sentinel_trace = true +administrative_namespace_path = "admin/" diff --git a/command/server/test-fixtures/config4.hcl b/command/server/test-fixtures/config4.hcl index b620f3c7e75f..69c767fd6973 100644 --- a/command/server/test-fixtures/config4.hcl +++ b/command/server/test-fixtures/config4.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true ui = true diff --git a/command/server/test-fixtures/config5.hcl b/command/server/test-fixtures/config5.hcl index 3b3c64c7e0af..5fc5935953b4 100644 --- a/command/server/test-fixtures/config5.hcl +++ b/command/server/test-fixtures/config5.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_bad_https_storage.hcl b/command/server/test-fixtures/config_bad_https_storage.hcl index f8b5d7734be2..41b78ba57431 100644 --- a/command/server/test-fixtures/config_bad_https_storage.hcl +++ b/command/server/test-fixtures/config_bad_https_storage.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_custom_response_headers_1.hcl b/command/server/test-fixtures/config_custom_response_headers_1.hcl index c2f868c2f146..b12f2059b822 100644 --- a/command/server/test-fixtures/config_custom_response_headers_1.hcl +++ b/command/server/test-fixtures/config_custom_response_headers_1.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + storage "inmem" {} listener "tcp" { address = "127.0.0.1:8200" diff --git a/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl index 11aa099232f9..99c62b537d95 100644 --- a/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl +++ b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "inmem" {} listener "tcp" { address = "127.0.0.1:8200" diff --git a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl index deded2ddf170..264c6dca701b 100644 --- a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl +++ b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_diagnose_ok.hcl b/command/server/test-fixtures/config_diagnose_ok.hcl index a3f70540bf4d..5e1986762828 100644 --- a/command/server/test-fixtures/config_diagnose_ok.hcl +++ b/command/server/test-fixtures/config_diagnose_ok.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true @@ -42,3 +45,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_printable_check = true +enable_multiseal = true \ No newline at end of file diff --git a/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl b/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl new file mode 100644 index 000000000000..761d87e7b2c6 --- /dev/null +++ b/command/server/test-fixtures/config_diagnose_ok_singleseal.hcl @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +backend "consul" { + address = "127.0.0.1:1025" +} + +ha_backend "consul" { + address = "127.0.0.1:8500" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + address = "127.0.0.1:8500" + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_raft.hcl b/command/server/test-fixtures/config_raft.hcl index c23a434744ba..9563d011d1f6 100644 --- a/command/server/test-fixtures/config_raft.hcl +++ b/command/server/test-fixtures/config_raft.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/config_seals.hcl b/command/server/test-fixtures/config_seals.hcl index 7917dc1b79c3..0761ff19ba7d 100644 --- a/command/server/test-fixtures/config_seals.hcl +++ b/command/server/test-fixtures/config_seals.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + listener "tcp" { address = "127.0.0.1:443" } diff --git a/command/server/test-fixtures/config_small.hcl b/command/server/test-fixtures/config_small.hcl index cfbc28db8fdd..982162f98a61 100644 --- a/command/server/test-fixtures/config_small.hcl +++ b/command/server/test-fixtures/config_small.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "raft" { path = "/path/to/raft" node_id = "raft_node_1" diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.hcl b/command/server/test-fixtures/config_with_valid_admin_ns.hcl new file mode 100644 index 000000000000..af8630612f14 --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +storage "raft" { + path = "/path/to/raft" + node_id = "raft_node_1" +} +listener "tcp" { + address = "127.0.0.1:8200" + tls_cert_file = "/path/to/cert.pem" + tls_key_file = "/path/to/key.key" +} +seal "awskms" { + kms_key_id = "alias/kms-unseal-key" +} +service_registration "consul" { + address = "127.0.0.1:8500" +} +administrative_namespace_path = "admin/" \ No newline at end of file diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.json b/command/server/test-fixtures/config_with_valid_admin_ns.json new file mode 100644 index 000000000000..9f6041381b09 --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.json @@ -0,0 +1,28 @@ +{ + "listener": { + "tcp": { + "address": "0.0.0.0:8200", + "tls_cert_file": "/path/to/cert.pem", + "tls_key_file": "/path/to/key.key" + } + }, + "seal": { + "awskms": { + "kms_key_id": "alias/kms-unseal-key" + } + }, + "storage": { + "raft": { + "path": "/path/to/raft", + "node_id": "raft_node_1" + } + }, + "cluster_addr": "http://127.0.0.1:8201", + "api_addr": "http://127.0.0.1:8200", + "service_registration": { + "consul": { + "address": "127.0.0.1:8500" + } + }, + "administrative_namespace_path": "admin/" +} \ No newline at end of file diff --git a/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl index 6faecaab73fb..8019194148a5 100644 --- a/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl +++ b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry1.hcl b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl index f7629bdd02d2..815b671df7dd 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry1.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true ui = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry2.hcl b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl index 5c967e3ef926..090a7ff6e3e3 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry2.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true ui = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry3.hcl b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl index f9669258460b..0a41c80fec68 100644 --- a/command/server/test-fixtures/diagnose_bad_telemetry3.hcl +++ b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true ui = true diff --git a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl index 3b6a9abf290a..905345301179 100644 --- a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl +++ b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl index d92186f2642d..eaf3660d5975 100644 --- a/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl +++ b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "raft" { path = "/path/to/raft/data" node_id = "raft_node_1" diff --git a/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl index a51c27b37a86..a7007d57313a 100644 --- a/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl +++ b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true @@ -54,3 +57,4 @@ pid_file = "./pidfile" raw_storage_endpoint = true disable_sealwrap = true disable_printable_check = true +enable_multiseal = true \ No newline at end of file diff --git a/command/server/test-fixtures/hcp_link_config.hcl b/command/server/test-fixtures/hcp_link_config.hcl index fc25b760e77c..bffbe83ae607 100644 --- a/command/server/test-fixtures/hcp_link_config.hcl +++ b/command/server/test-fixtures/hcp_link_config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "inmem" {} listener "tcp" { address = "127.0.0.1:8200" diff --git a/command/server/test-fixtures/nostore_config.hcl b/command/server/test-fixtures/nostore_config.hcl index 667570cb0c1c..306ef7c9cacd 100644 --- a/command/server/test-fixtures/nostore_config.hcl +++ b/command/server/test-fixtures/nostore_config.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/raft_retry_join.hcl b/command/server/test-fixtures/raft_retry_join.hcl index a4f1f3df0139..844dd744e40c 100644 --- a/command/server/test-fixtures/raft_retry_join.hcl +++ b/command/server/test-fixtures/raft_retry_join.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "raft" { path = "/storage/path/raft" node_id = "raft1" diff --git a/command/server/test-fixtures/telemetry/filter_default_override.hcl b/command/server/test-fixtures/telemetry/filter_default_override.hcl index 04e55f646cb1..d3d540715ee5 100644 --- a/command/server/test-fixtures/telemetry/filter_default_override.hcl +++ b/command/server/test-fixtures/telemetry/filter_default_override.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_mlock = true ui = true diff --git a/command/server/test-fixtures/telemetry/rollback_mount_point.hcl b/command/server/test-fixtures/telemetry/rollback_mount_point.hcl new file mode 100644 index 000000000000..5aa5a287c8f8 --- /dev/null +++ b/command/server/test-fixtures/telemetry/rollback_mount_point.hcl @@ -0,0 +1,9 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +disable_mlock = true +ui = true + +telemetry { + add_mount_point_rollback_metrics = true +} \ No newline at end of file diff --git a/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl index 814dd1c825f9..a40e392c0921 100644 --- a/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl +++ b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_mlock = true ui = true diff --git a/command/server/test-fixtures/tls_config_ok.hcl b/command/server/test-fixtures/tls_config_ok.hcl index 0dee4b483603..02a2733d4138 100644 --- a/command/server/test-fixtures/tls_config_ok.hcl +++ b/command/server/test-fixtures/tls_config_ok.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + disable_cache = true disable_mlock = true diff --git a/command/server/test-fixtures/unauth_in_flight_access.hcl b/command/server/test-fixtures/unauth_in_flight_access.hcl index eda6641276f1..bb04d3d29ecb 100644 --- a/command/server/test-fixtures/unauth_in_flight_access.hcl +++ b/command/server/test-fixtures/unauth_in_flight_access.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + storage "inmem" {} listener "tcp" { address = "127.0.0.1:8200" diff --git a/command/server/tls_util.go b/command/server/tls_util.go index 34f6a72f61ea..cd07dde92758 100644 --- a/command/server/tls_util.go +++ b/command/server/tls_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package server import ( @@ -24,8 +27,8 @@ type CaCert struct { Signer crypto.Signer } -// GenerateCert creates a new leaf cert from provided CA template and signer -func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (string, string, error) { +// generateCert creates a new leaf cert from provided CA template and signer +func generateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer, extraSANs []string) (string, string, error) { // Create the private key signer, keyPEM, err := privateKey() if err != nil { @@ -77,6 +80,13 @@ func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (str if !foundHostname { template.DNSNames = append(template.DNSNames, hostname) } + for _, san := range extraSANs { + if ip := net.ParseIP(san); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, san) + } + } bs, err := x509.CreateCertificate( rand.Reader, &template, caCertTemplate, signer.Public(), caSigner) diff --git a/command/server/tls_util_test.go b/command/server/tls_util_test.go new file mode 100644 index 000000000000..acb010d4109b --- /dev/null +++ b/command/server/tls_util_test.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package server + +import ( + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +// TestGenerateCertExtraSans ensures the implementation backing the flag +// -dev-tls-san populates alternate DNS and IP address names in the generated +// certificate as expected. +func TestGenerateCertExtraSans(t *testing.T) { + ca, err := GenerateCA() + if err != nil { + t.Fatal(err) + } + + for name, tc := range map[string]struct { + extraSans []string + expectedDNSNames []string + expectedIPAddresses []string + }{ + "empty": {}, + "DNS names": { + extraSans: []string{"foo", "foo.bar"}, + expectedDNSNames: []string{"foo", "foo.bar"}, + }, + "IP addresses": { + extraSans: []string{"0.0.0.0", "::1"}, + expectedIPAddresses: []string{"0.0.0.0", "::1"}, + }, + "mixed": { + extraSans: []string{"bar", "0.0.0.0", "::1"}, + expectedDNSNames: []string{"bar"}, + expectedIPAddresses: []string{"0.0.0.0", "::1"}, + }, + } { + t.Run(name, func(t *testing.T) { + certStr, _, err := generateCert(ca.Template, ca.Signer, tc.extraSans) + if err != nil { + t.Fatal(err) + } + + block, _ := pem.Decode([]byte(certStr)) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + expectedDNSNamesLen := len(tc.expectedDNSNames) + 5 + if len(cert.DNSNames) != expectedDNSNamesLen { + t.Errorf("Wrong number of DNS names, expected %d but got %v", expectedDNSNamesLen, cert.DNSNames) + } + expectedIPAddrLen := len(tc.expectedIPAddresses) + 1 + if len(cert.IPAddresses) != expectedIPAddrLen { + t.Errorf("Wrong number of IP addresses, expected %d but got %v", expectedIPAddrLen, cert.IPAddresses) + } + + for _, expected := range tc.expectedDNSNames { + if !strutil.StrListContains(cert.DNSNames, expected) { + t.Errorf("Missing DNS name %s", expected) + } + } + for _, expected := range tc.expectedIPAddresses { + var found bool + for _, ip := range cert.IPAddresses { + if ip.String() == expected { + found = true + break + } + } + if !found { + t.Errorf("Missing IP address %s", expected) + } + } + }) + } +} diff --git a/command/server_noprofile.go b/command/server_noprofile.go index edaebeb13c05..07a9a3902e2a 100644 --- 
a/command/server_noprofile.go +++ b/command/server_noprofile.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !memprofiler package command diff --git a/command/server_profile.go b/command/server_profile.go index 1ce87a684153..fe5cbf7087dc 100644 --- a/command/server_profile.go +++ b/command/server_profile.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build memprofiler package command diff --git a/command/server_test.go b/command/server_test.go index 4ffdd17a62fc..9a1328ad7eb9 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !race && !hsm && !fips_140_3 // NOTE: we can't use this with HSM. We can't set testing mode on and it's not @@ -8,6 +11,7 @@ package command import ( + "context" "crypto/tls" "crypto/x509" "fmt" @@ -18,9 +22,14 @@ import ( "testing" "time" - "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/mitchellh/cli" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func init() { @@ -86,29 +95,6 @@ cloud { ` ) -func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &ServerCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - SigUSR2Ch: MakeSigUSR2Ch(), - PhysicalBackends: map[string]physical.Factory{ - "inmem": physInmem.NewInmem, - "inmem_ha": physInmem.NewInmemHA, - }, - - // These prevent us from random sleep guessing... 
- startedCh: make(chan struct{}, 5), - reloadedCh: make(chan struct{}, 5), - licenseReloadedCh: make(chan error), - } -} - func TestServer_ReloadListener(t *testing.T) { t.Parallel() @@ -282,6 +268,13 @@ func TestServer(t *testing.T) { 0, []string{"-test-verify-only"}, }, + { + "recovery_mode", + testBaseHCL(t, "") + inmemHCL, + "", + 0, + []string{"-test-verify-only", "-recovery"}, + }, } for _, tc := range cases { @@ -291,26 +284,142 @@ func TestServer(t *testing.T) { t.Parallel() ui, cmd := testServerCommand(t) - f, err := ioutil.TempFile("", "") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - f.WriteString(tc.contents) - f.Close() - defer os.Remove(f.Name()) - args := append(tc.args, "-config", f.Name()) + f, err := os.CreateTemp(t.TempDir(), "") + require.NoErrorf(t, err, "error creating temp dir: %v", err) + + _, err = f.WriteString(tc.contents) + require.NoErrorf(t, err, "cannot write temp file contents") + + err = f.Close() + require.NoErrorf(t, err, "unable to close temp file") + args := append(tc.args, "-config", f.Name()) code := cmd.Run(args) output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, tc.code, code, "expected %d to be %d: %s", code, tc.code, output) + require.Contains(t, output, tc.exp, "expected %q to contain %q", output, tc.exp) + }) + } +} - if code != tc.code { - t.Errorf("expected %d to be %d: %s", code, tc.code, output) - } +// TestServer_DevTLS verifies that a vault server starts up correctly with the -dev-tls flag +func TestServer_DevTLS(t *testing.T) { + ui, cmd := testServerCommand(t) + args := []string{"-dev-tls", "-dev-listen-address=127.0.0.1:0", "-test-server-config"} + retCode := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, 0, retCode, output) + require.Contains(t, output, `tls: "enabled"`) +} - if !strings.Contains(output, tc.exp) { - t.Fatalf("expected %q to contain %q", output, tc.exp) +// TestConfigureDevTLS verifies the various logic paths that flow through the +// configureDevTLS function. +func TestConfigureDevTLS(t *testing.T) { + testcases := []struct { + ServerCommand *ServerCommand + DeferFuncNotNil bool + ConfigNotNil bool + TLSDisable bool + CertPathEmpty bool + ErrNotNil bool + TestDescription string + }{ + { + ServerCommand: &ServerCommand{ + flagDevTLS: false, + }, + ConfigNotNil: true, + TLSDisable: true, + CertPathEmpty: true, + ErrNotNil: false, + TestDescription: "flagDev is false, nothing will be configured", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "", + }, + DeferFuncNotNil: true, + ConfigNotNil: true, + ErrNotNil: false, + TestDescription: "flagDevTLSCertDir is empty", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "@/#", + }, + CertPathEmpty: true, + ErrNotNil: true, + TestDescription: "flagDevTLSCertDir is set to something invalid", + }, + } + + for _, testcase := range testcases { + fun, cfg, certPath, err := configureDevTLS(testcase.ServerCommand) + if fun != nil { + // If a function is returned, call it right away to clean up + // files created in the temporary directory before anything else has + // a chance to fail this test. 
+ fun() + } + + t.Run(testcase.TestDescription, func(t *testing.T) { + assert.Equal(t, testcase.DeferFuncNotNil, (fun != nil)) + assert.Equal(t, testcase.ConfigNotNil, cfg != nil) + if testcase.ConfigNotNil && cfg != nil { + assert.True(t, len(cfg.Listeners) > 0) + assert.Equal(t, testcase.TLSDisable, cfg.Listeners[0].TLSDisable) + } + assert.Equal(t, testcase.CertPathEmpty, len(certPath) == 0) + if testcase.ErrNotNil { + assert.Error(t, err) + } else { + assert.NoError(t, err) } }) } } + +func TestConfigureSeals(t *testing.T) { + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + _, testCommand := testServerCommand(t) + + logger := corehelpers.NewTestLogger(t) + backend, err := physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + testCommand.logger = logger + + setSealResponse, _, err := testCommand.configureSeals(context.Background(), &testConfig, backend, []string{}, map[string]string{}) + if err != nil { + t.Fatal(err) + } + + if len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority()) != 1 { + t.Fatalf("expected 1 seal, got %d", len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority())) + } + + if setSealResponse.barrierSeal.BarrierSealConfigType() != vault.SealConfigTypeShamir { + t.Fatalf("expected shamir seal, got seal type %s", setSealResponse.barrierSeal.BarrierSealConfigType()) + } +} + +func TestReloadSeals(t *testing.T) { + testCore := vault.TestCoreWithSeal(t, vault.NewTestSeal(t, &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedShamirRoot}), false) + _, testCommand := testServerCommand(t) + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + + testCommand.logger = corehelpers.NewTestLogger(t) + ctx := context.Background() + reloaded, err := testCommand.reloadSealsOnSigHup(ctx, testCore, &testConfig) + require.NoError(t, err) + require.False(t, reloaded, "reloadSeals does not support Shamir seals") + + testConfig = server.Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{{Disabled: true}}}} + reloaded, err = testCommand.reloadSealsOnSigHup(ctx, testCore, &testConfig) + require.NoError(t, err) + require.False(t, reloaded, "reloadSeals does not support Shamir seals") +} diff --git a/command/server_util.go b/command/server_util.go index d5d9c8f4f334..667b958595db 100644 --- a/command/server_util.go +++ b/command/server_util.go @@ -1,24 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/vault" -) + "testing" -var ( - adjustCoreConfigForEnt = adjustCoreConfigForEntNoop - storageSupportedForEnt = checkStorageTypeForEntNoop + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/sdk/physical" + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" ) -func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { +func TestServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + return testServerCommand(tb) } -var getFIPSInfoKey = getFIPSInfoKeyNoop +func (c *ServerCommand) StartedCh() chan struct{} { + return c.startedCh +} -func getFIPSInfoKeyNoop() string { - return "" +func (c *ServerCommand) ReloadedCh() chan struct{} { + return c.reloadedCh } -func checkStorageTypeForEntNoop(coreConfig *vault.CoreConfig) bool { - return true +func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + PhysicalBackends: map[string]physical.Factory{ + "inmem": physInmem.NewInmem, + "inmem_ha": physInmem.NewInmemHA, + }, + + // These prevent us from random sleep guessing... + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + licenseReloadedCh: make(chan error), + } } diff --git a/command/ssh.go b/command/ssh.go index e5e5af373e7a..cd39ad45782d 100644 --- a/command/ssh.go +++ b/command/ssh.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -11,9 +14,9 @@ import ( "strings" "syscall" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/ssh" - "github.com/mitchellh/cli" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" "github.com/posener/complete" @@ -238,7 +241,7 @@ type SSHCredentialResp struct { func (c *SSHCommand) Run(args []string) int { f := c.Flags() - if err := f.Parse(args); err != nil { + if err := f.Parse(args, DisableDisplayFlagWarning(true)); err != nil { c.UI.Error(err.Error()) return 1 } diff --git a/command/ssh_test.go b/command/ssh_test.go index 344e3de0d247..b6dfd563d242 100644 --- a/command/ssh_test.go +++ b/command/ssh_test.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( + "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testSSHCommand(tb testing.TB) (*cli.MockUi, *SSHCommand) { @@ -214,3 +218,18 @@ func TestIsSingleSSHArg(t *testing.T) { }) } } + +// TestSSHCommandOmitFlagWarning checks if flags warning messages are printed +// in the output of the CLI command or not. If so, it will fail. +func TestSSHCommandOmitFlagWarning(t *testing.T) { + t.Parallel() + + ui, cmd := testSSHCommand(t) + + _ = cmd.Run([]string{"-mode", "ca", "-role", "otp_key_role", "user@1.2.3.4", "-extraFlag", "bug"}) + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if strings.Contains(combined, "Command flags must be provided before positional arguments. 
The following arguments will not be parsed as flags") { + t.Fatalf("ssh command displayed flag warnings") + } +} diff --git a/command/status.go b/command/status.go index 770adfcf3d48..9f7c7010f86d 100644 --- a/command/status.go +++ b/command/status.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/status_test.go b/command/status_test.go index e34a72c578d1..47a2803d66cb 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testStatusCommand(tb testing.TB) (*cli.MockUi, *StatusCommand) { diff --git a/command/test-backend/main.go b/command/test-backend/main.go new file mode 100644 index 000000000000..69a6fcd0aa5f --- /dev/null +++ b/command/test-backend/main.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package test_backend diff --git a/command/test-fixtures/config.hcl b/command/test-fixtures/config.hcl index 31de773909c9..9161fff4520b 100644 --- a/command/test-fixtures/config.hcl +++ b/command/test-fixtures/config.hcl @@ -1 +1,4 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + token_helper = "foo" diff --git a/command/test-fixtures/policy.hcl b/command/test-fixtures/policy.hcl index 7d46bdeabe16..6160bf780274 100644 --- a/command/test-fixtures/policy.hcl +++ b/command/test-fixtures/policy.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + path "secret/foo" { policy = "write" } diff --git a/command/token.go b/command/token.go index 20af230a5b30..eb430b48daa0 100644 --- a/command/token.go +++ b/command/token.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) var _ cli.Command = (*TokenCommand)(nil) diff --git a/command/token/helper.go b/command/token/helper.go index ff559e40d447..a4bf1fa0f866 100644 --- a/command/token/helper.go +++ b/command/token/helper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token // TokenHelper is an interface that contains basic operations that must be diff --git a/command/token/helper_external.go b/command/token/helper_external.go index 83f5f8907291..26e7f44ee8e9 100644 --- a/command/token/helper_external.go +++ b/command/token/helper_external.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/command/token/helper_external_test.go b/command/token/helper_external_test.go index b49dd93343cc..d7b03236022d 100644 --- a/command/token/helper_external_test.go +++ b/command/token/helper_external_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/command/token/helper_internal.go b/command/token/helper_internal.go index c5f35721ee9e..866ff1880e10 100644 --- a/command/token/helper_internal.go +++ b/command/token/helper_internal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/command/token/helper_internal_test.go b/command/token/helper_internal_test.go index 18f3abae56f6..10a7a0cc974f 100644 --- a/command/token/helper_internal_test.go +++ b/command/token/helper_internal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/command/token/helper_testing.go b/command/token/helper_testing.go index 93465931b789..e948092f4506 100644 --- a/command/token/helper_testing.go +++ b/command/token/helper_testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( diff --git a/command/token/testing.go b/command/token/testing.go index 725f1276a052..24dc3258e511 100644 --- a/command/token/testing.go +++ b/command/token/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package token import ( @@ -6,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) // Test is a public function that can be used in other tests to diff --git a/command/token_capabilities.go b/command/token_capabilities.go index 093765630d66..239793658bad 100644 --- a/command/token_capabilities.go +++ b/command/token_capabilities.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "sort" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) @@ -16,6 +19,8 @@ var ( type TokenCapabilitiesCommand struct { *BaseCommand + + flagAccessor bool } func (c *TokenCapabilitiesCommand) Synopsis() string { @@ -24,12 +29,15 @@ func (c *TokenCapabilitiesCommand) Synopsis() string { func (c *TokenCapabilitiesCommand) Help() string { helpText := ` -Usage: vault token capabilities [options] [TOKEN] PATH +Usage: vault token capabilities [options] [TOKEN | ACCESSOR] PATH - Fetches the capabilities of a token for a given path. If a TOKEN is provided - as an argument, the "/sys/capabilities" endpoint and permission is used. If - no TOKEN is provided, the "/sys/capabilities-self" endpoint and permission - is used with the locally authenticated token. + Fetches the capabilities of a token or accessor for a given path. If a TOKEN + is provided as an argument, the "/sys/capabilities" endpoint is used, which + returns the capabilities of the provided TOKEN. If an ACCESSOR is provided + as an argument along with the -accessor option, the "/sys/capabilities-accessor" + endpoint is used, which returns the capabilities of the token referenced by + ACCESSOR. If no TOKEN is provided, the "/sys/capabilities-self" endpoint + is used, which returns the capabilities of the locally authenticated token. List capabilities for the local token on the "secret/foo" path: @@ -39,6 +47,10 @@ Usage: vault token capabilities [options] [TOKEN] PATH $ vault token capabilities 96ddf4bc-d217-f3ba-f9bd-017055595017 cubbyhole/foo + List capabilities for a token on the "cubbyhole/foo" path via its accessor: + + $ vault token capabilities -accessor 9793c9b3-e04a-46f3-e7b8-748d7da248da cubbyhole/foo + For a full list of examples, please see the documentation. 
` + c.Flags().Help() @@ -47,7 +59,20 @@ Usage: vault token capabilities [options] [TOKEN] PATH } func (c *TokenCapabilitiesCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "accessor", + Target: &c.flagAccessor, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Treat the argument as an accessor instead of a token.", + }) + + return set } func (c *TokenCapabilitiesCommand) AutocompleteArgs() complete.Predictor { @@ -69,13 +94,19 @@ func (c *TokenCapabilitiesCommand) Run(args []string) int { token := "" path := "" args = f.Args() - switch len(args) { - case 0: + switch { + case c.flagAccessor && len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments with -accessor (expected 2, got %d)", len(args))) + return 1 + case c.flagAccessor && len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments with -accessor (expected 2, got %d)", len(args))) + return 1 + case len(args) == 0: c.UI.Error("Not enough arguments (expected 1-2, got 0)") return 1 - case 1: + case len(args) == 1: path = args[0] - case 2: + case len(args) == 2: token, path = args[0], args[1] default: c.UI.Error(fmt.Sprintf("Too many arguments (expected 1-2, got %d)", len(args))) @@ -89,11 +120,15 @@ func (c *TokenCapabilitiesCommand) Run(args []string) int { } var capabilities []string - if token == "" { + switch { + case token == "": capabilities, err = client.Sys().CapabilitiesSelf(path) - } else { + case c.flagAccessor: + capabilities, err = client.Sys().CapabilitiesAccessor(token, path) + default: capabilities, err = client.Sys().Capabilities(token, path) } + if err != nil { c.UI.Error(fmt.Sprintf("Error listing capabilities: %s", err)) return 2 diff --git a/command/token_capabilities_test.go b/command/token_capabilities_test.go index 874db49129af..1588b14a330a 100644 --- a/command/token_capabilities_test.go +++ b/command/token_capabilities_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testTokenCapabilitiesCommand(tb testing.TB) (*cli.MockUi, *TokenCapabilitiesCommand) { @@ -28,6 +31,24 @@ func TestTokenCapabilitiesCommand_Run(t *testing.T) { out string code int }{ + { + "accessor_no_args", + []string{"-accessor"}, + "Not enough arguments", + 1, + }, + { + "accessor_too_few_args", + []string{"-accessor", "abcd1234"}, + "Not enough arguments", + 1, + }, + { + "accessor_too_many_args", + []string{"-accessor", "abcd1234", "efgh5678", "ijkl9012"}, + "Too many arguments", + 1, + }, { "too_many_args", []string{"foo", "bar", "zip"}, @@ -100,6 +121,48 @@ func TestTokenCapabilitiesCommand_Run(t *testing.T) { } }) + t.Run("accessor", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/foo" { capabilities = ["read"] }` + if err := client.Sys().PutPolicy("policy", policy); err != nil { + t.Error(err) + } + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"policy"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("missing auth data: %#v", secret) + } + accessor := secret.Auth.Accessor + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-accessor", + accessor, + "secret/foo", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "read" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + t.Run("local", func(t *testing.T) { t.Parallel() diff --git a/command/token_create.go b/command/token_create.go index a8dc2f03ea7a..3e49bb2ca72b 100644 --- a/command/token_create.go +++ b/command/token_create.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_create_test.go b/command/token_create_test.go index 1fd11b1e9f84..3acd2dd1474e 100644 --- a/command/token_create_test.go +++ b/command/token_create_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,7 +8,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenCreateCommand(tb testing.TB) (*cli.MockUi, *TokenCreateCommand) { @@ -68,6 +71,12 @@ func TestTokenCreateCommand_Run(t *testing.T) { "not present in secret", 1, }, + { + "ttl", + []string{"-ttl", "1d", "-explicit-max-ttl", "2d"}, + "token", + 0, + }, } t.Run("validations", func(t *testing.T) { diff --git a/command/token_lookup.go b/command/token_lookup.go index 55284a29d1bc..afb622372b1e 100644 --- a/command/token_lookup.go +++ b/command/token_lookup.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_lookup_test.go b/command/token_lookup_test.go index e027b3f7c97b..6a351f781c5a 100644 --- a/command/token_lookup_test.go +++ b/command/token_lookup_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenLookupCommand(tb testing.TB) (*cli.MockUi, *TokenLookupCommand) { diff --git a/command/token_renew.go b/command/token_renew.go index 88d6fa20fb2c..c354b4e6a506 100644 --- a/command/token_renew.go +++ b/command/token_renew.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "strings" "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/token_renew_test.go b/command/token_renew_test.go index c958d4d55af6..4fc469995b05 100644 --- a/command/token_renew_test.go +++ b/command/token_renew_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,7 +9,7 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenRenewCommand(tb testing.TB) (*cli.MockUi, *TokenRenewCommand) { diff --git a/command/token_revoke.go b/command/token_revoke.go index f6eb72101bb3..c9f6a2b7f22f 100644 --- a/command/token_revoke.go +++ b/command/token_revoke.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/token_revoke_test.go b/command/token_revoke_test.go index 7f66e9d4a0d8..3cdf13d615e8 100644 --- a/command/token_revoke_test.go +++ b/command/token_revoke_test.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) func testTokenRevokeCommand(tb testing.TB) (*cli.MockUi, *TokenRevokeCommand) { diff --git a/command/transform.go b/command/transform.go new file mode 100644 index 000000000000..46129cd32e29 --- /dev/null +++ b/command/transform.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*TransformCommand)(nil) + +type TransformCommand struct { + *BaseCommand +} + +func (c *TransformCommand) Synopsis() string { + return "Interact with Vault's Transform Secrets Engine" +} + +func (c *TransformCommand) Help() string { + helpText := ` +Usage: vault transform [options] [args] + + This command has subcommands for interacting with Vault's Transform Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + To import a key into a new FPE transformation: + + $ vault transform import transform/transformations/fpe/new-transformation @path/to/key \ + template=identifier \ + allowed_roles=physical-access + + Please see the individual subcommand help for detailed usage information. 
+` + + return strings.TrimSpace(helpText) +} + +func (c *TransformCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transform_import_key.go b/command/transform_import_key.go new file mode 100644 index 000000000000..d01100acea04 --- /dev/null +++ b/command/transform_import_key.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "errors" + "regexp" + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportCommand)(nil) + transformKeyPath = regexp.MustCompile("^(.*)/transformations/(fpe|tokenization)/([^/]*)$") +) + +type TransformImportCommand struct { + *BaseCommand +} + +func (c *TransformImportCommand) Synopsis() string { + return "Import a key into the Transform secrets engines." +} + +func (c *TransformImportCommand) Help() string { + helpText := ` +Usage: vault transform import PATH KEY [options...] + + Using the Transform key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new FPE or tokenization transformation whose API path is PATH. + + To import a new key version into an existing tokenization transformation, + use import_version. + + The remaining options after KEY (key=value style) are passed on to + Create/Update FPE Transformation or Create/Update Tokenization Transformation + API endpoints. + + For example: + $ vault transform import transform/transformations/tokenization/application-form @path/to/key \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import", transformImportKeyPath, c.Flags(), args) +} + +func transformImportKeyPath(s string, operation string) (path string, apiPath string, err error) { + parts := transformKeyPath.FindStringSubmatch(s) + if len(parts) != 4 { + return "", "", errors.New("expected transform path and key name in the form :path:/transformations/fpe|tokenization/:name:") + } + path = parts[1] + transformation := parts[2] + keyName := parts[3] + apiPath = path + "/transformations/" + transformation + "/" + keyName + "/" + operation + + return path, apiPath, nil +} diff --git a/command/transform_import_key_version.go b/command/transform_import_key_version.go new file mode 100644 index 000000000000..61a6db45b674 --- /dev/null +++ b/command/transform_import_key_version.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportVersionCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportVersionCommand)(nil) +) + +type TransformImportVersionCommand struct { + *BaseCommand +} + +func (c *TransformImportVersionCommand) Synopsis() string { + return "Import key material into a new key version in the Transform secrets engines." 
+} + +func (c *TransformImportVersionCommand) Help() string { + helpText := ` +Usage: vault transform import-version PATH KEY [...] + + Using the Transform key wrapping system, imports new key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into an existing tokenization transformation whose API path is PATH. + + The remaining options after KEY (key=value style) are passed on to + Create/Update Tokenization Transformation API endpoint. + + For example: + $ vault transform import-version transform/transformations/tokenization/application-form @path/to/new_version \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportVersionCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportVersionCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportVersionCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportVersionCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import_version", transformImportKeyPath, c.Flags(), args) +} diff --git a/command/transit.go b/command/transit.go new file mode 100644 index 000000000000..9b988d7e3c70 --- /dev/null +++ b/command/transit.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*TransitCommand)(nil) + +type TransitCommand struct { + *BaseCommand +} + +func (c *TransitCommand) Synopsis() string { + return "Interact with Vault's Transit Secrets Engine" +} + +func (c *TransitCommand) Help() string { + helpText := ` +Usage: vault transit [options] [args] + + This command has subcommands for interacting with Vault's Transit Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + To import a key into the specified Transit mount: + + $ vault transit import transit/keys/newly-imported @path/to/key type=rsa-2048 + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *TransitCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transit_import_key.go b/command/transit_import_key.go new file mode 100644 index 000000000000..350821b07612 --- /dev/null +++ b/command/transit_import_key.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "os" + "regexp" + "strings" + + "github.com/hashicorp/vault/api" + + "github.com/google/tink/go/kwp/subtle" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransitImportCommand)(nil) + _ cli.CommandAutocomplete = (*TransitImportCommand)(nil) + keyPath = regexp.MustCompile("^(.*)/keys/([^/]*)$") +) + +type TransitImportCommand struct { + *BaseCommand +} + +func (c *TransitImportCommand) Synopsis() string { + return "Import a key into the Transit secrets engines." +} + +func (c *TransitImportCommand) Help() string { + helpText := ` +Usage: vault transit import PATH KEY [options...] 
+ + Using the Transit key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new version into an + existing key, use import_version. The remaining options after KEY (key=value + style) are passed on to the Transit create key endpoint. If your + system or device natively supports the RSA AES key wrap mechanism (such as + the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly + rather than this command. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransitImportCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransitImportCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransitImportCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransitImportCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import", transitImportKeyPath, c.Flags(), args) +} + +func transitImportKeyPath(s string, operation string) (path string, apiPath string, err error) { + parts := keyPath.FindStringSubmatch(s) + if len(parts) != 3 { + return "", "", errors.New("expected transit path and key name in the form :path:/keys/:name:") + } + path = parts[1] + keyName := parts[2] + apiPath = path + "/keys/" + keyName + "/" + operation + + return path, apiPath, nil +} + +type ImportKeyFunc func(s string, operation string) (path string, apiPath string, err error) + +// error codes: 1: user error, 2: internal computation error, 3: remote api call error +func ImportKey(c *BaseCommand, operation string, pathFunc ImportKeyFunc, flags *FlagSets, args []string) int { + // Parse and validate the arguments. + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + if len(args) < 2 { + c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2+, got %d). 
Wanted PATH to import into and KEY material.", len(args)))
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	ephemeralAESKey := make([]byte, 32)
+	_, err = rand.Read(ephemeralAESKey)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to generate ephemeral key: %v", err))
+		return 2
+	}
+	path, apiPath, err := pathFunc(args[0], operation)
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+	keyMaterial := args[1]
+	if keyMaterial[0] == '@' {
+		keyMaterialBytes, err := os.ReadFile(keyMaterial[1:])
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("error reading key material file: %v", err))
+			return 1
+		}
+
+		keyMaterial = string(keyMaterialBytes)
+	}
+
+	key, err := base64.StdEncoding.DecodeString(keyMaterial)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("error base64 decoding source key material: %v", err))
+		return 1
+	}
+
+	// Fetch the wrapping key
+	c.UI.Output("Retrieving wrapping key.")
+	wrappingKey, err := fetchWrappingKey(client, path)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to fetch wrapping key: %v", err))
+		return 3
+	}
+
+	c.UI.Output("Wrapping source key with ephemeral key.")
+	wrapKWP, err := subtle.NewKWP(ephemeralAESKey)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure building key wrapping key: %v", err))
+		return 2
+	}
+	wrappedTargetKey, err := wrapKWP.Wrap(key)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure wrapping source key: %v", err))
+		return 2
+	}
+
+	c.UI.Output("Encrypting ephemeral key with wrapping key.")
+	wrappedAESKey, err := rsa.EncryptOAEP(
+		sha256.New(),
+		rand.Reader,
+		wrappingKey,
+		ephemeralAESKey,
+		[]byte{},
+	)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure encrypting wrapped key: %v", err))
+		return 2
+	}
+
+	combinedCiphertext := append(wrappedAESKey, wrappedTargetKey...)
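+	// Note: the combined blob is the RSA-OAEP-encrypted ephemeral AES key
+	// followed by the KWP-wrapped target key; once base64-encoded below, it
+	// becomes the "ciphertext" value submitted to the import endpoint.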
+	importCiphertext := base64.StdEncoding.EncodeToString(combinedCiphertext)
+
+	// Parse all the key options
+	data, err := parseArgsData(os.Stdin, args[2:])
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Failed to parse extra K=V data: %s", err))
+		return 1
+	}
+	if data == nil {
+		data = make(map[string]interface{}, 1)
+	}
+
+	data["ciphertext"] = importCiphertext
+
+	c.UI.Output("Submitting wrapped key.")
+
+	// Finally, call import
+	_, err = client.Logical().Write(apiPath, data)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to call import: %v", err))
+		return 3
+	} else {
+		c.UI.Output("Success!")
+		return 0
+	}
+}
+
+func fetchWrappingKey(client *api.Client, path string) (*rsa.PublicKey, error) {
+	resp, err := client.Logical().Read(path + "/wrapping_key")
+	if err != nil {
+		return nil, fmt.Errorf("error fetching wrapping key: %w", err)
+	}
+	if resp == nil {
+		return nil, fmt.Errorf("no mount found at %s", path)
+	}
+	key, ok := resp.Data["public_key"]
+	if !ok {
+		return nil, fmt.Errorf("missing public_key field in response")
+	}
+	keyBlock, _ := pem.Decode([]byte(key.(string)))
+	if keyBlock == nil {
+		return nil, fmt.Errorf("failed to decode PEM information from public_key response field")
+	}
+	parsedKey, err := x509.ParsePKIXPublicKey(keyBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing wrapping key: %w", err)
+	}
+	rsaKey, ok := parsedKey.(*rsa.PublicKey)
+	if !ok {
+		return nil, fmt.Errorf("returned value was not an RSA public key but a %T", parsedKey)
+	}
+	return rsaKey, nil
+}
diff --git a/command/transit_import_key_test.go b/command/transit_import_key_test.go
new file mode 100644
index 000000000000..21884c0799ca
--- /dev/null
+++ b/command/transit_import_key_test.go
@@ -0,0 +1,200 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package command
+
+import (
+	"bytes"
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+
+	"github.com/stretchr/testify/require"
+)
+
+// Validate the `vault transit import` command works.
+func TestTransitImport(t *testing.T) {
+	t.Parallel()
+
+	client, closer := testVaultServer(t)
+	defer closer()
+
+	if err := client.Sys().Mount("transit", &api.MountInput{
+		Type: "transit",
+	}); err != nil {
+		t.Fatalf("transit mount error: %#v", err)
+	}
+
+	// Force the generation of the Transit wrapping key now with a longer context
+	// to help the 32-bit nightly tests.
This creates a 4096-bit RSA key which can take + // a while on an overloaded system + genWrappingKeyCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + if _, err := client.Logical().ReadWithContext(genWrappingKeyCtx, "transit/wrapping_key"); err != nil { + t.Fatalf("transit failed generating wrapping key: %#v", err) + } + + rsa1, rsa2, aes128, aes256 := generateKeys(t) + + type testCase struct { + variant string + path string + key []byte + args []string + shouldFail bool + } + tests := []testCase{ + { + "import", + "transit/keys/rsa1", + rsa1, + []string{"type=rsa-2048"}, + false, /* first import */ + }, + { + "import", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + false, /* new version */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-4096"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-2048"}, + false, /* new name */ + }, + { + "import", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* first import */ + }, + { + "import", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* new version, different type */ + }, + { + "import-version", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* new version */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes128-gcm96"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes256-gcm96"}, + false, /* new name */ + }, + } + + for index, tc := range tests { + t.Logf("Running test case %d: %v", index, tc) + execTransitImport(t, client, tc.variant, tc.path, tc.key, tc.args, tc.shouldFail) + } +} + +func execTransitImport(t *testing.T, client *api.Client, method string, path string, key []byte, data []string, expectFailure bool) { + t.Helper() + + keyBase64 := base64.StdEncoding.EncodeToString(key) + + var args []string + args = append(args, "transit") + args = append(args, method) + args = append(args, path) + args = append(args, keyBase64) + args = append(args, data...) 
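+
+	// The assembled slice mirrors a CLI invocation, e.g.:
+	//   vault transit import transit/keys/rsa1 <base64-encoded key> type=rsa-2048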
+ + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom(args, runOpts) + combined := stdout.String() + stderr.String() + + if code != 0 { + if !expectFailure { + t.Fatalf("Got unexpected failure from test (ret %d): %v", code, combined) + } + } else { + if expectFailure { + t.Fatalf("Expected failure, got success from test (ret %d): %v", code, combined) + } + } +} + +func generateKeys(t *testing.T) (rsa1 []byte, rsa2 []byte, aes128 []byte, aes256 []byte) { + t.Helper() + + priv1, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv1, "failed generating RSA 1 key") + require.NoError(t, err, "failed generating RSA 1 key") + + rsa1, err = x509.MarshalPKCS8PrivateKey(priv1) + require.NotNil(t, rsa1, "failed marshaling RSA 1 key") + require.NoError(t, err, "failed marshaling RSA 1 key") + + priv2, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv2, "failed generating RSA 2 key") + require.NoError(t, err, "failed generating RSA 2 key") + + rsa2, err = x509.MarshalPKCS8PrivateKey(priv2) + require.NotNil(t, rsa2, "failed marshaling RSA 2 key") + require.NoError(t, err, "failed marshaling RSA 2 key") + + aes128 = make([]byte, 128/8) + _, err = rand.Read(aes128) + require.NoError(t, err, "failed generating AES 128 key") + + aes256 = make([]byte, 256/8) + _, err = rand.Read(aes256) + require.NoError(t, err, "failed generating AES 256 key") + + return +} diff --git a/command/transit_import_key_version.go b/command/transit_import_key_version.go new file mode 100644 index 000000000000..cf248554f779 --- /dev/null +++ b/command/transit_import_key_version.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransitImportVersionCommand)(nil) + _ cli.CommandAutocomplete = (*TransitImportVersionCommand)(nil) +) + +type TransitImportVersionCommand struct { + *BaseCommand +} + +func (c *TransitImportVersionCommand) Synopsis() string { + return "Import key material into a new key version in the Transit secrets engines." +} + +func (c *TransitImportVersionCommand) Help() string { + helpText := ` +Usage: vault transit import-version PATH KEY [...] + + Using the Transit key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new Transit + key, use the import command instead. The remaining options after KEY + (key=value style) are passed on to the Transit create key endpoint. + If your system or device natively supports the RSA AES key wrap mechanism + (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it + directly rather than this command. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransitImportVersionCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransitImportVersionCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransitImportVersionCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransitImportVersionCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import_version", transitImportKeyPath, c.Flags(), args) +} diff --git a/command/unwrap.go b/command/unwrap.go index 53ff0787de28..a671071c15a0 100644 --- a/command/unwrap.go +++ b/command/unwrap.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) diff --git a/command/unwrap_test.go b/command/unwrap_test.go index 4a06418b027c..518d32fb12d6 100644 --- a/command/unwrap_test.go +++ b/command/unwrap_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testUnwrapCommand(tb testing.TB) (*cli.MockUi, *UnwrapCommand) { diff --git a/command/util.go b/command/util.go index 8c0215250a7f..717191025d24 100644 --- a/command/util.go +++ b/command/util.go @@ -1,16 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "io" + "net/http" "os" + "testing" "time" "github.com/fatih/color" + "github.com/hashicorp/cli" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/command/config" "github.com/hashicorp/vault/command/token" - "github.com/mitchellh/cli" ) // DefaultTokenHelper returns the token helper that is configured for Vault. @@ -158,3 +164,40 @@ func getWriterFromUI(ui cli.Ui) io.Writer { return os.Stdout } } + +func mockClient(t *testing.T) (*api.Client, *recordingRoundTripper) { + t.Helper() + + config := api.DefaultConfig() + httpClient := cleanhttp.DefaultClient() + roundTripper := &recordingRoundTripper{} + httpClient.Transport = roundTripper + config.HttpClient = httpClient + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + + return client, roundTripper +} + +var _ http.RoundTripper = (*recordingRoundTripper)(nil) + +type recordingRoundTripper struct { + path string + body []byte +} + +func (r *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + r.path = req.URL.Path + defer req.Body.Close() + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + + r.body = body + return &http.Response{ + StatusCode: 200, + }, nil +} diff --git a/command/version.go b/command/version.go index 5e5503ba8b9f..8b54511c099a 100644 --- a/command/version.go +++ b/command/version.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" "github.com/posener/complete" ) diff --git a/command/version_history.go b/command/version_history.go index 56d3deb96354..7326bffdeddf 100644 --- a/command/version_history.go +++ b/command/version_history.go @@ -1,10 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package command import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" "github.com/ryanuber/columnize" ) diff --git a/command/version_history_test.go b/command/version_history_test.go index 69bd56788868..8d2e18445107 100644 --- a/command/version_history_test.go +++ b/command/version_history_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" ) func testVersionHistoryCommand(tb testing.TB) (*cli.MockUi, *VersionHistoryCommand) { diff --git a/command/version_test.go b/command/version_test.go index 0f59e9ffcb6c..abacfd3662c0 100644 --- a/command/version_test.go +++ b/command/version_test.go @@ -1,11 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" ) func testVersionCommand(tb testing.TB) (*cli.MockUi, *VersionCommand) { diff --git a/command/write.go b/command/write.go index 3daa2bae60ff..33ee3be0f242 100644 --- a/command/write.go +++ b/command/write.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -6,8 +9,8 @@ import ( "os" "strings" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) @@ -152,7 +155,8 @@ func handleWriteSecretOutput(c *BaseCommand, path string, secret *api.Secret, er } if secret == nil { // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { + // and even then, don't output anything if a specific field was requested + if c.flagField == "" && Format(c.UI) == "table" { c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) } return 0 diff --git a/command/write_test.go b/command/write_test.go index 03aab4c79af2..2e7a32833fa1 100644 --- a/command/write_test.go +++ b/command/write_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package command import ( @@ -5,8 +8,8 @@ import ( "strings" "testing" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" ) func testWriteCommand(tb testing.TB) (*cli.MockUi, *WriteCommand) { @@ -115,6 +118,30 @@ func TestWriteCommand_Run(t *testing.T) { }) } + // If we ask for a field and get an empty result, do not output "Success!" 
or anything else
+	t.Run("field_from_nothing", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testWriteCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"-field", "somefield",
+			"secret/write/foo", "foo=bar",
+		})
+		if exp := 0; code != exp {
+			t.Fatalf("expected %d to be %d: %q", code, exp, ui.ErrorWriter.String())
+		}
+
+		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+		if combined != "" {
+			t.Errorf("expected %q to be empty", combined)
+		}
+	})
+
 	t.Run("force", func(t *testing.T) {
 		t.Parallel()
diff --git a/enos/Makefile b/enos/Makefile
index ad27fb0ffbe9..d1f933453d50 100644
--- a/enos/Makefile
+++ b/enos/Makefile
@@ -1,5 +1,5 @@
 .PHONY: default
-default: check-fmt
+default: check-fmt shellcheck

 .PHONY: check-fmt
 check-fmt: check-fmt-enos check-fmt-modules
@@ -22,3 +22,14 @@ check-fmt-modules:
 .PHONY: fmt-modules
 fmt-modules:
 	terraform fmt -diff -recursive ./modules
+
+.PHONY: validate-enos
+validate-enos:
+	enos scenario validate --timeout 30m0s
+
+.PHONY: lint
+lint: check-fmt shellcheck validate-enos
+
+.PHONY: shellcheck
+shellcheck:
+	find ./modules/ -type f -name '*.sh' | xargs shellcheck
diff --git a/enos/README.md b/enos/README.md
index 2aef9b9e0755..1ec6b8e13d4b 100644
--- a/enos/README.md
+++ b/enos/README.md
@@ -18,36 +18,35 @@ is going to give you faster feedback and execution time, whereas Enos is going
 to give you a real-world execution and validation of the requirement.
 Consider the following cases as examples of when one might opt for an Enos
 scenario:
-* The feature require third-party integrations. Whether that be networked
+- The feature requires third-party integrations. Whether that be networked
   dependencies like a real Consul backend, a real KMS key to test awskms
   auto-unseal, auto-join discovery using AWS tags, or Cloud hardware KMS's.
-* The feature might behave differently under multiple configuration variants
+- The feature might behave differently under multiple configuration variants
   and therefore should be tested with both combinations, e.g. auto-unseal and
   manual shamir unseal or replication in HA mode with integrated storage or
   Consul storage.
-* The scenario requires coordination between multiple targets. For example,
+- The scenario requires coordination between multiple targets. For example,
   consider the complex lifecycle event of migrating the seal type or storage,
   or manually triggering a raft disaster scenario by partitioning the network
   between the leader and follower nodes. Or perhaps an auto-pilot upgrade
   between a stable version of Vault and our candidate version.
-* The scenario has specific deployment strategy requirements. For example,
+- The scenario has specific deployment strategy requirements. For example,
   if we want to add a regression test for an issue that only arises when the
   software is deployed in a certain manner.
-* The scenario needs to use actual build artifacts that will be promoted
+- The scenario needs to use actual build artifacts that will be promoted
   through the pipeline.

 ## Requirements

-* AWS access. HashiCorp Vault developers should use Doormat.
-* Terraform >= 1.2
-* Enos >= v0.0.10. You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md).
-* Access to the QTI org in Terraform Cloud. HashiCorp Vault developers can
-  access a shared token in 1Password or request their own in #team-quality on
-  Slack.
-* An SSH keypair in the AWS region you wish to run the scenario. You can use
+- AWS access. HashiCorp Vault developers should use Doormat.
+- Terraform >= 1.7
+- Enos >= v0.0.28. You can [download a release](https://github.com/hashicorp/enos/releases/) or
+  install it with Homebrew:
+  ```shell
+  brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+  ```
+- An SSH keypair in the AWS region you wish to run the scenario. You can use
   Doormat to log in to the AWS console to create or upload an existing keypair.
-* A Vault install bundle downloaded from releases.hashicorp.com or Artifactory
-  when using the `builder:crt` variants. When using the `builder:local` variants
-  Enos will build a Vault bundle from the current branch for you.
+- A Vault artifact. It is downloaded from the GHA artifacts when using the
+  `artifact_source:crt` variant, downloaded from Artifactory when using the
+  `artifact_source:artifactory` variant, and built locally from the current
+  branch when using the `artifact_source:local` variant.

 ## Scenario Variables

 In CI, each scenario is executed via Github Actions and has been configured using
@@ -59,7 +58,6 @@ variables, or you can update `enos.vars.hcl` with values and uncomment the lines

 Variables that are required:
 * `aws_ssh_keypair_name`
 * `aws_ssh_private_key_path`
-* `tfc_api_token`
 * `vault_bundle_path`
 * `vault_license_path` (only required for non-OSS editions)
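+
+For example, a minimal `enos.vars.hcl` for the OSS edition might look like the
+following (the keypair name and paths are illustrative placeholders):
+
+```hcl
+aws_ssh_keypair_name     = "enos-ci"
+aws_ssh_private_key_path = "/Users/me/.ssh/enos-ci.pem"
+vault_bundle_path        = "/tmp/vault.zip"
+# vault_license_path     = "/tmp/vault.hclic" # non-OSS editions only
+```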
@@ -75,24 +73,24 @@ enos scenario list
 # Run the smoke or upgrade scenario with an artifact that is built locally. Make sure
 # the local machine has been configured as detailed in the requirements
 # section. This will execute the scenario and clean up any resources if successful.
-enos scenario run smoke builder:local
-enos scenario run upgrade builder:local
+enos scenario run smoke artifact_source:local
+enos scenario run upgrade artifact_source:local

 # To run the same scenario variants that are run in CI, refer to the scenarios listed
-# in .github/workflows/enos-run.yml under `jobs.enos.strategy.matrix.include`,
-# adding `builder:local` to run locally.
-enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms builder:local arch:amd64 edition:oss
+# in the json files under the .github/enos-run-matrices directory,
+# adding `artifact_source:local` to run locally.
+enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms artifact_source:local arch:amd64 edition:oss

 # Launch an individual scenario but leave infrastructure up after execution
-enos scenario launch smoke builder:local
+enos scenario launch smoke artifact_source:local

 # Check an individual scenario for validity. This is useful during scenario
 # authoring and debugging.
-enos scenario validate smoke builder:local
+enos scenario validate smoke artifact_source:local

 # If you've run the tests and desire to see the outputs, such as the URL or
 # credentials, you can run the output command to see them. Please note that
 # after "run" or destroy there will be no "outputs" as the infrastructure
 # will have been destroyed and state cleared.
-enos scenario output smoke builder:local
+enos scenario output smoke artifact_source:local

 # Explicitly destroy all existing infrastructure
-enos scenario destroy smoke builder:local
+enos scenario destroy smoke artifact_source:local
 ```
 Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs)
@@ -100,7 +98,7 @@ for further information regarding installation, execution or composing scenarios

 # Scenarios

 There are currently two scenarios: `smoke` and `upgrade`. Both begin by building Vault
-as specified by the selected `builder` variant (see Variants section below for more
+as specified by the selected `artifact_source` variant (see the Variants section below for more
 information).

 ## Smoke

@@ -114,33 +112,103 @@ depending on the backend and seal type.
 The [`upgrade` scenario](./enos-scenario-upgrade.hcl) creates a Vault cluster using
 the version specified in `vault_upgrade_initial_release`, with the backend specified
 by the `backend` variant (`raft` or `consul`). Next, it upgrades the Vault binary
-that is determined by the `builder` variant. After the upgrade, it verifies that
+that is determined by the `artifact_source` variant. After the upgrade, it verifies that the
 cluster is at the desired version, along with additional verifications.

 ## Autopilot

 The [`autopilot` scenario](./enos-scenario-autopilot.hcl) creates a Vault cluster using
-the version specified in `vault_upgrade_initial_release`. Next, it creates additional
-nodes with the candiate version of Vault as determined by the `builder` variant.
+the version specified in `vault_upgrade_initial_release`. It writes test data to the
+Vault cluster. Next, it creates additional nodes with the candidate version of Vault,
+as determined by the `vault_product_version` variable.
 The module uses AWS auto-join to handle discovery and unseals with auto-unseal
 or Shamir depending on the `seal` variant. After the new nodes have joined and been
-unsealed, it waits for Autopilot to upgrade the new nodes and demote the old nodes.
+unsealed, it verifies that the stored data can be read on the new nodes. Autopilot
+upgrade verification checks that the upgrade status is "await-server-removal" and that
+the target version is set to the version of the upgraded nodes. This test also verifies
+the undo_logs status for Vault versions 1.13.x and later.
+
+## Replication
+
+The [`replication` scenario](./enos-scenario-replication.hcl) creates two 3-node Vault
+clusters and runs the following verification steps:
+
+ 1. Writes data on the primary cluster
+ 1. Enables performance replication
+ 1. Verifies reading the stored data from the secondary cluster
+ 1. Verifies the initial replication status between both clusters
+ 1. Replaces the leader node and one standby node on the primary Vault cluster
+ 1. Verifies the updated replication status between both clusters
+
+This scenario verifies that the performance replication status on both clusters reports
+connection_status as "connected", and that the secondary cluster's known_primaries
+addresses are updated to the IP addresses of the active nodes in the primary Vault
+cluster. This scenario currently works around issues VAULT-12311 and VAULT-12309. The
+scenario fails when the primary storage backend is Consul due to issue VAULT-12332.
+
+## UI Tests
+
+The [`ui` scenario](./enos-scenario-ui.hcl) creates a Vault cluster (deployed to AWS)
+using a version built from the current checkout of the project. Once the cluster is
+available, the UI acceptance tests are run in a headless browser.
+
+### Variables
+
+In addition to the required variables that must be set, as described in the
+[Scenario Variables](#scenario-variables) section, the `ui` scenario has two
+optional variables:
+
+**ui_test_filter** - An optional test filter to limit the tests that are run,
+e.g. `'!enterprise'`. To set a filter, export the variable as follows:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter"
+```
+**ui_run_tests** - An optional boolean variable that controls whether the tests
+are run. The default value is true.
+Setting this value to false is useful when you want to create a cluster but run the
+tests manually. The section [Running the Tests](#running-the-tests) describes the
+different ways to run the UI acceptance tests.
+
+### Running the Tests
+
+The UI tests can be run either fully automated or manually.
+
+#### Fully Automated
+
+The following will deploy the cluster, run the tests, and subsequently tear down the cluster:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter" # <-- optional
+> cd enos
+> enos scenario ui run edition:oss
+```
+
+#### Manually
+
+The UI tests can be run manually as follows:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter" # <-- optional
+> export ENOS_VAR_ui_run_tests=false
+> cd enos
+> enos scenario ui launch edition:oss
+# Once complete, the scenario will output a set of environment variables that must be
+# exported. The output will look as follows (the values shown are placeholders):
+export TEST_FILTER='some filter' \
+export VAULT_ADDR='http://<ip>:8200' \
+export VAULT_TOKEN='<token>' \
+export VAULT_UNSEAL_KEYS='["<key-1>","<key-2>","<key-3>"]'
+# copy and paste the above into the terminal to export the values
+> cd ../ui
+> yarn test:enos # run headless
+# or
+> yarn test:enos -s # run manually in a web browser
+# once testing is complete
+> cd ../enos
+> enos scenario ui destroy edition:oss
+```

 # Variants

 Both scenarios support a matrix of variants. In order to achieve broad coverage while
 keeping test run time reasonable, the variants executed by the `enos-run` Github
 Actions are tailored to maximize variant distribution per scenario.

-## `builder:crt`
+## `artifact_source:crt`

 This variant is designed for use in Github Actions. The `enos-run.yml` workflow
 downloads the artifact built by the `build.yml` workflow, unzips it, and sets the
 `vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary.

-## `builder:local`
+## `artifact_source:local`

 This variant is for running the Enos scenario locally. It builds the Vault bundle
 from the current branch, placing the bundle at the `vault_bundle_path` and the
 unzipped Vault binary at the `vault_local_binary_path`.

+## `artifact_source:artifactory`
+
+This variant is for running the Enos scenario to test an artifact from Artifactory.
+It requires the following Enos variables to be set:
+* `artifactory_username`
+* `artifactory_token`
+* `aws_ssh_keypair_name`
+* `aws_ssh_private_key_path`
+* `vault_product_version`
+* `vault_revision`
+
 # CI Bootstrap

 In order to execute any of the scenarios in this repository, it is first necessary to
 bootstrap the CI AWS account with the required permissions, service quotas and
 supporting AWS resources. There are
@@ -165,7 +233,6 @@ and destroyed each time a scenario is run, the Terraform state will be managed b
 Here are the steps to configure the GitHub Actions service user:

 #### Pre-requisites
-- Access to the `hashicorp-qti` organization in Terraform Cloud.
 - Full access to the CI AWS account is required.

 **Notes:**
diff --git a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml
new file mode 100644
index 000000000000..c8e4204465fa
--- /dev/null
+++ b/enos/ci/aws-nuke.yml
@@ -0,0 +1,398 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +regions: +- eu-north-1 +- ap-south-1 +- eu-west-3 +- eu-west-2 +- eu-west-1 +- ap-northeast-3 +- ap-northeast-2 +- ap-northeast-1 +- sa-east-1 +- ca-central-1 +- ap-southeast-1 +- ap-southeast-2 +- eu-central-1 +- us-east-1 +- us-east-2 +- us-west-1 +- us-west-2 +- global + +account-blocklist: + - 1234567890 + +accounts: + # replaced in CI + ACCOUNT_NUM: + presets: + - default + - olderthan + - honeybee + - enos + +presets: + default: + # Ignores default VPC resources + filters: + EC2VPC: + - property: IsDefault + value: "true" + EC2RouteTable: + - property: DefaultVPC + value: "true" + EC2DHCPOption: + - property: DefaultVPC + value: "true" + EC2InternetGateway: + - property: DefaultVPC + value: "true" + EC2Subnet: + - property: DefaultVPC + value: "true" + EC2InternetGatewayAttachment: + - property: DefaultVPC + value: "true" + olderthan: + # Filters resources by age (when available) + # TIME_LIMIT replaced in CI + filters: + EC2Instance: + - property: LaunchTime + type: dateOlderThan + value: "TIME_LIMIT" + EC2NetworkACL: + EC2RouteTable: + EC2SecurityGroup: + EC2Subnet: + EC2Volume: + EC2VPC: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2TargetGroup: + EC2NetworkInterface: + EC2InternetGateway: + EC2InternetGatewayAttachment: + RDSInstance: + - property: InstanceCreateTime + type: dateOlderThan + value: "TIME_LIMIT" + + honeybee: + # Cloudsec + filters: + IAMRole: + - property: tag:hc-config-as-code + value: "honeybee" + IAMRolePolicy: + - property: tag:role:hc-config-as-code + value: "honeybee" + IAMRolePolicyAttachment: + - property: tag:role:hc-config-as-code + value: "honeybee" + + enos: + # Existing CI to be cleaned up later + filters: + LambdaFunction: + - property: Name + value: "enos_cleanup" + IAMRole: + - property: Name + type: glob + value: "github_actions-*" + - property: Name + value: "rds-monitoring-role" + IAMRolePolicy: + - property: role:RoleName + type: glob + value: "github_actions*" + - property: role:RoleName + type: glob + value: "rds-*" + IAMRolePolicyAttachment: + - "rds-monitoring-role -> AmazonRDSEnhancedMonitoringRole" + IAMUserPolicy: + - "github_actions-vault_ci -> AssumeServiceUserRole" + - "github_actions-vault_enterprise_ci -> AssumeServiceUserRole" + +resource-types: + # Run against everything, excluding these: + excludes: + # Avoid cloudsec things + - IAMUser + - IAMPolicy + - IAMUserAccessKey + - S3Object + - S3Bucket + - EC2KeyPair + - CloudWatchEventsTarget + - CloudWatchEventsRule + - CloudWatchLogsLogGroup + - ConfigServiceConfigurationRecorder + - ConfigServiceConfigRule + - ConfigServiceDeliveryChannel + - CloudTrailTrail + - RDSSnapshot + - RDSClusterSnapshot + - WAFWebACL + - WAFv2WebACL + - WAFRegionalWebACL + - GuardDutyDetector + + # Unused services, filtering these speeds up runs and + # removes errors about things we don't have enabled + - ACMCertificate + - ACMPCACertificateAuthority + - ACMPCACertificateAuthorityState + - AMGWorkspace + - AMPWorkspace + - APIGatewayAPIKey + - APIGatewayClientCertificate + - APIGatewayDomainName + - APIGatewayRestAPI + - APIGatewayUsagePlan + - APIGatewayV2API + - APIGatewayV2VpcLink + - APIGatewayVpcLink + - AWS::AppFlow::ConnectorProfile + - AWS::AppFlow::Flow + - AWS::AppRunner::Service + - AWS::ApplicationInsights::Application + - AWS::Backup::Framework + - AWS::MWAA::Environment + - AWS::NetworkFirewall::Firewall + - 
AWS::NetworkFirewall::FirewallPolicy + - AWS::NetworkFirewall::RuleGroup + - AWS::Synthetics::Canary + - AWS::Timestream::Database + - AWS::Timestream::ScheduledQuery + - AWS::Timestream::Table + - AWS::Transfer::Workflow + - AWSBackupPlan + - AWSBackupRecoveryPoint + - AWSBackupSelection + - AWSBackupVault + - AWSBackupVaultAccessPolicy + - AccessAnalyzer + - AppMeshMesh + - AppMeshRoute + - AppMeshVirtualGateway + - AppMeshVirtualNode + - AppMeshVirtualRouter + - AppMeshVirtualService + - AppStreamDirectoryConfig + - AppStreamFleet + - AppStreamFleetState + - AppStreamImage + - AppStreamImageBuilder + - AppStreamImageBuilderWaiter + - AppStreamStack + - AppStreamStackFleetAttachment + - AppSyncGraphqlAPI + - ApplicationAutoScalingScalableTarget + - ArchiveRule + - AthenaNamedQuery + - AthenaWorkGroup + - BatchComputeEnvironment + - BatchComputeEnvironmentState + - BatchJobQueue + - BatchJobQueueState + - BillingCostandUsageReport + - Budget + - Cloud9Environment + - CloudDirectoryDirectory + - CloudDirectorySchema + - CodeArtifactDomain + - CodeArtifactRepository + - CodeBuildProject + - CodeCommitRepository + - CodeDeployApplication + - CodePipelinePipeline + - CodeStarConnection + - CodeStarNotificationRule + - CodeStarProject + - CognitoIdentityPool + - CognitoIdentityProvider + - CognitoUserPool + - CognitoUserPoolClient + - CognitoUserPoolDomain + - ComprehendDocumentClassifier + - ComprehendDominantLanguageDetectionJob + - ComprehendEndpoint + - ComprehendEntitiesDetectionJob + - ComprehendEntityRecognizer + - ComprehendKeyPhrasesDetectionJob + - ComprehendSentimentDetectionJob + - ConfigServiceConfigRule + - ConfigServiceConfigurationRecorder + - ConfigServiceDeliveryChannel + - DAXCluster + - DAXParameterGroup + - DAXSubnetGroup + - DataPipelinePipeline + - DatabaseMigrationServiceCertificate + - DatabaseMigrationServiceEndpoint + - DatabaseMigrationServiceEventSubscription + - DatabaseMigrationServiceReplicationInstance + - DatabaseMigrationServiceReplicationTask + - DatabaseMigrationServiceSubnetGroup + - DeviceFarmProject + - DirectoryServiceDirectory + - EC2ClientVpnEndpointAttachment + - EC2ClientVpnEndpoint + - EC2DefaultSecurityGroupRule + - FMSNotificationChannel + - FMSPolicy + - FSxBackup + - FSxFileSystem + - FirehoseDeliveryStream + - GlobalAccelerator + - GlobalAcceleratorEndpointGroup + - GlobalAcceleratorListener + - GlueClassifier + - GlueConnection + - GlueCrawler + - GlueDatabase + - GlueDevEndpoint + - GlueJob + - GlueTrigger + - Inspector2 + - InspectorAssessmentRun + - InspectorAssessmentTarget + - InspectorAssessmentTemplate + - IoTAuthorizer + - IoTCACertificate + - IoTCertificate + - IoTJob + - IoTOTAUpdate + - IoTPolicy + - IoTRoleAlias + - IoTStream + - IoTThing + - IoTThingGroup + - IoTThingType + - IoTThingTypeState + - IoTTopicRule + - KendraIndex + - KinesisAnalyticsApplication + - KinesisStream + - KinesisVideoProject + - LexBot + - LexIntent + - LexModelBuildingServiceBotAlias + - LexSlotType + - LifecycleHook + - LightsailDisk + - LightsailDomain + - LightsailInstance + - LightsailKeyPair + - LightsailLoadBalancer + - LightsailStaticIP + - MQBroker + - MSKCluster + - MSKConfiguration + - MachineLearningBranchPrediction + - MachineLearningDataSource + - MachineLearningEvaluation + - MachineLearningMLModel + - Macie + - MediaConvertJobTemplate + - MediaConvertPreset + - MediaConvertQueue + - MediaLiveChannel + - MediaLiveInput + - MediaLiveInputSecurityGroup + - MediaPackageChannel + - MediaPackageOriginEndpoint + - MediaStoreContainer + - 
MediaStoreDataItems + - MediaTailorConfiguration + - MobileProject + - NeptuneCluster + - NeptuneInstance + - NetpuneSnapshot + - OpsWorksApp + - OpsWorksCMBackup + - OpsWorksCMServer + - OpsWorksCMServerState + - OpsWorksInstance + - OpsWorksLayer + - OpsWorksUserProfile + - QLDBLedger + - RoboMakerRobotApplication + - RoboMakerSimulationApplication + - RoboMakerSimulationJob + - SESConfigurationSet + - SESIdentity + - SESReceiptFilter + - SESReceiptRuleSet + - SESTemplate + - SSMActivation + - SSMAssociation + - SSMDocument + - SSMMaintenanceWindow + - SSMParameter + - SSMPatchBaseline + - SSMResourceDataSync + - SageMakerApp + - SageMakerDomain + - SageMakerEndpoint + - SageMakerEndpointConfig + - SageMakerModel + - SageMakerNotebookInstance + - SageMakerNotebookInstanceLifecycleConfig + - SageMakerNotebookInstanceState + - SageMakerUserProfiles + - ServiceCatalogConstraintPortfolioAttachment + - ServiceCatalogPortfolio + - ServiceCatalogPortfolioProductAttachment + - ServiceCatalogPortfolioShareAttachment + - ServiceCatalogPrincipalPortfolioAttachment + - ServiceCatalogProduct + - ServiceCatalogProvisionedProduct + - ServiceCatalogTagOption + - ServiceCatalogTagOptionPortfolioAttachment + - ServiceDiscoveryInstance + - ServiceDiscoveryNamespace + - ServiceDiscoveryService + - SimpleDBDomain + - StorageGatewayFileShare + - StorageGatewayGateway + - StorageGatewayTape + - StorageGatewayVolume + - TransferServer + - TransferServerUser + - WAFRegionalByteMatchSet + - WAFRegionalByteMatchSetIP + - WAFRegionalIPSet + - WAFRegionalIPSetIP + - WAFRegionalRateBasedRule + - WAFRegionalRateBasedRulePredicate + - WAFRegionalRegexMatchSet + - WAFRegionalRegexMatchTuple + - WAFRegionalRegexPatternSet + - WAFRegionalRegexPatternString + - WAFRegionalRule + - WAFRegionalRuleGroup + - WAFRegionalRulePredicate + - WAFRegionalWebACL + - WAFRegionalWebACLRuleAttachment + - WAFRule + - WAFWebACL + - WAFWebACLRuleAttachment + - WAFv2IPSet + - WAFv2RegexPatternSet + - WAFv2RuleGroup + - WAFv2WebACL + - WorkLinkFleet + - WorkSpacesWorkspace + - XRayGroup + - XRaySamplingRule diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf index 804f1e66bfa4..db89663153e0 100644 --- a/enos/ci/bootstrap/main.tf +++ b/enos/ci/bootstrap/main.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { aws = { diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf index 858318e4cd5c..a83ef9eb080e 100644 --- a/enos/ci/bootstrap/outputs.tf +++ b/enos/ci/bootstrap/outputs.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + output "keys" { value = { "us-east-1" = { diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf index 3aab3449f368..7e80d5ccc919 100644 --- a/enos/ci/bootstrap/variables.tf +++ b/enos/ci/bootstrap/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "aws_ssh_public_key" { description = "The public key to use for the ssh key" type = string diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf index 1c930962e80b..0df9bcafca78 100644 --- a/enos/ci/service-user-iam/main.tf +++ b/enos/ci/service-user-iam/main.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { aws = { @@ -28,6 +31,7 @@ resource "aws_iam_role" "role" { data "aws_iam_policy_document" "assume_role_policy_document" { provider = aws.us_east_1 + statement { effect = "Allow" actions = ["sts:AssumeRole"] @@ -43,103 +47,179 @@ resource "aws_iam_role_policy" "role_policy" { provider = aws.us_east_1 role = aws_iam_role.role.name name = "${local.service_user}_policy" - policy = data.aws_iam_policy_document.iam_policy_document.json + policy = data.aws_iam_policy_document.role_policy.json +} + +data "aws_iam_policy_document" "role_policy" { + source_policy_documents = [ + data.aws_iam_policy_document.enos_scenario.json, + data.aws_iam_policy_document.aws_nuke.json, + ] } -data "aws_iam_policy_document" "iam_policy_document" { +data "aws_iam_policy_document" "aws_nuke" { provider = aws.us_east_1 + statement { effect = "Allow" actions = [ - "iam:ListRoles", - "iam:CreateRole", - "iam:GetRole", - "iam:DeleteRole", - "iam:ListInstanceProfiles", - "iam:ListInstanceProfilesForRole", - "iam:CreateInstanceProfile", - "iam:GetInstanceProfile", - "iam:DeleteInstanceProfile", - "iam:ListPolicies", - "iam:CreatePolicy", - "iam:DeletePolicy", - "iam:ListRoles", - "iam:CreateRole", - "iam:AddRoleToInstanceProfile", - "iam:PassRole", - "iam:RemoveRoleFromInstanceProfile", - "iam:DeleteRole", - "iam:ListRolePolicies", - "iam:ListAttachedRolePolicies", - "iam:AttachRolePolicy", - "iam:GetRolePolicy", - "iam:PutRolePolicy", - "iam:DetachRolePolicy", - "iam:DeleteRolePolicy", - "ec2:DescribeAccountAttributes", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceCreditSpecifications", - "ec2:DescribeImages", - "ec2:DescribeTags", - "ec2:DescribeVpcClassicLink", - "ec2:DescribeVpcClassicLinkDnsSupport", - "ec2:DescribeNetworkInterfaces", - "ec2:DescribeAvailabilityZones", - "ec2:DescribeSecurityGroups", - "ec2:CreateSecurityGroup", - "ec2:AuthorizeSecurityGroupIngress", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeRegions", + "ec2:DescribeVpnGateways", + "iam:DeleteAccessKey", + "iam:DeleteUser", + "iam:DeleteUserPolicy", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListAccountAliases", + "iam:ListGroupsForUser", + "iam:ListUserPolicies", + "iam:ListUserTags", + "iam:ListUsers", + "iam:UntagUser", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } +} + +data "aws_iam_policy_document" "enos_scenario" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", "ec2:AuthorizeSecurityGroupEgress", - "ec2:DeleteSecurityGroup", - "ec2:RevokeSecurityGroupIngress", - "ec2:RevokeSecurityGroupEgress", - "ec2:DescribeInstances", - "ec2:DescribeInstanceAttribute", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateInternetGateway", + "ec2:CreateKeyPair", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSpotDatafeedSubscription", + "ec2:CreateSubnet", "ec2:CreateTags", - "ec2:RunInstances", - "ec2:ModifyInstanceAttribute", - "ec2:TerminateInstances", - "ec2:ResetInstanceAttribute", - "ec2:DeleteTags", - "ec2:DescribeVolumes", "ec2:CreateVolume", - "ec2:DeleteVolume", - "ec2:DescribeVpcs", - "ec2:DescribeVpcAttribute", "ec2:CreateVPC", - "ec2:ModifyVPCAttribute", - "ec2:DeleteVPC", - "ec2:DescribeSubnets", - "ec2:CreateSubnet", - 
"ec2:ModifySubnetAttribute", + "ec2:DeleteFleets", + "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:DeleteKeyPair", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSpotDatafeedSubscription", "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVPC", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeFleets", + "ec2:DescribeFleetHistory", + "ec2:DescribeFleetInstances", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", "ec2:DescribeInternetGateways", - "ec2:CreateInternetGateway", - "ec2:AttachInternetGateway", - "ec2:DetachInternetGateway", - "ec2:DeleteInternetGateway", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRegions", "ec2:DescribeRouteTables", - "ec2:CreateRoute", - "ec2:CreateRouteTable", - "ec2:AssociateRouteTable", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotDatafeedSubscription", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetInstanceRequests", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcs", + "ec2:DescribeVpnGateways", + "ec2:DetachInternetGateway", "ec2:DisassociateRouteTable", - "ec2:DeleteRouteTable", - "ec2:CreateKeyPair", + "ec2:GetLaunchTemplateData", + "ec2:GetSpotPlacementScores", "ec2:ImportKeyPair", - "ec2:DeleteKeyPair", - "ec2:DescribeKeyPairs", - "kms:ListKeys", - "kms:ListResourceTags", - "kms:GetKeyPolicy", - "kms:GetKeyRotationStatus", - "kms:DescribeKey", + "ec2:ModifyFleet", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyLaunchTemplate", + "ec2:ModifySpotFleetRequest", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVPCAttribute", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", + "ec2:ResetInstanceAttribute", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:SendSpotInstanceInterruptions", + "ec2:TerminateInstances", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreatePolicy", + "iam:CreateRole", + "iam:CreateServiceLinkedRole", + "iam:DeleteInstanceProfile", + "iam:DeletePolicy", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAccountAliases", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicies", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:RemoveRoleFromInstanceProfile", + "kms:CreateAlias", "kms:CreateKey", - "kms:Encrypt", "kms:Decrypt", - "kms:ScheduleKeyDeletion", - "kms:ListAliases", - "kms:CreateAlias", "kms:DeleteAlias", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GetKeyPolicy", + "kms:GetKeyRotationStatus", + "kms:ListAliases", + "kms:ListKeys", + 
"kms:ListResourceTags", + "kms:ScheduleKeyDeletion", + "kms:TagResource", + "servicequotas:ListServiceQuotas" ] + resources = ["*"] } } diff --git a/enos/ci/service-user-iam/outputs.tf b/enos/ci/service-user-iam/outputs.tf index d4ba89910df9..348696b4d723 100644 --- a/enos/ci/service-user-iam/outputs.tf +++ b/enos/ci/service-user-iam/outputs.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + output "ci_role" { value = { name = aws_iam_role.role.name diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf index 09c86d7bae4e..cf2d21e20296 100644 --- a/enos/ci/service-user-iam/providers.tf +++ b/enos/ci/service-user-iam/providers.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + provider "aws" { region = "us-east-1" alias = "us_east_1" diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf index 73a68363d84d..676bbb0a3a53 100644 --- a/enos/ci/service-user-iam/service-quotas.tf +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -1,33 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + locals { // This is the code of the service quota to request a change for. Each adjustable limit has a // unique code. See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code - subnets_per_vps_quota = "L-F678F1CE" + subnets_per_vpcs_quota = "L-F678F1CE" + standard_spot_instance_requests_quota = "L-34B43A08" } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { - provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + provider = aws.us_east_1 + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { provider = aws.us_west_1 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { provider = aws.us_west_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" - value = 50 + value = 100 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 } diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf index 6cc7efd6bd9b..b69c07b81fe9 100644 --- a/enos/ci/service-user-iam/variables.tf +++ 
b/enos/ci/service-user-iam/variables.tf
@@ -1,3 +1,6 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
 variable "repository" {
   description = "The GitHub repository, either vault or vault-enterprise"
   type        = string
diff --git a/enos/enos-dev-scenario-pr-replication.hcl b/enos/enos-dev-scenario-pr-replication.hcl
new file mode 100644
index 000000000000..54aaa6e6bae9
--- /dev/null
+++ b/enos/enos-dev-scenario-pr-replication.hcl
@@ -0,0 +1,911 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_pr_replication" {
+  description = <<-EOF
+    This scenario spins up two Vault clusters with either an external Consul cluster or
+    integrated Raft for storage. The secondary cluster is configured with performance replication
+    from the primary cluster. None of our test verification is included in this scenario in order
+    to improve end-to-end speed. If you wish to perform such verification you'll need to use a
+    non-dev scenario.
+
+    The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+    artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+    build and deploy the current branch!
+
+    In order to execute this scenario you'll need to install the enos CLI:
+      brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+    You'll also need access to an AWS account with an SSH keypair.
+    Perform the steps here to get AWS access with Doormat https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+    Perform the steps here to get an AWS keypair set up: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+    properly. While not all variants will require all variables, it's suggested that you look over
+    the scenario outline to determine which variables affect which steps and which have inputs that
+    you should set. You can use the following command to get a textual outline of the entire
+    scenario:
+      enos scenario outline dev_pr_replication
+
+    You can also create an HTML version that is suitable for viewing in web browsers:
+      enos scenario outline dev_pr_replication --format html > index.html
+      open index.html
+
+    To configure the required variables you have a couple of choices. You can create an
+    'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you
+    declare your desired variable values. For example, you could copy the following content and
+    then set the values as necessary:
+
+      artifactory_username = "username@hashicorp.com"
+      artifactory_token = "
+      aws_region = "us-west-2"
+      aws_ssh_keypair_name = ""
+      aws_ssh_keypair_key_path = "/path/to/your/private/key.pem"
+      dev_build_local_ui = false
+      dev_consul_version = "1.18.1"
+      vault_license_path = "./support/vault.hclic"
+      vault_product_version = "1.16.2"
+
+    Alternatively, you can set them in your environment:
+      export ENOS_VAR_aws_region="us-west-2"
+      export ENOS_VAR_vault_license_path="./support/vault.hclic"
+
+    After you've configured your inputs you can list and filter the available scenarios and then
+    subsequently launch and destroy them.
+      enos scenario list --help
+      enos scenario launch --help
+      enos scenario list dev_pr_replication
+      enos scenario launch dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+
+    When the scenario is finished launching you can refer to the scenario outputs to see information
+    related to your cluster. You can use this information to SSH into nodes and/or to interact
+    with vault.
+      enos scenario output dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+      ssh -i /path/to/your/private/key.pem
+      vault status
+
+    After you've finished you can tear down the cluster:
+      enos scenario destroy dev_pr_replication arch:amd64 artifact:deb distro:ubuntu edition:ent.hsm primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:pkcs11
+  EOF
+
+  // The matrix is where we define all the baseline combinations that enos can utilize to customize
+  // your scenario. By default enos attempts to perform your command on the entire product! Most
+  // of the time you'll want to reduce that by passing in a filter.
+  // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos.
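+  // For instance, an illustrative filter that narrows this scenario to a single combination:
+  //   enos scenario launch dev_pr_replication arch:amd64 artifact:local distro:ubuntu \
+  //     edition:ent primary_backend:raft primary_seal:awskms secondary_backend:raft \
+  //     secondary_seal:awskms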
+  matrix {
+    arch              = ["amd64", "arm64"]
+    artifact          = ["local", "deb", "rpm", "zip"]
+    distro            = ["ubuntu", "rhel"]
+    edition           = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+    primary_backend   = ["consul", "raft"]
+    primary_seal      = ["awskms", "pkcs11", "shamir"]
+    secondary_backend = ["consul", "raft"]
+    secondary_seal    = ["awskms", "pkcs11", "shamir"]
+
+    exclude {
+      edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"]
+      arch    = ["arm64"]
+    }
+
+    exclude {
+      artifact = ["rpm"]
+      distro   = ["ubuntu"]
+    }
+
+    exclude {
+      artifact = ["deb"]
+      distro   = ["rhel"]
+    }
+
+    exclude {
+      primary_seal = ["pkcs11"]
+      edition      = ["ce", "ent", "ent.fips1402"]
+    }
+
+    exclude {
+      secondary_seal = ["pkcs11"]
+      edition        = ["ce", "ent", "ent.fips1402"]
+    }
+  }
+
+  // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll
+  // never need to change this! If you wanted to test with different terraform or terraform CLI
+  // settings you can define them and assign them here.
+  terraform_cli = terraform_cli.default
+  terraform     = terraform.default
+
+  // Here we declare all of the providers that we might need for our scenario.
+  providers = [
+    provider.aws.default,
+    provider.enos.ubuntu,
+    provider.enos.rhel
+  ]
+
+  // These are variable values that are local to our scenario. They are evaluated after external
+  // variables and scenario matrices but before any of our steps.
+  locals {
+    // The enos provider uses different ssh transport configs for different distros (as
+    // specified in enos-providers.hcl), and we need to be able to access both of those here.
+    enos_provider = {
+      rhel   = provider.enos.rhel
+      ubuntu = provider.enos.ubuntu
+    }
+    // We install vault packages from artifactory. If you wish to use one of these variants you'll
+    // need to configure your artifactory credentials.
+    use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
+    // Zip bundles and local builds don't come with systemd units or any associated configuration.
+    // When this is true we'll let enos handle this for us.
+    manage_service = matrix.artifact == "zip" || matrix.artifact == "local"
+    // If you are using an ent edition, you will need a Vault license.
+    // Common convention is to store it at ./support/vault.hclic, but you may change this path
+    // according to your own preference.
+    vault_install_dir = matrix.artifact == "zip" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+  }
+
+  // Begin scenario steps. These are the steps we'll perform to get your cluster up and running.
+  step "build_or_find_vault_artifact" {
+    description = <<-EOF
+      Depending on how we intend to get our Vault artifact, this step either builds vault from our
+      current branch or finds debian or redhat packages in Artifactory. If we're using a zip bundle
+      we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you
+      wish to use a deb or rpm artifact you'll have to configure your artifactory credentials!
+
+      Variables that are used in this step:
+
+      artifactory_host:
+        The artifactory host to search. It's very unlikely that you'll want to change this. The
+        default value is the HashiCorp Artifactory instance.
+      artifactory_repo:
+        The artifactory repository to search. It's very unlikely that you'll want to change this.
+        The default value is where CRT will publish packages.
+      artifactory_username:
+        The artifactory username associated with your token. You'll need this if you wish to use
+        deb or rpm artifacts! You can request access via Okta.
+      artifactory_token:
+        The artifactory token associated with your username. You'll need this if you wish to use
+        deb or rpm artifacts! You can create a token by logging into Artifactory via Okta.
+      vault_product_version:
+        When using the artifact:rpm or artifact:deb variants we'll use this variable to determine
+        which version of the Vault package we should fetch from Artifactory.
+      vault_artifact_path:
+        When using the artifact:local variant we'll utilize this variable to determine where
+        to create the vault.zip archive from the local branch. Default: /tmp/vault.zip.
+      vault_local_build_tags:
+        When using the artifact:local variant we'll use this variable to inject custom build
+        tags. If left unset we'll automatically use the build tags that correspond to the edition
+        variant.
+    EOF
+    module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ? "build_artifactory_package" : "build_crt"
+
+    variables {
+      // Used for all modules
+      arch            = matrix.arch
+      edition         = matrix.edition
+      product_version = var.vault_product_version
+      // Required for the local build which will always result in using a local zip bundle
+      artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      build_ui      = var.dev_build_local_ui
+      build_tags    = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+      goarch        = matrix.arch
+      goos          = "linux"
+      // Required when using a RPM or Deb package
+      // Some of these variables don't have default values so we'll only set them if they are
+      // required.
+      artifactory_host     = local.use_artifactory ? var.artifactory_host : null
+      artifactory_repo     = local.use_artifactory ? var.artifactory_repo : null
+      artifactory_username = local.use_artifactory ? var.artifactory_username : null
+      artifactory_token    = local.use_artifactory ? var.artifactory_token : null
+      distro               = matrix.distro
+    }
+  }
+
+  step "ec2_info" {
+    description = "This discovers useful metadata in EC2, like the AWS AMI IDs that we use in later modules."
+    module      = module.ec2_info
+  }
+
+  step "create_vpc" {
+    description = <<-EOF
+      Create the VPC resources required for our scenario.
+ + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + EOF + module = module.create_vpc + depends_on = [step.ec2_info] + + variables { + common_tags = global.tags + } + } + + step "read_backend_license" { + description = <<-EOF + Read the contents of the backend license if we're using a Consul backend for either cluster + and the backend_edition variable is set to "ent". + + Variables that are used in this step: + backend_edition: + The edition of Consul to use. If left unset it will default to CE. + backend_license_path: + If this variable is set we'll use it to determine the local path on disk that contains a + Consul Enterprise license. If it is not set we'll attempt to load it from + ./support/consul.hclic. + EOF + skip_step = (var.backend_edition == "ce" || var.backend_edition == "oss") || (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = <<-EOF + Validates and reads into memory the contents of a local Vault Enterprise license if we're + using an Enterprise edition. This step does not run when using a community edition of Vault. + + Variables that are used in this step: + vault_license_path: + If this variable is set we'll use it to determine the local path on disk that contains a + Vault Enterprise license. If it is not set we'll attempt to load it from + ./support/vault.hclic. + EOF + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_primary_seal_key" { + description = <<-EOF + Create the necessary seal keys depending on our configured seal. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + EOF + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + description = <<-EOF + Create the necessary seal keys depending on our configured seal. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + EOF + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + step "create_primary_cluster_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure + that the firewall is configured to allow the necessary Vault and Consul traffic and SSH + from the machine executing the Enos scenario. + + Variables that are used in this step: + aws_ssh_keypair_name: + The AWS SSH Keypair name to use for target machines. + project_name: + The project name is used for additional tag metadata on resources. 
+ tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + vault_instance_count: + How many instances to provision for the Vault cluster. If left unset it will use a default + of three. + EOF + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_backend_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the backend Consul storage cluster. + We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH + from the machine executing the Enos scenario. When using integrated storage this step is a + no-op that does nothing. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + project_name: + The project name is used for additional tag metadata on resources. + aws_ssh_keypair_name: + The AWS SSH Keypair name to use for target machines. + EOF + module = matrix.primary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure + that the firewall is configured to allow the necessary Vault and Consul traffic and SSH + from the machine executing the Enos scenario. + EOF + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_backend_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the backend Consul storage cluster. + We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH + from the machine executing the Enos scenario. When using integrated storage this step is a + no-op that does nothing. + EOF + + module = matrix.secondary_backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_backend_cluster" { + description = <<-EOF + Install, configure, and start the backend Consul storage cluster for the primary Vault Cluster. + When we are using the raft storage variant this step is a no-op. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will + also need a valid license configured for the read_backend_license step. Default: ce. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + the version of Consul to use for the cluster. + EOF + module = "backend_${matrix.primary_backend}" + depends_on = [ + step.create_primary_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_primary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = matrix.primary_backend == "consul" ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = var.dev_consul_version + } + target_hosts = step.create_primary_cluster_backend_targets.hosts + } + } + + step "create_primary_cluster" { + description = <<-EOF + Install, configure, start, initialize and unseal the primary Vault cluster on the specified + target instances. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of the consul client to install on each node for Consul storage. Note that + if you set it to 'ent' you will also need a valid license configured for the + read_backend_license step. If left unset we'll use an unlicensed CE version. + dev_config_mode: + You can set this variable to instruct enos on how to primarily configure Vault when starting + the service. Options are 'file' and 'env' for configuration file or environment variables. + If left unset we'll use the default value. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of Consul to install. If left unset we'll utilize the default value. + vault_artifact_path: + When using the artifact:local variant this variable is utilized to specify where on + the local disk the vault.zip file we've built is located. It can be left unset to use + the default value. + vault_enable_audit_devices: + Whether or not to enable various audit devices after unsealing the Vault cluster. By default + we'll configure syslog, socket, and file auditing. + vault_product_version: + When using the artifact:zip variant this variable is utilized to specify the version of + Vault to download from releases.hashicorp.com. 
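+        For example, with the artifact:zip variant, setting vault_product_version = "1.16.2"
+        (an illustrative value, not a default) resolves to
+        release = { version = "1.16.2", edition = matrix.edition } in this step's variables.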
+    EOF
+    module = module.vault_cluster
+    depends_on = [
+      step.create_primary_backend_cluster,
+      step.create_primary_cluster_targets,
+      step.build_or_find_vault_artifact,
+    ]

+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+      // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com
+      // We only set one or the other, never both.
+      artifactory_release     = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+      backend_cluster_name    = step.create_primary_cluster_backend_targets.cluster_name
+      backend_cluster_tag_key = global.backend_tag_key
+      cluster_name            = step.create_primary_cluster_targets.cluster_name
+      config_mode             = var.dev_config_mode
+      consul_license          = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
+      consul_release = matrix.primary_backend == "consul" ? {
+        edition = var.backend_edition
+        version = var.dev_consul_version
+      } : null
+      enable_audit_devices = var.vault_enable_audit_devices
+      install_dir          = local.vault_install_dir
+      license              = step.read_vault_license.license
+      local_artifact_path  = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      manage_service       = local.manage_service
+      packages             = concat(global.packages, global.distro_packages[matrix.distro])
+      release              = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+      seal_attributes      = step.create_primary_seal_key.attributes
+      seal_type            = matrix.primary_seal
+      storage_backend      = matrix.primary_backend
+      target_hosts         = step.create_primary_cluster_targets.hosts
+    }
+  }
+
+  step "create_secondary_backend_cluster" {
+    description = <<-EOF
+      Install, configure, and start the backend Consul storage cluster for the secondary Vault
+      cluster. When we are using the raft storage variant this step is a no-op.
+
+      Variables that are used in this step:
+      backend_edition:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will
+        also need a valid license configured for the read_backend_license step. Default: ce.
+      dev_consul_version:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        the version of Consul to use for the cluster.
+    EOF
+    module = "backend_${matrix.secondary_backend}"
+    depends_on = [
+      step.create_secondary_cluster_backend_targets
+    ]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_name    = step.create_secondary_cluster_backend_targets.cluster_name
+      cluster_tag_key = global.backend_tag_key
+      license         = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null
+      release = {
+        edition = var.backend_edition
+        version = var.dev_consul_version
+      }
+      target_hosts = step.create_secondary_cluster_backend_targets.hosts
+    }
+  }
+
+  step "create_secondary_cluster" {
+    description = <<-EOF
+      Install, configure, start, initialize and unseal the secondary Vault cluster on the specified
+      target instances.
+
+      Variables that are used in this step:
+      backend_edition:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        which version of the consul client to install on each node for Consul storage. Note that
+        if you set it to 'ent' you will also need a valid license configured for the
+        read_backend_license step.
+        If left unset we'll use an unlicensed CE version.
+      dev_config_mode:
+        You can set this variable to instruct enos on how to primarily configure Vault when starting
+        the service. Options are 'file' and 'env' for configuration file or environment variables.
+        If left unset we'll use the default value.
+      dev_consul_version:
+        When configured with the backend:consul variant we'll utilize this variable to determine
+        which version of Consul to install. If left unset we'll utilize the default value.
+      vault_artifact_path:
+        When using the artifact:local variant this variable is utilized to specify where on
+        the local disk the vault.zip file we've built is located. It can be left unset to use
+        the default value.
+      vault_enable_audit_devices:
+        Whether or not to enable various audit devices after unsealing the Vault cluster. By default
+        we'll configure syslog, socket, and file auditing.
+      vault_product_version:
+        When using the artifact:zip variant this variable is utilized to specify the version of
+        Vault to download from releases.hashicorp.com.
+    EOF
+    module = module.vault_cluster
+    depends_on = [
+      step.create_secondary_backend_cluster,
+      step.create_secondary_cluster_targets
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory.
+      // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com
+      // We only set one or the other, never both.
+      artifactory_release     = local.use_artifactory ? step.build_or_find_vault_artifact.release : null
+      backend_cluster_name    = step.create_secondary_cluster_backend_targets.cluster_name
+      backend_cluster_tag_key = global.backend_tag_key
+      cluster_name            = step.create_secondary_cluster_targets.cluster_name
+      config_mode             = var.dev_config_mode
+      consul_license          = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null
+      consul_release = matrix.secondary_backend == "consul" ? {
+        edition = var.backend_edition
+        version = var.dev_consul_version
+      } : null
+      enable_audit_devices = var.vault_enable_audit_devices
+      install_dir          = local.vault_install_dir
+      license              = step.read_vault_license.license
+      local_artifact_path  = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
+      manage_service       = local.manage_service
+      packages             = concat(global.packages, global.distro_packages[matrix.distro])
+      release              = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null
+      seal_attributes      = step.create_secondary_seal_key.attributes
+      seal_type            = matrix.secondary_seal
+      storage_backend      = matrix.secondary_backend
+      target_hosts         = step.create_secondary_cluster_targets.hosts
+    }
+  }
+
+  step "verify_that_vault_primary_cluster_is_unsealed" {
+    description = <<-EOF
+      Wait for the primary cluster to unseal and reach a healthy state.
+    EOF
+    module = module.vault_verify_unsealed
+    depends_on = [
+      step.create_primary_cluster
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_instances   = step.create_primary_cluster_targets.hosts
+      vault_install_dir = local.vault_install_dir
+    }
+  }
+
+  step "verify_that_vault_secondary_cluster_is_unsealed" {
+    description = <<-EOF
+      Wait for the secondary cluster to unseal and reach a healthy state.
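+      This amounts to polling each node until it reports itself unsealed, e.g. until
+      'vault status -format=json' shows '"sealed": false' (an illustrative check, not
+      necessarily the exact command the module runs).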
+    EOF
+    module = module.vault_verify_unsealed
+    depends_on = [
+      step.create_secondary_cluster
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_instances   = step.create_secondary_cluster_targets.hosts
+      vault_install_dir = local.vault_install_dir
+    }
+  }
+
+  step "get_primary_cluster_ips" {
+    description = <<-EOF
+      Determine which node is the primary and which are followers and map their private IP address
+      to their public IP address. We'll use this information so that we can enable performance
+      replication on the leader.
+    EOF
+    module     = module.vault_get_cluster_ips
+    depends_on = [step.verify_that_vault_primary_cluster_is_unsealed]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_hosts       = step.create_primary_cluster_targets.hosts
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "get_secondary_cluster_ips" {
+    description = <<-EOF
+      Determine which node is the primary and which are followers and map their private IP address
+      to their public IP address. We'll use this information so that we can enable performance
+      replication on the leader.
+    EOF
+    module     = module.vault_get_cluster_ips
+    depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      vault_hosts       = step.create_secondary_cluster_targets.hosts
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_secondary_cluster.root_token
+    }
+  }
+
+  step "setup_userpass_for_replication_auth" {
+    description = <<-EOF
+      Enable the userpass auth method and create a new user.
+    EOF
+    module     = module.vault_verify_write_data
+    depends_on = [step.get_primary_cluster_ips]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      leader_public_ip  = step.get_primary_cluster_ips.leader_public_ip
+      leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
+      vault_instances   = step.create_primary_cluster_targets.hosts
+      vault_install_dir = local.vault_install_dir
+      vault_root_token  = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "configure_performance_replication_primary" {
+    description = <<-EOF
+      Create a superuser policy and write it for our new user. Activate performance replication on
+      the primary.
+    EOF
+    module = module.vault_setup_perf_primary
+    depends_on = [
+      step.get_primary_cluster_ips,
+      step.get_secondary_cluster_ips,
+      step.setup_userpass_for_replication_auth,
+    ]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      primary_leader_public_ip  = step.get_primary_cluster_ips.leader_public_ip
+      primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
+      vault_install_dir         = local.vault_install_dir
+      vault_root_token          = step.create_primary_cluster.root_token
+    }
+  }
+
+  step "generate_secondary_token" {
+    description = <<-EOF
+      Create a random token and write it to sys/replication/performance/primary/secondary-token on
+      the primary.
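+      This is roughly equivalent to running the following against the primary leader and
+      capturing the wrapping token from the response (an illustrative sketch, not necessarily
+      the module's exact invocation):
+        vault write sys/replication/performance/primary/secondary-token id=secondary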
+ EOF + module = module.generate_secondary_token + depends_on = [step.configure_performance_replication_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_secondary" { + description = <<-EOF + Enable performance replication on the secondary using the new shared token. + EOF + module = module.vault_setup_perf_secondary + depends_on = [step.generate_secondary_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + wrapping_token = step.generate_secondary_token.secondary_token + } + } + + step "unseal_secondary_followers" { + description = <<-EOF + After replication is enabled we need to unseal the followers on the secondary cluster. + Depending on how we're configured we'll pass the unseal keys according to this guide: + https://developer.hashicorp.com/vault/docs/enterprise/replication#seals + EOF + module = module.vault_unseal_nodes + depends_on = [ + step.create_primary_cluster, + step.create_secondary_cluster, + step.get_secondary_cluster_ips, + step.configure_performance_replication_secondary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex + vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal + } + } + + step "verify_secondary_cluster_is_unsealed_after_enabling_replication" { + description = <<-EOF + Verify that the secondary cluster is unsealed after we enable PR replication. + EOF + module = module.vault_verify_unsealed + depends_on = [ + step.unseal_secondary_followers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_performance_replication" { + description = <<-EOF + Check sys/replication/performance/status and ensure that all nodes are in the correct state + after enabling performance replication. + EOF + module = module.vault_verify_performance_replication + depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + // When using a Consul backend, these output values will be for the Consul backend. + // When using a Raft backend, these output values will be null. 
+  output "audit_device_file_path" {
+    description = "The file path for the file audit device, if enabled"
+    value       = step.create_primary_cluster.audit_device_file_path
+  }
+
+  output "primary_cluster_hosts" {
+    description = "The Vault primary cluster target hosts"
+    value       = step.create_primary_cluster_targets.hosts
+  }
+
+  output "primary_cluster_root_token" {
+    description = "The Vault primary cluster root token"
+    value       = step.create_primary_cluster.root_token
+  }
+
+  output "primary_cluster_unseal_keys_b64" {
+    description = "The Vault primary cluster unseal keys"
+    value       = step.create_primary_cluster.unseal_keys_b64
+  }
+
+  output "primary_cluster_unseal_keys_hex" {
+    description = "The Vault primary cluster unseal keys hex"
+    value       = step.create_primary_cluster.unseal_keys_hex
+  }
+
+  output "primary_cluster_recovery_key_shares" {
+    description = "The Vault primary cluster recovery key shares"
+    value       = step.create_primary_cluster.recovery_key_shares
+  }
+
+  output "primary_cluster_recovery_keys_b64" {
+    description = "The Vault primary cluster recovery keys b64"
+    value       = step.create_primary_cluster.recovery_keys_b64
+  }
+
+  output "primary_cluster_recovery_keys_hex" {
+    description = "The Vault primary cluster recovery keys hex"
+    value       = step.create_primary_cluster.recovery_keys_hex
+  }
+
+  output "secondary_cluster_hosts" {
+    description = "The Vault secondary cluster public IPs"
+    value       = step.create_secondary_cluster_targets.hosts
+  }
+
+  output "secondary_cluster_root_token" {
+    description = "The Vault secondary cluster root token"
+    value       = step.create_secondary_cluster.root_token
+  }
+
+  output "performance_secondary_token" {
+    description = "The performance secondary replication token"
+    value       = step.generate_secondary_token.secondary_token
+  }
+}
diff --git a/enos/enos-dev-scenario-single-cluster.hcl b/enos/enos-dev-scenario-single-cluster.hcl
new file mode 100644
index 000000000000..b3052584e51c
--- /dev/null
+++ b/enos/enos-dev-scenario-single-cluster.hcl
@@ -0,0 +1,510 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+scenario "dev_single_cluster" {
+  description = <<-EOF
+    This scenario spins up a single Vault cluster with either an external Consul cluster or
+    integrated Raft for storage. None of our test verification is included in this scenario in order
+    to improve end-to-end speed. If you wish to perform such verification you'll need to use a
+    non-dev scenario instead.
+
+    The scenario supports finding and installing any released 'linux/amd64' or 'linux/arm64' Vault
+    artifact as long as its version is >= 1.8. You can also use the 'artifact:local' variant to
+    build and deploy the current branch!
+
+    In order to execute this scenario you'll need to install the enos CLI:
+      brew tap hashicorp/tap && brew update && brew install hashicorp/tap/enos
+
+    You'll also need access to an AWS account with an SSH keypair.
+    Perform the steps here to get AWS access with Doormat https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#authenticate-with-doormat
+    Perform the steps here to get an AWS keypair set up: https://eng-handbook.hashicorp.services/internal-tools/enos/common-setup-steps/#set-your-aws-key-pair-name-and-private-key
+
+    Please note that this scenario requires several input variables to be set in order to function
+    properly.
While not all variants will require all variables, it's suggested that you look over + the scenario outline to determine which variables affect which steps and which have inputs that + you should set. You can use the following command to get a textual outline of the entire + scenario: + enos scenario outline dev_single_cluster + + You can also create an HTML version that is suitable for viewing in web browsers: + enos scenario outline dev_single_cluster --format html > index.html + open index.html + + To configure the required variables you have a couple of choices. You can create an + 'enos-local.vars' file in the same 'enos' directory where this scenario is defined. In it you + declare your desired variable values. For example, you could copy the following content and + then set the values as necessary: + + artifactory_username = "username@hashicorp.com" + artifactory_token = " + aws_region = "us-west-2" + aws_ssh_keypair_name = "" + aws_ssh_keypair_key_path = "/path/to/your/private/key.pem" + dev_build_local_ui = false + dev_consul_version = "1.18.1" + vault_license_path = "./support/vault.hclic" + vault_product_version = "1.16.2" + + Alternatively, you can set them in your environment: + export ENOS_VAR_aws_region="us-west-2" + export ENOS_VAR_vault_license_path="./support/vault.hclic" + + After you've configured your inputs you can list and filter the available scenarios and then + subsequently launch and destroy them: + enos scenario list --help + enos scenario launch --help + enos scenario list dev_single_cluster + enos scenario launch dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms + + When the scenario is finished launching you can refer to the scenario outputs to see information + related to your cluster. You can use this information to SSH into nodes and/or to interact + with Vault. + enos scenario output dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms + ssh -i /path/to/your/private/key.pem + vault status + + After you've finished you can tear down the cluster: + enos scenario destroy dev_single_cluster arch:arm64 artifact:local backend:raft distro:ubuntu edition:ce seal:awskms + EOF + + // The matrix is where we define all the baseline combinations that enos can utilize to customize + // your scenario. By default enos attempts to perform your command on the entire product! Most + // of the time you'll want to reduce that by passing in a filter. + // Run 'enos scenario list --help' to see more about how filtering scenarios works in enos. + matrix { + arch = ["amd64", "arm64"] + artifact = ["local", "deb", "rpm", "zip"] + backend = ["consul", "raft"] + distro = ["ubuntu", "rhel"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + seal = ["awskms", "pkcs11", "shamir"] + + exclude { + edition = ["ent.hsm", "ent.fips1402", "ent.hsm.fips1402"] + arch = ["arm64"] + } + + exclude { + artifact = ["rpm"] + distro = ["ubuntu"] + } + + exclude { + artifact = ["deb"] + distro = ["rhel"] + } + + exclude { + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + } + + // Specify which Terraform configs and providers to use in this scenario. Most of the time you'll + // never need to change this! If you wanted to test with different terraform or terraform CLI + // settings you can define them and assign them here. + terraform_cli = terraform_cli.default + terraform = terraform.default + + // Here we declare all of the providers that we might need for our scenario.
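+  // As a sketch of how these are consumed: a matrix value such as distro:rhel selects the + // matching provider alias through the 'enos_provider' local declared below, which each step + // then passes in its 'providers' block, e.g.: + //   enos = local.enos_provider[matrix.distro]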
+ providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + // These are variable values that are local to our scenario. They are evaluated after external + // variables and scenario matrices but before any of our steps. + locals { + // The enos provider uses different ssh transport configs for different distros (as + // specified in enos-providers.hcl), and we need to be able to access both of those here. + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + // We install vault packages from artifactory. If you wish to use one of these variants you'll + // need to configure your artifactory credentials. + use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm" + // Zip bundles and local builds don't come with systemd units or any associated configuration. + // When this is true we'll let enos handle this for us. + manage_service = matrix.artifact == "zip" || matrix.artifact == "local" + // If you are using an ent edition, you will need a Vault license. Common convention + // is to store it at ./support/vault.hclic, but you may change this path according + // to your own preference. + vault_install_dir = matrix.artifact == "zip" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + // Begin scenario steps. These are the steps we'll perform to get your cluster up and running. + step "build_or_find_vault_artifact" { + description = <<-EOF + Depending on how we intend to get our Vault artifact, this step either builds vault from our + current branch or finds debian or redhat packages in Artifactory. If we're using a zip bundle + we'll get it from releases.hashicorp.com and skip this step entirely. Please note that if you + wish to use a deb or rpm artifact you'll have to configure your artifactory credentials! + + Variables that are used in this step: + + artifactory_host: + The artifactory host to search. It's very unlikely that you'll want to change this. The + default value is the HashiCorp Artifactory instance. + artifactory_repo: + The artifactory repository to search. It's very unlikely that you'll want to change this. The + default value is where CRT will publish packages. + artifactory_username: + The artifactory username associated with your token. You'll need this if you wish to use + deb or rpm artifacts! You can request access via Okta. + artifactory_token: + The artifactory token associated with your username. You'll need this if you wish to use + deb or rpm artifacts! You can create a token by logging into Artifactory via Okta. + vault_product_version: + When using the artifact:rpm or artifact:deb variants we'll use this variable to determine + which version of the Vault package we should fetch from Artifactory. + vault_artifact_path: + When using the artifact:local variant we'll utilize this variable to determine where + to create the vault.zip archive from the local branch. Defaults to /tmp/vault.zip. + vault_local_build_tags: + When using the artifact:local variant we'll use this variable to inject custom build + tags. If left unset we'll automatically use the build tags that correspond to the edition + variant. + EOF + module = matrix.artifact == "local" ? "build_local" : local.use_artifactory ?
"build_artifactory_package" : "build_crt" + skip_step = matrix.artifact == "zip" + + variables { + // Used for all modules + arch = matrix.arch + edition = matrix.edition + product_version = var.vault_product_version + // Required for the local build which will always result in using a local zip bundle + artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + build_ui = var.dev_build_local_ui + goarch = matrix.arch + goos = "linux" + // Required when using a RPM or Deb package + // Some of these variables don't have default values so we'll only set them if they are + // required. + artifactory_host = local.use_artifactory ? var.artifactory_host : null + artifactory_repo = local.use_artifactory ? var.artifactory_repo : null + artifactory_username = local.use_artifactory ? var.artifactory_username : null + artifactory_token = local.use_artifactory ? var.artifactory_token : null + distro = matrix.distro + } + } + + step "ec2_info" { + description = "This discovers usefull metadata in Ec2 like AWS AMI ID's that we use in later modules." + module = module.ec2_info + } + + step "create_vpc" { + description = <<-EOF + Create the VPC resources required for our scenario. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + EOF + module = module.create_vpc + depends_on = [step.ec2_info] + + variables { + common_tags = global.tags + } + } + + step "read_backend_license" { + description = <<-EOF + Read the contents of the backend license if we're using a Consul backend and the edition is "ent". + + Variables that are used in this step: + backend_edition: + The edition of Consul to use. If left unset it will default to CE. + backend_license_path: + If this variable is set we'll use it to determine the local path on disk that contains a + Consul Enterprise license. If it is not set we'll attempt to load it from + ./support/consul.hclic. + EOF + skip_step = matrix.backend == "raft" || var.backend_edition == "oss" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + description = <<-EOF + Validates and reads into memory the contents of a local Vault Enterprise license if we're + using an Enterprise edition. This step does not run when using a community edition of Vault. + + Variables that are used in this step: + vault_license_path: + If this variable is set we'll use it to determine the local path on disk that contains a + Vault Enterprise license. If it is not set we'll attempt to load it from + ./support/vault.hclic. + EOF + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + description = <<-EOF + Create the necessary seal keys depending on our configured seal. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. 
+ EOF + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the Vault cluster. We also ensure + that the firewall is configured to allow the necessary Vault and Consul traffic and SSH + from the machine executing the Enos scenario. + + Variables that are used in this step: + aws_ssh_keypair_name: + The AWS SSH Keypair name to use for target machines. + project_name: + The project name is used for additional tag metadata on resources. + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + vault_instance_count: + How many instances to provision for the Vault cluster. If left unset it will use a default + of three. + EOF + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + instance_count = try(var.vault_instance_count, 3) + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + description = <<-EOF + Creates the necessary machine infrastructure targets for the backend Consul storage cluster. + We also ensure that the firewall is configured to allow the necessary Consul traffic and SSH + from the machine executing the Enos scenario. When using integrated storage this step is a + no-op that does nothing. + + Variables that are used in this step: + tags: + If you wish to add custom tags to taggable resources in AWS you can set the 'tags' variable + and they'll be added to resources when possible. + project_name: + The project name is used for additional tag metadata on resources. + aws_ssh_keypair_name: + The AWS SSH Keypair name to use for target machines. + EOF + + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + description = <<-EOF + Install, configure, and start the backend Consul storage cluster. When we are using the raft + storage variant this step is a no-op. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + the edition of Consul to use for the cluster. Note that if you set it to 'ent' you will + also need a valid license configured for the read_backend_license step. Default: ce. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + the version of Consul to use for the cluster. 
+ EOF + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = var.dev_consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + description = <<-EOF + Install, configure, start, initialize and unseal the Vault cluster on the specified target + instances. + + Variables that are used in this step: + backend_edition: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of the consul client to install on each node for Consul storage. Note that + if you set it to 'ent' you will also need a valid license configured for the + read_backend_license step. If left unset we'll use an unlicensed CE version. + dev_config_mode: + You can set this variable to instruct enos on how to primarily configure Vault when starting + the service. Options are 'file' and 'env' for configuration file or environment variables. + If left unset we'll use the default value. + dev_consul_version: + When configured with the backend:consul variant we'll utilize this variable to determine + which version of Consul to install. If left unset we'll utilize the default value. + vault_artifact_path: + When using the artifact:local variant this variable is utilized to specify where on + the local disk the vault.zip file we've built is located. It can be left unset to use + the default value. + vault_enable_audit_devices: + Whether or not to enable various audit devices after unsealing the Vault cluster. By default + we'll configure syslog, socket, and file auditing. + vault_product_version: + When using the artifact:zip variant this variable is utilized to specify the version of + Vault to download from releases.hashicorp.com. + EOF + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.create_vault_cluster_targets, + step.build_or_find_vault_artifact, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + // We set vault_artifactory_release when we want to get a .deb or .rpm package from Artifactory. + // We set vault_release when we want to get a .zip bundle from releases.hashicorp.com + // We only set one or the other, never both. + artifactory_release = local.use_artifactory ? step.build_or_find_vault_artifact.release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = var.dev_config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = var.dev_consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = matrix.artifact == "local" ? 
abspath(var.vault_artifact_path) : null + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + release = matrix.artifact == "zip" ? { version = var.vault_product_version, edition = matrix.edition } : null + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // When using a Consul backend, these output values will be for the Consul backend. + // When using a Raft backend, these output values will be null. + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-dev-variables.hcl b/enos/enos-dev-variables.hcl new file mode 100644 index 000000000000..1184748f049a --- /dev/null +++ b/enos/enos-dev-variables.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "dev_build_local_ui" { + type = bool + description = "Whether or not to build the web UI when using the local builder variant. If the assets have already been built we'll still include them." + default = false +} + +variable "dev_config_mode" { + type = string + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" // or "env" +} + +variable "dev_consul_version" { + type = string + description = "The version of Consul to use when using Consul for storage!" + default = "1.18.1" + // NOTE: You can also set the "backend_edition" if you want to use Consul Enterprise +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 000000000000..5ca6dd86f8f6 --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +globals { + archs = ["amd64", "arm64"] + artifact_sources = ["local", "crt", "artifactory"] + artifact_types = ["bundle", "package"] + backends = ["consul", "raft"] + backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) + backend_tag_key = "VaultStorage" + build_tags = { + "ce" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] + } + config_modes = ["env", "file"] + consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"] + distros = ["ubuntu", "rhel"] + distro_version = { + "rhel" = var.rhel_distro_version + "ubuntu" = var.ubuntu_distro_version + } + editions = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + packages = ["jq"] + distro_packages = { + ubuntu = ["netcat"] + rhel = ["nc"] + } + sample_attributes = { + aws_region = ["us-east-1", "us-west-2"] + } + seals = ["awskms", "pkcs11", "shamir"] + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + // NOTE: when backporting, make sure that our initial versions are less than that + // release branch's version. Also beware if adding versions below 1.11.x. Some scenarios + // that use this global might not work as expected with earlier versions. Below 1.8.x is + // not supported in any way. + upgrade_initial_versions = ["1.11.12", "1.12.11", "1.13.11", "1.14.7", "1.15.3"] + vault_install_dir_packages = { + rhel = "/bin" + ubuntu = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key +} diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 789fb7805e3c..396d54d90981 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -1,56 +1,155 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + module "autopilot_upgrade_storageconfig" { source = "./modules/autopilot_upgrade_storageconfig" } -module "az_finder" { - source = "./modules/az_finder" -} - module "backend_consul" { - source = "app.terraform.io/hashicorp-qti/aws-consul/enos" - - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ssh_aws_keypair = var.aws_ssh_keypair_name + source = "./modules/backend_consul" - # Set this to a real license vault if using an Enterprise edition of Consul - consul_license = var.backend_license_path == null ? "none" : file(abspath(var.backend_license_path)) + license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.backend_log_level } module "backend_raft" { source = "./modules/backend_raft" } +// Find any artifact in Artifactory. Requires the version, revision, and edition. +module "build_artifactory" { + source = "./modules/build_artifactory_artifact" +} + +// Find any released RPM or Deb in Artifactory. Requires the version, edition, distro, and distro +// version. +module "build_artifactory_package" { + source = "./modules/build_artifactory_package" +} + +// A shim "build module" suitable for use when using locally pre-built artifacts or a zip bundle +// from releases.hashicorp.com. 
When using a local pre-built artifact it requires the local +// artifact path. When using a release zip it does nothing as you'll need to configure the +// vault_cluster module with release info instead. module "build_crt" { source = "./modules/build_crt" } +// Build the local branch and package it into a zip artifact. Requires the goarch, goos, build tags, +// and bundle path. module "build_local" { source = "./modules/build_local" } -module "build_artifactory" { - source = "./modules/vault_artifactory_artifact" -} - module "create_vpc" { - source = "app.terraform.io/hashicorp-qti/aws-infra/enos" + source = "./modules/create_vpc" + + environment = "ci" + common_tags = var.tags +} - project_name = var.project_name - environment = "ci" - common_tags = var.tags - ami_architectures = ["amd64", "arm64"] +module "ec2_info" { + source = "./modules/ec2_info" } module "get_local_metadata" { source = "./modules/get_local_metadata" } +module "generate_secondary_token" { + source = "./modules/generate_secondary_token" + + vault_install_dir = var.vault_install_dir +} + +module "install_packages" { + source = "./modules/install_packages" +} + module "read_license" { source = "./modules/read_license" } +module "replication_data" { + source = "./modules/replication_data" +} + +module "seal_awskms" { + source = "./modules/seal_awskms" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_shamir" { + source = "./modules/seal_shamir" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "seal_pkcs11" { + source = "./modules/seal_pkcs11" + + cluster_ssh_keypair = var.aws_ssh_keypair_name + common_tags = var.tags +} + +module "shutdown_node" { + source = "./modules/shutdown_node" +} + +module "shutdown_multiple_nodes" { + source = "./modules/shutdown_multiple_nodes" +} + +module "start_vault" { + source = "./modules/start_vault" + + install_dir = var.vault_install_dir + log_level = var.vault_log_level +} + +module "stop_vault" { + source = "./modules/stop_vault" +} + +# create target instances using ec2:CreateFleet +module "target_ec2_fleet" { + source = "./modules/target_ec2_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# create target instances using ec2:RunInstances +module "target_ec2_instances" { + source = "./modules/target_ec2_instances" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# don't create instances but satisfy the module interface +module "target_ec2_shim" { + source = "./modules/target_ec2_shim" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# create target instances using ec2:RequestSpotFleet +module "target_ec2_spot_fleet" { + source = "./modules/target_ec2_spot_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + module "vault_agent" { source = "./modules/vault_agent" @@ -58,6 +157,12 @@ module "vault_agent" { vault_instance_count = var.vault_instance_count } +module "vault_proxy" { + source = "./modules/vault_proxy" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} module "vault_verify_agent_output" { source = "./modules/vault_verify_agent_output" @@ -66,17 +171,50 @@ module "vault_verify_agent_output" { } module "vault_cluster" { - source = "app.terraform.io/hashicorp-qti/aws-vault/enos" - # source = 
"../../terraform-enos-aws-vault" - - common_tags = var.tags - environment = "ci" - instance_count = var.vault_instance_count - project_name = var.project_name - ssh_aws_keypair = var.aws_ssh_keypair_name + source = "./modules/vault_cluster" + + install_dir = var.vault_install_dir + consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.vault_log_level +} + +module "vault_get_cluster_ips" { + source = "./modules/vault_get_cluster_ips" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_raft_remove_peer" { + source = "./modules/vault_raft_remove_peer" vault_install_dir = var.vault_install_dir } +module "vault_setup_perf_secondary" { + source = "./modules/vault_setup_perf_secondary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_step_down" { + source = "./modules/vault_step_down" + + vault_install_dir = var.vault_install_dir +} + +module "vault_test_ui" { + source = "./modules/vault_test_ui" + + ui_run_tests = var.ui_run_tests +} + +module "vault_unseal_nodes" { + source = "./modules/vault_unseal_nodes" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + module "vault_upgrade" { source = "./modules/vault_upgrade" @@ -106,17 +244,23 @@ module "vault_verify_undo_logs" { vault_instance_count = var.vault_instance_count } +module "vault_verify_default_lcq" { + source = "./modules/vault_verify_default_lcq" + + vault_autopilot_default_max_leases = "300000" + vault_instance_count = var.vault_instance_count +} + module "vault_verify_replication" { - source = "./modules/vault-verify-replication" + source = "./modules/vault_verify_replication" vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count } module "vault_verify_ui" { - source = "./modules/vault-verify-ui" + source = "./modules/vault_verify_ui" - vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count } @@ -127,6 +271,25 @@ module "vault_verify_unsealed" { vault_instance_count = var.vault_instance_count } +module "vault_setup_perf_primary" { + source = "./modules/vault_setup_perf_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_read_data" { + source = "./modules/vault_verify_read_data" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_performance_replication" { + source = "./modules/vault_verify_performance_replication" + + vault_install_dir = var.vault_install_dir +} + module "vault_verify_version" { source = "./modules/vault_verify_version" @@ -134,9 +297,27 @@ module "vault_verify_version" { vault_instance_count = var.vault_instance_count } -module "vault_verify_write_test_data" { - source = "./modules/vault-verify-write-data" +module "vault_verify_write_data" { + source = "./modules/vault_verify_write_data" vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count } + +module "vault_wait_for_leader" { + source = "./modules/vault_wait_for_leader" + + vault_install_dir = var.vault_install_dir +} + +module "vault_wait_for_seal_rewrap" { + source = "./modules/vault_wait_for_seal_rewrap" + + vault_install_dir = var.vault_install_dir +} + +module "verify_seal_type" { + source = "./modules/verify_seal_type" + + vault_install_dir = var.vault_install_dir +} diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl index 9301b55037d0..472589f4a1eb 
100644 --- a/enos/enos-providers.hcl +++ b/enos/enos-providers.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + provider "aws" "default" { region = var.aws_region } diff --git a/enos/enos-samples-ce-build.hcl b/enos/enos-samples-ce-build.hcl new file mode 100644 index 000000000000..2c3cae0f7750 --- /dev/null +++ b/enos/enos-samples-ce-build.hcl @@ -0,0 +1,264 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +sample "build_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-samples-ce-release.hcl b/enos/enos-samples-ce-release.hcl new file mode 100644 index 000000000000..4e3d9acdf254 --- /dev/null +++ b/enos/enos-samples-ce-release.hcl @@ -0,0 +1,258 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +sample "release_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + 
artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index aea7ba7376cf..f87f29785d18 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -1,9 +1,35 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + scenario "agent" { matrix { - arch = ["amd64", "arm64"] - artifact_source = ["local", "crt", "artifactory"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + seal = global.seals + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. 
+ exclude { + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } } terraform_cli = terraform_cli.default @@ -15,31 +41,13 @@ scenario "agent" { ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - install_artifactory_artifact = local.bundle_path == null - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "get_local_metadata" { @@ -51,56 +59,57 @@ scenario "agent" { module = "build_${matrix.artifact_source}" variables { - build_tags = try(var.vault_local_build_tags, local.build_tags[matrix.edition]) - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null - artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null - artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null - artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null - arch = matrix.artifact_source == "artifactory" ? matrix.arch : null - vault_product_version = var.vault_product_version - artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null - distro = matrix.artifact_source == "artifactory" ? matrix.distro : null - edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null - revision = var.vault_revision + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] + common_tags = global.tags } } - step "create_vpc" { - module = module.create_vpc + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.backend_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" + step "read_vault_license" { + skip_step = matrix.edition == "ce" module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } - step "create_backend_cluster" { - module = "backend_raft" + step "create_seal_key" { + module = "seal_${matrix.seal}" depends_on = [step.create_vpc] providers = { @@ -108,11 +117,64 @@ scenario "agent" { } variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts } } @@ -121,6 +183,7 @@ scenario "agent" { depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { @@ -128,27 +191,52 @@ scenario "agent" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - unseal_method = "shamir" - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = local.install_artifactory_artifact ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } step "start_vault_agent" { module = "vault_agent" depends_on = [ - step.create_backend_cluster, step.build_vault, step.create_vault_cluster, + step.wait_for_leader, ] providers = { @@ -156,8 +244,9 @@ scenario "agent" { } variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_template_contents = "{{ with secret \\\"auth/token/lookup-self\\\" }}orphan={{ .Data.orphan }} display_name={{ .Data.display_name }}{{ end }}" } @@ -168,6 +257,7 @@ scenario "agent" { depends_on = [ step.create_vault_cluster, step.start_vault_agent, + step.wait_for_leader, ] providers = { @@ -175,49 +265,204 @@ scenario "agent" { } variables { - vault_instances = 
step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_expected_output = "orphan=true display_name=approle" } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } } - output "vault_cluster_priv_ips" { + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] 
+ } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 26615f51b046..a28500fe7c15 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -1,12 +1,39 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + scenario "autopilot" { matrix { - arch = ["amd64", "arm64"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - distro = ["ubuntu", "rhel"] - edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - undo_logs_status = ["0", "1"] + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + config_mode = global.config_modes + distro = global.distros + edition = global.editions + initial_version = global.upgrade_initial_versions + seal = global.seals + + # Autopilot wasn't available before 1.11.x + exclude { + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } } terraform_cli = terraform_cli.default @@ -18,45 +45,22 @@ scenario "autopilot" { ] locals { - build_tags = { - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - - enable_undo_logs = matrix.undo_logs_status == "1" && semverconstraint(var.vault_product_version, ">=1.13.0-0") ? true : false - - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + vault_autopilot_default_max_leases = semverconstraint(matrix.initial_version, ">=1.16.0-0") ? "300000" : "" } step "build_vault" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -68,29 +72,19 @@ scenario "autopilot" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? 
matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder - - variables { - instance_type = [ - local.vault_instance_type - ] - } + step "ec2_info" { + module = module.ec2_info } step "create_vpc" { - module = module.create_vpc - depends_on = [step.find_azs] + module = module.create_vpc variables { - ami_architectures = [matrix.arch] - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + common_tags = global.tags } } @@ -98,37 +92,87 @@ scenario "autopilot" { module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_upgrade_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + common_tags = global.tags + cluster_name = step.create_vault_cluster_targets.cluster_name + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id } } - # This step creates a Vault cluster using a bundle downloaded from - # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release step "create_vault_cluster" { module = module.vault_cluster depends_on = [ - step.create_vpc, step.build_vault, + step.create_vault_cluster_targets ] + providers = { enos = local.enos_provider[matrix.distro] } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_license.license : null + packages = concat(global.packages, global.distro_packages[matrix.distro]) + release = { + edition = matrix.edition + version = matrix.initial_version + } + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = "raft" storage_backend_addl_config = { - autopilot_upgrade_version = var.vault_autopilot_initial_release.version + autopilot_upgrade_version = matrix.initial_version } - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_autopilot_initial_release - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id + target_hosts = step.create_vault_cluster_targets.hosts } } @@ -137,6 +181,41 @@ scenario "autopilot" { module = module.get_local_metadata } + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + step "create_autopilot_upgrade_storageconfig" { module = module.autopilot_upgrade_storageconfig @@ -145,14 +224,13 @@ scenario "autopilot" { } } - # This step creates a new Vault cluster using a bundle or package - # from the matrix.artifact_source, with the var.vault_product_version step "upgrade_vault_cluster_with_autopilot" { module = module.vault_cluster depends_on = [ - step.create_vault_cluster, step.build_vault, + step.create_vault_cluster, step.create_autopilot_upgrade_storageconfig, + step.verify_write_test_data ] providers = { @@ -160,26 +238,26 @@ scenario "autopilot" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + enable_audit_devices = var.vault_enable_audit_devices + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + log_level = var.vault_log_level + force_unseal = matrix.seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + root_token = step.create_vault_cluster.root_token + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + shamir_unseal_keys = matrix.seal == "shamir" ? 
step.create_vault_cluster.unseal_keys_hex : null storage_backend = "raft" storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config - unseal_method = matrix.seal - vault_cluster_tag = step.create_vault_cluster.vault_cluster_tag - vault_init = false - vault_install_dir = local.vault_install_dir - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_node_prefix = "upgrade_node" - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_when_no_init = matrix.seal == "shamir" - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { "VAULT_REPLICATION_USE_UNDO_LOGS" : local.enable_undo_logs } + storage_node_prefix = "upgrade_node" + target_hosts = step.create_vault_cluster_upgrade_targets.hosts } } @@ -187,6 +265,7 @@ scenario "autopilot" { module = module.vault_verify_unsealed depends_on = [ step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, ] @@ -196,8 +275,7 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts } } @@ -214,16 +292,17 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } - step "verify_autopilot_upgraded_vault_cluster" { + step "verify_autopilot_await_server_removal_state" { module = module.vault_verify_autopilot depends_on = [ + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, - step.verify_vault_unsealed + step.verify_raft_auto_join_voter ] providers = { @@ -234,17 +313,118 @@ scenario "autopilot" { vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version vault_autopilot_upgrade_status = "await-server-removal" vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster.target_hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } - step "verify_undo_logs_status" { - skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") - module = module.vault_verify_undo_logs + step "wait_for_leader_in_upgrade_targets" { + module = module.vault_wait_for_leader + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, step.upgrade_vault_cluster_with_autopilot, - step.verify_vault_unsealed + step.wait_for_leader_in_upgrade_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips + vault_instance_count = 6 + vault_install_dir = local.vault_install_dir + } + } + + step "raft_remove_peers" { + module = module.vault_raft_remove_peer + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.get_updated_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_await_server_removal_state + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip + remove_vault_instances = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_instance_count = 3 + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "remove_old_nodes" { + module = module.shutdown_multiple_nodes + depends_on = [ + step.create_vault_cluster, + step.raft_remove_peers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + old_vault_instances = step.create_vault_cluster.target_hosts + vault_instance_count = 3 + } + } + + step "verify_autopilot_idle_state" { + module = module.vault_verify_autopilot + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes ] providers = { @@ -252,66 +432,192 @@ scenario "autopilot" { } variables { - vault_install_dir = local.vault_install_dir vault_autopilot_upgrade_version = matrix.artifact_source == 
"local" ? step.get_local_metadata.version : var.vault_product_version - vault_undo_logs_status = matrix.undo_logs_status - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_autopilot_upgrade_status = "idle" + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } } - output "vault_cluster_priv_ips" { + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "verify_undo_logs_status" { + skip_step = true + # NOTE: temporarily disable undo logs checking until it is fixed. 
See VAULT-20259 + # skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") + module = module.vault_verify_undo_logs + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.remove_old_nodes, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_idle_state + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + # Verify that upgrading from a version <1.16.0 does not introduce Default LCQ + step "verify_default_lcq" { + module = module.vault_verify_default_lcq + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.remove_old_nodes, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_idle_state + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.create_vault_cluster.root_token + vault_autopilot_default_max_leases = local.vault_autopilot_default_max_leases + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex } - output "upgraded_vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.upgrade_vault_cluster_with_autopilot.instance_ids + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "upgraded_vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_public_ips + 
output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 } - output "upgraded_vault_cluster_priv_ips" { + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } + + output "upgrade_hosts" { + description = "The Vault cluster target hosts" + value = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + + output "upgrade_private_ips" { description = "The Vault cluster private IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_private_ips + value = step.upgrade_vault_cluster_with_autopilot.private_ips + } + + output "upgrade_public_ips" { + description = "The Vault cluster public IPs" + value = step.upgrade_vault_cluster_with_autopilot.public_ips } } diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl new file mode 100644 index 000000000000..356abb8b15f2 --- /dev/null +++ b/enos/enos-scenario-proxy.hcl @@ -0,0 +1,440 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +scenario "proxy" { + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + seal = global.seals + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_seal_key" { + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "start_vault_proxy" { + module = "vault_proxy" + depends_on = [ + step.build_vault, + step.create_vault_cluster, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = 
step.create_vault_cluster.recovery_keys_hex + } + + output "seal_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl new file mode 100644 index 000000000000..0634d2ffa1a3 --- /dev/null +++ b/enos/enos-scenario-replication.hcl @@ -0,0 +1,920 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// The replication scenario configures performance replication between two Vault clusters and verifies +// that known_primary_cluster_addrs on the secondary Vault cluster is updated with the IP addresses of +// replaced nodes on the primary Vault cluster. +scenario "replication" { + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + primary_backend = global.backends + primary_seal = global.seals + secondary_backend = global.backends + secondary_seal = global.seals + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + primary_seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + + exclude { + secondary_seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ?
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "./support/vault.hclic")) + } + } + + step "create_primary_seal_key" { + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + # Create all of our instances for both primary and secondary clusters + step "create_primary_cluster_targets" { + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_backend_targets" { + module = matrix.primary_backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_additional_targets" { + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + step.create_primary_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_name = step.create_primary_cluster_targets.cluster_name + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_backend_targets" { + module = matrix.secondary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_backend_cluster" { + module = "backend_${matrix.primary_backend}" + depends_on = [ + step.create_primary_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_primary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_primary_cluster_backend_targets.hosts + } + } + + step "create_primary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_primary_backend_cluster, + step.build_vault, + step.create_primary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_primary_cluster_targets.cluster_name + consul_release = matrix.primary_backend == "consul" ? 
{ + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + storage_backend = matrix.primary_backend + target_hosts = step.create_primary_cluster_targets.hosts + } + } + + step "create_secondary_backend_cluster" { + module = "backend_${matrix.secondary_backend}" + depends_on = [ + step.create_secondary_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_secondary_cluster_backend_targets.hosts + } + } + + step "create_secondary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.build_vault, + step.create_secondary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + config_mode = matrix.config_mode + consul_license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_secondary_cluster_targets.cluster_name + consul_release = matrix.secondary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_attributes = step.create_secondary_seal_key.attributes + seal_type = matrix.secondary_seal + storage_backend = matrix.secondary_backend + target_hosts = step.create_secondary_cluster_targets.hosts + } + } + + step "verify_that_vault_primary_cluster_is_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_that_vault_secondary_cluster_is_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_secondary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + } + } + + step "get_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.verify_vault_version, + step.verify_ui, + step.verify_that_vault_primary_cluster_is_unsealed, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "get_primary_cluster_replication_data" { + module = module.replication_data + depends_on = [step.get_primary_cluster_ips] + + variables { + follower_hosts = step.get_primary_cluster_ips.follower_hosts + } + } + + step "get_secondary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "write_test_data_on_primary" { + module = module.vault_verify_write_data + depends_on = [step.get_primary_cluster_ips] + + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + leader_private_ip = 
step.get_primary_cluster_ips.leader_private_ip + vault_instances = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_primary" { + module = module.vault_setup_perf_primary + depends_on = [ + step.get_primary_cluster_ips, + step.get_secondary_cluster_ips, + step.write_test_data_on_primary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "generate_secondary_token" { + module = module.generate_secondary_token + depends_on = [step.configure_performance_replication_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_secondary" { + module = module.vault_setup_perf_secondary + depends_on = [step.generate_secondary_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + wrapping_token = step.generate_secondary_token.secondary_token + } + } + + // After replication is enabled, the secondary cluster followers need to be unsealed. + // Secondary unseal keys are passed in following the guide at https://developer.hashicorp.com/vault/docs/enterprise/replication#seals + step "unseal_secondary_followers" { + module = module.vault_unseal_nodes + depends_on = [ + step.create_primary_cluster, + step.create_secondary_cluster, + step.get_secondary_cluster_ips, + step.configure_performance_replication_secondary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex + vault_seal_type = matrix.primary_seal == "shamir" ?
matrix.primary_seal : matrix.secondary_seal + } + } + + step "verify_secondary_cluster_is_unsealed_after_enabling_replication" { + module = module.vault_verify_unsealed + depends_on = [ + step.unseal_secondary_followers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + step "verify_replicated_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_performance_replication, + step.get_secondary_cluster_ips, + step.write_test_data_on_primary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "add_additional_nodes_to_primary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_vpc, + step.create_primary_backend_cluster, + step.create_primary_cluster, + step.verify_replicated_data, + step.create_primary_cluster_additional_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_primary_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.primary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + force_unseal = matrix.primary_seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + root_token = step.create_primary_cluster.root_token + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + shamir_unseal_keys = matrix.primary_seal == "shamir" ? 
step.create_primary_cluster.unseal_keys_hex : null + storage_backend = matrix.primary_backend + storage_node_prefix = "newprimary_node" + target_hosts = step.create_primary_cluster_additional_targets.hosts + } + } + + step "verify_additional_primary_nodes_are_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.add_additional_nodes_to_primary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_additional_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.primary_backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.add_additional_nodes_to_primary_cluster, + step.create_primary_cluster, + step.verify_additional_primary_nodes_are_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_additional_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "remove_primary_follower_1" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_replication_data, + step.verify_additional_primary_nodes_are_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1 + } + } + + step "remove_primary_leader" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_follower_1 + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_ips.leader_public_ip + } + } + + // After we've removed two nodes from the cluster we need to get an updated set of vault hosts + // to work with. + step "get_remaining_hosts_replication_data" { + module = module.replication_data + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_leader, + ] + + variables { + added_hosts = step.create_primary_cluster_additional_targets.hosts + added_hosts_count = var.vault_instance_count + initial_hosts = step.create_primary_cluster_targets.hosts + initial_hosts_count = var.vault_instance_count + removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1 + removed_primary_host = step.get_primary_cluster_ips.leader_host + } + } + + // Wait for the remaining hosts in our cluster to elect a new leader. + step "wait_for_leader_in_remaining_hosts" { + module = module.vault_wait_for_leader + depends_on = [ + step.remove_primary_leader, + step.get_remaining_hosts_replication_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts + } + } + + // Get our new leader and follower IP addresses. 
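+ // The old leader and one follower were shut down above, so the addresses captured + // by the earlier get_primary_cluster_ips step are stale and must be queried again.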
+ step "get_updated_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts + vault_install_dir = local.vault_install_dir + vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count + vault_root_token = step.create_primary_cluster.root_token + } + } + + // Make sure the cluster has the correct performance replication state after the new leader election. + step "verify_updated_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + step.get_updated_primary_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_updated_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_primary_cluster.audit_device_file_path + } + + output "primary_cluster_hosts" { + description = "The Vault primary cluster target hosts" + value = step.create_primary_cluster_targets.hosts + } + + output "primary_cluster_additional_hosts" { + description = "The Vault added new node on primary cluster target hosts" + value = step.create_primary_cluster_additional_targets.hosts + } + + output "primary_cluster_root_token" { + description = "The Vault primary cluster root token" + value = step.create_primary_cluster.root_token + } + + output "primary_cluster_unseal_keys_b64" { + description = "The Vault primary cluster unseal keys" + value = step.create_primary_cluster.unseal_keys_b64 + } + + output "primary_cluster_unseal_keys_hex" { + description = "The Vault primary cluster unseal keys hex" + value = step.create_primary_cluster.unseal_keys_hex + } + + output "primary_cluster_recovery_key_shares" { + description = "The Vault primary cluster recovery key shares" + value = step.create_primary_cluster.recovery_key_shares + } + + output "primary_cluster_recovery_keys_b64" { + description = "The Vault primary cluster recovery keys b64" + value = step.create_primary_cluster.recovery_keys_b64 + } + + output "primary_cluster_recovery_keys_hex" { + description = "The Vault primary cluster recovery keys hex" + value = step.create_primary_cluster.recovery_keys_hex + } + + output "secondary_cluster_hosts" { + description = "The Vault secondary cluster public IPs" + value = step.create_secondary_cluster_targets.hosts + } + + output "secondary_cluster_root_token" { + description = "The Vault secondary cluster root token" + value = step.create_secondary_cluster.root_token + } + + output "performance_secondary_token" { + description = "The performance secondary replication token" + value = step.generate_secondary_token.secondary_token + } + + output "remaining_hosts" { + description = "The Vault cluster primary hosts after removing the leader and follower" + value = step.get_remaining_hosts_replication_data.remaining_hosts + } + + 
output "initial_primary_replication_status" { + description = "The Vault primary cluster performance replication status" + value = step.verify_performance_replication.primary_replication_status + } + + output "initial_known_primary_cluster_addresses" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.known_primary_cluster_addrs + } + + output "initial_secondary_performance_replication_status" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.secondary_replication_status + } + + output "intial_primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_performance_replication.primary_replication_data_secondaries + } + + output "initial_secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_performance_replication.secondary_replication_data_primaries + } + + output "updated_primary_replication_status" { + description = "The Vault updated primary cluster performance replication status" + value = step.verify_updated_performance_replication.primary_replication_status + } + + output "updated_known_primary_cluster_addresses" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_updated_performance_replication.known_primary_cluster_addrs + } + + output "updated_secondary_replication_status" { + description = "The Vault updated secondary cluster performance replication status" + value = step.verify_updated_performance_replication.secondary_replication_status + } + + output "updated_primary_replication_data_secondaries" { + description = "The Vault updated primary cluster secondaries connection status" + value = step.verify_updated_performance_replication.primary_replication_data_secondaries + } + + output "updated_secondary_replication_data_primaries" { + description = "The Vault updated secondary cluster primaries connection status" + value = step.verify_updated_performance_replication.secondary_replication_data_primaries + } +} diff --git a/enos/enos-scenario-seal-ha.hcl b/enos/enos-scenario-seal-ha.hcl new file mode 100644 index 000000000000..0d02d1225af5 --- /dev/null +++ b/enos/enos-scenario-seal-ha.hcl @@ -0,0 +1,795 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +scenario "seal_ha" { + matrix { + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + // Seal HA is only supported with auto-unseal devices. + primary_seal = ["awskms", "pkcs11"] + secondary_seal = ["awskms", "pkcs11"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. 
+ exclude { + primary_seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + + exclude { + secondary_seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_primary_seal_key" { + module = "seal_${matrix.primary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + module = "seal_${matrix.secondary_seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". 
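+ // When this step is skipped, downstream references are guarded by ternaries such as
+ // (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
+ // so the license output is only dereferenced when the step actually ran.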
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + // Only configure our primary seal during our initial cluster setup + seal_attributes = step.create_primary_seal_key.attributes + seal_type = matrix.primary_seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Write some test data before we create the new seal + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, + step.verify_vault_unsealed, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for the initial seal rewrap to complete before we add our HA seal. + step "wait_for_initial_seal_rewrap" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.verify_write_test_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Stop the vault service on all nodes before we restart with new seal config + step "stop_vault" { + module = module.stop_vault + depends_on = [ + step.create_vault_cluster, + step.verify_write_test_data, + step.wait_for_initial_seal_rewrap, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Add the secondary seal to the cluster + step "add_ha_seal_to_cluster" { + module = module.start_vault + depends_on = [step.stop_vault] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + manage_service = local.manage_service + seal_attributes = step.create_primary_seal_key.attributes + seal_attributes_secondary = step.create_secondary_seal_key.attributes + seal_type = matrix.primary_seal + seal_type_secondary = matrix.secondary_seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_election" { + module = module.vault_wait_for_leader + depends_on = [step.add_ha_seal_to_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip_for_step_down" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_election] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed_with_new_seal" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.add_ha_seal_to_cluster, + step.verify_vault_unsealed_with_new_seal, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Perform all of our standard verifications after we've enabled multiseal + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + 
vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Make sure our data is still available + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Make sure we have a "multiseal" seal type + step "verify_seal_type" { + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. + skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1") + module = module.verify_seal_type + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_hosts = step.create_vault_cluster_targets.hosts + seal_type = "multiseal" + } + } + + // Now we'll migrate away from our initial seal to our secondary seal + + // Stop the vault service on all nodes before we restart with new seal config + step "stop_vault_for_migration" { + module = module.stop_vault + depends_on = [ + step.wait_for_seal_rewrap, + step.verify_read_test_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Remove the "primary" seal from the cluster. Set our "secondary" seal to priority 1. We do this + // by restarting vault with the correct config. + step "remove_primary_seal" { + module = module.start_vault + depends_on = [step.stop_vault_for_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + manage_service = local.manage_service + seal_alias = "secondary" + seal_attributes = step.create_secondary_seal_key.attributes + seal_type = matrix.secondary_seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader after restarting vault with a new primary seal + step "wait_for_leader_after_migration" { + module = module.vault_wait_for_leader + depends_on = [step.remove_primary_seal] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Since we've restarted our cluster we might have a new leader and followers. Get the new IPs. + step "get_cluster_ips_after_migration" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure we unsealed + step "verify_vault_unsealed_after_migration" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap_after_migration" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.wait_for_leader_after_migration, + step.verify_vault_unsealed_after_migration, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure our data is still available after migration + step "verify_read_test_data_after_migration" { + module = module.vault_verify_read_data + depends_on = [step.wait_for_seal_rewrap_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_cluster_ips_after_migration.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + // Make sure we have our secondary seal type after migration + step "verify_seal_type_after_migration" { + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. 
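+ // As a note on semantics (inferred from its use throughout these scenarios):
+ // semverconstraint returns true when the given version satisfies the constraint string,
+ // so skip_step skips this verification for any matching product version.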
+ skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1") + module = module.verify_seal_type + depends_on = [step.wait_for_seal_rewrap_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_hosts = step.create_vault_cluster_targets.hosts + seal_type = matrix.secondary_seal + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "initial_seal_rewrap" { + description = "The initial seal rewrap status" + value = step.wait_for_initial_seal_rewrap.stdout + } + + output "post_migration_seal_rewrap" { + description = "The seal rewrap status after migrating the primary seal" + value = step.wait_for_seal_rewrap_after_migration.stdout + } + + output "primary_seal_attributes" { + description = "The Vault cluster primary seal attributes" + value = step.create_primary_seal_key.attributes + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "secondary_seal_attributes" { + description = "The Vault cluster secondary seal attributes" + value = step.create_secondary_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 84e9dc886bf4..2a4675c1a13c 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -1,18 +1,34 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + scenario "smoke" { matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - - # Packages are not offered for the oss edition + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + seal = global.seals + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. exclude { - edition = ["oss"] - artifact_type = ["package"] + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] } } @@ -25,35 +41,13 @@ scenario "smoke" { ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "get_local_metadata" { @@ -65,8 +59,8 @@ scenario "smoke" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -78,43 +72,75 @@ scenario "smoke" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? 
local.vault_instance_type : null revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type - ] + common_tags = global.tags } } - step "create_vpc" { - module = module.create_vpc + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.backend_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" + step "read_vault_license" { + skip_step = matrix.edition == "ce" module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } - step "create_backend_cluster" { - module = "backend_${matrix.backend}" + step "create_seal_key" { + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim depends_on = [step.create_vpc] providers = { @@ -122,15 +148,33 @@ scenario "smoke" { } variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? 
step.read_backend_license.license : null + release = { edition = var.backend_edition version = matrix.consul_version } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id + target_hosts = step.create_vault_cluster_backend_targets.hosts } } @@ -139,6 +183,7 @@ scenario "smoke" { depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets, ] providers = { @@ -146,19 +191,106 @@ scenario "smoke" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip_for_step_down" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } @@ -171,19 +303,19 @@ scenario "smoke" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_edition = matrix.edition vault_install_dir = local.vault_install_dir vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } step "verify_vault_unsealed" { module = module.vault_verify_unsealed - depends_on = [step.create_vault_cluster] + depends_on = [step.wait_for_leader] providers = { enos = local.enos_provider[matrix.distro] @@ -191,15 +323,37 @@ scenario "smoke" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [step.create_vault_cluster] + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] @@ -207,14 +361,17 @@ scenario "smoke" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } step "verify_replication" { - module = module.vault_verify_replication - depends_on = [step.create_vault_cluster] + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] @@ -223,76 +380,100 @@ scenario "smoke" { variables { vault_edition = matrix.edition vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts } } - step "verify_ui" { - module = module.vault_verify_ui - depends_on = [step.create_vault_cluster] + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_cluster.vault_instances + node_public_ips = step.get_vault_cluster_ips.follower_public_ips vault_install_dir = local.vault_install_dir } } - step "verify_write_test_data" { - module = module.vault_verify_write_test_data - depends_on = [step.create_vault_cluster] + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = 
step.create_vault_cluster_targets.hosts } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_priv_ips" { + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_attributes" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl new file mode 100644 index 000000000000..6e7e7c612640 --- /dev/null +++ b/enos/enos-scenario-ui.hcl @@ -0,0 +1,293 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +scenario "ui" { + matrix { + backend = global.backends + edition = ["ce", "ent"] + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu + ] + + locals { + arch = "amd64" + artifact_type = "bundle" + backend_license_path = abspath(var.backend_license_path != null ? 
var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) + backend_tag_key = "VaultStorage" + build_tags = { + "ce" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + } + bundle_path = abspath(var.vault_artifact_path) + distro = "ubuntu" + consul_version = "1.17.0" + seal = "awskms" + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_install_dir_packages = { + rhel = "/bin" + ubuntu = "/usr/bin" + } + vault_install_dir = var.vault_install_dir + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key + ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null + } + + step "build_vault" { + module = module.build_local + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + bundle_path = local.bundle_path + goarch = local.arch + goos = "linux" + product_version = var.vault_product_version + artifact_type = local.artifact_type + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = local.tags + } + } + + step "create_seal_key" { + module = "seal_${local.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = local.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = local.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids[local.arch][local.distro][var.ubuntu_distro_version] + cluster_tag_key = local.vault_tag_key + common_tags = local.tags + seal_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = local.backend_tag_key + common_tags = local.tags + seal_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = local.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? 
step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = local.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = local.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = local.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.bundle_path + packages = global.distro_packages["ubuntu"] + seal_name = step.create_seal_key.resource_name + seal_type = local.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "test_ui" { + module = module.vault_test_ui + depends_on = [step.wait_for_leader] + + variables { + vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip + vault_root_token = step.create_vault_cluster.root_token + vault_unseal_keys = step.create_vault_cluster.recovery_keys_b64 + vault_recovery_threshold = step.create_vault_cluster.recovery_threshold + ui_test_filter = local.ui_test_filter + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "seal_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name + } + + output "ui_test_environment" { + value 
= step.test_ui.ui_test_environment + description = "The environment variables that are required in order to run the test:enos yarn target" + } + + output "ui_test_stderr" { + description = "The stderr of the ui tests that ran" + value = step.test_ui.ui_test_stderr + } + + output "ui_test_stdout" { + description = "The stdout of the ui tests that ran" + value = step.test_ui.ui_test_stdout + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 6457320a8e03..1eb398511a52 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -1,20 +1,48 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + scenario "upgrade" { matrix { - arch = ["amd64", "arm64"] - backend = ["consul", "raft"] - artifact_source = ["local", "crt", "artifactory"] - artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] - distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] - seal = ["awskms", "shamir"] - - # Packages are not offered for the oss edition + arch = global.archs + artifact_source = global.artifact_sources + artifact_type = global.artifact_types + backend = global.backends + config_mode = global.config_modes + consul_version = global.consul_versions + distro = global.distros + edition = global.editions + // NOTE: when backporting the initial version make sure we don't include initial versions that + // are a higher minor version than our release candidate. Also, prior to 1.11.x the + // /v1/sys/seal-status API has known issues that could cause this scenario to fail when using + // those earlier versions, therefore support from 1.8.x to 1.10.x is unreliable. Versions prior + // to 1.8.x are not supported due to changes with vault's signaling of systemd and the + // enos-provider no longer supporting setting the license via the license API. + initial_version = global.upgrade_initial_versions + seal = global.seals + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + + # FIPS 140-2 editions began at 1.10 exclude { - edition = ["oss"] - artifact_type = ["package"] + edition = ["ent.fips1402", "ent.hsm.fips1402"] + initial_version = ["1.8.12", "1.9.10"] } + # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402. + exclude { + seal = ["pkcs11"] + edition = ["ce", "ent", "ent.fips1402"] + } } terraform_cli = terraform_cli.default @@ -26,35 +54,18 @@ scenario "upgrade" { ] locals { - build_tags = { - "oss" = ["ui"] - "ent" = ["ui", "enterprise", "ent"] - "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] - "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] - "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] - } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - tags = merge({ - "Project Name" : var.project_name - "Project" : "Enos", - "Environment" : "ci" - }, var.tags) - vault_instance_types = { - amd64 = "t3a.small" - arm64 = "t4g.small" - } - vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) - vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) - vault_install_dir_packages = { - rhel = "/bin" - ubuntu = "/usr/bin" - } - vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata } # This step gets/builds the upgrade artifact that we will upgrade to @@ -62,8 +73,8 @@ scenario "upgrade" { module = "build_${matrix.artifact_source}" variables { - build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] - bundle_path = local.bundle_path + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -75,48 +86,75 @@ scenario "upgrade" { artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null revision = var.vault_revision } } - step "find_azs" { - module = module.az_finder + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc variables { - instance_type = [ - var.backend_instance_type, - local.vault_instance_type, - ] + common_tags = global.tags } } - step "create_vpc" { - module = module.create_vpc + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". 
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license variables { - ami_architectures = distinct([matrix.arch, "amd64"]) - availability_zones = step.find_azs.availability_zones - common_tags = local.tags + file_name = global.backend_license_path } } - step "read_license" { - skip_step = matrix.edition == "oss" + step "read_vault_license" { + skip_step = matrix.edition == "ce" module = module.read_license variables { - file_name = local.vault_license_path + file_name = global.vault_license_path } } - step "get_local_metadata" { - skip_step = matrix.artifact_source != "local" - module = module.get_local_metadata + step "create_seal_key" { + module = "seal_${matrix.seal}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_id = step.create_vpc.id + common_tags = global.tags + } } - step "create_backend_cluster" { - module = "backend_${matrix.backend}" + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim depends_on = [step.create_vpc] providers = { @@ -124,25 +162,93 @@ scenario "upgrade" { } variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - consul_release = { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { edition = var.backend_edition version = matrix.consul_version } - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id + target_hosts = step.create_vault_cluster_backend_targets.hosts } } - # This step creates a Vault cluster using a bundle downloaded from - # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release step "create_vault_cluster" { module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + config_mode = matrix.config_mode + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? 
{ + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + packages = concat(global.packages, global.distro_packages[matrix.distro]) + release = { + edition = matrix.edition + version = matrix.initial_version + } + seal_attributes = step.create_seal_key.attributes + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, ] providers = { @@ -150,18 +256,11 @@ scenario "upgrade" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_upgrade_initial_release - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } @@ -171,6 +270,7 @@ scenario "upgrade" { module = module.vault_upgrade depends_on = [ step.create_vault_cluster, + step.verify_write_test_data, ] providers = { @@ -179,20 +279,104 @@ scenario "upgrade" { variables { vault_api_addr = "http://localhost:8200" - vault_instances = step.create_vault_cluster.vault_instances - vault_local_artifact_path = local.bundle_path + vault_instances = step.create_vault_cluster_targets.hosts + vault_local_artifact_path = local.artifact_path vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null vault_install_dir = local.vault_install_dir - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null + vault_unseal_keys = matrix.seal == "shamir" ? 
step.create_vault_cluster.unseal_keys_hex : null vault_seal_type = matrix.seal } } + // Wait for our upgraded cluster to elect a leader + step "wait_for_leader_after_upgrade" { + module = module.vault_wait_for_leader + depends_on = [ + step.create_vault_cluster, + step.upgrade_vault, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_leader_ip_for_step_down" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_upgrade] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Force a step down to trigger a new leader election + step "vault_leader_step_down" { + module = module.vault_step_down + depends_on = [step.get_leader_ip_for_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + leader_host = step.get_leader_ip_for_step_down.leader_host + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader_after_stepdown" { + module = module.vault_wait_for_leader + depends_on = [step.vault_leader_step_down] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.wait_for_leader_after_stepdown, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + step "verify_vault_version" { module = module.vault_verify_version depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, ] providers = { @@ -200,21 +384,20 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_edition = matrix.edition vault_install_dir = local.vault_install_dir vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } step "verify_vault_unsealed" { module = module.vault_verify_unsealed depends_on = [ - step.create_vault_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, ] providers = { @@ -222,9 +405,26 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.verify_vault_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token } } @@ -232,8 +432,7 @@ scenario "upgrade" { skip_step = matrix.backend != "raft" module = module.vault_verify_raft_auto_join_voter depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, ] providers = { @@ -242,48 +441,100 @@ scenario "upgrade" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts } - output "vault_cluster_priv_ips" { + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = 
step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "seal_name" { + description = "The Vault cluster seal attributes" + value = step.create_seal_key.attributes + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index 0c88cc7910f0..9320f54a57db 100644 --- a/enos/enos-terraform.hcl +++ b/enos/enos-terraform.hcl @@ -1,14 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null - credentials "app.terraform.io" { - token = var.tfc_api_token - } - /* provider_installation { dev_overrides = { - "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider") + "registry.terraform.io/hashicorp-forge/enos" = abspath("../../enos-provider/dist") } direct {} } @@ -24,7 +23,8 @@ terraform "default" { } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" } } } diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index 64129e86063f..ff5aeec7cb3c 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -1,19 +1,16 @@ -variable "artifact_path" { - type = string - description = "The local path for dev artifact to test" - default = null -} +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 variable "artifactory_username" { type = string - description = "The username to use when connecting to artifactory" + description = "The username to use when testing an artifact from artifactory" default = null sensitive = true } variable "artifactory_token" { type = string - description = "The token to use when connecting to artifactory" + description = "The token to use when authenticating to artifactory" default = null sensitive = true } @@ -33,7 +30,7 @@ variable "artifactory_repo" { variable "aws_region" { description = "The AWS region where we'll create infrastructure" type = string - default = "us-west-1" + default = "us-east-1" } variable "aws_ssh_keypair_name" { @@ -51,13 +48,13 @@ variable "aws_ssh_private_key_path" { variable "backend_edition" { description = "The backend release edition if applicable" type = string - default = "oss" + default = "ce" // or "ent" } variable "backend_instance_type" { - description = "The instance type to use for the Vault backend" + description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible" type = string - default = "t3.small" + default = "t4g.small" } variable "backend_license_path" { @@ -66,12 +63,24 @@ variable "backend_license_path" { default = null } +variable "backend_log_level" { + description = "The server log level for the backend. Supported values include 'trace', 'debug', 'info', 'warn', 'error'" + type = string + default = "trace" +} + variable "project_name" { description = "The description of the project" type = string default = "vault-enos-integration" } +variable "rhel_distro_version" { + description = "The version of RHEL to use" + type = string + default = "9.1" // or "8.8" +} + variable "tags" { description = "Tags that will be applied to infrastructure resources that support tagging" type = map(string) @@ -84,41 +93,51 @@ variable "terraform_plugin_cache_dir" { default = null } -variable "tfc_api_token" { - description = "The Terraform Cloud QTI Organization API token." +variable "ubuntu_distro_version" { + description = "The version of ubuntu to use" type = string - sensitive = true + default = "22.04" // or "20.04", "18.04" } -variable "vault_artifact_type" { - description = "The Vault artifact type package or bundle" - default = "bundle" +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"\"'" + default = null } -variable "vault_autopilot_initial_release" { - description = "The Vault release to deploy before upgrading with autopilot" - default = { - edition = "ent" - version = "1.11.0" - } +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run" + default = true } -variable "vault_bundle_path" { +variable "vault_artifact_type" { + description = "The type of Vault artifact to use when installing Vault from artifactory. 
It should be 'package' for .deb or .rpm packages and 'bundle' for .zip bundles" + default = "bundle" +} + +variable "vault_artifact_path" { + description = "Path to CRT generated or local vault.zip bundle" + type = string + default = "/tmp/vault.zip" } -variable "vault_install_dir" { +variable "vault_build_date" { + description = "The build date for the Vault artifact" type = string - description = "The directory where the Vault binary will be installed" - default = "/opt/vault/bin" + default = "" } -variable "vault_instance_type" { - description = "The instance type to use for the Vault backend" +variable "vault_enable_audit_devices" { + description = "If true every audit device will be enabled" + type = bool + default = true +} + +variable "vault_install_dir" { type = string - default = null + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" } variable "vault_instance_count" { @@ -128,7 +147,7 @@ variable "vault_instance_count" { } variable "vault_license_path" { - description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions" + description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions" type = string default = null } @@ -139,10 +158,10 @@ variable "vault_local_build_tags" { default = null } -variable "vault_build_date" { - description = "The build date for Vault artifact" +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." type = string - default = "" + default = "trace" } variable "vault_product_version" { @@ -160,7 +179,7 @@ variable "vault_revision" { variable "vault_upgrade_initial_release" { description = "The Vault release to deploy before upgrading" default = { - edition = "oss" + edition = "ce" // Vault 1.10.5 has a known issue with retry_join. version = "1.10.4" } diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index d157c7b92650..8397eda372c0 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -1,6 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# artifactory_username is the username to use when testing an artifact stored in artifactory. +# artifactory_username = "yourname@hashicorp.com" + +# artifactory_token is the token to use when authenticating to artifactory. +# artifactory_token = "yourtoken" + +# artifactory_host is the artifactory host to search for vault artifacts. +# artifactory_host = "https://artifactory.hashicorp.engineering/artifactory" + +# artifactory_repo is the artifactory repo to search for vault artifacts. +# artifactory_repo = "hashicorp-crt-stable-local*" + # aws_region is the AWS region where we'll create infrastructure # for the smoke scenario -# aws_region = "us-west-1" +# aws_region = "us-east-1" # aws_ssh_keypair_name is the AWS keypair to use for SSH # aws_ssh_keypair_name = "enos-ci-ssh-key" @@ -8,8 +23,25 @@ # aws_ssh_private_key_path is the path to the AWS keypair private key # aws_ssh_private_key_path = "./support/private_key.pem" -# backend_instance_type is the instance type to use for the Vault backend -# backend_instance_type = "t3.small" +# backend_edition is the backend (consul) release edition if applicable to the scenario. +# backend_edition = "ce" + +# backend_license_path is the license for the backend if applicable (Consul Enterprise). 
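+# Note: the scenarios only read this license when the backend is Consul and backend_edition is +# set to "ent", so pair the two settings, for example (illustrative value): +# backend_edition = "ent"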
+# backend_license_path = "./support/consul.hclic" + +# backend_log_level is the server log level for the backend. Supported values include 'trace', +# 'debug', 'info', 'warn', 'error'. +# backend_log_level = "trace" + +# backend_instance_type is the instance type to use for the Vault backend. Must support arm64. +# backend_instance_type = "t4g.small" + +# project_name is the description of the project. It will often be used to tag infrastructure +# resources. +# project_name = "vault-enos-integration" + +# rhel_distro_version is the version of RHEL to use for "distro:rhel" variants. +# rhel_distro_version = "9.1" // or "8.8" # tags are a map of tags that will be applied to infrastructure resources that # support tagging. @@ -19,14 +51,36 @@ # It must exist. # terraform_plugin_cache_dir = "/Users//.terraform/plugin-cache-dir -# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this -# to download the enos Terraform provider and the enos Terraform modules. -# tfc_api_token = "XXXXX.atlasv1.XXXXX..." +# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will +# be appended to the ember test command as '-f=\"\"'. +# ui_test_filter = "sometest" + +# ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a +# cluster will be created but no tests will be run. +# ui_run_tests = true + +# ubuntu_distro_version is the version of ubuntu to use for "distro:ubuntu" variants. +# ubuntu_distro_version = "22.04" // or "20.04", "18.04" -# vault_bundle_path is the path to CRT generated or local vault.zip bundle. When # using the "builder:local" variant a bundle will be built from the current branch. # In CI it will use the output of the build workflow. -# vault_bundle_path = "./dist/vault.zip" +# vault_artifact_path = "./dist/vault.zip" + +# vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory. +# It should be 'package' for .deb or .rpm packages and 'bundle' for .zip bundles. +# vault_artifact_type = "bundle" + +# vault_build_date is the build date for the Vault artifact. Some validations will require the binary build +# date to match. +# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example + +# vault_enable_audit_devices sets whether or not to enable every audit device. If true, +# a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog +# audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090 +# will be enabled. The netcat program is run in listening mode to provide an endpoint +# that the socket audit device can connect to. +# vault_enable_audit_devices = true # vault_install_dir is the directory where the vault binary will be installed on # the remote machines. @@ -42,7 +96,20 @@ # vault_instance_count = 3 # vault_license_path is the path to a valid Vault enterprise edition license. -# This is only required for non-oss editions" +# This is only required for non-ce editions. # vault_license_path = "./support/vault.hclic" -# vault_upgrade_initial_release is the Vault release to deploy before upgrading. +# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. +# vault_local_build_tags = ["ui", "ent"] + +# vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are +# trace, debug, info, warn, and err. 
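+# For a quieter run you could lower the detail, for example (illustrative value): +# vault_log_level = "info"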
+# vault_log_level = "trace" + +# vault_product_version is the version of Vault we are testing. Some validations will expect the vault +# binary and cluster to report this version. +# vault_product_version = "1.15.0" + +# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault +# binary and cluster to report this revision. +# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" diff --git a/enos/k8s/enos-modules-k8s.hcl b/enos/k8s/enos-modules-k8s.hcl index 17853fea900c..2e815eac62b6 100644 --- a/enos/k8s/enos-modules-k8s.hcl +++ b/enos/k8s/enos-modules-k8s.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + module "create_kind_cluster" { source = "../modules/local_kind_cluster" } @@ -41,7 +44,7 @@ module "k8s_verify_version" { module "k8s_verify_write_data" { source = "../modules/k8s_vault_verify_write_data" - vault_instance_count = var.vault_instance_count + vault_instance_count = var.vault_instance_count } module "read_license" { diff --git a/enos/k8s/enos-providers-k8s.hcl b/enos/k8s/enos-providers-k8s.hcl index 2a0cf2be3484..e11092c22c53 100644 --- a/enos/k8s/enos-providers-k8s.hcl +++ b/enos/k8s/enos-providers-k8s.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + provider "enos" "default" {} provider "helm" "default" { diff --git a/enos/k8s/enos-scenario-k8s.hcl b/enos/k8s/enos-scenario-k8s.hcl index 147574b325df..e40c1870598c 100644 --- a/enos/k8s/enos-scenario-k8s.hcl +++ b/enos/k8s/enos-scenario-k8s.hcl @@ -1,6 +1,9 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + scenario "k8s" { matrix { - edition = ["oss", "ent"] + edition = ["ce", "ent"] } terraform_cli = terraform_cli.default @@ -14,8 +17,8 @@ scenario "k8s" { locals { image_path = abspath(var.vault_docker_image_archive) - image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "oss" ? "hashicorp/vault" : "hashicorp/vault-enterprise" - image_tag = replace(var.vault_product_version, "+ent", "-ent") + image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "ce" ? "hashicorp/vault" : "hashicorp/vault-enterprise" + image_tag = replace(var.vault_product_version, "+ent", "-ent") // The additional '-0' is required in the constraint since without it, the semver function will // only compare the non-pre-release parts (Major.Minor.Patch) of the version and the constraint, @@ -24,7 +27,7 @@ scenario "k8s" { } step "read_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -62,7 +65,8 @@ scenario "k8s" { image_repository = step.load_docker_image.repository kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 vault_edition = matrix.edition - ent_license = matrix.edition != "oss" ? step.read_license.license : null + vault_log_level = var.vault_log_level + ent_license = matrix.edition != "ce" ? 
step.read_license.license : null } depends_on = [step.load_docker_image, step.create_kind_cluster] @@ -70,7 +74,7 @@ scenario "k8s" { step "verify_build_date" { skip_step = !local.version_includes_build_date - module = module.k8s_verify_build_date + module = module.k8s_verify_build_date variables { vault_pods = step.deploy_vault.vault_pods @@ -96,8 +100,8 @@ scenario "k8s" { } step "verify_ui" { - module = module.k8s_verify_ui - skip_step = matrix.edition == "oss" + module = module.k8s_verify_ui + skip_step = matrix.edition == "ce" variables { vault_pods = step.deploy_vault.vault_pods diff --git a/enos/k8s/enos-terraform-k8s.hcl b/enos/k8s/enos-terraform-k8s.hcl index 18f1222ffab6..9b884ef12109 100644 --- a/enos/k8s/enos-terraform-k8s.hcl +++ b/enos/k8s/enos-terraform-k8s.hcl @@ -1,20 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform "k8s" { required_version = ">= 1.2.0" + required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } helm = { - source = "hashicorp/helm" + source = "hashicorp/helm" } } } terraform_cli "default" { plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null - - credentials "app.terraform.io" { - token = var.tfc_api_token - } } diff --git a/enos/k8s/enos-variables-k8s.hcl b/enos/k8s/enos-variables-k8s.hcl index ecae4e8069b7..52ffd8d8225c 100644 --- a/enos/k8s/enos-variables-k8s.hcl +++ b/enos/k8s/enos-variables-k8s.hcl @@ -1,9 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "vault_image_repository" { description = "The repository for the docker image to load, i.e. hashicorp/vault" type = string default = null } +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string + default = "info" +} + variable "vault_product_version" { description = "The vault product version to test" type = string @@ -34,11 +43,6 @@ variable "terraform_plugin_cache_dir" { default = null } -variable "tfc_api_token" { - description = "The Terraform Cloud QTI Organization API token." - type = string -} - variable "vault_build_date" { description = "The build date for the vault docker image" type = string diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf index 6093b8b1066d..3fcb77a7067c 100644 --- a/enos/modules/autopilot_upgrade_storageconfig/main.tf +++ b/enos/modules/autopilot_upgrade_storageconfig/main.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + variable "vault_product_version" {} output "storage_addl_config" { diff --git a/enos/modules/az_finder/main.tf b/enos/modules/az_finder/main.tf deleted file mode 100644 index b55975578c61..000000000000 --- a/enos/modules/az_finder/main.tf +++ /dev/null @@ -1,25 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - } - } -} - -variable "instance_type" { - default = ["t3.small"] - type = list(string) -} - -data "aws_ec2_instance_type_offerings" "infra" { - filter { - name = "instance-type" - values = var.instance_type - } - - location_type = "availability-zone" -} - -output "availability_zones" { - value = data.aws_ec2_instance_type_offerings.infra.locations -} diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf new file mode 100644 index 000000000000..2af4632bbd73 --- /dev/null +++ b/enos/modules/backend_consul/main.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.4" + } + } +} + +locals { + bin_path = "${var.install_dir}/consul" +} + +resource "enos_bundle_install" "consul" { + for_each = var.target_hosts + + destination = var.install_dir + release = merge(var.release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.bin_path + data_dir = var.data_dir + config_dir = var.config_dir + config = { + data_dir = var.data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"] + server = true + bootstrap_expect = length(var.target_hosts) + log_level = var.log_level + log_file = var.log_dir + } + license = var.license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} diff --git a/enos/modules/backend_consul/outputs.tf b/enos/modules/backend_consul/outputs.tf new file mode 100644 index 000000000000..8f32783e8dee --- /dev/null +++ b/enos/modules/backend_consul/outputs.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "private_ips" { + description = "Consul cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + description = "Consul cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "target_hosts" { + description = "The Consul cluster instances that were created" + + value = var.target_hosts +} diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf new file mode 100644 index 000000000000..34a96d8535f8 --- /dev/null +++ b/enos/modules/backend_consul/variables.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The name of the Consul cluster" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The tag key for searching for Consul nodes" + default = null +} + +variable "config_dir" { + type = string + description = "The directory where Consul will write config files" + default = "/etc/consul.d" +} + +variable "data_dir" { + type = string + description = "The directory where Consul will store data" + default = "/opt/consul/data" +} + +variable "install_dir" { + type = string + description = "The directory where the Consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The Consul Enterprise license" + default = null +} + +variable "log_dir" { + type = string + description = "The directory where Consul will write log files" + default = "/var/log/consul.d" +} + +variable "log_level" { + type = string + description = "The Consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.3" + edition = "ce" + } +} + +variable "target_hosts" { + description = "The target machines' host addresses to use for the Consul cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf index 4cb8e58a592b..bc070235eb25 100644 --- a/enos/modules/backend_raft/main.tf +++ b/enos/modules/backend_raft/main.tf @@ -1,46 +1,70 @@ -// Shim module to handle the fact that Vault doesn't actually need a backend module +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft. 
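+// For illustration (call site copied from the scenarios, not new logic): a backend is selected with an interpolated module name, so this shim must accept the same variables and expose the same outputs as backend_consul, e.g. +// step "create_backend_cluster" { +// module = "backend_${matrix.backend}" +// ... +// }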
terraform { + required_version = ">= 1.2.0" + required_providers { - aws = { - source = "hashicorp/aws" - } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" } } } -variable "ami_id" { +variable "cluster_name" { default = null } -variable "common_tags" { + +variable "cluster_tag_key" { default = null } -variable "consul_license" { + +variable "config_dir" { default = null } -variable "consul_release" { + +variable "consul_log_level" { default = null } -variable "environment" { + +variable "data_dir" { default = null } -variable "instance_type" { + +variable "install_dir" { default = null } -variable "kms_key_arn" { + +variable "license" { default = null } -variable "project_name" { + +variable "log_dir" { default = null } -variable "ssh_aws_keypair" { + +variable "log_level" { default = null } -variable "vpc_id" { + +variable "release" { + default = null +} + +variable "target_hosts" { default = null } -output "consul_cluster_tag" { - value = null +output "private_ips" { + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + value = [for host in var.target_hosts : host.public_ip] +} + +output "target_hosts" { + value = var.target_hosts } diff --git a/enos/modules/build_artifactory_artifact/locals.tf b/enos/modules/build_artifactory_artifact/locals.tf new file mode 100644 index 000000000000..77b453227916 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/locals.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + + // file name extensions for the install packages of vault for the various architectures, distributions and editions + package_extensions = { + amd64 = { + ubuntu = "-1_amd64.deb" + rhel = "-1.x86_64.rpm" + } + arm64 = { + ubuntu = "-1_arm64.deb" + rhel = "-1.aarch64.rpm" + } + } + + // product_version --> artifact_version + artifact_version = replace(var.product_version, var.edition, "ent") + + // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) + artifact_package_release_names = { + ubuntu = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.fips1402" = "vault-enterprise-fips1402_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + }, + rhel = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + } + } + + // edition --> artifact name edition + artifact_name_edition = { + "ce" = "" + "ent" = "" + "ent.hsm" = ".hsm" + "ent.fips1402" = ".fips1402" + "ent.hsm.fips1402" = ".hsm.fips1402" + } + + artifact_name_prefix = var.artifact_type == "package" ? local.artifact_package_release_names[var.distro][var.edition] : "vault_" + artifact_name_extension = var.artifact_type == "package" ? local.package_extensions[var.arch][var.distro] : "_linux_${var.arch}.zip" + artifact_name = var.artifact_type == "package" ? 
"${local.artifact_name_prefix}${replace(local.artifact_version, "-", "~")}${local.artifact_name_extension}" : "${local.artifact_name_prefix}${var.product_version}${local.artifact_name_extension}" +} diff --git a/enos/modules/build_artifactory_artifact/main.tf b/enos/modules/build_artifactory_artifact/main.tf new file mode 100644 index 000000000000..fb8e4c0e1d8f --- /dev/null +++ b/enos/modules/build_artifactory_artifact/main.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.2.3" + } + } +} + +data "enos_artifactory_item" "vault" { + username = var.artifactory_username + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*" + properties = tomap({ + "commit" = var.revision + "product-name" = var.edition == "ce" ? "vault" : "vault-enterprise" + "product-version" = local.artifact_version + }) +} diff --git a/enos/modules/build_artifactory_artifact/outputs.tf b/enos/modules/build_artifactory_artifact/outputs.tf new file mode 100644 index 000000000000..d05b5bf7959a --- /dev/null +++ b/enos/modules/build_artifactory_artifact/outputs.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +output "url" { + value = data.enos_artifactory_item.vault.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = data.enos_artifactory_item.vault.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault.results[0].name + description = "The name of the artifact" +} + +output "vault_artifactory_release" { + value = { + url = data.enos_artifactory_item.vault.results[0].url + sha256 = data.enos_artifactory_item.vault.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_artifactory_artifact/variables.tf b/enos/modules/build_artifactory_artifact/variables.tf new file mode 100644 index 000000000000..a2d9042af535 --- /dev/null +++ b/enos/modules/build_artifactory_artifact/variables.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 +variable "artifactory_username" { + type = string + description = "The username to use when connecting to artifactory" + default = null +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} +variable "arch" {} +variable "artifact_type" {} +variable "artifact_path" {} +variable "distro" {} +variable "edition" {} +variable "revision" {} +variable "product_version" {} +variable "build_tags" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } diff --git a/enos/modules/build_artifactory_package/main.tf b/enos/modules/build_artifactory_package/main.tf new file mode 100644 index 000000000000..1e7d0826d22f --- /dev/null +++ b/enos/modules/build_artifactory_package/main.tf @@ -0,0 +1,160 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "arch" { + type = string + description = "The architecture for the desired artifact" +} + +variable "artifactory_username" { + type = string + description = "The username to use when connecting to Artifactory" +} + +variable "artifactory_token" { + type = string + description = "The token to use when connecting to Artifactory" + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The Artifactory host to search for Vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "distro" { + type = string + description = "The distro for the desired artifact (ubuntu or rhel)" +} + +variable "distro_version" { + type = string + description = "The RHEL version for .rpm packages" + default = "9" +} + +variable "edition" { + type = string + description = "The edition of Vault to use" +} + +variable "product_version" { + type = string + description = "The version of Vault to use" +} + +// Shim variables that we don't use but include to satisfy the build module "interface" +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "artifactory_repo" { default = null } +variable "build_tags" { default = null } +variable "build_ui" { default = null } +variable "bundle_path" { default = null } +variable "goarch" { default = null } +variable "goos" { default = null } +variable "revision" { default = null } + +locals { + // File name prefixes for the various distributions and editions + artifact_prefix = { + ubuntu = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + "oss" = "vault_" + }, + rhel = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + "oss" = "vault-" + } + } + + // Format the version and edition to use in the artifact name + artifact_version = { + "ce" = "${var.product_version}" + "ent" = "${var.product_version}+ent" + "ent.hsm" = 
"${var.product_version}+ent" + "ent.hsm.fips1402" = "${var.product_version}+ent" + "oss" = "${var.product_version}" + } + + // File name extensions for the various architectures and distributions + artifact_extension = { + amd64 = { + ubuntu = "-1_amd64.deb" + rhel = "-1.x86_64.rpm" + } + arm64 = { + ubuntu = "-1_arm64.deb" + rhel = "-1.aarch64.rpm" + } + } + + // Use the above variables to construct the artifact name to look up in Artifactory. + // Will look something like: + // vault_1.12.2-1_arm64.deb + // vault-enterprise_1.12.2+ent-1_amd64.deb + // vault-enterprise-hsm-1.12.2+ent-1.x86_64.rpm + artifact_name = "${local.artifact_prefix[var.distro][var.edition]}${local.artifact_version[var.edition]}${local.artifact_extension[var.arch][var.distro]}" + + // The path within the Artifactory repo that corresponds to the appropriate architecture + artifactory_repo_path_dir = { + "amd64" = "x86_64" + "arm64" = "aarch64" + } +} + +data "enos_artifactory_item" "vault_package" { + username = var.artifactory_username + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.distro == "rhel" ? "hashicorp-rpm-release-local*" : "hashicorp-apt-release-local*" + path = var.distro == "rhel" ? "RHEL/${var.distro_version}/${local.artifactory_repo_path_dir[var.arch]}/stable" : "pool/${var.arch}/main" +} + +output "results" { + value = data.enos_artifactory_item.vault_package.results +} + +output "url" { + value = data.enos_artifactory_item.vault_package.results[0].url + description = "The artifactory download url for the artifact" +} + +output "sha256" { + value = data.enos_artifactory_item.vault_package.results[0].sha256 + description = "The sha256 checksum for the artifact" +} + +output "size" { + value = data.enos_artifactory_item.vault_package.results[0].size + description = "The size in bytes of the artifact" +} + +output "name" { + value = data.enos_artifactory_item.vault_package.results[0].name + description = "The name of the artifact" +} + +output "release" { + value = { + url = data.enos_artifactory_item.vault_package.results[0].url + sha256 = data.enos_artifactory_item.vault_package.results[0].sha256 + username = var.artifactory_username + token = var.artifactory_token + } +} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf index cffa44b17a00..d113c9cbe05e 100644 --- a/enos/modules/build_crt/main.tf +++ b/enos/modules/build_crt/main.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + # Shim module since CRT provided things will use the crt_bundle_path variable variable "bundle_path" { default = "/tmp/vault.zip" @@ -23,27 +26,12 @@ variable "artifactory_host" { default = null } variable "artifactory_repo" { default = null } variable "artifactory_username" { default = null } variable "artifactory_token" { default = null } -variable "arch" { - default = null -} -variable "artifact_path" { - default = null -} -variable "artifact_type" { - default = null -} -variable "distro" { - default = null -} -variable "edition" { - default = null -} -variable "instance_type" { - default = null -} -variable "revision" { - default = null -} -variable "product_version" { - default = null -} +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "build_ui" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf index 7688c843b08e..1ad1338bff91 100644 --- a/enos/modules/build_local/main.tf +++ b/enos/modules/build_local/main.tf @@ -1,14 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } -variable "bundle_path" { - type = string - default = "/tmp/vault.zip" +variable "artifact_path" { + description = "Where to create the zip bundle of the Vault build" } variable "build_tags" { @@ -16,6 +18,12 @@ variable "build_tags" { description = "The build tags to pass to the Go compiler" } +variable "build_ui" { + type = bool + description = "Whether or not we should build the UI when creating the local build" + default = true +} + variable "goarch" { type = string description = "The Go architecture target" @@ -32,38 +40,30 @@ variable "artifactory_host" { default = null } variable "artifactory_repo" { default = null } variable "artifactory_username" { default = null } variable "artifactory_token" { default = null } -variable "arch" { - default = null -} -variable "artifact_path" { - default = null -} -variable "artifact_type" { - default = null -} -variable "distro" { - default = null -} -variable "edition" { - default = null -} -variable "instance_type" { - default = null -} -variable "revision" { - default = null -} -variable "product_version" { - default = null +variable "arch" { default = null } +variable "artifact_type" { default = null } +variable "distro" { default = null } +variable "distro_version" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +module "local_metadata" { + source = "../get_local_metadata" } resource "enos_local_exec" "build" { - scripts = ["${path.module}/scripts/build.sh"] + scripts = [abspath("${path.module}/scripts/build.sh")] environment = { - BUNDLE_PATH = var.bundle_path, - GO_TAGS = join(" ", var.build_tags) - GOARCH = var.goarch - GOOS = var.goos + BASE_VERSION = module.local_metadata.version_base + BIN_PATH = abspath("${path.module}/../../../dist") + BUILD_UI = tostring(var.build_ui) + BUNDLE_PATH = abspath(var.artifact_path) + GO_TAGS = join(" ", var.build_tags) + GOARCH = var.goarch + GOOS = var.goos + PRERELEASE_VERSION 
= module.local_metadata.version_pre + VERSION_METADATA = module.local_metadata.version_meta } } diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh index 385a3af55704..06fc03f39462 100755 --- a/enos/modules/build_local/scripts/build.sh +++ b/enos/modules/build_local/scripts/build.sh @@ -1,4 +1,7 @@ #!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -eux -o pipefail # Install yarn so we can build the UI @@ -8,5 +11,14 @@ export CGO_ENABLED=0 root_dir="$(git rev-parse --show-toplevel)" pushd "$root_dir" > /dev/null -make ci-build-ui ci-build ci-bundle + +if [ -n "$BUILD_UI" ] && [ "$BUILD_UI" = "true" ]; then + make ci-build-ui +fi + +make ci-build + popd > /dev/null + +echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH" +zip -r -j "$BUNDLE_PATH" "$BIN_PATH/" diff --git a/enos/modules/create_vpc/main.tf b/enos/modules/create_vpc/main.tf new file mode 100644 index 000000000000..2e7c3ebfcf24 --- /dev/null +++ b/enos/modules/create_vpc/main.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +resource "random_string" "cluster_id" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_vpc" "vpc" { + cidr_block = var.cidr + enable_dns_hostnames = true + enable_dns_support = true + + tags = merge( + var.common_tags, + { + "Name" = var.name + }, + ) +} + +resource "aws_subnet" "subnet" { + count = length(data.aws_availability_zones.available.names) + vpc_id = aws_vpc.vpc.id + cidr_block = cidrsubnet(var.cidr, 8, count.index) + availability_zone = data.aws_availability_zones.available.names[count.index] + map_public_ip_on_launch = true + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-subnet-${data.aws_availability_zones.available.names[count.index]}" + }, + ) +} + +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.vpc.id + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-igw" + }, + ) +} + +resource "aws_route" "igw" { + route_table_id = aws_vpc.vpc.default_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id +} + +resource "aws_security_group" "default" { + vpc_id = aws_vpc.vpc.id + + ingress { + description = "allow_ingress_from_all" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "allow_egress_from_all" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-default" + }, + ) +} diff --git a/enos/modules/create_vpc/outputs.tf b/enos/modules/create_vpc/outputs.tf new file mode 100644 index 000000000000..a064644d3185 --- /dev/null +++ b/enos/modules/create_vpc/outputs.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +output "id" { + description = "Created VPC ID" + value = aws_vpc.vpc.id +} + +output "cidr" { + description = "CIDR for whole VPC" + value = var.cidr +} + +output "cluster_id" { + description = "A unique string associated with the VPC" + value = random_string.cluster_id.result +} diff --git a/enos/modules/create_vpc/variables.tf b/enos/modules/create_vpc/variables.tf new file mode 100644 index 000000000000..19861362293c --- /dev/null +++ b/enos/modules/create_vpc/variables.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "name" { + type = string + default = "vault-ci" + description = "The name of the VPC" +} + +variable "cidr" { + type = string + default = "10.13.0.0/16" + description = "CIDR block for the VPC" +} + +variable "environment" { + description = "Name of the environment." + type = string + default = "vault-ci" +} + +variable "common_tags" { + description = "Tags to set for all resources" + type = map(string) + default = { "Project" : "vault-ci" } +} diff --git a/enos/modules/ec2_info/main.tf b/enos/modules/ec2_info/main.tf new file mode 100644 index 000000000000..0b78e2b370b9 --- /dev/null +++ b/enos/modules/ec2_info/main.tf @@ -0,0 +1,190 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +locals { + architectures = toset(["arm64", "x86_64"]) + canonical_owner_id = "099720109477" + rhel_owner_id = "309956199498" + ids = { + "arm64" = { + "rhel" = { + "8.8" = data.aws_ami.rhel_88["arm64"].id + "9.1" = data.aws_ami.rhel_91["arm64"].id + } + "ubuntu" = { + "18.04" = data.aws_ami.ubuntu_1804["arm64"].id + "20.04" = data.aws_ami.ubuntu_2004["arm64"].id + "22.04" = data.aws_ami.ubuntu_2204["arm64"].id + } + } + "amd64" = { + "rhel" = { + "7.9" = data.aws_ami.rhel_79.id + "8.8" = data.aws_ami.rhel_88["x86_64"].id + "9.1" = data.aws_ami.rhel_91["x86_64"].id + } + "ubuntu" = { + "18.04" = data.aws_ami.ubuntu_1804["x86_64"].id + "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id + "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id + } + } + } +} + +data "aws_ami" "ubuntu_1804" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-18.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2004" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2204" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "rhel_79" { + most_recent = true + + # Currently the latest point release-1 + filter { + name = "name" + values = ["RHEL-7.9*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } + + owners = [local.rhel_owner_id] +} + +data "aws_ami" "rhel_88" { + most_recent = true + 
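# For illustration only (hypothetical CLI invocation; assumes configured AWS credentials): you can preview the AMI this lookup resolves to with + # aws ec2 describe-images --owners 309956199498 --filters "Name=name,Values=RHEL-8.8*HVM-20*" "Name=architecture,Values=arm64" --query 'sort_by(Images, &CreationDate)[-1].ImageId' + # which mirrors most_recent = true plus the name and architecture filters below. +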
+
+data "aws_ami" "rhel_88" { + most_recent = true + for_each = local.architectures + + # Currently the latest point release - 1 + filter { + name = "name" + values = ["RHEL-8.8*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.rhel_owner_id] +} + +data "aws_ami" "rhel_91" { + most_recent = true + for_each = local.architectures + + # Currently the latest point release - 1 + filter { + name = "name" + values = ["RHEL-9.1*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.rhel_owner_id] +} + +data "aws_region" "current" {} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +output "ami_ids" { + value = local.ids +} + +output "current_region" { + value = data.aws_region.current +} + +output "availability_zones" { + value = data.aws_availability_zones.available +} diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf new file mode 100644 index 000000000000..5bc63b2f3d31 --- /dev/null +++ b/enos/modules/generate_secondary_token/main.tf @@ -0,0 +1,55 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write sys/replication/performance/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf index d0749c669a90..2b1ee6d45c1d 100644 --- a/enos/modules/get_local_metadata/main.tf +++ b/enos/modules/get_local_metadata/main.tf @@ -1,31 +1,58 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } resource "enos_local_exec" "get_build_date" { - scripts = ["${path.module}/scripts/build_date.sh"] + scripts = [abspath("${path.module}/scripts/build_date.sh")] +} + +resource "enos_local_exec" "get_revision" { + inline = ["git rev-parse HEAD"] +} + +resource "enos_local_exec" "get_version" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version"] +} + +resource "enos_local_exec" "get_version_base" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-base"] +} + +resource "enos_local_exec" "get_version_pre" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-pre"] +} + +resource "enos_local_exec" "get_version_meta" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-meta"] } output "build_date" { value = trimspace(enos_local_exec.get_build_date.stdout) } -resource "enos_local_exec" "get_version" { - scripts = ["${path.module}/scripts/version.sh"] +output "revision" { + value = trimspace(enos_local_exec.get_revision.stdout) } output "version" { value = trimspace(enos_local_exec.get_version.stdout) } -resource "enos_local_exec" "get_revision" { - inline = ["git rev-parse HEAD"] +output "version_base" { + value = trimspace(enos_local_exec.get_version_base.stdout) } -output "revision" { - value = trimspace(enos_local_exec.get_revision.stdout) +output "version_pre" { + value = trimspace(enos_local_exec.get_version_pre.stdout) +} + +output "version_meta" { + value = trimspace(enos_local_exec.get_version_meta.stdout) } diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh index 917888eb1cbf..ea63c74d8ed3 100755 --- a/enos/modules/get_local_metadata/scripts/build_date.sh +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -1,4 +1,7 @@ -#!/bin/env bash +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -eu -o pipefail pushd "$(git rev-parse --show-toplevel)" > /dev/null diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh index 6921d772ea46..6b910c404e1e 100755 --- a/enos/modules/get_local_metadata/scripts/version.sh +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -1,6 +1,97 @@ -#!/bin/env bash -set -eu -o pipefail +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 -pushd "$(git rev-parse --show-toplevel)" > /dev/null -make ci-get-version -popd > /dev/null +set -euo pipefail + +# Get the full version information +# this is only needed for local enos builds in order to get the default version from version_base.go +# this should match the default version that the binary has been built with +# CRT release builds use the new static version from ./release/VERSION +function version() { + local version + local prerelease + local metadata + + version=$(version_base) + prerelease=$(version_pre) + metadata=$(version_metadata) + + if [ -n "$metadata" ] && [ -n "$prerelease" ]; then + echo "$version-$prerelease+$metadata" + elif [ -n "$metadata" ]; then + echo "$version+$metadata" + elif [ -n "$prerelease" ]; then + echo "$version-$prerelease" + else + echo "$version" + fi +} + +# Get the base version +function version_base() { + : "${VAULT_VERSION:=""}" + + if [ -n "$VAULT_VERSION" ]; then + echo "$VAULT_VERSION" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $1 }' < "$VERSION_FILE" +} + +# Get the version pre-release +function version_pre() { + : "${VAULT_PRERELEASE:=""}" + + if [ -n "$VAULT_PRERELEASE" ]; then + echo "$VAULT_PRERELEASE" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $2 }' < "$VERSION_FILE" +} + +# Get the version metadata, which is commonly the edition +function version_metadata() { + : "${VAULT_METADATA:=""}" + + if [ -n "$VAULT_METADATA" ]; then + echo "$VAULT_METADATA" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel +} + +# Dispatch the requested version sub-command +function main() { + case $1 in + version) + version + ;; + version-base) + version_base + ;; + version-pre) + version_pre + ;; + version-meta) + version_metadata + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +}
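+
+# For illustration only (not part of the original script's behavior): assuming a
+# hypothetical version/VERSION file containing "1.15.0-beta1", the sub-commands
+# above would produce:
+#
+#   $ ./version.sh version-base                 # 1.15.0
+#   $ ./version.sh version-pre                  # beta1
+#   $ VAULT_METADATA=ent ./version.sh version   # 1.15.0-beta1+ent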
join(" ", var.packages) : "__skip" + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + } + + scripts = [abspath("${path.module}/scripts/install-packages.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/install_packages/scripts/install-packages.sh b/enos/modules/install_packages/scripts/install-packages.sh new file mode 100644 index 000000000000..29868cd33d99 --- /dev/null +++ b/enos/modules/install_packages/scripts/install-packages.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$PACKAGES" ]] && fail "PACKAGES env variable has not been set" + +install_packages() { + if [ "$PACKAGES" = "__skip" ]; then + return 0 + fi + + echo "Installing Dependencies: $PACKAGES" + if [ -f /etc/debian_version ]; then + # Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we + # see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case + # we'll just install our packages. + grep ec2 /etc/apt/sources.list || true + + cd /tmp + sudo apt update + # shellcheck disable=2068 + sudo apt install -y ${PACKAGES[@]} + else + cd /tmp + # shellcheck disable=2068 + sudo yum -y install ${PACKAGES[@]} + fi +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if install_packages; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for packages to install" diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf index 8e4ce2840636..a422be435caa 100644 --- a/enos/modules/k8s_deploy_vault/main.tf +++ b/enos/modules/k8s_deploy_vault/main.tf @@ -1,9 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_version = ">= 1.0" required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } helm = { @@ -27,6 +30,7 @@ locals { "server.limits.cpu" = "200m" "server.ha.raft.config" = file("${abspath(path.module)}/raft-config.hcl") "server.dataStorage.size" = "100m" + "server.logLevel" = var.vault_log_level } all_helm_chart_settings = var.ent_license == null ? local.helm_chart_settings : merge(local.helm_chart_settings, { "server.extraEnvironmentVars.VAULT_LICENSE" = var.ent_license diff --git a/enos/modules/k8s_deploy_vault/raft-config.hcl b/enos/modules/k8s_deploy_vault/raft-config.hcl index b624dad80fbb..423390b2d12f 100644 --- a/enos/modules/k8s_deploy_vault/raft-config.hcl +++ b/enos/modules/k8s_deploy_vault/raft-config.hcl @@ -7,14 +7,6 @@ listener "tcp" { storage "raft" { path = "/vault/data" - autopilot { - cleanup_dead_servers = "true" - last_contact_threshold = "200ms" - last_contact_failure_threshold = "10m" - max_trailing_logs = 250000 - min_quorum = 5 - server_stabilization_time = "10s" - } } service_registration "kubernetes" {} diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf index 1428191b70f4..9730f87a7807 100644 --- a/enos/modules/k8s_deploy_vault/variables.tf +++ b/enos/modules/k8s_deploy_vault/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + variable "context_name" { type = string description = "The name of the k8s context for Vault" @@ -32,3 +35,8 @@ variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" } + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string +} diff --git a/enos/modules/k8s_vault_verify_build_date/main.tf b/enos/modules/k8s_vault_verify_build_date/main.tf index 38f17fbc9e79..dadf92ee5f30 100644 --- a/enos/modules/k8s_vault_verify_build_date/main.tf +++ b/enos/modules/k8s_vault_verify_build_date/main.tf @@ -1,8 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_build_date/variables.tf b/enos/modules/k8s_vault_verify_build_date/variables.tf index 7bba75ba68d8..4e1754ebe9f1 100644 --- a/enos/modules/k8s_vault_verify_build_date/variables.tf +++ b/enos/modules/k8s_vault_verify_build_date/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf index 804b934591f1..666067366a98 100644 --- a/enos/modules/k8s_vault_verify_replication/main.tf +++ b/enos/modules/k8s_vault_verify_replication/main.tf @@ -1,8 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh index 363ce7185845..96fdf6a32031 100755 --- a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -1,4 +1,7 @@ -#!/usr/bin/env bash +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # The Vault replication smoke test, documented in # https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 @@ -6,14 +9,14 @@ set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } -# Replication STATUS endpoint should have data.mode disabled for OSS release -if [ "$VAULT_EDITION" == "oss" ]; then +# Replication STATUS endpoint should have data.mode disabled for CE release +if [ "$VAULT_EDITION" == "ce" ]; then if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" + fail "replication data mode is not disabled for CE release!" fi else if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf index 42ab38aa572b..011ae9cf2b39 100644 --- a/enos/modules/k8s_vault_verify_replication/variables.tf +++ b/enos/modules/k8s_vault_verify_replication/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf index faccb7085870..40132541658a 100644 --- a/enos/modules/k8s_vault_verify_ui/main.tf +++ b/enos/modules/k8s_vault_verify_ui/main.tf @@ -1,9 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - version = ">= 0.1.17" - source = "app.terraform.io/hashicorp-qti/enos" + version = "> 0.4.0" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh index b85d4da12473..4372a5308659 100755 --- a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -1,10 +1,13 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf index 6c06d5de792e..3f000c54f85c 100644 --- a/enos/modules/k8s_vault_verify_ui/variables.tf +++ b/enos/modules/k8s_vault_verify_ui/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf index 693abf97910f..35746350443a 100644 --- a/enos/modules/k8s_vault_verify_version/main.tf +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -1,15 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } locals { instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) - expected_version = var.vault_edition == "oss" ? var.vault_product_version : "${var.vault_product_version}-ent" + expected_version = var.vault_edition == "ce" ? 
var.vault_product_version : "${var.vault_product_version}-ent" } resource "enos_remote_exec" "release_info" { @@ -35,13 +38,13 @@ resource "enos_local_exec" "smoke-verify-version" { for_each = enos_remote_exec.release_info environment = { - VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) ACTUAL_VERSION = jsondecode(each.value.stdout).version + BUILD_DATE = var.vault_build_date + CHECK_BUILD_DATE = var.check_build_date EXPECTED_VERSION = var.vault_product_version, VAULT_EDITION = var.vault_edition, VAULT_REVISION = var.vault_product_revision, - CHECK_BUILD_DATE = var.check_build_date - BUILD_DATE = var.vault_build_date + VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) } scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh index 3d2d1fe97506..26c3c0d55d3a 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh @@ -1,4 +1,7 @@ #!/usr/bin/env sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh index d5c439a9b264..4e8fc944db97 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -1,42 +1,46 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # The Vault smoke test to verify the Vault version installed set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then expected_build_date="" else - build_date="${BUILD_DATE}" - if [[ "${build_date}" == "" ]]; then - build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) + cfg_build_date="${BUILD_DATE}" + if [[ "${cfg_build_date}" == "" ]]; then + cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) fi - expected_build_date=", built $build_date" + expected_build_date=", built $cfg_build_date" fi vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" case "${VAULT_EDITION}" in - oss) version_expected="${vault_expected_version}${expected_build_date}";; - ent) version_expected="${vault_expected_version}${expected_build_date}";; - ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; - ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ce) version_expected="${vault_expected_version}${expected_build_date}";; + ent) version_expected="${vault_expected_version}${expected_build_date}";; + ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; + ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; *) fail "(${VAULT_EDITION}) does not match any known Vault editions" esac version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then - echo "Version verification succeeded!" 
+ echo "Version verification succeeded!" else - echo "CHECK_BUILD_DATE: ${CHECK_BUILD_DATE}" - echo "BUILD_DATE: ${BUILD_DATE}" - echo "build_date: ${build_date}" - fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" + echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2 + echo "Given build date: ${BUILD_DATE}" 1>&2 + echo "Interpreted build date: ${cfg_build_date}" 1>&2 + + fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" fi diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf index ed487831a3c0..05ca66082198 100644 --- a/enos/modules/k8s_vault_verify_version/variables.tf +++ b/enos/modules/k8s_vault_verify_version/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf index 01caeaba4c15..52279718a2fb 100644 --- a/enos/modules/k8s_vault_verify_write_data/main.tf +++ b/enos/modules/k8s_vault_verify_write_data/main.tf @@ -1,8 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf index 7bba75ba68d8..4e1754ebe9f1 100644 --- a/enos/modules/k8s_vault_verify_write_data/variables.tf +++ b/enos/modules/k8s_vault_verify_write_data/variables.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "vault_instance_count" { type = number description = "How many vault instances are in the cluster" diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf index 854c52f9ea69..9f5e15c380ac 100644 --- a/enos/modules/load_docker_image/main.tf +++ b/enos/modules/load_docker_image/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf index c5da14daf770..b21bfe61da03 100644 --- a/enos/modules/local_kind_cluster/main.tf +++ b/enos/modules/local_kind_cluster/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } random = { source = "hashicorp/random" diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf index 1b645272abe7..823714f5d0b8 100644 --- a/enos/modules/read_license/main.tf +++ b/enos/modules/read_license/main.tf @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + variable "file_name" {} output "license" { diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf new file mode 100644 index 000000000000..dec96408373b --- /dev/null +++ b/enos/modules/replication_data/main.tf @@ -0,0 +1,104 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +// An arithmetic module for calculating inputs and outputs for various replication steps. + +// Get the first two followers out of the hosts set +variable "follower_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +output "follower_host_1" { + value = try(var.follower_hosts[0], null) +} + +output "follower_public_ip_1" { + value = try(var.follower_hosts[0].public_ip, null) +} + +output "follower_private_ip_1" { + value = try(var.follower_hosts[0].private_ip, null) +} + +output "follower_host_2" { + value = try(var.follower_hosts[1], null) +} + +output "follower_public_ip_2" { + value = try(var.follower_hosts[1].public_ip, null) +} + +output "follower_private_ip_2" { + value = try(var.follower_hosts[1].private_ip, null) +} + +// Calculate our remaining hosts after we've added new hosts and removed the old primary and a follower +variable "initial_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "initial_hosts_count" { + type = number + default = 0 +} + +variable "added_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "added_hosts_count" { + type = number + default = 0 +} + +variable "removed_primary_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +variable "removed_follower_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +locals { + remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0) + indices = [for idx in range(local.remaining_hosts_count) : idx] + remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) + remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) + remaining_hosts = zipmap(local.indices, local.remaining_hosts_list) +}
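+
+// A worked example of the arithmetic above, assuming 6 initial hosts, 3 added hosts,
+// and one removed primary plus one removed follower:
+//   remaining_hosts_count = max((6 + 3 - 2), 0) = 7
+//   remaining_initial     = the 4 initial hosts that weren't removed
+//   remaining_hosts       = the 3 added hosts plus those 4, re-keyed 0..6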
+
+output "remaining_initial_count" { + value = length(local.remaining_initial) +} + +output "remaining_initial_hosts" { + value = local.remaining_initial +} + +output "remaining_hosts_count" { + value = local.remaining_hosts_count +} + +output "remaining_hosts" { + value = local.remaining_hosts +} diff --git a/enos/modules/seal_awskms/main.tf b/enos/modules/seal_awskms/main.tf new file mode 100644 index 000000000000..e8a1ad39cca2 --- /dev/null +++ b/enos/modules/seal_awskms/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "cluster_meta" { + type = string + default = null +} + +variable "cluster_ssh_keypair" { + type = string + default = null +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +locals { + cluster_name = var.cluster_meta == null ? var.cluster_id : "${var.cluster_id}-${var.cluster_meta}" +} + +resource "aws_kms_key" "key" { + description = "auto-unseal-key-${local.cluster_name}" + deletion_window_in_days = 7 // 7 is the shortest allowed window + tags = var.common_tags +} + +resource "aws_kms_alias" "alias" { + name = "alias/auto-unseal-key-${local.cluster_name}" + target_key_id = aws_kms_key.key.key_id +} + +output "attributes" { + description = "Seal device specific attributes" + value = { + kms_key_id = aws_kms_key.key.arn + } +} + +// We output our resource name and a collection of those passed in to create a full list of key +// resources that might be required for instance roles that are associated with some unseal types. +output "resource_name" { + description = "The awskms key name" + value = aws_kms_key.key.arn +} + +output "resource_names" { + description = "The list of awskms key names to associate with a role" + value = compact(concat([aws_kms_key.key.arn], var.other_resources)) +} diff --git a/enos/modules/seal_pkcs11/main.tf b/enos/modules/seal_pkcs11/main.tf new file mode 100644 index 000000000000..815780893249 --- /dev/null +++ b/enos/modules/seal_pkcs11/main.tf @@ -0,0 +1,126 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +/* + +A seal module that emulates using a real PKCS#11 HSM. For this we'll use softhsm2. You'll +need softhsm2 and opensc installed to get access to the userspace tools and dynamic library that +Vault Enterprise will use. Here we'll take in the vault hosts and use one of the nodes +to generate the hsm slot and the tokens, and then we'll copy the softhsm tokens to the other nodes. + +Using softhsm2 and opensc is a bit complicated but here's a cheat sheet for getting started. + +$ brew install softhsm opensc +or +$ sudo apt install softhsm2 opensc + +Create a softhsm slot. You can use anything you want for the pin and the supervisor pin. This will +output the slot identifier, which you'll use as the `slot` parameter in the seal config. +$ softhsm2-util --init-token --free --so-pin=1234 --pin=1234 --label="seal" | grep -oE '[0-9]+$' + +You can see the slots: +$ softhsm2-util --show-slots +Or use opensc's pkcs11-tool. Make sure to use your pin for the -p flag. The module that we refer +to is the location of the shared library that we need to provide to Vault Enterprise. Depending on +your platform or installation method this could be different. +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 -IL + +Find yours: +$ find /usr/local -type f -name libsofthsm2.so -print -quit + +Your tokens will be installed in the default directories.tokendir. See man softhsm2.conf(5) for +more details. On macOS from brew this is /usr/local/var/lib/softhsm/tokens/ + +Vault Enterprise supports creating the HSM keys, but for softhsm2 that would require us to +initialize with one node before copying the contents. So instead we'll create an HSM key and HMAC +key that we'll copy everywhere. + +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_hmac --id 1 --key-type GENERIC:32 --private --sensitive +$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_aes --id 2 --key-type AES:32 --private --sensitive --usage-wrap + +Now you should be able to configure Vault Enterprise seal stanza.
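+
+As a sketch (not taken verbatim from a scenario), the resulting stanza would look roughly
+like the following, using the illustrative pin and labels from the commands above; lib and
+slot will vary by platform and by the softhsm2-util output:
+
+seal "pkcs11" {
+  lib            = "/usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so"
+  slot           = "<slot id from softhsm2-util>"
+  pin            = "1234"
+  token_label    = "seal"
+  key_label      = "hsm_aes"
+  hmac_key_label = "hsm_hmac"
+  generate_key   = "false"
+}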
+*/ + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string + description = "The VPC ID of the cluster" +} + +variable "cluster_meta" { + type = string + default = null + description = "Any metadata that needs to be passed in. If we're creating multiple softhsm tokens this value could be a prior KEYS_BASE64" +} + +variable "cluster_ssh_keypair" { + type = string + description = "The ssh keypair of the vault cluster. We need this to used the inherited provider for our target" +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +resource "random_string" "id" { + length = 8 + numeric = false + special = false + upper = false +} + +module "ec2_info" { + source = "../ec2_info" +} + +locals { + id = "${var.cluster_id}-${random_string.id.result}" +} + +module "target" { + source = "../target_ec2_instances" + ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = local.id + common_tags = var.common_tags + instance_count = 1 + instance_types = { + amd64 = "t3a.small" + arm64 = "t4g.small" + } + // Make sure it's not too long as we use this for aws resources that size maximums that are easy + // to hit. + project_name = substr("vault-ci-softhsm-${local.id}", 0, 32) + ssh_keypair = var.cluster_ssh_keypair + vpc_id = var.cluster_id +} + +module "create_vault_keys" { + source = "../softhsm_create_vault_keys" + + cluster_id = var.cluster_id + hosts = module.target.hosts +} + +// Our attributes contain all required keys for the seal stanza and our base64 encoded softhsm +// token and keys. +output "attributes" { + description = "Seal device specific attributes" + value = module.create_vault_keys.all_attributes +} + +// Shim for chaining seals that require IAM roles +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } diff --git a/enos/modules/seal_shamir/main.tf b/enos/modules/seal_shamir/main.tf new file mode 100644 index 000000000000..55e26d1547b6 --- /dev/null +++ b/enos/modules/seal_shamir/main.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# A shim seal module for shamir seals. For Shamir seals the enos_vault_init resource will take care +# of creating our seal. + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { default = null } +variable "cluster_meta" { default = null } +variable "cluster_ssh_keypair" { default = null } +variable "common_tags" { default = null } +variable "image_id" { default = null } +variable "other_resources" { + type = list(string) + default = [] +} + +output "resource_name" { value = null } +output "resource_names" { value = var.other_resources } +output "attributes" { value = null } diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf new file mode 100644 index 000000000000..27f23c74c9e2 --- /dev/null +++ b/enos/modules/shutdown_multiple_nodes/main.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "old_vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances to be shut down" +} + +locals { + public_ips = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.old_vault_instances)[idx].public_ip + private_ip = values(var.old_vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = local.public_ips + inline = ["sudo shutdown -H --no-wall; exit 0"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf new file mode 100644 index 000000000000..b31762f37f27 --- /dev/null +++ b/enos/modules/shutdown_node/main.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "node_public_ip" { + type = string + description = "Node Public IP address" +} + +resource "enos_remote_exec" "shutdown_node" { + inline = ["sudo shutdown -H --no-wall; exit 0"] + + transport = { + ssh = { + host = var.node_public_ip + } + } +} diff --git a/enos/modules/softhsm_create_vault_keys/main.tf b/enos/modules/softhsm_create_vault_keys/main.tf new file mode 100644 index 000000000000..d503e0ce65e8 --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/main.tf @@ -0,0 +1,131 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "cluster_id" { + type = string +} + +variable "hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The hosts that will have access to the softhsm" +} + +locals { + pin = resource.random_string.pin.result + aes_label = "vault_hsm_aes_${local.pin}" + hmac_label = "vault_hsm_hmac_${local.pin}" + target = tomap({ "1" = var.hosts[0] }) + token = "${var.cluster_id}_${local.pin}" +} + +resource "random_string" "pin" { + length = 5 + lower = true + upper = false + numeric = true + special = false +} + +module "install" { + source = "../softhsm_install" + + hosts = local.target + include_tools = true # make sure opensc is also installed as we need it to create keys +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = local.target +} + +// Create our keys. Stdout contains the required values for the pkcs11 seal stanza +// as JSON; a sketch of the shape follows this resource. https://developer.hashicorp.com/vault/docs/configuration/seal/pkcs11#pkcs11-parameters +resource "enos_remote_exec" "create_keys" { + depends_on = [ + module.install, + module.initialize, + ] + + environment = { + AES_LABEL = local.aes_label + HMAC_LABEL = local.hmac_label + PIN = resource.random_string.pin.result + TOKEN_DIR = module.initialize.token_dir + TOKEN_LABEL = local.token + SO_PIN = resource.random_string.pin.result + } + + scripts = [abspath("${path.module}/scripts/create-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +}
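+
+// For reference, the JSON that create-keys.sh prints (and that we jsondecode into
+// seal_attributes below) is expected to carry the pkcs11 seal stanza parameters,
+// roughly like this (illustrative values, not captured output):
+//   { "lib": "/usr/lib/softhsm/libsofthsm2.so", "slot": "<slot id>", "pin": "<pin>",
+//     "token_label": "<token>", "key_label": "<aes label>",
+//     "hmac_key_label": "<hmac label>", "generate_key": "false" }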
+
+// Get our softhsm token. Stdout is a base64 encoded gzipped tarball of the softhsm token dir. This +// allows us to pass around binary data inside of Terraform's type system. +resource "enos_remote_exec" "get_keys" { + depends_on = [enos_remote_exec.create_keys] + + environment = { + TOKEN_DIR = module.initialize.token_dir + } + + scripts = [abspath("${path.module}/scripts/get-keys.sh")] + + transport = { + ssh = { + host = var.hosts[0].public_ip + } + } +} + +locals { + seal_attributes = jsondecode(resource.enos_remote_exec.create_keys.stdout) +} + +output "seal_attributes" { + description = "Seal device specific attributes. Contains all required keys for the seal stanza" + value = local.seal_attributes +} + +output "token_base64" { + description = "The softhsm token and keys gzipped tarball in base64" + value = enos_remote_exec.get_keys.stdout +} + +output "token_dir" { + description = "The softhsm directory where tokens and keys are stored" + value = module.initialize.token_dir +} + +output "token_label" { + description = "The HSM slot token label" + value = local.token +} + +output "all_attributes" { + description = "Seal device specific attributes" + value = merge( + local.seal_attributes, + { + token_base64 = enos_remote_exec.get_keys.stdout, + token_dir = module.initialize.token_dir + }, + ) +} diff --git a/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh new file mode 100644 index 000000000000..533a2de4efce --- /dev/null +++ b/enos/modules/softhsm_create_vault_keys/scripts/create-keys.sh @@ -0,0 +1,82 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$AES_LABEL" ]] && fail "AES_LABEL env variable has not been set" +[[ -z "$HMAC_LABEL" ]] && fail "HMAC_LABEL env variable has not been set" +[[ -z "$PIN" ]] && fail "PIN env variable has not been set" +[[ -z "$SO_PIN" ]] && fail "SO_PIN env variable has not been set" +[[ -z "$TOKEN_LABEL" ]] && fail "TOKEN_LABEL env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +if ! type softhsm2-util &> /dev/null; then + fail "unable to locate softhsm2-util in PATH. Have you installed softhsm?" +fi + +if ! type pkcs11-tool &> /dev/null; then + fail "unable to locate pkcs11-tool in PATH. Have you installed opensc?" +fi + +# Create an HSM slot and return the slot number in decimal value. +create_slot() { + sudo softhsm2-util --init-token --free --so-pin="$SO_PIN" --pin="$PIN" --label="$TOKEN_LABEL" | grep -oE '[0-9]+$' +} + +# Find the location of our softhsm shared object. +find_softhsm_so() { + sudo find /usr -type f -name libsofthsm2.so -print -quit +} + +# Create a key in the slot. Args: module, key label, id number, key type +keygen() { + sudo pkcs11-tool --keygen --usage-sign --private --sensitive --usage-wrap \ + --module "$1" \ + -p "$PIN" \ + --token-label "$TOKEN_LABEL" \ + --label "$2" \ + --id "$3" \ + --key-type "$4" +} + +# Create our softhsm slot and keys +main() { + local slot + if ! slot=$(create_slot); then + fail "failed to create softhsm token slot" + fi + + local so + if ! so=$(find_softhsm_so); then + fail "unable to locate libsofthsm2.so shared object" + fi + + if ! keygen "$so" "$AES_LABEL" 1 'AES:32' 1>&2; then + fail "failed to create AES key" + fi + + if !
keygen "$so" "$HMAC_LABEL" 2 'GENERIC:32' 1>&2; then + fail "failed to create HMAC key" + fi + + # Return our seal configuration attributes as JSON + cat <&2 + exit 1 +} + +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Tar up our token. We have to do this as a superuser because softhsm is owned by root. +sudo tar -czf token.tgz -C "$TOKEN_DIR" . +me="$(whoami)" +sudo chown "$me:$me" token.tgz + +# Write the value STDOUT as base64 so we can handle binary data as a string +base64 -i token.tgz diff --git a/enos/modules/softhsm_distribute_vault_keys/main.tf b/enos/modules/softhsm_distribute_vault_keys/main.tf new file mode 100644 index 000000000000..394f13faf1c6 --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/main.tf @@ -0,0 +1,108 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The hosts for whom we'll distribute the softhsm tokens and keys" +} + +variable "token_base64" { + type = string + description = "The base64 encoded gzipped tarball of the softhsm token" +} + +locals { + // The user/group name for softhsm + softhsm_groups = { + "rhel" = "ods" + "ubuntu" = "softhsm" + } + + // Determine if we should skip distribution. If we haven't been passed in a base64 token tarball + // we should short circuit the rest of the module. + skip = var.token_base64 == null || var.token_base64 == "" ? true : false +} + +module "install" { + // TODO: Should packages take a string instead of array so we can plan with unknown values that could change? + source = "../softhsm_install" + + hosts = var.hosts + include_tools = false # we don't need opensc on machines that did not create the HSM. +} + +module "initialize" { + source = "../softhsm_init" + depends_on = [module.install] + + hosts = var.hosts + skip = local.skip +} + +# In order for the vault service to access our keys we need to deal with ownership of files. Make +# sure we have a vault user on the machine if it doesn't already exist. Our distribution script +# below will handle adding vault to the "softhsm" group and setting ownership of the tokens. +resource "enos_user" "vault" { + for_each = var.hosts + + name = "vault" + home_dir = "/etc/vault.d" + shell = "/bin/false" + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Get the host information so we can ensure that the correct user/group is used for softhsm. +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +// Distribute our softhsm token and keys to the given hosts. 
+resource "enos_remote_exec" "distribute_token" { + for_each = var.hosts + depends_on = [ + module.initialize, + enos_user.vault, + enos_host_info.hosts, + ] + + environment = { + TOKEN_BASE64 = var.token_base64 + TOKEN_DIR = module.initialize.token_dir + SOFTHSM_GROUP = local.softhsm_groups[enos_host_info.hosts[each.key].distro] + } + + scripts = [abspath("${path.module}/scripts/distribute-token.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "lib" { + value = module.install.lib +} diff --git a/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh new file mode 100644 index 000000000000..95f896c756d1 --- /dev/null +++ b/enos/modules/softhsm_distribute_vault_keys/scripts/distribute-token.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -ex + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# If we're not given keys we'll short circuit. This should only happen if we're skipping distribution +# because we haven't created a token or keys. +if [ -z "$TOKEN_BASE64" ]; then + echo "TOKEN_BASE64 environment variable was unset. Assuming we don't need to distribute our token" 1>&2 + exit 0 +fi + +[[ -z "$SOFTHSM_GROUP" ]] && fail "SOFTHSM_GROUP env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" + +# Convert our base64 encoded gzipped tarball of the softhsm token back into a tarball. +base64 --decode - > token.tgz <<< "$TOKEN_BASE64" + +# Expand it. We assume it was written with the correct directory metadata. Do this as a superuser +# because the token directory should be owned by root. +sudo tar -xvf token.tgz -C "$TOKEN_DIR" + +# Make sure the vault user is in the softhsm group to get access to the tokens. +sudo usermod -aG "$SOFTHSM_GROUP" vault +sudo chown -R "vault:$SOFTHSM_GROUP" "$TOKEN_DIR" diff --git a/enos/modules/softhsm_init/main.tf b/enos/modules/softhsm_init/main.tf new file mode 100644 index 000000000000..3d31803e52ad --- /dev/null +++ b/enos/modules/softhsm_init/main.tf @@ -0,0 +1,81 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.9" + } + } +} + +variable "hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The hosts for whom default softhsm configuration will be applied" +} + +variable "skip" { + type = bool + default = false + description = "Whether or not to skip initializing softhsm" +} + +locals { + // The location on disk to write the softhsm tokens to + token_dir = "/var/lib/softhsm/tokens" + + // Where the default configuration is + config_paths = { + "rhel" = "/etc/softhsm2.conf" + "ubuntu" = "/etc/softhsm/softhsm2.conf" + } + + host_key = element(keys(enos_host_info.hosts), 0) + config_path = local.config_paths[enos_host_info.hosts[local.host_key].distro] +} + +resource "enos_host_info" "hosts" { + for_each = var.hosts + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "init_softhsm" { + for_each = var.hosts + depends_on = [enos_host_info.hosts] + + environment = { + CONFIG_PATH = local.config_paths[enos_host_info.hosts[each.key].distro] + TOKEN_DIR = local.token_dir + SKIP = var.skip ? 
"true" : "false" + } + + scripts = [abspath("${path.module}/scripts/init-softhsm.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "config_path" { + // Technically this is actually just the first config path of our hosts. + value = local.config_path +} + +output "token_dir" { + value = local.token_dir +} + +output "skipped" { + value = var.skip +} diff --git a/enos/modules/softhsm_init/scripts/init-softhsm.sh b/enos/modules/softhsm_init/scripts/init-softhsm.sh new file mode 100644 index 000000000000..82e620794d7a --- /dev/null +++ b/enos/modules/softhsm_init/scripts/init-softhsm.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$CONFIG_PATH" ]] && fail "CONFIG_PATH env variable has not been set" +[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set" +[[ -z "$SKIP" ]] && fail "SKIP env variable has not been set" + +if [ "$SKIP" == "true" ]; then + exit 0 +fi + +cat <&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if so=$(sudo find /usr -type f -name libsofthsm2.so -print -quit); then + echo "$so" + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out trying to locate libsofthsm2.so shared object" diff --git a/enos/modules/start_vault/main.tf b/enos/modules/start_vault/main.tf new file mode 100644 index 000000000000..b0286d24f89c --- /dev/null +++ b/enos/modules/start_vault/main.tf @@ -0,0 +1,239 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.10" + } + } +} + +locals { + bin_path = "${var.install_dir}/vault" + // In order to get Terraform to plan we have to use collections with keys that are known at plan + // time. Here we're creating locals that keep track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.target_hosts)) : tostring(idx)] + leader = toset(slice(local.instances, 0, 1)) + // Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting + // vault. + token_base64 = try(lookup(var.seal_attributes, "token_base64", ""), "") + token_base64_secondary = try(lookup(var.seal_attributes_secondary, "token_base64", ""), "") + // This module currently supports up to two defined seals. Most of our locals logic here is for + // creating the correct seal configuration. + seals = { + primary = local.seal_primary + secondary = local.seal_secondary + } + seals_primary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, var.seal_attributes + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias + priority = var.seal_priority + }, + // Strip out attributes that aren't supposed to be in seal stanza like our base64 encoded + // softhsm blob and the token directory. We'll also inject the shared object library + // location that we detect on the target machines. 
This allows us to create the token and + // keys on machines that have different shared object locations. + merge( + try({ for key, val in var.seal_attributes : key => val if key != "token_base64" && key != "token_dir" }, {}), + try({ lib = module.maybe_configure_hsm.lib }, {}) + ), + ) + } + shamir = { + type = "shamir" + attributes = null + } + } + seal_primary = local.seals_primary[var.seal_type] + seals_secondary = { + awskms = { + type = "awskms" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, var.seal_attributes_secondary + ) + } + pkcs11 = { + type = "pkcs11" + attributes = merge( + { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + }, + merge( + try({ for key, val in var.seal_attributes_secondary : key => val if key != "token_base64" && key != "token_dir" }, {}), + try({ lib = module.maybe_configure_hsm_secondary.lib }, {}) + ), + ) + } + none = { + type = "none" + attributes = null + } + } + seal_secondary = local.seals_secondary[var.seal_type_secondary] + storage_config = [for idx, host in var.target_hosts : (var.storage_backend == "raft" ? + merge( + { + node_id = "${var.storage_node_prefix}_${idx}" + }, + var.storage_backend_attrs + ) : + { + address = "127.0.0.1:8500" + path = "vault" + }) + ] +} + +# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal +# types, contains sub-modules that are only used for HSM. Well, each of those seal devices has +# different requirements and as such we have some seal specific setup to do before starting Vault. +# +# A Shamir seal key cannot exist until Vault has already started, so this module's responsibility for +# shamir seals is ensuring that the seal type is passed to the enos_vault_start resource. That's it. +# +# Auto-unseal with a KMS requires that we configure the enos_vault_start resource with the correct +# seal type and the attributes necessary to know which KMS key to use. Vault should automatically +# unseal if we've given it the correct configuration. As long as Vault is able to access the key +# in the KMS it should be able to start. That's normally done via roles associated to the target +# machines, which is outside the scope of this module. +# +# Auto-unseal with an HSM and PKCS#11 is more complicated because a shared object library, which is +# how we interface with the HSM, must be present on each node in order to start Vault. In the real +# world this means an actual HSM in the same rack or data center as every node in the Vault cluster, +# but in our case we're creating ephemeral infrastructure for these test scenarios and don't have a +# real HSM available. We could use CloudHSM or the like, but at the time of writing CloudHSM +# provisioning takes anywhere from 30 to 60 minutes and costs upwards of $2 an hour. That's +# far too long and expensive for scenarios we'll run fairly frequently. Instead, we test using a +# software HSM. Using a software HSM solves the cost and speed problems but creates a new set of +# problems. We need to ensure every node in the cluster has access to the same "HSM" and with +# softhsm that means the same software, configuration, tokens and keys. Our `seal_pkcs11` module +# takes care of creating the token and keys, but that's the end of the road for that module. It's +# our job to ensure that when we're starting Vault with a software HSM, the correct software, +# configuration, and data are available on the nodes. That's where the following two +# modules come in. They handle installing the required software, configuring it, and distributing +# the key data that was passed in via seal attributes.
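+#
+# For a concrete sense of the shape, with an awskms primary and no secondary seal the
+# seals value handed to enos_vault_start works out to roughly the following (a sketch
+# built from the locals above; the ARN is illustrative):
+#
+#   seals = {
+#     primary   = { type = "awskms", attributes = { name = "primary", priority = "1", kms_key_id = "arn:aws:kms:..." } }
+#     secondary = { type = "none", attributes = null }
+#   }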
That's where the following two +# modules come in. They handle installing the required software, configuring it, and distributing +# the key data that was passed in via seal attributes. +module "maybe_configure_hsm" { + source = "../softhsm_distribute_vault_keys" + + hosts = var.target_hosts + token_base64 = local.token_base64 +} + +module "maybe_configure_hsm_secondary" { + source = "../softhsm_distribute_vault_keys" + depends_on = [module.maybe_configure_hsm] + + hosts = var.target_hosts + token_base64 = local.token_base64_secondary +} + +resource "enos_vault_start" "leader" { + for_each = local.leader + depends_on = [ + module.maybe_configure_hsm_secondary, + ] + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = var.environment + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = ({ for key, value in local.storage_config[each.key] : key => value }) + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_vault_start" "followers" { + depends_on = [ + enos_vault_start.leader, + ] + for_each = local.followers + + bin_path = local.bin_path + config_dir = var.config_dir + config_mode = var.config_mode + environment = var.environment + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = { for key, value in local.storage_config[each.key] : key => value } + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +output "token_base64" { + value = local.token_base64 +} + +output "token_base64_secondary" { + value = local.token_base64_secondary +} diff --git a/enos/modules/start_vault/outputs.tf b/enos/modules/start_vault/outputs.tf new file mode 100644 index 000000000000..b3107bc9d407 --- /dev/null +++ b/enos/modules/start_vault/outputs.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} + +output "followers" { + description = "The follower enos_vault_start resources" + value = enos_vault_start.followers +} + +output "leader" { + description = "The leader enos_vault_start resource" + value = enos_vault_start.leader +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "target_hosts" { + description = "The vault cluster instances that were created" + + value = var.target_hosts +} diff --git a/enos/modules/start_vault/variables.tf b/enos/modules/start_vault/variables.tf new file mode 100644 index 000000000000..bcfcbd85ae51 --- /dev/null +++ b/enos/modules/start_vault/variables.tf @@ -0,0 +1,152 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "cluster_name" { + type = string + description = "The Vault cluster name" +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." + } +} + +variable "environment" { + description = "Optional Vault configuration environment variables to set when starting Vault" + type = map(string) + default = null +} + +variable "install_dir" { + type = string + description = "The directory where the vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "log_level" { + type = string + description = "The vault service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. Disable this to use the configuration in RPM and Debian packages" + default = true +} + +variable "seal_alias" { + type = string + description = "The primary seal alias name" + default = "primary" +} + +variable "seal_alias_secondary" { + type = string + description = "The secondary seal alias name" + default = "secondary" +} + +variable "seal_attributes" { + description = "The primary auto-unseal attributes" + default = null +} + +variable "seal_attributes_secondary" { + description = "The secondary auto-unseal attributes" + default = null +} + +variable "seal_priority" { + type = string + description = "The primary seal priority" + default = "1" +} + +variable "seal_priority_secondary" { + type = string + description = "The secondary seal priority" + default = "2" +} + +variable "seal_type" { + type = string + description = "The method by which to unseal the Vault cluster" + default = "awskms" + + validation { + condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) + error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." + } +} + +variable "seal_type_secondary" { + type = string + description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15" + default = "none" + + validation { + condition = contains(["awskms", "pkcs11", "none"], var.seal_type_secondary) + error_message = "The seal_type_secondary must be 'awskms', 'pkcs11', or 'none'. No other secondary seal types are supported." + } +} + +variable "service_username" { + type = string + description = "The host username to own the vault service" + default = "vault" +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either 'raft' or 'consul'. No other storage backends are supported." + } +} + +variable "storage_backend_attrs" { + type = map(any) + description = "An optional set of key value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} + +variable "target_hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +}
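+
+# As a point of orientation, a rough sketch of how a scenario might wire these
+# variables up (hypothetical values and module addresses; an illustration, not
+# prescribed usage):
+#
+# module "start_vault" {
+#   source       = "../start_vault"
+#   cluster_name = "vault-ci-cluster"
+#   license      = var.vault_license
+#   seal_type    = "awskms"
+#   target_hosts = module.vault_cluster_targets.hosts
+# }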
diff --git a/enos/modules/stop_vault/main.tf b/enos/modules/stop_vault/main.tf new file mode 100644 index 000000000000..05582a2ab08b --- /dev/null +++ b/enos/modules/stop_vault/main.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +variable "service_name" { + type = string + description = "The Vault systemd service name" + default = "vault" +} + +variable "target_hosts" { + description = "The target machines host addresses to use for the Vault cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = var.target_hosts + inline = ["sudo systemctl stop ${var.service_name}.service; sleep 5"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf new file mode 100644 index 000000000000..8375d33dae8e --- /dev/null +++ b/enos/modules/target_ec2_fleet/main.tf @@ -0,0 +1,338 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:CreateFleet only allows up to 4 InstanceRequirements overrides so we can only ever request +// a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. 
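+// For contrast, bidding with weighted instance types (the alternative mentioned above)
+// would enumerate concrete types and weights in the launch_template_config overrides
+// instead of relying on the launch template's instance_requirements. A commented-out
+// sketch, with hypothetical instance types and weights:
+//
+// dynamic "override" {
+//   for_each = { "t3.medium" = 1, "m5.large" = 2 }
+//
+//   content {
+//     instance_type     = override.key
+//     weighted_capacity = override.value
+//   }
+// }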
+resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + spot_allocation_strategy = "lowestPrice" + on_demand_allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + key_name 
= var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-8 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we would pay on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than, say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down, we ought to +# stick with that strategy.
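+# To make the bid arithmetic concrete (an illustrative note based on the defaults in
+# variables.tf, not additional configuration): max_price defaults to "0.0416", the
+# t3.medium on-demand hourly rate at the time of writing, so with the default
+# instance_count of 3 the max_total_price below works out to 0.0416 * 3 = 0.1248 per
+# hour for the whole fleet.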
resource "aws_ec2_fleet" "targets" { + replace_unhealthy_instances = false + terminate_instances = true // terminate instances when we "delete" the fleet + terminate_instances_with_expiration = false + tags = merge( + var.common_tags, + local.fleet_tags, + ) + type = "instant" // make a synchronous request for the entire fleet + + launch_template_config { + launch_template_specification { + launch_template_id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + dynamic "override" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = override.value + } + } + } + + on_demand_options { + allocation_strategy = local.on_demand_allocation_strategy + max_total_price = (var.max_price * var.instance_count) + min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null + // One of these has to be set to enforce our on-demand target capacity minimum + single_availability_zone = false + single_instance_type = true + } + + spot_options { + allocation_strategy = local.spot_allocation_strategy + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to a non-zero value to avoid rebuilding the fleet on a re-run. For any other + // strategy, leave it unset (null), which also avoids rebuilding the fleet on a re-run. + instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null + } + + // Try to provision only spot instances and fall back to on-demand. + target_capacity_specification { + default_target_capacity_type = var.capacity_type + spot_target_capacity = var.capacity_type == "spot" ? var.instance_count : 0 + on_demand_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : 0 + target_capacity_unit_type = "units" // units == instance count + total_target_capacity = var.instance_count + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_ec2_fleet.targets, + ] + for_each = local.instances + + instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key] +} diff --git a/enos/modules/target_ec2_fleet/outputs.tf b/enos/modules/target_ec2_fleet/outputs.tf new file mode 100644 index 000000000000..1672a24179a0 --- /dev/null +++ b/enos/modules/target_ec2_fleet/outputs.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_fleet/variables.tf b/enos/modules/target_ec2_fleet/variables.tf new file mode 100644 index 000000000000..606cf5c29849 --- /dev/null +++ b/enos/modules/target_ec2_fleet/variables.tf @@ -0,0 +1,101 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "vault-ci" + } +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPUs for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPUs for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "capacity_type" { + description = "What capacity type to use for EC2 instances" + type = string + default = "on-demand" + + validation { + condition = contains(["on-demand", "spot"], var.capacity_type) + error_message = "The capacity_type must be either 'on-demand' or 'spot'." + } +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf new file mode 100644 index 000000000000..ddce7ffa7418 --- /dev/null +++ b/enos/modules/target_ec2_instances/main.tf @@ -0,0 +1,259 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_ami" "ami" { + filter { + name = "image-id" + values = [var.ami_id] + } +} + +data "aws_ec2_instance_type_offerings" "instance" { + filter { + name = "instance-type" + values = [local.instance_type] + } + + location_type = "availability-zone" +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = data.aws_ec2_instance_type_offerings.instance.locations + } +} + +data "aws_subnets" "vpc" { + filter { + name = "availability-zone" + values = data.aws_availability_zones.available.names + } + + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_instance_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +locals { + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + instance_type = local.instance_types[data.aws_ami.ami.architecture] + instance_types = { + "arm64" = var.instance_types["arm64"] + "x86_64" = var.instance_types["amd64"] + } + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" +} + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_iam_role" "target_instance_role" { + name = "${local.name_prefix}-instance-role" + assume_role_policy = data.aws_iam_policy_document.target_instance_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-instance-profile" + role = aws_iam_role.target_instance_role.name +} + +resource "aws_iam_role_policy" "target" { 
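+  # Attaches the policy document above to the instance role: EC2 instance discovery and
+  # Secrets Manager access for test orchestration, plus KMS operations scoped to each
+  # seal key so awskms-sealed clusters can auto-unseal.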
+ name = "${local.name_prefix}-role-policy" + role = aws_iam_role.target_instance_role.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-sg" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_instance" "targets" { + for_each = local.instances + + ami = var.ami_id + iam_instance_profile = aws_iam_instance_profile.target.name + instance_type = local.instance_type + key_name = var.ssh_keypair + subnet_id = data.aws_subnets.vpc.ids[tonumber(each.key) % length(data.aws_subnets.vpc.ids)] + vpc_security_group_ids = [aws_security_group.target.id] + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-${var.cluster_tag_key}-instance-target" + "${var.cluster_tag_key}" = local.cluster_name + }, + ) +} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf new file mode 100644 index 000000000000..b2bc75ce6054 --- /dev/null +++ b/enos/modules/target_ec2_instances/outputs.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 instance target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = aws_instance.targets[idx].public_ip + private_ip = aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf new file mode 100644 index 000000000000..dc4bfc6c2731 --- /dev/null +++ b/enos/modules/target_ec2_instances/variables.tf @@ -0,0 +1,70 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { "Project" : "vault-ci" } +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "instance_types" { + description = "The instance types to use depending on architecture" + type = object({ + amd64 = string + arm64 = string + }) + default = { + amd64 = "t3a.medium" + arm64 = "t4g.medium" + } +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = [] +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_shim/main.tf b/enos/modules/target_ec2_shim/main.tf new file mode 100644 index 000000000000..429c49ab028f --- /dev/null +++ b/enos/modules/target_ec2_shim/main.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +variable "ami_id" { default = null } +variable "cluster_name" { default = null } +variable "cluster_tag_key" { default = null } +variable "common_tags" { default = null } +variable "instance_count" { default = 3 } +variable "instance_cpu_max" { default = null } +variable "instance_cpu_min" { default = null } +variable "instance_mem_max" { default = null } +variable "instance_mem_min" { default = null } +variable "instance_types" { default = null } +variable "max_price" { default = null } +variable "project_name" { default = null } +variable "seal_key_names" { default = null } +variable "ssh_allow_ips" { default = null } +variable "ssh_keypair" { default = null } +variable "vpc_id" { default = null } + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +output "cluster_name" { + value = coalesce(var.cluster_name, random_string.cluster_name.result) +} + +output "hosts" { + value = { for idx in range(var.instance_count) : idx => { + public_ip = "null-public-${idx}" + private_ip = "null-private-${idx}" + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf new file mode 100644 index 000000000000..37f8e9ffb408 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/main.tf @@ -0,0 +1,456 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "fleet" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + ] + } + + statement { + effect = "Deny" + + resources = [ + "arn:aws:ec2:*:*:instance/*", + ] + + actions = [ + "ec2:RunInstances", + ] + + condition { + test = "StringNotEquals" + variable = "ec2:InstanceMarketType" + values = ["spot"] + } + } + + statement { + resources = ["*"] + + actions = [ + "iam:PassRole", + ] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = [ + "ec2.amazonaws.com", + ] + } + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", + ] + + actions = [ + 
"elasticloadbalancing:RegisterInstancesWithLoadBalancer", + ] + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:*/*" + ] + + actions = [ + "elasticloadbalancing:RegisterTargets" + ] + } +} + +data "aws_iam_policy_document" "fleet_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["spotfleet.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:RequestSpotFleet only allows up to 4 InstanceRequirements overrides so we can only ever +// request a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. +resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_iam_role" "fleet" { + name = "${local.name_prefix}-fleet-role" + assume_role_policy = data.aws_iam_policy_document.fleet_role.json +} + +resource "aws_iam_role_policy" "fleet" { + name = "${local.name_prefix}-fleet-policy" + role = aws_iam_role.fleet.id + policy = data.aws_iam_policy_document.fleet.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", 
data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + instance_type = null + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-8 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we would pay on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than, say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down, we ought to +# stick with that strategy. +resource "aws_spot_fleet_request" "targets" { + allocation_strategy = local.allocation_strategy + fleet_type = "request" + iam_fleet_role = aws_iam_role.fleet.arn + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to 1 to avoid rebuilding the fleet on a re-run. For any other strategy + // set it to zero to avoid rebuilding the fleet on a re-run. 
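+  // As an aside (a sketch, not a change in behavior): switching strategies only requires
+  // updating local.allocation_strategy, e.g. to "capacityOptimized"; the ternary below
+  // then evaluates to zero, as the comment above prescribes.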
+ instance_pools_to_use_count = local.allocation_strategy == "lowestPrice" ? 1 : 0 + spot_price = var.max_price + target_capacity = var.instance_count + terminate_instances_on_delete = true + wait_for_fulfillment = true + + launch_template_config { + launch_template_specification { + id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + // We cannot currently use more than one subnet[0]. Until the bug has been resolved + // we'll choose a random subnet. It would be ideal to bid across all subnets to get + // the absolute cheapest available at the time of bidding. + // + // [0] https://github.com/hashicorp/terraform-provider-aws/issues/30505 + + /* + dynamic "overrides" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = overrides.value + } + } + */ + + overrides { + subnet_id = random_shuffle.subnets.result[0] + } + } + + tags = merge( + var.common_tags, + local.fleet_tags, + ) +} + +resource "time_sleep" "wait_for_fulfillment" { + depends_on = [aws_spot_fleet_request.targets] + create_duration = "2s" +} + +data "aws_instances" "targets" { + depends_on = [ + time_sleep.wait_for_fulfillment, + aws_spot_fleet_request.targets, + ] + + instance_tags = local.fleet_tags + instance_state_names = [ + "pending", + "running", + ] + + filter { + name = "image-id" + values = [var.ami_id] + } + + filter { + name = "iam-instance-profile.arn" + values = [aws_iam_instance_profile.target.arn] + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_spot_fleet_request.targets, + data.aws_instances.targets + ] + for_each = local.instances + + instance_id = data.aws_instances.targets.ids[each.key] +} diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf new file mode 100644 index 000000000000..1672a24179a0 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/outputs.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf new file mode 100644 index 000000000000..c2f5bb60926b --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/variables.tf @@ -0,0 +1,90 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The key name for the cluster tag" + default = "TargetCluster" +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "Vault" + } +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPUs for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPUs for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "max_price" { + description = "The maximum hourly price to pay for each target instance" + type = string + default = "0.0416" +} + +variable "seal_key_names" { + type = list(string) + description = "The key management seal key names" + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/vault-verify-replication/main.tf b/enos/modules/vault-verify-replication/main.tf deleted file mode 100644 index 57a97f9ddd1b..000000000000 --- a/enos/modules/vault-verify-replication/main.tf +++ /dev/null @@ -1,31 +0,0 @@ - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "smoke-verify-replication" { - for_each = local.instances - - content = templatefile("${path.module}/templates/smoke-verify-replication.sh", { - vault_edition = var.vault_edition - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault-verify-replication/variables.tf b/enos/modules/vault-verify-replication/variables.tf deleted file mode 100644 index b335ee45efce..000000000000 --- a/enos/modules/vault-verify-replication/variables.tf +++ /dev/null @@ -1,24 +0,0 @@ - -variable "vault_edition" { - type = string - description = "The vault product edition" - default = null -} - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} diff --git a/enos/modules/vault-verify-ui/main.tf b/enos/modules/vault-verify-ui/main.tf deleted file mode 100644 index 5703326d1a51..000000000000 --- a/enos/modules/vault-verify-ui/main.tf +++ /dev/null @@ -1,31 +0,0 @@ - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource 
"enos_remote_exec" "smoke-verify-ui" { - for_each = local.instances - - content = templatefile("${path.module}/templates/smoke-verify-ui.sh", { - vault_install_dir = var.vault_install_dir, - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh b/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh deleted file mode 100644 index bcd7e1cc3055..000000000000 --- a/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} -if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then - fail "Port 8200 not redirecting to UI" -fi -if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then - fail "Vault UI is not available" -fi diff --git a/enos/modules/vault-verify-ui/variables.tf b/enos/modules/vault-verify-ui/variables.tf deleted file mode 100644 index 7eaf5d1bf7f4..000000000000 --- a/enos/modules/vault-verify-ui/variables.tf +++ /dev/null @@ -1,19 +0,0 @@ - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} diff --git a/enos/modules/vault-verify-write-data/main.tf b/enos/modules/vault-verify-write-data/main.tf deleted file mode 100644 index 966e833f740b..000000000000 --- a/enos/modules/vault-verify-write-data/main.tf +++ /dev/null @@ -1,50 +0,0 @@ - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "smoke-enable-secrets-kv" { - - content = templatefile("${path.module}/templates/smoke-enable-secrets-kv.sh", { - vault_install_dir = var.vault_install_dir, - vault_token = var.vault_root_token, - }) - - transport = { - ssh = { - host = local.instances[0].public_ip - } - } -} - -# Verify that we can enable the k/v secrets engine and write data to it. -resource "enos_remote_exec" "smoke-write-test-data" { - depends_on = [enos_remote_exec.smoke-enable-secrets-kv] - for_each = local.instances - - content = templatefile("${path.module}/templates/smoke-write-test-data.sh", { - test_key = "smoke${each.key}" - test_value = "fire" - vault_install_dir = var.vault_install_dir, - vault_token = var.vault_root_token, - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh b/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh deleted file mode 100644 index fb28fd9a8240..000000000000 --- a/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -set -e - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? 
- wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${vault_install_dir}/vault - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -retry 5 "$binpath" status > /dev/null 2>&1 -retry 5 $binpath secrets enable -path="secret" kv diff --git a/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh b/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh deleted file mode 100644 index d514881425a1..000000000000 --- a/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -set -e - -function retry { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - return "$exit" - fi - done - - return 0 -} - -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${vault_install_dir}/vault -testkey=${test_key} -testvalue=${test_value} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -retry 5 "$binpath" status > /dev/null 2>&1 -retry 5 $binpath kv put secret/test $testkey=$testvalue diff --git a/enos/modules/vault-verify-write-data/variables.tf b/enos/modules/vault-verify-write-data/variables.tf deleted file mode 100644 index ac00f1091fcb..000000000000 --- a/enos/modules/vault-verify-write-data/variables.tf +++ /dev/null @@ -1,25 +0,0 @@ - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" - default = null -} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf index 001a53278a27..c43501f23488 100644 --- a/enos/modules/vault_agent/main.tf +++ b/enos/modules/vault_agent/main.tf @@ -1,10 +1,13 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { aws = { source = "hashicorp/aws" } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -52,12 +55,14 @@ locals { } resource "enos_remote_exec" "set_up_approle_auth_and_agent" { - content = templatefile("${path.module}/templates/set-up-approle-and-agent.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_template_contents = var.vault_agent_template_contents - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination, + VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents, + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")] transport = { ssh = { diff --git a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh new file mode 100644 index 000000000000..e939ea1d7cd4 --- /dev/null +++ b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +cat > /tmp/vault-agent.hcl <<- EOM +pid_file = "/tmp/pidfile" + +vault { + address = "http://127.0.0.1:8200" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +cache { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = true +} + +template { + destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" + contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}" + exec { + command = "pkill -F /tmp/pidfile" + } +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Agent is still running from a previous run, kill it +pkill -F /tmp/pidfile || true + +# If the template file already exists, remove it +rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true + +# Run agent (it will kill itself when it finishes rendering the template) +$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 diff --git 
a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh b/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh deleted file mode 100644 index 5444508de303..000000000000 --- a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env bash - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) -$binpath auth disable approle || true - -approle_create_status=$($binpath auth enable approle) - -approle_status=$($binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) - -ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') - -if [[ "$ROLEID" == '' ]]; then - fail "expected ROLEID to be nonempty, but it is empty" -fi - -SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') - -if [[ "$SECRETID" == '' ]]; then - fail "expected SECRETID to be nonempty, but it is empty" -fi - -echo $ROLEID > /tmp/role-id -echo $SECRETID > /tmp/secret-id - -cat > /tmp/vault-agent.hcl <<- EOM -pid_file = "/tmp/pidfile" - -vault { - address = "http://127.0.0.1:8200" - tls_skip_verify = true - retry { - num_retries = 10 - } -} - -cache { - enforce_consistency = "always" - use_auto_auth_token = true -} - -listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = true -} - -template { - destination = "${vault_agent_template_destination}" - contents = "${vault_agent_template_contents}" - exec { - command = "pkill -F /tmp/pidfile" - } -} - -auto_auth { - method { - type = "approle" - config = { - role_id_file_path = "/tmp/role-id" - secret_id_file_path = "/tmp/secret-id" - } - } - sink { - type = "file" - config = { - path = "/tmp/token" - } - } -} -EOM - -# If Agent is still running from a previous run, kill it -pkill -F /tmp/pidfile || true - -# If the template file already exists, remove it -rm ${vault_agent_template_destination} || true - -# Run agent (it will kill itself when it finishes rendering the template) -$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 diff --git a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/vault_artifactory_artifact/locals.tf deleted file mode 100644 index e022a62c3966..000000000000 --- a/enos/modules/vault_artifactory_artifact/locals.tf +++ /dev/null @@ -1,60 +0,0 @@ -locals { - - // file name extensions for the install packages of vault for the various architectures, distributions and editions - package_extensions = { - amd64 = { - ubuntu = { - # "oss" = "-1_amd64.deb" - "ent" = "-1_amd64.deb" - "ent.hsm" = "-1_amd64.deb" - } - rhel = { - # "oss" = "-1.x86_64.rpm" - "ent" = "-1.x86_64.rpm" - "ent.hsm" = "-1.x86_64.rpm" - } - } - arm64 = { - ubuntu = { - # "oss" = "-1_arm64.deb" - "ent" = "-1_arm64.deb" - } - rhel = { - # "oss" = "-1.aarch64.rpm" - "ent" = "-1.aarch64.rpm" - } - } - } - - // product_version --> artifact_version - artifact_version = replace(var.product_version, var.edition, "ent") - - // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) - artifact_package_release_names = { - ubuntu 
= { - "oss" = "vault_" - "ent" = "vault-enterprise_", - "ent.hsm" = "vault-enterprise-hsm_", - }, - rhel = { - "oss" = "vault-" - "ent" = "vault-enterprise-", - "ent.hsm" = "vault-enterprise-hsm-", - } - } - - artifact_types = ["package", "bundle"] - - // edition --> artifact name edition - artifact_name_edition = { - "oss" = "" - "ent" = "" - "ent.hsm" = ".hsm" - "ent.fips1402" = ".fips1402" - "ent.hsm.fips1402" = ".hsm.fips1402" - } - - artifact_name_prefix = var.artifact_type == "package" ? local.artifact_package_release_names[var.distro][var.edition] : "vault_" - artifact_name_extension = var.artifact_type == "package" ? local.package_extensions[var.arch][var.distro][var.edition] : "_linux_${var.arch}.zip" - artifact_name = var.artifact_type == "package" ? "${local.artifact_name_prefix}${replace(local.artifact_version, "-", "~")}${local.artifact_name_extension}" : "${local.artifact_name_prefix}${var.product_version}${local.artifact_name_extension}" -} diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/vault_artifactory_artifact/main.tf deleted file mode 100644 index ebc517030ba1..000000000000 --- a/enos/modules/vault_artifactory_artifact/main.tf +++ /dev/null @@ -1,22 +0,0 @@ -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - version = ">= 0.2.3" - } - } -} - -data "enos_artifactory_item" "vault" { - username = var.artifactory_username - token = var.artifactory_token - name = local.artifact_name - host = var.artifactory_host - repo = var.artifactory_repo - path = var.edition == "oss" ? "vault/*" : "vault-enterprise/*" - properties = tomap({ - "commit" = var.revision - "product-name" = var.edition == "oss" ? "vault" : "vault-enterprise" - "product-version" = local.artifact_version - }) -} diff --git a/enos/modules/vault_artifactory_artifact/outputs.tf b/enos/modules/vault_artifactory_artifact/outputs.tf deleted file mode 100644 index 827b2e773415..000000000000 --- a/enos/modules/vault_artifactory_artifact/outputs.tf +++ /dev/null @@ -1,29 +0,0 @@ - -output "url" { - value = data.enos_artifactory_item.vault.results[0].url - description = "The artifactory download url for the artifact" -} - -output "sha256" { - value = data.enos_artifactory_item.vault.results[0].sha256 - description = "The sha256 checksum for the artifact" -} - -output "size" { - value = data.enos_artifactory_item.vault.results[0].size - description = "The size in bytes of the artifact" -} - -output "name" { - value = data.enos_artifactory_item.vault.results[0].name - description = "The name of the artifact" -} - -output "vault_artifactory_release" { - value = { - url = data.enos_artifactory_item.vault.results[0].url - sha256 = data.enos_artifactory_item.vault.results[0].sha256 - username = var.artifactory_username - token = var.artifactory_token - } -} diff --git a/enos/modules/vault_artifactory_artifact/variables.tf b/enos/modules/vault_artifactory_artifact/variables.tf deleted file mode 100644 index 778354e7deea..000000000000 --- a/enos/modules/vault_artifactory_artifact/variables.tf +++ /dev/null @@ -1,36 +0,0 @@ - -variable "artifactory_username" { - type = string - description = "The username to use when connecting to artifactory" - default = null -} - -variable "artifactory_token" { - type = string - description = "The token to use when connecting to artifactory" - default = null - sensitive = true -} - -variable "artifactory_host" { - type = string - description = "The artifactory host to search for vault artifacts" - default = 
"https://artifactory.hashicorp.engineering/artifactory" -} - -variable "artifactory_repo" { - type = string - description = "The artifactory repo to search for vault artifacts" - default = "hashicorp-crt-stable-local*" -} -variable "arch" {} -variable "artifact_type" {} -variable "distro" {} -variable "edition" {} -variable "instance_type" {} -variable "revision" {} -variable "product_version" {} -variable "build_tags" { default = null } -variable "bundle_path" { default = null } -variable "goarch" { default = null } -variable "goos" { default = null } diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf new file mode 100644 index 000000000000..e0ee864b91c8 --- /dev/null +++ b/enos/modules/vault_cluster/main.tf @@ -0,0 +1,347 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + version = ">= 0.4.0" + } + } +} + +data "enos_environment" "localhost" {} + +locals { + audit_device_file_path = "/var/log/vault/vault_audit.log" + audit_socket_port = "9090" + bin_path = "${var.install_dir}/vault" + consul_bin_path = "${var.consul_install_dir}/consul" + enable_audit_devices = var.enable_audit_devices && var.initialize_cluster + // In order to get Terraform to plan we have to use collections with keys + // that are known at plan time. In order for our module to work our var.target_hosts + // must be a map with known keys at plan time. Here we're creating locals + // that keep track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.target_hosts)) : tostring(idx)] + key_shares = { + "awskms" = null + "shamir" = 5 + "pkcs11" = null + } + key_threshold = { + "awskms" = null + "shamir" = 3 + "pkcs11" = null + } + leader = toset(slice(local.instances, 0, 1)) + recovery_shares = { + "awskms" = 5 + "shamir" = null + "pkcs11" = 5 + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + "pkcs11" = 3 + } + vault_service_user = "vault" +} + +resource "enos_bundle_install" "consul" { + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if var.storage_backend == "consul" + } + + destination = var.consul_install_dir + release = merge(var.consul_release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "vault" { + for_each = var.target_hosts + + destination = var.install_dir + release = var.release == null ? 
var.release : merge({ product = "vault" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +module "install_packages" { + source = "../install_packages" + depends_on = [ + enos_bundle_install.vault, // Don't race for the package manager locks with vault install + ] + + hosts = var.target_hosts + packages = var.packages +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.consul_bin_path + data_dir = var.consul_data_dir + config = { + data_dir = var.consul_data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"] + server = false + bootstrap_expect = 0 + license = var.consul_license + log_level = var.consul_log_level + log_file = var.consul_log_file + } + license = var.consul_license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} + +module "start_vault" { + source = "../start_vault" + + depends_on = [ + enos_consul_start.consul, + enos_bundle_install.vault, + ] + + cluster_name = var.cluster_name + config_dir = var.config_dir + config_mode = var.config_mode + install_dir = var.install_dir + license = var.license + log_level = var.log_level + manage_service = var.manage_service + seal_attributes = var.seal_attributes + seal_attributes_secondary = var.seal_attributes_secondary + seal_type = var.seal_type + seal_type_secondary = var.seal_type_secondary + service_username = local.vault_service_user + storage_backend = var.storage_backend + storage_backend_attrs = var.storage_backend_addl_config + storage_node_prefix = var.storage_node_prefix + target_hosts = var.target_hosts +} + +resource "enos_vault_init" "leader" { + depends_on = [ + module.start_vault, + ] + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[0].config.api_addr + + key_shares = local.key_shares[var.seal_type] + key_threshold = local.key_threshold[var.seal_type] + + recovery_shares = local.recovery_shares[var.seal_type] + recovery_threshold = local.recovery_threshold[var.seal_type] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_vault_unseal" "leader" { + depends_on = [ + module.start_vault, + enos_vault_init.leader, + ] + for_each = enos_vault_init.leader // only unseal the leader if we initialized it + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.target_hosts[tolist(local.leader)[0]].public_ip + } + } +} + +resource "enos_vault_unseal" "followers" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + // Only unseal followers if we're not using an auto-unseal method and we've + // initialized the cluster + for_each = toset([ + for idx, follower in local.followers : follower + if var.seal_type == "shamir" && var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.followers[each.key].config.api_addr + seal_type = var.seal_type + unseal_keys = var.seal_type != "shamir" ? 
null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +// Force unseal the cluster. This is used if the vault-cluster module is used +// to add additional nodes to a cluster via autopilot, or some other means. +// When that happens we'll want to set initialize_cluster to false and +// force_unseal to true. +resource "enos_vault_unseal" "maybe_force_unseal" { + depends_on = [ + module.start_vault.followers, + ] + for_each = { + for idx, host in var.target_hosts : idx => host + if var.force_unseal && !var.initialize_cluster + } + + bin_path = local.bin_path + vault_addr = "http://localhost:8200" + seal_type = var.seal_type + unseal_keys = coalesce( + var.shamir_unseal_keys, + try(enos_vault_init.leader[0].unseal_keys_hex, null), + ) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# Add the vault install location to the PATH and set up VAULT_ADDR and VAULT_TOKEN environment +# variables in the login shell so we don't have to do it if/when we log in to a cluster node. +resource "enos_remote_exec" "configure_login_shell_profile" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + for_each = var.target_hosts + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_") + VAULT_INSTALL_DIR = var.install_dir + } + + scripts = [abspath("${path.module}/scripts/set-up-login-shell-profile.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +# We need to ensure that the directory used for audit logs is present and accessible to the vault +# user on all nodes, even though logging will only happen on the leader, since any node could +# become the leader. +resource "enos_remote_exec" "create_audit_log_dir" { + depends_on = [ + module.start_vault, + enos_vault_unseal.leader, + enos_vault_unseal.followers, + enos_vault_unseal.maybe_force_unseal, + ] + for_each = toset([ + for idx, host in toset(local.instances) : idx + if var.enable_audit_devices + ]) + + environment = { + LOG_FILE_PATH = local.audit_device_file_path + SERVICE_USER = local.vault_service_user + } + + scripts = [abspath("${path.module}/scripts/create-audit-log-dir.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +# We need to ensure that the socket listener used for the audit socket device is listening on each +# node in the cluster. If a leader election happens or vault is restarted, enabling the device will +# fail unless the listener is running.
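+# As an illustrative sketch (not part of the module itself), the helper script invoked below +# amounts to running the following listener on each node, where 9090 is the default port from +# local.audit_socket_port: +# +# nohup nc -kl 9090 >> /tmp/vault-socket.log 2>&1 < /dev/null &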
+resource "enos_remote_exec" "start_audit_socket_listener" { + depends_on = [ + module.start_vault, + enos_vault_unseal.leader, + enos_vault_unseal.followers, + enos_vault_unseal.maybe_force_unseal, + ] + for_each = toset([ + for idx, host in toset(local.instances) : idx + if var.enable_audit_devices + ]) + + environment = { + SOCKET_PORT = local.audit_socket_port + } + + scripts = [abspath("${path.module}/scripts/start-audit-socket-listener.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_remote_exec" "enable_audit_devices" { + depends_on = [ + enos_remote_exec.create_audit_log_dir, + enos_remote_exec.start_audit_socket_listener, + ] + for_each = toset([ + for idx in local.leader : idx + if local.enable_audit_devices + ]) + + environment = { + LOG_FILE_PATH = local.audit_device_file_path + SOCKET_PORT = local.audit_socket_port + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_BIN_PATH = local.bin_path + VAULT_TOKEN = enos_vault_init.leader[each.key].root_token + } + + scripts = [abspath("${path.module}/scripts/enable-audit-devices.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} diff --git a/enos/modules/vault_cluster/outputs.tf b/enos/modules/vault_cluster/outputs.tf new file mode 100644 index 000000000000..c76f09ba370f --- /dev/null +++ b/enos/modules/vault_cluster/outputs.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "audit_device_file_path" { + description = "The file path for the audit device, if enabled" + value = var.enable_audit_devices ? local.audit_device_file_path : "file audit device not enabled" +} + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "recovery_keys_b64" { + value = try(enos_vault_init.leader[0].recovery_keys_b64, []) +} + +output "recovery_keys_hex" { + value = try(enos_vault_init.leader[0].recovery_keys_hex, []) +} + +output "recovery_key_shares" { + value = try(enos_vault_init.leader[0].recovery_keys_shares, -1) +} + +output "recovery_threshold" { + value = try(enos_vault_init.leader[0].recovery_keys_threshold, -1) +} + +output "root_token" { + value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") +} + +output "target_hosts" { + description = "The vault cluster instances that were created" + + value = var.target_hosts +} + +output "unseal_keys_b64" { + value = try(enos_vault_init.leader[0].unseal_keys_b64, []) +} + +output "unseal_keys_hex" { + value = try(enos_vault_init.leader[0].unseal_keys_hex, null) +} + +output "unseal_shares" { + value = try(enos_vault_init.leader[0].unseal_keys_shares, -1) +} + +output "unseal_threshold" { + value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1) +} + +output "keys_base64" { + value = try(module.start_vault.keys_base64, null) +} + +output "keys_base64_secondary" { + value = try(module.start_vault.keys_base64_secondary, null) +} diff --git a/enos/modules/vault_cluster/scripts/create-audit-log-dir.sh b/enos/modules/vault_cluster/scripts/create-audit-log-dir.sh new file mode 100755 index 000000000000..95eeedcc1877 --- /dev/null +++ 
b/enos/modules/vault_cluster/scripts/create-audit-log-dir.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -eux + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" +[[ -z "$SERVICE_USER" ]] && fail "SERVICE_USER env variable has not been set" + +LOG_DIR=$(dirname "$LOG_FILE_PATH") + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=10 + count=$((count + 1)) + + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +retry 7 id -a "$SERVICE_USER" + +sudo mkdir -p "$LOG_DIR" +sudo chown -R "$SERVICE_USER":"$SERVICE_USER" "$LOG_DIR" diff --git a/enos/modules/vault_cluster/scripts/enable-audit-devices.sh b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh new file mode 100644 index 000000000000..c74601baf159 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/enable-audit-devices.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -exo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set" +[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_BIN_PATH" ]] && fail "VAULT_BIN_PATH env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +enable_file_audit_device() { + $VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH" +} + +enable_syslog_audit_device() { + $VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH" +} + +enable_socket_audit_device() { + "$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT" +} + +main() { + if ! enable_file_audit_device; then + fail "Failed to enable vault file audit device" + fi + + if ! enable_syslog_audit_device; then + fail "Failed to enable vault syslog audit device" + fi + + if ! enable_socket_audit_device; then + local log + log=$(cat /tmp/vault-socket.log) + fail "Failed to enable vault socket audit device: listener log: $log" + fi + + return 0 +} + +main diff --git a/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh new file mode 100644 index 000000000000..f3a42d22a59b --- /dev/null +++ b/enos/modules/vault_cluster/scripts/set-up-login-shell-profile.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Determine the profile file we should write to. We only want to affect login shells and bash will +# only read one of these, in order of precedence.
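+# +# For example (illustrative, hypothetical host state): if both ~/.bash_profile and ~/.profile +# exist, a login bash reads only ~/.bash_profile, so that is the file the helper below selects: +# +# $ determineProfileFile +# /home/ubuntu/.bash_profile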
+determineProfileFile() { + if [ -f "$HOME/.bash_profile" ]; then + printf "%s/.bash_profile\n" "$HOME" + return 0 + fi + + if [ -f "$HOME/.bash_login" ]; then + printf "%s/.bash_login\n" "$HOME" + return 0 + fi + + printf "%s/.profile\n" "$HOME" +} + +appendVaultProfileInformation() { + tee -a "$1" <<< "export PATH=$PATH:$VAULT_INSTALL_DIR +export VAULT_ADDR=$VAULT_ADDR +export VAULT_TOKEN=$VAULT_TOKEN" +} + +main() { + local profile_file + if ! profile_file=$(determineProfileFile); then + fail "failed to determine login shell profile file location" + fi + + # If vault_cluster is used more than once, e.g. autopilot or replication, this module can + # be called more than once. Short circuit here if our profile is already set up. + if grep VAULT_ADDR < "$profile_file"; then + exit 0 + fi + + if ! appendVaultProfileInformation "$profile_file"; then + fail "failed to write vault configuration to login shell profile" + fi + + exit 0 +} + +main diff --git a/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh new file mode 100644 index 000000000000..c1364936ecb4 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/start-audit-socket-listener.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -exo pipefail + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set" + +socket_listener_procs() { + pgrep -x nc +} + +kill_socket_listener() { + pkill nc +} + +test_socket_listener() { + nc -zvw 2 127.0.0.1 "$SOCKET_PORT" < /dev/null +} + +start_socket_listener() { + if socket_listener_procs; then + test_socket_listener + return $? + fi + + # Run nc to listen on the configured port for the socket auditor. We spawn nc + # with nohup so the listener isn't killed by the SIGHUP sent when the SSH + # session ends and doesn't block the session from exiting. + nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null & +} + +read_log() { + local f + f=/tmp/vault-socket.log + [[ -f "$f" ]] && cat "$f" +} + +main() { + if socket_listener_procs; then + # Clean up old nc's that might not be working + kill_socket_listener + fi + + if ! start_socket_listener; then + fail "Failed to start audit socket listener: socket listener log: $(read_log)" + fi + + # wait for nc to listen + sleep 1 + + if ! test_socket_listener; then + fail "Error testing socket listener: socket listener log: $(read_log)" + fi + + return 0 +} + +main diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf new file mode 100644 index 000000000000..b29ccfc80a27 --- /dev/null +++ b/enos/modules/vault_cluster/variables.tf @@ -0,0 +1,250 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +variable "artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "backend_cluster_name" { + type = string + description = "The name of the backend cluster" + default = null +} + +variable "backend_cluster_tag_key" { + type = string + description = "The tag key for searching for backend nodes" + default = null +} + +variable "cluster_name" { + type = string + description = "The Vault cluster name" + default = null +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_mode" { + description = "The method to use when configuring Vault. When set to 'env' we will configure Vault using VAULT_ style environment variables if possible. When 'file' we'll use the HCL configuration file for all configuration options." + default = "file" + + validation { + condition = contains(["env", "file"], var.config_mode) + error_message = "The config_mode must be either 'env' or 'file'. No other configuration modes are supported." + } +} + +variable "config_env_vars" { + description = "Optional Vault configuration environment variables to set when starting Vault" + type = map(string) + default = null +} + +variable "consul_data_dir" { + type = string + description = "The directory where consul will store data" + default = "/opt/consul/data" +} + +variable "consul_install_dir" { + type = string + description = "The directory where the consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "consul_license" { + type = string + sensitive = true + description = "The consul enterprise license" + default = null +} + +variable "consul_log_file" { + type = string + description = "The file where consul will write log output" + default = "/var/log/consul.log" +} + +variable "consul_log_level" { + type = string + description = "The consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.consul_log_level) + error_message = "The consul_log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "consul_release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.1" + edition = "ce" + } +} + +variable "enable_audit_devices" { + description = "If true, every audit device will be enabled" + type = bool + default = true +} + +variable "force_unseal" { + type = bool + description = "Always unseal the Vault cluster even if we're not initializing it" + default = false +} + +variable "initialize_cluster" { + type = bool + description = "Initialize the Vault cluster" + default = true +} + +variable "install_dir" { + type = string + description = "The directory where the vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install.
It can be a zip archive, RPM, or Debian package" + default = null +} + +variable "log_level" { + type = string + description = "The vault service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." + } +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages" + default = true +} + +variable "packages" { + type = list(string) + description = "A list of packages to install via the target host package manager" + default = [] +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Vault release version and edition to install from releases.hashicorp.com" + default = null +} + +variable "root_token" { + type = string + description = "The Vault root token that we can use to initialize and configure the cluster" + default = null +} + +variable "seal_ha_beta" { + description = "Enable using Seal HA on clusters that meet minimum version requirements and are enterprise editions" + default = true +} + +variable "seal_attributes" { + description = "The auto-unseal device attributes" + default = null +} + +variable "seal_attributes_secondary" { + description = "The secondary auto-unseal device attributes" + default = null +} + +variable "seal_type" { + type = string + description = "The primary seal device type" + default = "awskms" + + validation { + condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type) + error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported." + } +} + +variable "seal_type_secondary" { + type = string + description = "A secondary HA seal device type. Only supported in Vault Enterprise >= 1.15" + default = "none" + + validation { + condition = contains(["awskms", "none", "pkcs11"], var.seal_type_secondary) + error_message = "The secondary_seal_type must be 'awskms', 'none', or 'pkcs11'. No other secondary seal types are supported." + } +} + +variable "shamir_unseal_keys" { + type = list(string) + description = "Shamir unseal keys. Often only used when adding additional nodes to an already initialized cluster." + default = null +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." + } +} + +variable "storage_backend_addl_config" { + type = map(any) + description = "An optional set of key/value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} + +variable "target_hosts" { + description = "The target machines' host addresses to use for the Vault cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +} diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf new file mode 100644 index 000000000000..984d00cbeaca --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/main.tf @@ -0,0 +1,115 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_instance_count" { + type = number + description = "The number of instances in the vault cluster" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts. These are required to map private ip addresses to public addresses." +} + +locals { + follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : { + private_ip = local.follower_private_ips[idx] + public_ip = local.follower_public_ips[idx] + } + ] + follower_hosts = { + for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null) + } + follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout) + follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains( + local.follower_private_ips, var.vault_hosts[idx].private_ip) + ] + leader_host = { + private_ip = local.leader_private_ip + public_ip = local.leader_public_ip + } + leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout) + leader_public_ip = element([ + for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip + ], 0) + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "get_leader_private_ip" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} + +resource "enos_remote_exec" "get_follower_private_ips" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_LEADER_PRIVATE_IP = local.leader_private_ip + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} + +output "follower_hosts" { + value = local.follower_hosts +} + +output "follower_private_ips" { + value = local.follower_private_ips +} + +output "follower_public_ips" { + value = local.follower_public_ips +} + +output "leader_host" { + value = local.leader_host +} + +output "leader_private_ip" { + value = local.leader_private_ip +} + +output "leader_public_ip" { + value = local.leader_public_ip +} diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh new file mode 100644 index 000000000000..084a11a35f4c --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" +[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +count=0 +retries=10 +while :; do + # Vault >= 1.10.x has the 'operator members' command. If we have that then we'll use it. + if $binpath operator -h 2>&1 | grep members &> /dev/null; then + # Get the followers that are part of our private ips. + if members=$($binpath operator members -format json); then + if followers=$(echo "$members" | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then + echo "$followers" + exit 0 + fi + fi + fi + else + # We're using an old version of vault so we'll just return ips that don't match the leader. + # Get the private ip addresses of the followers + if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ -n "$followers" ]]; then + echo "$followers" + exit 0 + fi + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster followers" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh new file mode 100644 index 000000000000..ffea30c46220 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +count=0 +retries=5 +while :; do + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + exit 0 + fi + fi + + # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status.
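+  # Illustrative fallback output (address value hypothetical): +  #   $ vault status -format json | jq -r '.leader_address' +  #   http://10.0.1.12:8200 +  # from which scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+") extracts "10.0.1.12".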
+ if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + exit 0 + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi +done diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf new file mode 100644 index 000000000000..d71232294abf --- /dev/null +++ b/enos/modules/vault_proxy/main.tf @@ -0,0 +1,89 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_proxy_pidfile" { + type = string + description = "The filepath where the Vault Proxy pid file is kept" + default = "/tmp/pidfile" +} + +locals { + vault_instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } + vault_proxy_address = "127.0.0.1:8100" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } +} + +resource "enos_remote_exec" "use_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/use-proxy.sh")] + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } + + depends_on = [ + enos_remote_exec.set_up_approle_auth_and_proxy + ] +} diff --git a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh new file mode 100644 index 000000000000..556cb82248dd --- /dev/null +++ b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl +# The Proxy references the fixed Vault server address of http://127.0.0.1:8200 +# The Proxy itself listens at the address http://127.0.0.1:8100 +cat > /tmp/vault-proxy.hcl <<- EOM +pid_file = "${VAULT_PROXY_PIDFILE}" + +vault { + address = "http://127.0.0.1:8200" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +api_proxy { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${VAULT_PROXY_ADDRESS}" + tls_disable = true +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Proxy is still running from a previous run, kill it +pkill -F "${VAULT_PROXY_PIDFILE}" || true + +# Run proxy in the background +$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & diff --git a/enos/modules/vault_proxy/scripts/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh new file mode 100644 index 000000000000..3e7e543e7a31 --- /dev/null +++ b/enos/modules/vault_proxy/scripts/use-proxy.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Will cause the Vault CLI to communicate with the Vault Proxy, since it +# is listening at port 8100. +export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}" + +# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token +# is used. +unset VAULT_TOKEN + +# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env +# var) to lookup the details of the Proxy's token and make sure that the +# .data.path field contains 'auth/approle/login', thus confirming that the Proxy +# automatically authenticated itself. 
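+# An illustrative success looks like this (output value hypothetical): +#   $ vault token lookup -format=json | jq -r '.data.path' +#   auth/approle/login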
+$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login' + +# Now that we're done, kill the proxy +pkill -F "${VAULT_PROXY_PIDFILE}" || true diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf new file mode 100644 index 000000000000..7035fe406394 --- /dev/null +++ b/enos/modules/vault_raft_remove_peer/main.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "operator_instance" { + type = string + description = "The ip address of the operator (Voter) node" +} + +variable "remove_vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The old vault nodes to be removed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.remove_vault_instances)[idx].public_ip + private_ip = values(var.remove_vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "vault_raft_remove_peer" { + for_each = local.instances + + environment = { + REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_TOKEN = var.vault_root_token + VAULT_ADDR = "http://localhost:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")] + + transport = { + ssh = { + host = var.operator_instance + } + } +} diff --git a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh new file mode 100644 index 000000000000..4fcfa513d349 --- /dev/null +++ b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +node_addr=${REMOVE_VAULT_CLUSTER_ADDR} + +fail() { + echo "$1" 1>&2 + return 1 +} + +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +remove_peer() { + if !
node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then + fail "failed to get node id of a non-voter node" + fi + + $binpath operator raft remove-peer "$node_id" +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Retry a few times because it can take some time for things to settle after autopilot upgrade +retry 5 remove_peer diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf new file mode 100644 index 000000000000..e9779d11af88 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "configure_pr_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh new file mode 100644 index 000000000000..2ccaf14e4db5 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Create superuser policy +$binpath policy write superuser -<&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +eval "$binpath" operator step-down diff --git a/enos/modules/vault_test_ui/main.tf b/enos/modules/vault_test_ui/main.tf new file mode 100644 index 000000000000..9fc16a7b62bd --- /dev/null +++ b/enos/modules/vault_test_ui/main.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + # base test environment excludes the filter argument + ui_test_environment_base = { + VAULT_ADDR = "http://${var.vault_addr}:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_UNSEAL_KEYS = jsonencode(slice(var.vault_unseal_keys, 0, var.vault_recovery_threshold)) + } + ui_test_environment = var.ui_test_filter == null || try(length(trimspace(var.ui_test_filter)) == 0, true) ? local.ui_test_environment_base : merge(local.ui_test_environment_base, { + TEST_FILTER = var.ui_test_filter + }) + # The environment variables need to be double escaped since the process of rendering them to the + # outputs eats one level of escaping. Double escaping ensures that the values are rendered as + # properly escaped json, i.e. "[\"value\"]", suitable to be parsed as json. + escaped_ui_test_environment = [ + for key, value in local.ui_test_environment : "export ${key}='${value}'" + ] +} + +resource "enos_local_exec" "test_ui" { + count = var.ui_run_tests ? 1 : 0 + environment = local.ui_test_environment + scripts = ["${path.module}/scripts/test_ui.sh"] +} diff --git a/enos/modules/vault_test_ui/outputs.tf b/enos/modules/vault_test_ui/outputs.tf new file mode 100644 index 000000000000..ae4f926b3f93 --- /dev/null +++ b/enos/modules/vault_test_ui/outputs.tf @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +output "ui_test_stderr" { + value = var.ui_run_tests ? enos_local_exec.test_ui[0].stderr : "UI tests were not run; no stderr output" +} + +output "ui_test_stdout" { + value = var.ui_run_tests ? enos_local_exec.test_ui[0].stdout : "UI tests were not run; no stdout output" +} + +output "ui_test_environment" { + value = join(" \\ \n", local.escaped_ui_test_environment) + description = "The environment variables that are required in order to run the test:enos yarn target" +} diff --git a/enos/modules/vault_test_ui/scripts/test_ui.sh b/enos/modules/vault_test_ui/scripts/test_ui.sh new file mode 100755 index 000000000000..1913f74732dc --- /dev/null +++ b/enos/modules/vault_test_ui/scripts/test_ui.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -eux -o pipefail + +project_root=$(git rev-parse --show-toplevel) +pushd "$project_root" > /dev/null + +echo "running test-ember-enos" +make test-ember-enos +popd > /dev/null diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf new file mode 100644 index 000000000000..9dec392d09bf --- /dev/null +++ b/enos/modules/vault_test_ui/variables.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_addr" { + description = "The host address for the vault instance to test" + type = string +} + +variable "vault_root_token" { + description = "The vault root token" + type = string +} + +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f='" + default = null +} + +variable "vault_unseal_keys" { + description = "Base64 encoded recovery keys to use for the seal/unseal test" + type = list(string) +} + +variable "vault_recovery_threshold" { + description = "The number of recovery keys to require when unsealing Vault" + type = string +} + +variable "ui_run_tests" { + type = bool + description = "Whether to run the UI tests or not.
If set to false a cluster will be created but no tests will be run" + default = true +} diff --git a/enos/modules/vault_unseal_nodes/main.tf b/enos/modules/vault_unseal_nodes/main.tf new file mode 100644 index 000000000000..472601807b88 --- /dev/null +++ b/enos/modules/vault_unseal_nodes/main.tf @@ -0,0 +1,127 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This module unseals the replication secondary follower nodes +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "follower_public_ips" { + type = list(string) + description = "Vault cluster follower Public IP addresses" +} + +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "vault_unseal_keys" {} + +locals { + followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) + vault_bin_path = "${var.vault_install_dir}/vault" +} + +# After replication is enabled the secondary follower nodes are expected to be sealed, +# so we wait for the secondary follower nodes to update the seal status +resource "enos_remote_exec" "wait_until_sealed" { + for_each = { + for idx, follower in local.followers : idx => follower + } + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-until-sealed.sh")] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# The follower nodes on the secondary replication cluster incorrectly report +# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), +# so we restart the followers to clear the status and to auto-unseal in case of the awskms seal type +resource "enos_remote_exec" "restart_followers" { + depends_on = [enos_remote_exec.wait_until_sealed] + for_each = { + for idx, follower in local.followers : idx => follower + } + + inline = ["sudo systemctl restart vault"] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# We cannot use the vault_unseal resource due to the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311).
We use a custom +# script to allow retry for unsealing the secondary followers +resource "enos_remote_exec" "unseal_followers" { + depends_on = [enos_remote_exec.restart_followers] + # The unseal keys are required only for seal_type shamir + for_each = { + for idx, follower in local.followers : idx => follower + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/unseal-node.sh")] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# This is a second attempt needed to unseal the secondary followers +# using a custom script to get past the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311) +resource "enos_remote_exec" "unseal_followers_again" { + depends_on = [enos_remote_exec.unseal_followers] + for_each = { + for idx, follower in local.followers : idx => follower + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = [abspath("${path.module}/scripts/unseal-node.sh")] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh new file mode 100755 index 000000000000..b02ffa52c14c --- /dev/null +++ b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +binpath=${VAULT_INSTALL_DIR}/vault + +IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + for key in "${keys[@]}"; do + + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out + "$binpath" operator unseal "$key" > /dev/null 2>&1 + else + exit 0 + fi + done + + # Back off exponentially between unseal attempts + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "failed to unseal node" + fi +done diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh new file mode 100644 index 000000000000..5654629c5cdd --- /dev/null +++ b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +binpath=${VAULT_INSTALL_DIR}/vault + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + exit 0 + fi + + wait=$((3 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Expected node to be sealed" + fi +done diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf index 07e65bf197f9..c92ffbacfd62 100644 --- a/enos/modules/vault_upgrade/main.tf +++ b/enos/modules/vault_upgrade/main.tf @@ -1,10 +1,13 @@ +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { aws = { source = "hashicorp/aws" } enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -89,10 +92,12 @@ resource "enos_bundle_install" "upgrade_vault_binary" { resource "enos_remote_exec" "get_leader_public_ip" { depends_on = [enos_bundle_install.upgrade_vault_binary] - content = templatefile("${path.module}/templates/get-leader-public-ip.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")] + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } transport = { ssh = { @@ -104,10 +109,12 @@ resource "enos_remote_exec" "get_leader_public_ip" { resource "enos_remote_exec" "get_follower_public_ips" { depends_on = [enos_bundle_install.upgrade_vault_binary] - content = templatefile("${path.module}/templates/get-follower-public-ips.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } + + scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")] transport = { ssh = { @@ -120,7 +127,7 @@ resource "enos_remote_exec" "restart_followers" { for_each = local.followers depends_on = [enos_remote_exec.get_follower_public_ips] - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { @@ -150,7 +157,7 @@ resource "enos_vault_unseal" "followers" { resource "enos_remote_exec" "restart_leader" { depends_on = [enos_vault_unseal.followers] - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { diff --git a/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh new file mode 100644 index 000000000000..8cfa1b2fa61c --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +export VAULT_ADDR="http://localhost:8200" + +instances=${VAULT_INSTANCES} + +# Find the leader +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + +# Get the public ip addresses of the followers +follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") + +echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' ' diff --git a/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh new file mode 100644 index 000000000000..40444db77424 --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +export VAULT_ADDR="http://localhost:8200" + +instances=${VAULT_INSTANCES} + +# Find the leader +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + +# Get the public ip address of the leader +leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") +#shellcheck disable=SC2001 +echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/scripts/restart-vault.sh b/enos/modules/vault_upgrade/scripts/restart-vault.sh new file mode 100644 index 000000000000..981ceadcde4b --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/restart-vault.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -eux + +sudo systemctl restart vault diff --git a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh b/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh deleted file mode 100644 index e424aa44406c..000000000000 --- a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip addresses of the followers -follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") - -echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' ' diff --git a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh b/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh deleted file mode 100644 index 5c36dae336f5..000000000000 --- a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -set -e - -binpath=${vault_install_dir}/vault -export VAULT_ADDR="http://localhost:8200" - -instances='${vault_instances}' - -# Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - -# Get the public ip address of the leader -leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") -echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/templates/restart-vault.sh b/enos/modules/vault_upgrade/templates/restart-vault.sh deleted file mode 100644 index aa6853643056..000000000000 --- a/enos/modules/vault_upgrade/templates/restart-vault.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -eux - -sudo systemctl restart vault diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf index 6643c8b62662..70e25516df64 100644 --- a/enos/modules/vault_verify_agent_output/main.tf +++ b/enos/modules/vault_verify_agent_output/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
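The leader/follower discovery scripts above hinge on two jq tricks: `scan(...)` pulls the first IP-shaped substring out of `leader_address`, and because the result stays a quoted JSON string it can be spliced directly into the follow-up filter over the instance map. A self-contained sketch with made-up sample data:

#!/usr/bin/env bash
set -e

# Hypothetical stand-ins for the values the real scripts get from Vault and Terraform.
status='{"leader_address":"http://10.0.1.7:8200"}'
instances='{"0":{"private_ip":"10.0.1.7","public_ip":"3.92.1.10"},"1":{"private_ip":"10.0.1.8","public_ip":"3.92.1.11"}}'

# scan() extracts the first match of the regex; without -r the output keeps its JSON quotes.
leader_address=$(jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$status")

# Because $leader_address is still a quoted JSON string, it splices cleanly into the filter.
follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances")
echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' '   # prints: 3.92.1.11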
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -39,11 +42,13 @@ locals { } resource "enos_remote_exec" "verify_vault_agent_output" { - content = templatefile("${path.module}/templates/verify-vault-agent-output.sh", { - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_expected_output = var.vault_agent_expected_output - vault_instances = jsonencode(local.vault_instances) - }) + environment = { + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination + VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output + VAULT_INSTANCES = jsonencode(local.vault_instances) + } + + scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh new file mode 100644 index 000000000000..044c69130b3c --- /dev/null +++ b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") +if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then + fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" +fi diff --git a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh deleted file mode 100644 index 3c434ba97276..000000000000 --- a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -actual_output=$(cat ${vault_agent_template_destination}) -if [[ "$actual_output" != "${vault_agent_expected_output}" ]]; then - fail "expected '${vault_agent_expected_output}' to be the Agent output, but got: '$actual_output'" -fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf index ca03ea6f6f29..8421ef72ac90 100644 --- a/enos/modules/vault_verify_autopilot/main.tf +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -51,12 +54,14 @@ locals { resource "enos_remote_exec" "smoke-verify-autopilot" { for_each = local.public_ips - content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status, - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version, - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status, + VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh new file mode 100755 index 000000000000..d19f453a071d --- /dev/null +++ b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +export VAULT_ADDR="http://localhost:8200" + +[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=8 +while :; do + state=$($binpath read -format=json sys/storage/raft/autopilot/state) + status="$(jq -r '.data.upgrade_info.status' <<< "$state")" + target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" + + if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + sleep "$wait" + else + echo "$state" + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + fail "Autopilot did not get into the correct status" + fi +done diff --git a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh deleted file mode 100755 index 1dd5d9014710..000000000000 --- a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -token="${vault_token}" -autopilot_version="${vault_autopilot_upgrade_version}" -autopilot_status="${vault_autopilot_upgrade_status}" - -export VAULT_ADDR="http://localhost:8200" -export VAULT_TOKEN="$token" - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=7 -while :; do - 
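The retry loops in these verification scripts all sleep `2 ** count` seconds between attempts, so with `retries=8` the seven sleeps before the final attempt add up to 127 seconds. A tiny sketch that prints that schedule:

#!/usr/bin/env bash
# Print the backoff schedule produced by the 2 ** count arithmetic above.
retries=8
total=0
for ((count = 0; count < retries - 1; count++)); do
  wait=$((2 ** count)) # 1, 2, 4, 8, 16, 32, 64
  total=$((total + wait))
  echo "after attempt $((count + 1)): sleep ${wait}s (cumulative ${total}s)"
done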
state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state) - status="$(jq -r '.data.upgrade_info.status' <<< "$state")" - target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" - - if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then - exit 0 - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Autopilot did not get into the correct status" - fi -done diff --git a/enos/modules/vault_verify_default_lcq/main.tf b/enos/modules/vault_verify_default_lcq/main.tf new file mode 100644 index 000000000000..f8dd999e2437 --- /dev/null +++ b/enos/modules/vault_verify_default_lcq/main.tf @@ -0,0 +1,74 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_autopilot_default_max_leases" { + type = string + description = "The autopilot upgrade expected max_leases" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + public_ips = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke_verify_default_lcq" { + for_each = local.public_ips + + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://localhost:8200" + VAULT_TOKEN = var.vault_root_token + DEFAULT_LCQ = var.vault_autopilot_default_max_leases + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-default-lcq.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh new file mode 100755 index 000000000000..493d8b8ba96d --- /dev/null +++ b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +getMaxLeases() { + curl --request GET --header "X-Vault-Token: $VAULT_TOKEN" \ + "$VAULT_ADDR/v1/sys/quotas/lease-count/default" | jq '.data.max_leases // empty' +} + +waitForMaxLeases() { + local max_leases + if ! 
max_leases=$(getMaxLeases); then + echo "failed getting /v1/sys/quotas/lease-count/default data" 1>&2 + return 1 + fi + + if [[ "$max_leases" == "$DEFAULT_LCQ" ]]; then + echo "$max_leases" + return 0 + else + echo "Expected Default LCQ $DEFAULT_LCQ but got $max_leases" + return 1 + fi +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForMaxLeases; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for Default LCQ verification to complete. Data:\n\t$(getMaxLeases)" \ No newline at end of file diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf new file mode 100644 index 000000000000..84b39322a442 --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/main.tf @@ -0,0 +1,106 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "secondary_leader_public_ip" { + type = string + description = "Vault secondary cluster leader Public IP address" +} + +variable "secondary_leader_private_ip" { + type = string + description = "Vault secondary cluster leader Private IP address" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_status_on_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + PRIMARY_LEADER_PRIV_IP = var.primary_leader_private_ip + SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +resource "enos_remote_exec" "verify_replication_status_on_secondary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + PRIMARY_LEADER_PRIV_IP = var.primary_leader_private_ip + SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.secondary_leader_public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs +} + +output "secondary_replication_status" { + value = local.secondary_replication_status +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries +} + +output 
"secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries +} diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh new file mode 100644 index 000000000000..687ac3eb5631 --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +# This script waits for the replication status to be established +# then verifies the performance replication between primary and +# secondary clusters + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "$($binpath read -format=json sys/replication/performance/status)" + fi + done +} + +check_pr_status() { + pr_status=$($binpath read -format=json sys/replication/performance/status) + cluster_state=$(echo "$pr_status" | jq -r '.data.state') + connection_mode=$(echo "$pr_status" | jq -r '.data.mode') + + if [[ "$cluster_state" == 'idle' ]]; then + echo "replication cluster state is idle" 1>&2 + return 1 + fi + + if [[ "$connection_mode" == "primary" ]]; then + connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status') + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 + fi + secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then + echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2 + return 1 + fi + else + connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status') + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 + fi + primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then + echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2 + return 1 + fi + known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs') + if ! 
echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then + echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 + fi + fi + + echo "$pr_status" + return 0 +} + +# Retry for a while because it can take some time for replication to sync +retry 10 check_pr_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf index ded9c3cc7007..12f0eb4e4acd 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -47,12 +50,14 @@ locals { resource "enos_remote_exec" "verify_raft_auto_join_voter" { for_each = local.instances - content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh new file mode 100644 index 000000000000..6512d25876e5 --- /dev/null +++ b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 2>&1 + return 1 +} + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +check_voter_status() { + voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') + + if [[ "$voter_status" != 'true' ]]; then + fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" + fi +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Retry a few times because it can take some time for things to settle after +# all the nodes are unsealed +retry 7 check_voter_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh deleted file mode 100644 index e1172d7158f6..000000000000 --- a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 2>&1 - return 1 -} - -retry() { - local retries=$1 - shift - local count=0 - - until "$@"; do - exit=$? - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - echo "retry $count" - else - return "$exit" - fi - done - - return 0 -} - -check_voter_status() { - voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected') - - if [[ "$voter_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')" - fi -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# Retry a few times because it can take some time for things to settle after -# all the nodes are unsealed -retry 5 check_voter_status diff --git a/enos/modules/vault_verify_read_data/main.tf b/enos/modules/vault_verify_read_data/main.tf new file mode 100644 index 000000000000..4b794942f1fb --- /dev/null +++ b/enos/modules/vault_verify_read_data/main.tf @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. 
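The rewrite of check_voter_status above also fixes a quoting hazard: the deleted template spliced `${vault_cluster_addr}` straight into the jq program text, while the new script binds the value with `--arg`. The difference in miniature (hypothetical peer data):

#!/usr/bin/env bash
peers='{"data":{"config":{"servers":[{"address":"10.0.1.7:8201","voter":true}]}}}'
ADDR="10.0.1.7:8201"

# Fragile: the shell value is pasted into the program text and must be quoted by hand.
jq -Mr ".data.config.servers[] | select(.address==\"$ADDR\") | .voter" <<< "$peers"

# Robust: --arg binds the value as a proper jq string, --argjson as parsed JSON.
jq -Mr --arg ADDR "$ADDR" --argjson expected true \
  '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected' <<< "$peers"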
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "node_public_ips" { + type = list(string) + description = "Vault cluster node Public IP address" +} + +locals { + followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) + vault_bin_path = "${var.vault_install_dir}/vault" +} + +resource "enos_remote_exec" "verify_kv_on_node" { + for_each = { + for idx, follower in local.followers : idx => follower + } + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-data.sh")] + + transport = { + ssh = { + host = element(var.node_public_ips, each.key) + } + } +} diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh new file mode 100644 index 000000000000..5919aa04c31c --- /dev/null +++ b/enos/modules/vault_verify_read_data/scripts/verify-data.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + return 1 +} + +binpath="${VAULT_INSTALL_DIR}/vault" + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# To keep the authentication method and module verification consistent between all +# Enos scenarios we authenticate using testuser created by vault_verify_write_data module +retry 5 "$binpath" login -method=userpass username=testuser password=passuser1 +retry 5 "$binpath" kv get secret/test diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf new file mode 100644 index 000000000000..835bb776d374 --- /dev/null +++ b/enos/modules/vault_verify_replication/main.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-replication" { + for_each = local.instances + + environment = { + VAULT_EDITION = var.vault_edition + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh similarity index 76% rename from enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh rename to enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh index d7bc72f23c24..5ef9afd8b194 100644 --- a/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh +++ b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh @@ -1,22 +1,23 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # The Vault replication smoke test, documented in # https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 set -e -edition=${vault_edition} - function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } -# Replication status endpoint should have data.mode disabled for OSS release +# Replication status endpoint should have data.mode disabled for CE release status=$(curl -s http://localhost:8200/v1/sys/replication/status) -if [ "$edition" == "oss" ]; then +if [ "$VAULT_EDITION" == "ce" ]; then if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" + fail "replication data mode is not disabled for CE release!" fi else if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then diff --git a/enos/modules/vault_verify_replication/variables.tf b/enos/modules/vault_verify_replication/variables.tf new file mode 100644 index 000000000000..158b699bf4b3 --- /dev/null +++ b/enos/modules/vault_verify_replication/variables.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +variable "vault_edition" { + type = string + description = "The vault product edition" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf new file mode 100644 index 000000000000..b6077c6ed2a0 --- /dev/null +++ b/enos/modules/vault_verify_ui/main.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. 
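The renamed replication smoke test banks on the `/v1/sys/replication/status` shape differing by edition: CE reports a flat `data.mode` of `disabled`, while Enterprise exposes per-type status blocks. A quick manual spot-check of the same assertion, assuming a node listening on localhost:8200:

#!/usr/bin/env bash
set -e
status=$(curl -s http://localhost:8200/v1/sys/replication/status)

if [ "$(jq -r '.data.mode' <<< "$status")" = "disabled" ]; then
  # CE: replication is compiled out, so mode reads "disabled".
  echo "community edition: replication disabled, as expected"
else
  # Enterprise: dr and performance status blocks are present instead.
  jq '.data.dr, .data.performance' <<< "$status"
fi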
+# SPDX-License-Identifier: BUSL-1.1 + + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-ui" { + for_each = local.instances + + environment = { + VAULT_ADDR = var.vault_addr, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100644 index 000000000000..25ee334ea997 --- /dev/null +++ b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) +expected="${VAULT_ADDR}/ui/" +if [ "${url_effective}" != "${expected}" ]; then + fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}" +fi + +if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault_verify_ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf new file mode 100644 index 000000000000..d06d60ac9699 --- /dev/null +++ b/enos/modules/vault_verify_ui/variables.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +variable "vault_addr" { + type = string + description = "The vault cluster address" + default = "http://localhost:8200" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf index 1d16b29abc98..b4a251f11170 100644 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
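smoke-verify-ui.sh leans on two curl features: `-L` follows the redirect chain and `-w "%{url_effective}"` reports the URL curl finally landed on, which is then compared against the expected `/ui/` path. Reduced to a one-off check (address assumed):

#!/usr/bin/env bash
VAULT_ADDR=${VAULT_ADDR:-http://localhost:8200}

# -I sends a HEAD request, -L follows redirects, -o discards the response body;
# the write-out variable prints the final URL once redirects are exhausted.
final=$(curl -w "%{url_effective}" -I -L -s -S "$VAULT_ADDR" -o /dev/null)
echo "landed on: $final (expecting $VAULT_ADDR/ui/)"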
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -29,18 +32,6 @@ variable "vault_root_token" { description = "The vault root token" } -variable "vault_autopilot_upgrade_version" { - type = string - description = "The vault version to which autopilot upgraded Vault" - default = null -} - -variable "vault_undo_logs_status" { - type = string - description = "An integer either 0 or 1 which indicates whether undo_logs are disabled or enabled" - default = null -} - locals { public_ips = { for idx in range(var.vault_instance_count) : idx => { @@ -54,10 +45,9 @@ resource "enos_remote_exec" "smoke-verify-undo-logs" { for_each = local.public_ips environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = "http://localhost:8200" - vault_undo_logs_status = var.vault_undo_logs_status - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version + VAULT_ADDR = "http://localhost:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token } scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh index efc8d0ec0baf..080ec079a836 100644 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -1,28 +1,36 @@ #!/bin/bash - -undo_logs_status="${vault_undo_logs_status}" +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + count=0 -retries=7 +retries=5 while :; do - state=$(curl --header "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/metrics" | jq -r '.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') - target_undo_logs_status="$(jq -r '.Value' <<< "$state")" + state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') + target_undo_logs_status="$(jq -r '.Value' <<< "$state")" - if [ "$undo_logs_status" = "$target_undo_logs_status" ]; then - exit 0 - fi + if [ "$target_undo_logs_status" == "1" ]; then + exit 0 + fi - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Undo_logs did not get into the correct status" - fi + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Waiting for vault.core.replication.write_undo_logs to have Value:1" + echo "$state" + sleep "$wait" + else + fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value:1" + fi done diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf index d015adf62af8..cc8e47106322 100644 --- a/enos/modules/vault_verify_unsealed/main.tf +++ b/enos/modules/vault_verify_unsealed/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
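The reworked undo-logs check reads the whole metrics payload through the CLI and then isolates a single gauge by name. Against a hypothetical `sys/metrics` response it reduces to:

#!/usr/bin/env bash
# Hypothetical excerpt of `vault read sys/metrics -format=json` output.
metrics='{"data":{"Gauges":[{"Name":"vault.core.replication.write_undo_logs","Value":1}]}}'

state=$(jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")' <<< "$metrics")
jq -r '.Value' <<< "$state" # prints 1 once undo logs are enabled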
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -30,11 +33,6 @@ variable "vault_instances" { description = "The vault cluster instances that were created" } -variable "vault_root_token" { - type = string - description = "The vault root token" -} - locals { instances = { for idx in range(var.vault_instance_count) : idx => { @@ -47,12 +45,12 @@ locals { resource "enos_remote_exec" "verify_node_unsealed" { for_each = local.instances - content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) + scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] + + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + } transport = { ssh = { diff --git a/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh new file mode 100644 index 000000000000..44523f2fd7ba --- /dev/null +++ b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + exit 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR=http://localhost:8200 + +count=0 +retries=4 +while :; do + health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.') + unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') + if [[ "$unseal_status" == 'true' ]]; then + echo "$health_status" + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "expected ${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status" + fi +done diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh deleted file mode 100644 index de3edd648202..000000000000 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -set -e - -binpath=${vault_install_dir}/vault - -fail() { - echo "$1" 1>&2 - return 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') -if [[ "$unseal_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" -fi diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf index 9e80f456c3b5..8a1d722e50f3 100644 --- a/enos/modules/vault_verify_version/main.tf +++ b/enos/modules/vault_verify_version/main.tf @@ -1,7 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + terraform { required_providers { enos = { - source = "app.terraform.io/hashicorp-qti/enos" + source = "registry.terraform.io/hashicorp-forge/enos" } } } @@ -66,14 +69,16 @@ locals { resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { for_each = local.instances - content = templatefile("${path.module}/templates/verify-cluster-version.sh", { - vault_install_dir = var.vault_install_dir, - vault_build_date = var.vault_build_date, - vault_version = var.vault_product_version, - vault_edition = var.vault_edition, - vault_revision = var.vault_revision, - vault_token = var.vault_root_token, - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_VERSION = var.vault_product_version, + VAULT_EDITION = var.vault_edition, + VAULT_REVISION = var.vault_revision, + VAULT_TOKEN = var.vault_root_token, + } + + scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh new file mode 100644 index 000000000000..9ec43876af5e --- /dev/null +++ b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +# Verify the Vault "version" includes the correct base version, build date, +# revision SHA, and edition metadata. +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +edition=${VAULT_EDITION} +version=${VAULT_VERSION} +sha=${VAULT_REVISION} +build_date=${VAULT_BUILD_DATE} +# VAULT_TOKEN must also be set + +fail() { + echo "$1" 1>&2 + exit 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +version_expected="Vault v$version ($sha), built $build_date" + +case "$edition" in + *ce) ;; + *ent) ;; + *ent.hsm) version_expected="$version_expected (cgo)";; + *ent.fips1402) version_expected="$version_expected (cgo)" ;; + *ent.hsm.fips1402) version_expected="$version_expected (cgo)" ;; + *) fail "Unknown Vault edition: ($edition)" ;; +esac + +version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') +version_output=$("$binpath" version) + +if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; then + echo "Version verification succeeded!" +else + fail "expected Version=$version_expected or $version_expected_nosha, got: $version_output" +fi diff --git a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh b/enos/modules/vault_verify_version/templates/verify-cluster-version.sh deleted file mode 100644 index 3fd210235171..000000000000 --- a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -# Verify the Vault "version" includes the correct base version, build date, -# revision SHA, and edition metadata. 
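The one cryptic line in the new verify-cluster-version.sh is the `awk '!($3="")'` pipeline that derives the SHA-less variant of the expected string: assigning `$3=""` blanks the revision field (the assignment yields an empty string, which negates to true, so the line still prints), the first sed collapses the doubled space left behind, and the second trims trailing whitespace. Traced with made-up values:

#!/usr/bin/env bash
version_expected='Vault v1.15.0 (abc123), built 2023-09-22T17:45:01Z' # hypothetical

echo "$version_expected" \
  | awk '!($3="")' \
  | sed 's/  / /' \
  | sed -e 's/[[:space:]]*$//'
# -> Vault v1.15.0 built 2023-09-22T17:45:01Z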
-set -e - -binpath=${vault_install_dir}/vault -edition=${vault_edition} -version=${vault_version} -sha=${vault_revision} -build_date=${vault_build_date} - -fail() { - echo "$1" 1>&2 - exit 1 -} - -test -x "$binpath" || fail "unable to locate vault binary at $binpath" - -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -# Build date was added in 1.11 -if [[ "$(echo "$version" |awk -F'.' '{print $2}')" -ge 11 ]]; then - version_expected="Vault v$version ($sha), built $build_date" -else - version_expected="Vault v$version ($sha)" -fi - -case "$edition" in - *oss) ;; - *ent) ;; - *ent.hsm) version_expected="$version_expected (cgo)";; - *ent.fips1402) version_expected="$version_expected (cgo)" ;; - *ent.hsm.fips1402) version_expected="$version_expected (cgo)" ;; - *) fail "Unknown Vault edition: ($edition)" ;; -esac - -version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') -version_output=$("$binpath" version) - -if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; then - echo "Version verification succeeded!" -else - fail "expected Version=$version_expected or $version_expected_nosha, got: $version_output" -fi diff --git a/enos/modules/vault_verify_write_data/main.tf b/enos/modules/vault_verify_write_data/main.tf new file mode 100644 index 000000000000..c0baa9f51d84 --- /dev/null +++ b/enos/modules/vault_verify_write_data/main.tf @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many Vault instances are in the cluster" +} + +variable "leader_public_ip" { + type = string + description = "Vault cluster leader Public IP address" +} + +variable "leader_private_ip" { + type = string + description = "Vault cluster leader Private IP address" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +# We use this module to verify writing data in all Enos scenarios. Since we cannot use the +# Vault root token to authenticate to secondary clusters in the replication scenario, we add a +# regular user here to keep the authentication method and module verification consistent across scenarios +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + # Only enable the secrets engine on the leader node + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/smoke-enable-secrets-kv.sh")] + + transport = { + ssh = { + host = var.leader_public_ip + } + } +} + +# Verify that we can enable the k/v secrets engine and write data to it.
+resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + TEST_KEY = "smoke${each.key}" + TEST_VALUE = "fire" + } + + scripts = [abspath("${path.module}/scripts/smoke-write-test-data.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh new file mode 100644 index 000000000000..2e90a7352740 --- /dev/null +++ b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry 5 "$binpath" status > /dev/null 2>&1 + +# Create user policy +retry 5 "$binpath" policy write reguser -< /dev/null 2>&1 + +# Create new user and attach reguser policy +retry 5 "$binpath" write auth/userpass/users/testuser password="passuser1" policies="reguser" + +retry 5 "$binpath" secrets enable -path="secret" kv diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh new file mode 100644 index 000000000000..4bac3b0879a6 --- /dev/null +++ b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$TEST_KEY" ]] && fail "TEST_KEY env variable has not been set" +[[ -z "$TEST_VALUE" ]] && fail "TEST_VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry 5 "$binpath" kv put secret/test "$TEST_KEY=$TEST_VALUE" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf new file mode 100644 index 000000000000..0cd1c82a8cdd --- /dev/null +++ b/enos/modules/vault_wait_for_leader/main.tf @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" { + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh new file mode 100644 index 000000000000..f993ae8b62be --- /dev/null +++ b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +findLeaderInPrivateIPs() { + # Find the leader private IP address + local leader_private_ip + if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') ; then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_private_ip=$($binpath status -format json | jq -er '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_private_ip" + return 0 + fi + fi + + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if findLeaderInPrivateIPs; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader." 
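wait-for-leader.sh also shows the deadline-driven loop that the newer modules (default LCQ, seal rewrap) share: instead of a fixed retry count, they poll until a wall-clock deadline computed from TIMEOUT_SECONDS passes, sleeping RETRY_INTERVAL between probes. The skeleton, extracted as a reusable helper (`check` is a placeholder condition):

#!/usr/bin/env bash

# Poll "$@" until it succeeds or the deadline passes, pausing RETRY_INTERVAL between tries.
wait_until() {
  local begin_time end_time
  begin_time=$(date +%s)
  end_time=$((begin_time + ${TIMEOUT_SECONDS:-60}))
  while [ "$(date +%s)" -lt "$end_time" ]; do
    if "$@"; then
      return 0
    fi
    sleep "${RETRY_INTERVAL:-2}"
  done
  return 1
}

check() { [ -f /tmp/ready ]; } # placeholder condition
wait_until check || echo "timed out" 1>&2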
diff --git a/enos/modules/vault_wait_for_seal_rewrap/main.tf b/enos/modules/vault_wait_for_seal_rewrap/main.tf new file mode 100644 index 000000000000..2a234d0deb8f --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/main.tf @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] + first_key = element(keys(enos_remote_exec.wait_for_seal_rewrap_to_be_completed), 0) +} + +resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { + for_each = var.vault_hosts + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-seal-rewrap.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +output "stdout" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout +} + +output "stderr" { + value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stderr +} diff --git a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh new file mode 100644 index 000000000000..de5abc5f8fe2 --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getRewrapData() { + $binpath read sys/sealwrap/rewrap -format=json | jq -eMc '.data' +} + +waitForRewrap() { + local data + if ! data=$(getRewrapData); then + echo "failed getting /v1/sys/sealwrap/rewrap data" 1>&2 + return 1 + fi + + if ! jq -e '.is_running == false' <<< "$data" &> /dev/null; then + echo "rewrap is running" 1>&2 + return 1 + fi + + if ! jq -e '.entries.failed == 0' <<< "$data" &> /dev/null; then + local entries + entries=$(jq -Mc '.entries.failed' <<< "$data") + echo "rewrap has $entries failed entries" 1>&2 + return 1 + fi + + if !
jq -e '.entries.processed == .entries.succeeded' <<< "$data" &> /dev/null; then + local processed + local succeeded + processed=$(jq -Mc '.entries.processed' <<< "$data") + succeeded=$(jq -Mc '.entries.succeeded' <<< "$data") + echo "the number of processed entries ($processed) does not equal the number succeeded ($succeeded)" 1>&2 + return 1 + fi + + if jq -e '.entries.processed == 0' <<< "$data" &> /dev/null; then + echo "A seal rewrap has not been started yet: the number of processed entries is zero and a rewrap is not yet running." + return 1 + fi + + echo "$data" + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForRewrap; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for seal rewrap to be completed. Data:\n\t$(getRewrapData)" diff --git a/enos/modules/verify_seal_type/main.tf b/enos/modules/verify_seal_type/main.tf new file mode 100644 index 000000000000..7f8fb20b33cb --- /dev/null +++ b/enos/modules/verify_seal_type/main.tf @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "registry.terraform.io/hashicorp-forge/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "seal_type" { + type = string + description = "The expected seal type" + default = "shamir" +} + +resource "enos_remote_exec" "verify_seal_type" { + for_each = var.vault_hosts + + scripts = [abspath("${path.module}/scripts/verify-seal-type.sh")] + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + EXPECTED_SEAL_TYPE = var.seal_type + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh new file mode 100644 index 000000000000..73ce06fd9e03 --- /dev/null +++ b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc.
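All four conditions in waitForRewrap ride on jq's `-e` flag, which maps the filter's boolean result onto the exit status so each test can sit directly in an `if`. Evaluated against a hypothetical payload for a finished rewrap:

#!/usr/bin/env bash
data='{"is_running":false,"entries":{"processed":12,"succeeded":12,"failed":0}}' # hypothetical

# With -e, jq exits 0 when the result is true and 1 when it is false or null.
jq -e '.is_running == false' <<< "$data" > /dev/null || echo "still running"
jq -e '.entries.failed == 0' <<< "$data" > /dev/null || echo "has failed entries"
jq -e '.entries.processed == .entries.succeeded' <<< "$data" > /dev/null || echo "count mismatch"
jq -e '.entries.processed > 0' <<< "$data" > /dev/null && echo "rewrap completed"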
diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh
new file mode 100644
index 000000000000..73ce06fd9e03
--- /dev/null
+++ b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -e
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$EXPECTED_SEAL_TYPE" ]] && fail "EXPECTED_SEAL_TYPE env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "unable to locate vault binary at $binpath"
+
+count=0
+retries=2
+while :; do
+  if seal_status=$($binpath read sys/seal-status -format=json); then
+    if jq -Mer --arg expected "$EXPECTED_SEAL_TYPE" '.data.type == $expected' <<< "$seal_status" &> /dev/null; then
+      exit 0
+    fi
+  fi
+
+  wait=$((2 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    printf "Seal Status: %s\n" "$seal_status"
+    got=$(jq -Mer '.data.type' <<< "$seal_status")
+    fail "Expected seal type to be $EXPECTED_SEAL_TYPE, got: $got"
+  fi
+done
diff --git a/go.mod b/go.mod
index 979937feefcb..8e1de83f6d0a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,18 @@
 module github.com/hashicorp/vault
 
-go 1.19
+// The go version directive value isn't consulted when building our production binaries,
+// and the vault module isn't intended to be imported into other projects. As such the
+// impact of this setting is usually rather limited. Note however that in some cases the
+// Go project introduces new semantics for handling of go.mod depending on the value.
+//
+// The general policy is to update it whenever the Go major version used on the branch is
+// updated. If we choose not to do so at some point (e.g. because we don't want some new
+// semantics related to Go module handling), this comment should be updated to explain that.
+//
+// Whenever this value gets updated, sdk/go.mod should be updated to the same value.
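+//
+// The toolchain directive just below (introduced in Go 1.21) records the exact Go
+// toolchain to build with; a go command running with the default GOTOOLCHAIN=auto
+// will download and switch to that toolchain when its own version is older.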
+go 1.21 + +toolchain go1.22.2 replace github.com/hashicorp/vault/api => ./api @@ -12,406 +24,487 @@ replace github.com/hashicorp/vault/api/auth/userpass => ./api/auth/userpass replace github.com/hashicorp/vault/sdk => ./sdk -replace go.etcd.io/etcd/client/pkg/v3 v3.5.0 => go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 - require ( - cloud.google.com/go/monitoring v1.2.0 - cloud.google.com/go/spanner v1.5.1 - cloud.google.com/go/storage v1.23.0 - github.com/Azure/azure-storage-blob-go v0.14.0 - github.com/Azure/go-autorest/autorest v0.11.28 - github.com/Azure/go-autorest/autorest/adal v0.9.18 - github.com/NYTimes/gziphandler v1.1.1 - github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 + cloud.google.com/go/cloudsqlconn v1.4.3 + cloud.google.com/go/monitoring v1.17.0 + cloud.google.com/go/spanner v1.55.0 + cloud.google.com/go/storage v1.36.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-storage-blob-go v0.15.0 + github.com/Azure/go-autorest/autorest v0.11.29 + github.com/Azure/go-autorest/autorest/adal v0.9.23 + github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 github.com/SAP/go-hdb v0.14.1 github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a github.com/aerospike/aerospike-client-go/v5 v5.6.0 - github.com/aliyun/alibaba-cloud-sdk-go v1.61.1842 + github.com/aliyun/alibaba-cloud-sdk-go v1.62.676 github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 - github.com/armon/go-metrics v0.4.0 + github.com/armon/go-metrics v0.4.1 github.com/armon/go-radix v1.0.0 - github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef - github.com/aws/aws-sdk-go v1.44.128 - github.com/aws/aws-sdk-go-v2/config v1.6.0 + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 + github.com/aws/aws-sdk-go v1.50.13 + github.com/aws/aws-sdk-go-v2/config v1.18.19 + github.com/aws/aws-sdk-go-v2/service/sqs v1.29.1 github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a github.com/cenkalti/backoff/v3 v3.2.2 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 - github.com/client9/misspell v0.3.4 - github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c + github.com/cockroachdb/cockroach-go/v2 v2.3.8 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/denisenkom/go-mssqldb v0.12.2 - github.com/docker/docker v20.10.18+incompatible - github.com/docker/go-connections v0.4.0 + github.com/denisenkom/go-mssqldb v0.12.3 + github.com/docker/docker v25.0.5+incompatible github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 - github.com/dustin/go-humanize v1.0.0 - github.com/fatih/color v1.13.0 + github.com/dustin/go-humanize v1.0.1 + github.com/fatih/color v1.16.0 github.com/fatih/structs v1.1.0 - github.com/favadi/protoc-go-inject-tag v1.3.0 + github.com/gammazero/workerpool v1.1.3 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-errors/errors v1.4.1 - github.com/go-ldap/ldap/v3 v3.4.1 - github.com/go-sql-driver/mysql v1.6.0 - github.com/go-test/deep v1.0.8 + github.com/go-errors/errors v1.5.1 + github.com/go-git/go-git/v5 v5.11.0 + github.com/go-jose/go-jose/v3 v3.0.3 + github.com/go-ldap/ldap/v3 v3.4.6 + github.com/go-sql-driver/mysql v1.7.1 + github.com/go-test/deep v1.1.0 + github.com/go-zookeeper/zk v1.0.3 github.com/gocql/gocql v1.0.0 - 
github.com/golang-jwt/jwt/v4 v4.3.0 - github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.9 + github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang/protobuf v1.5.4 + github.com/google/go-cmp v0.6.0 github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 - github.com/google/tink/go v1.6.1 - github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 - github.com/hashicorp/consul-template v0.29.5 - github.com/hashicorp/consul/api v1.15.2 + github.com/google/tink/go v1.7.0 + github.com/hashicorp-forge/bbolt v1.3.8-hc3 + github.com/hashicorp/cap v0.6.0 + github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227 + github.com/hashicorp/cli v1.1.6 + github.com/hashicorp/consul-template v0.36.1-0.20240213145952-6c83e89b48af + github.com/hashicorp/consul/api v1.27.0 github.com/hashicorp/errwrap v1.1.0 + github.com/hashicorp/eventlogger v0.2.8 + github.com/hashicorp/go-bexpr v0.1.12 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 github.com/hashicorp/go-gcp-common v0.8.0 - github.com/hashicorp/go-hclog v1.4.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 - github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 - github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 - github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 - github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 - github.com/hashicorp/go-memdb v1.3.3 - github.com/hashicorp/go-msgpack v1.1.5 + github.com/hashicorp/go-hclog v1.6.2 + github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 + github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9 + github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 + github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.11 + github.com/hashicorp/go-memdb v1.3.4 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.5 + github.com/hashicorp/go-plugin v1.6.0 github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a - github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-retryablehttp v0.7.5 github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 + github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 + github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 github.com/hashicorp/go-secure-stdlib/password v0.1.1 github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 - github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 - github.com/hashicorp/go-sockaddr v1.0.2 + 
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 + github.com/hashicorp/go-sockaddr v1.0.6 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/golang-lru v0.5.4 + github.com/hashicorp/golang-lru v1.0.2 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/hcl v1.0.1-vault-5 - github.com/hashicorp/hcp-link v0.1.0 - github.com/hashicorp/hcp-scada-provider v0.1.0 - github.com/hashicorp/hcp-sdk-go v0.22.0 - github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 - github.com/hashicorp/raft v1.3.10 + github.com/hashicorp/hcl/v2 v2.16.2 + github.com/hashicorp/hcp-link v0.2.1 + github.com/hashicorp/hcp-scada-provider v0.2.2 + github.com/hashicorp/hcp-sdk-go v0.75.0 + github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d + github.com/hashicorp/raft v1.6.0 github.com/hashicorp/raft-autopilot v0.2.0 - github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c + github.com/hashicorp/raft-boltdb/v2 v2.3.0 github.com/hashicorp/raft-snapshot v1.0.4 - github.com/hashicorp/vault-plugin-auth-alicloud v0.5.4-beta1.0.20221117202053-722c59caa2d0 - github.com/hashicorp/vault-plugin-auth-azure v0.11.2-0.20221108185759-ac6743d5f0f2 - github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 - github.com/hashicorp/vault-plugin-auth-cf v0.13.0 - github.com/hashicorp/vault-plugin-auth-gcp v0.13.2-0.20221103133215-2fc20fb9fc44 - github.com/hashicorp/vault-plugin-auth-jwt v0.14.0 - github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 - github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0 - github.com/hashicorp/vault-plugin-auth-oci v0.12.0 - github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 - github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 - github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 - github.com/hashicorp/vault-plugin-database-redis v0.1.0 - github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 - github.com/hashicorp/vault-plugin-database-snowflake v0.6.1 + github.com/hashicorp/raft-wal v0.4.0 + github.com/hashicorp/vault-hcp-lib v0.0.0-20240402205111-2312b38227ab + github.com/hashicorp/vault-plugin-auth-alicloud v0.17.0 + github.com/hashicorp/vault-plugin-auth-azure v0.17.0 + github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 + github.com/hashicorp/vault-plugin-auth-cf v0.16.0 + github.com/hashicorp/vault-plugin-auth-gcp v0.16.2 + github.com/hashicorp/vault-plugin-auth-jwt v0.20.3 + github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0 + github.com/hashicorp/vault-plugin-auth-kubernetes v0.18.0 + github.com/hashicorp/vault-plugin-auth-oci v0.15.1 + github.com/hashicorp/vault-plugin-database-couchbase v0.10.1 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.14.0 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.11.0 + github.com/hashicorp/vault-plugin-database-redis v0.2.3 + github.com/hashicorp/vault-plugin-database-redis-elasticache v0.3.0 + github.com/hashicorp/vault-plugin-database-snowflake v0.10.0 github.com/hashicorp/vault-plugin-mock v0.16.1 - github.com/hashicorp/vault-plugin-secrets-ad v0.14.0 - github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 - github.com/hashicorp/vault-plugin-secrets-azure v0.6.3-0.20221109203402-f955aedc51bf - github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20221101145740-55dbd0ccd1b8 - github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 - github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 - 
github.com/hashicorp/vault-plugin-secrets-kv v0.13.3 - github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 - github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0 - github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 - github.com/hashicorp/vault-testing-stepwise v0.1.2 - github.com/hashicorp/vault/api v1.8.2 + github.com/hashicorp/vault-plugin-secrets-ad v0.17.0 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0 + github.com/hashicorp/vault-plugin-secrets-azure v0.17.2 + github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0 + github.com/hashicorp/vault-plugin-secrets-gcpkms v0.16.0 + github.com/hashicorp/vault-plugin-secrets-kubernetes v0.7.0 + github.com/hashicorp/vault-plugin-secrets-kv v0.17.0 + github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.11.0 + github.com/hashicorp/vault-plugin-secrets-openldap v0.12.1 + github.com/hashicorp/vault-plugin-secrets-terraform v0.7.5 + github.com/hashicorp/vault-testing-stepwise v0.1.4 + github.com/hashicorp/vault/api v1.12.0 github.com/hashicorp/vault/api/auth/approle v0.1.0 github.com/hashicorp/vault/api/auth/userpass v0.1.0 - github.com/hashicorp/vault/sdk v0.6.1 - github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221209165735-a2eed407e08d + github.com/hashicorp/vault/sdk v0.11.0 + github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab - github.com/jackc/pgx/v4 v4.15.0 - github.com/jcmturner/gokrb5/v8 v8.4.2 + github.com/jackc/pgx/v4 v4.18.3 + github.com/jcmturner/gokrb5/v8 v8.4.4 github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f github.com/jefferai/jsonx v1.0.0 github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f - github.com/kr/pretty v0.3.0 + github.com/klauspost/compress v1.16.7 + github.com/kr/pretty v0.3.1 github.com/kr/text v0.2.0 - github.com/mattn/go-colorable v0.1.12 - github.com/mattn/go-isatty v0.0.14 + github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-isatty v0.0.20 github.com/mholt/archiver/v3 v3.5.1 github.com/michaelklishin/rabbit-hole/v2 v2.12.0 github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a - github.com/mitchellh/cli v1.1.2 github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.14.1 - github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/go-wordwrap v1.0.1 github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/reflectwalk v1.0.2 github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc github.com/ncw/swift v1.0.47 github.com/oklog/run v1.1.0 github.com/okta/okta-sdk-golang/v2 v2.12.1 - github.com/oracle/oci-go-sdk v13.1.0+incompatible + github.com/oracle/oci-go-sdk v24.3.0+incompatible github.com/ory/dockertest v3.3.5+incompatible - github.com/ory/dockertest/v3 v3.9.1 + github.com/ory/dockertest/v3 v3.10.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pires/go-proxyproto v0.6.1 github.com/pkg/errors v0.9.1 github.com/posener/complete v1.2.3 github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d - github.com/prometheus/client_golang v1.11.1 - github.com/prometheus/common v0.26.0 + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/common v0.37.0 github.com/rboyer/safeio v0.2.1 - github.com/ryanuber/columnize v2.1.0+incompatible + github.com/robfig/cron/v3 v3.0.1 + github.com/ryanuber/columnize v2.1.2+incompatible github.com/ryanuber/go-glob v1.0.0 - github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.7.1 github.com/shirou/gopsutil/v3 v3.22.6 - github.com/stretchr/testify v1.8.1 - go.etcd.io/bbolt v1.3.6 - go.etcd.io/etcd/client/pkg/v3 v3.5.0 - go.etcd.io/etcd/client/v2 v2.305.0 - go.etcd.io/etcd/client/v3 v3.5.0 - go.mongodb.org/atlas v0.15.0 - go.mongodb.org/mongo-driver v1.7.3 - go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/sdk v0.20.0 - go.opentelemetry.io/otel/trace v0.20.0 - go.uber.org/atomic v1.9.0 - go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 - golang.org/x/net v0.4.0 - golang.org/x/oauth2 v0.1.0 - golang.org/x/sys v0.3.0 - golang.org/x/term v0.3.0 - golang.org/x/tools v0.1.12 - google.golang.org/api v0.101.0 - google.golang.org/grpc v1.50.1 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 - google.golang.org/protobuf v1.28.1 - gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce + github.com/stretchr/testify v1.9.0 + go.etcd.io/bbolt v1.3.7 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 + go.etcd.io/etcd/client/v2 v2.305.5 + go.etcd.io/etcd/client/v3 v3.5.7 + go.mongodb.org/atlas v0.36.0 + go.mongodb.org/mongo-driver v1.13.1 + go.opentelemetry.io/otel v1.24.0 + go.opentelemetry.io/otel/sdk v1.23.1 + go.opentelemetry.io/otel/trace v1.24.0 + go.uber.org/atomic v1.11.0 + go.uber.org/goleak v1.2.1 + golang.org/x/crypto v0.23.0 + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.18.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.20.0 + golang.org/x/term v0.20.0 + golang.org/x/text v0.15.0 + golang.org/x/tools v0.18.0 + google.golang.org/api v0.163.0 + google.golang.org/grpc v1.61.1 + google.golang.org/protobuf v1.34.1 gopkg.in/ory-am/dockertest.v3 v3.3.4 - gopkg.in/square/go-jose.v2 v2.6.0 - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 - layeh.com/radius v0.0.0-20190322222518-890bc1058917 - mvdan.cc/gofumpt v0.3.1 + k8s.io/apimachinery v0.29.1 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + layeh.com/radius v0.0.0-20231213012653-1006025d24f8 + nhooyr.io/websocket v1.8.7 ) require ( - cloud.google.com/go v0.104.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect - cloud.google.com/go/iam v0.3.0 // indirect - cloud.google.com/go/kms v1.4.0 // indirect - code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect + cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/kms v1.15.6 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go v67.0.0+incompatible // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect - github.com/BurntSushi/toml v1.2.0 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/Jeffail/gabs v1.1.1 // indirect + github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/sprig v2.22.0+incompatible // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Microsoft/hcsshim v0.9.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 // indirect - github.com/aws/aws-sdk-go-v2 v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 // indirect - github.com/aws/smithy-go v1.7.0 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/andybalholm/brotli v1.0.5 // indirect + github.com/apache/arrow/go/v14 v14.0.2 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect + 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect + github.com/aws/smithy-go v1.18.1 // indirect + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/benbjohnson/immutable v0.4.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect - github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/boombuler/barcode v1.0.1 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect - github.com/cloudflare/circl v1.1.0 // indirect - github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect - github.com/containerd/cgroups v1.0.3 // indirect - github.com/containerd/containerd v1.5.13 // indirect - github.com/containerd/continuity v0.3.0 // indirect + github.com/cjlapao/common-go v0.0.39 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1 // indirect + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect + github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/continuity v0.4.2 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/coreos/etcd v3.3.27+incompatible // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/coreos/go-oidc/v3 v3.1.0 // indirect + github.com/coreos/go-oidc/v3 v3.10.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/couchbase/gocb/v2 v2.3.3 // indirect - github.com/couchbase/gocbcore/v10 v10.0.4 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf // indirect + github.com/couchbase/gocb/v2 v2.6.5 // indirect + github.com/couchbase/gocbcore/v10 v10.3.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/danieljoos/wincred v1.1.2 // 
indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/digitalocean/godo v1.7.5 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/cli v20.10.18+incompatible // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v25.0.1+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/gabriel-vasile/mimetype v1.3.1 // indirect - github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 // indirect - github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gammazero/deque v0.2.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.1 // indirect github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 // indirect - github.com/go-logr/logr v1.2.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.20.0 // indirect - github.com/go-openapi/errors v0.19.9 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/loads v0.20.2 // indirect - github.com/go-openapi/runtime v0.19.24 // indirect - github.com/go-openapi/spec v0.20.3 // indirect - github.com/go-openapi/strfmt v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect - github.com/go-openapi/validate v0.20.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.4 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/runtime v0.26.0 // indirect + github.com/go-openapi/spec v0.20.9 // indirect + github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/validate v0.22.2 // indirect github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect - github.com/go-stack/stack v1.8.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/gofrs/uuid v4.3.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + 
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/flatbuffers v23.5.26+incompatible // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.6.0 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect - github.com/googleapis/go-type-adapters v1.0.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/hashicorp/cronexpr v1.1.1 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect - github.com/hashicorp/go-slug v0.7.0 // indirect - github.com/hashicorp/go-tfe v0.20.0 // indirect - github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect + github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0 // indirect + github.com/hashicorp/go-slug v0.13.4 // indirect + github.com/hashicorp/go-tfe v1.44.0 // indirect + github.com/hashicorp/jsonapi v1.3.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/mdns v1.0.4 // indirect - github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 // indirect - github.com/hashicorp/serf v0.9.7 // indirect - github.com/hashicorp/vault/api/auth/kubernetes v0.3.0 // indirect + github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hashicorp/vault/api/auth/kubernetes v0.6.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect - github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.11.0 // indirect + github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.10.0 // indirect - github.com/jackc/pgx 
v3.3.0+incompatible // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgtype v1.14.3 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.13.6 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/pgzip v1.2.5 // indirect - github.com/lib/pq v1.10.6 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/linode/linodego v0.7.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mediocregopher/radix/v4 v4.1.1 // indirect - github.com/miekg/dns v1.1.41 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mediocregopher/radix/v4 v4.1.4 // indirect + github.com/microsoft/kiota-abstractions-go v1.5.6 // indirect + github.com/microsoft/kiota-authentication-azure-go v1.0.1 // indirect + github.com/microsoft/kiota-http-go v1.1.1 // indirect + github.com/microsoft/kiota-serialization-form-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-json-go v1.0.5 // indirect + github.com/microsoft/kiota-serialization-multipart-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-text-go v1.0.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go v1.32.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go-core v1.0.1 // indirect + github.com/miekg/dns v1.1.43 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/pointerstructure v1.2.0 // indirect - github.com/moby/sys/mount v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.5.0 // indirect - github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mongodb-forks/digest v1.0.3 // indirect + github.com/mongodb-forks/digest v1.0.5 // indirect + github.com/montanaflynn/stats v0.7.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect github.com/nwaples/rardecode v1.1.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/opencontainers/runc v1.1.4 // indirect - github.com/openlyinc/pointy v1.1.2 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/runc v1.2.0-rc.1 // indirect + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect + github.com/oracle/oci-go-sdk/v59 v59.0.0 // indirect github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.8 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect - github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/segmentio/fasthash v1.0.3 // indirect + github.com/sergi/go-diff v1.1.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect - github.com/snowflakedb/gosnowflake v1.6.3 // indirect + github.com/snowflakedb/gosnowflake v1.7.2 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect - github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect + github.com/sony/gobreaker v0.5.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/std-uritemplate/std-uritemplate/go v0.0.50 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect - github.com/tilinna/clock v1.0.2 // indirect + github.com/tilinna/clock v1.1.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vmware/govmomi v0.18.0 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.0.2 // indirect - github.com/xdg-go/stringprep v1.0.2 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect @@ -419,28 +512,37 @@ require ( github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.etcd.io/etcd/api/v3 v3.5.0 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/otel/metric v0.20.0 // indirect - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/text v0.5.0 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect + github.com/zclconf/go-cty v1.12.1 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/resty.v1 v1.12.0 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.22.2 // indirect - k8s.io/apimachinery v0.22.2 // indirect - k8s.io/client-go v0.22.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + k8s.io/api v0.29.1 // indirect + k8s.io/client-go v0.29.1 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) + +replace github.com/ma314smith/signedxml v1.1.1 => github.com/moov-io/signedxml v1.1.1 diff --git a/go.sum b/go.sum index 5280d0540b70..b0cc8032fb32 100644 --- a/go.sum +++ b/go.sum @@ -6,6 +6,7 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= 
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -18,6 +19,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -33,65 +35,1185 @@ cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0= +cloud.google.com/go/accessapproval v1.7.3/go.mod h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8= +cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod 
h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk= +cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ= +cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo= +cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw= +cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA= +cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8= +cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8= +cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y= +cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod 
h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18= +cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo= +cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8= +cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw= +cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo= +cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk= +cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4= +cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0= +cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod 
h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U= +cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI= +cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU= +cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg= +cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4= +cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs= +cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= +cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= +cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/automl v1.13.2/go.mod 
h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg= +cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY= +cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= +cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88= +cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM= +cloud.google.com/go/baremetalsolution v1.2.3/go.mod h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8= +cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98= +cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk= +cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc= +cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod 
h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= +cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= +cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= +cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= +cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg= +cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU= +cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= +cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE= +cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0= +cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ= 
+cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00= +cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= +cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ= +cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk= +cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU= +cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg= +cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= +cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk= +cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU= +cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= +cloud.google.com/go/cloudsqlconn v1.4.3 h1:/WYFbB1NtMtoMxCbqpzzTFPDkxxlLTPme390KEGaEPc= +cloud.google.com/go/cloudsqlconn v1.4.3/go.mod h1:QL3tuStVOO70txb3rs4G8j5uMfo5ztZii8K3oGD3VYA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod 
h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.2/go.mod h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk= +cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys= +cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights 
v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU= +cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE= +cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso= +cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= +cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04= +cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4= +cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= +cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY= +cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8= +cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod 
h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= +cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk= +cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo= +cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE= +cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM= +cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg= +cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0= +cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE= +cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/datalabeling v0.8.2/go.mod h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE= +cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI= +cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex 
v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs= +cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y= +cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= +cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o= +cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4= +cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs= +cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4= +cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod 
h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c= +cloud.google.com/go/datastream v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0= +cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g= +cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw= +cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk= +cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c= +cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ= +cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0= +cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod 
h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo= +cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y= +cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I= +cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU= +cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4= +cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA= +cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/essentialcontacts v1.6.3/go.mod h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo= +cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM= +cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod 
h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y= +cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM= +cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE= +cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0= +cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= +cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE= +cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug= +cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= +cloud.google.com/go/gkebackup v0.2.0/go.mod 
h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk= +cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I= +cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY= +cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo= +cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY= +cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8= +cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8= +cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo= +cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU= 
+cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48= +cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= +cloud.google.com/go/iap v1.9.1/go.mod h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q= +cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI= +cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk= +cloud.google.com/go/ids v1.4.3/go.mod h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU= +cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod 
h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg= +cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I= +cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/monitoring v1.2.0 h1:fEvQITrhVcPM6vuDQcgPMbU5kZFeQFwZmE7v6+S8BPo= -cloud.google.com/go/monitoring v1.2.0/go.mod h1:tE8I08OzjWmXLhCopnPaUDpfGOEJOonfWXGR9E9SsFo= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= +cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= +cloud.google.com/go/kms v1.15.6 h1:ktpEMQmsOAYj3VZwH020FcQlm23BVYg8T8O1woG2GcE= +cloud.google.com/go/kms v1.15.6/go.mod h1:yF75jttnIdHfGBoE51AKsD/Yqf+/jICzB9v1s1acsms= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= +cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U= +cloud.google.com/go/language v1.12.1/go.mod h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4= +cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA= +cloud.google.com/go/lifesciences v0.9.3/go.mod 
h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM= +cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= +cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= +cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= +cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/managedidentities v1.6.2/go.mod h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8= +cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4= +cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY= +cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4= +cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8= +cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y= +cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= +cloud.google.com/go/memcache v1.4.0/go.mod 
h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A= +cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo= +cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk= +cloud.google.com/go/metastore v1.13.1/go.mod h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU= +cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA= +cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= +cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= +cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= +cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= +cloud.google.com/go/monitoring v1.17.0 h1:blrdvF0MkPPivSO041ihul7rFMhXdVp8Uq7F59DKXTU= +cloud.google.com/go/monitoring v1.17.0/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod 
h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug= +cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo= +cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= +cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI= +cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw= +cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI= +cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0= +cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= +cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A= +cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ= +cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/optimization v1.4.1/go.mod 
h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= +cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8= +cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo= +cloud.google.com/go/optimization v1.6.2/go.mod h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0= +cloud.google.com/go/orchestration v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs= +cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o= +cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM= +cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0= +cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM= +cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE= +cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg= +cloud.google.com/go/oslogin 
v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU= +cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8= +cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8= +cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= +cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U= +cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs= +cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc= +cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE= +cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.5.1 h1:dWyj10TLlaxH2No6+tXsSCaq9oWgrRbXy1N3x/bhMGU= -cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod 
h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y= +cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8= +cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= +cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18= +cloud.google.com/go/recommender v1.11.2/go.mod 
h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y= +cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA= +cloud.google.com/go/redis v1.13.3/go.mod h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U= +cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE= +cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U= +cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk= +cloud.google.com/go/resourcesettings v1.6.3/go.mod h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic= +cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8= +cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo= +cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= 
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU= +cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s= +cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE= +cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY= +cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc= +cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= +cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI= +cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg= +cloud.google.com/go/security v1.15.3/go.mod h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs= +cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= 
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s= +cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI= +cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI= +cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg= +cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= 
+cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc= +cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc= +cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0= +cloud.google.com/go/spanner v1.55.0 h1:YF/A/k73EMYCjp8wcJTpkE+TcrWutHRlsCtlRSfWS64= +cloud.google.com/go/spanner v1.55.0/go.mod h1:HXEznMUVhC+PC+HDyo9YFG2Ajj5BQDkcbqB9Z2Ffxi0= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA= +cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI= +cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= -code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.36.0 
h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= +cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs= +cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA= +cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo= +cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY= +cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4= +cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so= +cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU= +cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY= +cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/trace v1.10.2/go.mod 
h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA= +cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY= +cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8= +cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY= +cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= +cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU= +cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA= +cloud.google.com/go/video v1.20.3/go.mod h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc= +cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo= +cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 
v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM= +cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw= +cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8= +cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo= +cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk= +cloud.google.com/go/vmwareengine v1.0.2/go.mod h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA= +cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU= +cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc= +cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc= 
+cloud.google.com/go/webrisk v1.9.3/go.mod h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8= +cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas= +cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw= +cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= +cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= +cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= +cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= +cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20221206110420-d395f97c4830/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= 
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v67.0.0+incompatible h1:SVBwznSETB0Sipd0uyGJr7khLhJOFRUEUb+0JgkCvDo= -github.com/Azure/azure-sdk-for-go v67.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= -github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0 h1:Hp+EScFOu9HeCbeW8WU2yQPJd4gGwhMgKxWe+G6jNzw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.2.0/go.mod h1:/pz8dyNQe+Ey3yBp/XuYz7oqX8YDNWVpPB0hH3XWfbc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0 h1:z4YeiSXxnUI+PqB46Yj6MZA3nwb1CcJIkEMDrzUd8Cs= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.2.0/go.mod h1:rko9SzMxcMk0NJsNAxALEGaTYyy79bNRwxgJfrH0Spw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -100,20 +1222,23 @@ github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKn github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod 
h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= @@ -135,27 +1260,36 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= -github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 
h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -164,10 +1298,11 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -175,25 +1310,29 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.0 h1:BBgYMxl5YZDZVIijz02AlDINpYZOzQqRNCl9CZM13vk= -github.com/Microsoft/hcsshim v0.9.0/go.mod h1:VBJWdC71NSWPlEo7lwde1aL21748J8B6Sdgno7NqEGE= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.10/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
-github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895 h1:NsReiLpErIPzRrnogAXYwSoU7txA977LjDGrbkewJbg= -github.com/ProtonMail/go-crypto v0.0.0-20220824120805-4b6e5c587895/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= +github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= @@ -204,25 +1343,46 @@ github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KM github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/aerospike/aerospike-client-go/v5 v5.6.0 h1:tRxcUq0HY8fFPQEzF3EgrknF+w1xFO0YDfUb9Nm8yRI= github.com/aerospike/aerospike-client-go/v5 v5.6.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= +github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= 
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
-github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
-github.com/aliyun/alibaba-cloud-sdk-go v1.61.1842 h1:OU/hXgBKAQRNqGflA3cxC9NZNMnAAKrQjHDCo0/6ND4=
-github.com/aliyun/alibaba-cloud-sdk-go v1.61.1842/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
+github.com/aliyun/alibaba-cloud-sdk-go v1.62.676 h1:ChWMMr76tXrRh3ximWQyg83EROEfkkXQGkrhnzDCpr8=
+github.com/aliyun/alibaba-cloud-sdk-go v1.62.676/go.mod h1:CJJYa1ZMxjlN/NbXEwmejEnBkhi0DV+Yb3B2lxf+74o=
 github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
 github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
+github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64 h1:ZsPrlYPY/v1PR7pGrmYD/rq5BFiSPalH8i9eEkSfnnI=
-github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
+github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw=
+github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k=
 github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -230,58 +1390,76 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
-github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
-github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
-github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
 github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
+github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.128 h1:X34pX5t0LIZXjBY11yf9JKMP3c1aZgirh+5PjtaZyJ4=
-github.com/aws/aws-sdk-go v1.44.128/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go-v2 v1.8.0 h1:HcN6yDnHV9S7D69E7To0aUppJhiJNEzQSNcUxc7r3qo=
-github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
-github.com/aws/aws-sdk-go-v2/config v1.6.0 h1:rtoCnNObhVm7me+v9sA2aY+NtHNZjjWWC3ifXVci+wE=
-github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
-github.com/aws/aws-sdk-go-v2/credentials v1.3.2 h1:Uud/fZzm0lqqhE8kvXYJFAJ3PGnagKoUcvHq1hXfBZw=
-github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0 h1:SGqDJun6tydgsSIFxv9+EYBJVqVUwg2QMJp6PbNq8C8=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0 h1:Iqp2aHeRF3kaaNuDS82bHBzER285NM6lLPAgsxHCR2A=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0 h1:xu45foJnwMwBqSkIMKyJP9kbyHi5hdhZ/WiJ7D2sHZ0=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2 h1:YcGVEqLQGHDa81776C3daai6ZkkRGf/8RAQ07hV0QcU=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2 h1:Xv1rGYgsRRn0xw9JFNnfpBMZam54PrWpC4rJOJ9koA8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2 h1:ewIpdVz12MDinJJB/nu1uUiFIWFnvtd3iV7cEW7lR+M=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0 h1:cxZbzTYXgiQrZ6u2/RJZAkkgZssqYOdydvJPBgIHlsM=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.3.2 h1:b+U3WrF9ON3f32FH19geqmiod4uKcMv/q+wosQjjyyM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.6.1 h1:1Pls85C5CFjhE3aH+h85/hyAk89kQNlAWlEQtIkaFyc=
-github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
-github.com/aws/smithy-go v1.7.0 h1:+cLHMRrDZvQ4wk+KuQ9yH6eEg6KZEJ9RI2IkDqnygCg=
-github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.50.13 h1:yeXram2g7q8uKkQkAEeZyk9FmPzxI4UpGwAZGZtEGmM=
+github.com/aws/aws-sdk-go v1.50.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.23.4 h1:2P20ZjH0ouSAu/6yZep8oCmTReathLuEu6dwoqEgjts=
+github.com/aws/aws-sdk-go-v2 v1.23.4/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
+github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw=
+github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7 h1:eMqD7ku6WGdmcWWXPYun9m6yk6feSULLhJlAtN6rYG4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7/go.mod h1:0oBIfcDV6LScxEW0VgOqxT3e4aqKRp+SYhB9wAd5E3Q=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7 h1:+XYhWhgWs5F3Zx8oa49CXzNvfXrItaDjZB/M172fcHQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7/go.mod h1:L6tcSRyCGxcKfDWUrmv2jv8G1cLDU7d0FUpEFpG9bVE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.29.1 h1:OZI2aJxnfOZzB0uhyTaYIW6MeRMb1Qd2eLMjh0bFsRg=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.29.1/go.mod h1:GiU88YWgOho2cyEyS2YZo3GYz/j4etRYKWbJdcYgpuQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c=
+github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
 github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY=
 github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/immutable v0.4.0 h1:CTqXbEerYso8YzVPxmWxh2gnoRQbbB9X1quUC8+vGZA=
+github.com/benbjohnson/immutable v0.4.0/go.mod h1:iAr8OjJGLnLmVUr9MZ/rz4PWUy6Ouc2JLYuMArmvAJM=
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -293,33 +1471,53 @@ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYE
 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
+github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
 github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
 github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY=
 github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
@@ -334,32 +1532,48 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cjlapao/common-go v0.0.39 h1:bAAUrj2B9v0kMzbAOhzjSmiyDy+rd56r2sy7oEiQLlA=
+github.com/cjlapao/common-go v0.0.39/go.mod h1:M3dzazLjTjEtZJbbxoA5ZDiGCiHmpwqW9l4UWaddwOA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
-github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
-github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 h1:k8q2Nsz7kNaUlysVCnWIFLMUSqiKXaGLdIf9P0GsX2Y=
-github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1 h1:ef0OsiQjSQggHrLFAMDRiu6DfkVSElA5jfG1/Nkyu6c=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20220930021109-9c4e6c59ccf1/go.mod h1:sgaEj3tRn0hwe7GPdEUwxrdOqjBzyjyvyOCGf1OQyZY=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=
+github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
-github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
+github.com/cockroachdb/cockroach-go/v2 v2.3.8 h1:53yoUo4+EtrC1NrAEgnnad4AS3ntNvGup1PAXZ7UmpE=
+github.com/cockroachdb/cockroach-go/v2 v2.3.8/go.mod h1:9uH5jK4yQ3ZQUT9IXe4I2fHzMIF5+JC/oOdzTRgJYJk=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
 github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -367,6 +1581,7 @@ github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj
 github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
 github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
 github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
 github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
@@ -374,8 +1589,10 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
 github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -390,34 +1607,46 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
 github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE=
-github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
+github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
+github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
+github.com/containerd/containerd v1.6.23/go.mod h1:UrQOiyzrLi3n4aezYJbQH6Il+YzTvnHFbEuO3yfDrM4=
+github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
+github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
-github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg=
-github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
 github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
+github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
+github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
 github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
 github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
 github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
+github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -427,42 +1656,68 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak
 github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
 github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
 github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
+github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.4.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
+github.com/containerd/ttrpc v1.1.2/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
 github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
 github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
 github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
 github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
 github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
 github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE=
 github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
+github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.27+incompatible h1:QIudLb9KeBsE5zyYxd1mjzRSkzLg9Wf9QlRwFgd6oTA=
+github.com/coreos/etcd v3.3.27+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
 github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw=
-github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo=
+github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
+github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -474,38 +1729,59 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/couchbase/gocb/v2 v2.3.3 h1:OItaIrFqXR1ba9J77E2YOU+CSF9G9FHYivV26Xgoi98=
-github.com/couchbase/gocb/v2 v2.3.3/go.mod h1:h4b3UYDnGI89hMW9VypVjAr+EE0Ki4jjlXJrVdmSZhQ=
-github.com/couchbase/gocbcore/v10 v10.0.4 h1:RJ+dSXxMUbrpfgYEEUhMYwPH1S5KvcQYve3D2aKHP28=
-github.com/couchbase/gocbcore/v10 v10.0.4/go.mod h1:s6dwBFs4c3+cAzZbo1q0VW+QasudhHJuehE8b8U2YNg=
+github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf h1:GOPo6vn/vTN+3IwZBvXX0y5doJfSC7My0cdzelyOCsQ=
+github.com/coreos/pkg v0.0.0-20220810130054-c7d1c02cb6cf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/couchbase/gocb/v2 v2.6.5 h1:xaZu29o8UJEV1ZQ3n2s9jcRCUHz/JsQ6+y6JBnVsy5A=
+github.com/couchbase/gocb/v2 v2.6.5/go.mod h1:0vFM09y+VPhnXeNrIb8tS0wKHGpJvjJBrJnriWEiwGs=
+github.com/couchbase/gocbcore/v10 v10.2.9/go.mod h1:lYQIIk+tzoMcwtwU5GzPbDdqEkwkH3isI2rkSpfL0oM=
+github.com/couchbase/gocbcore/v10 v10.3.1 h1:dx+lub02eDYiQXavtF0EwYMppVUcbjCxAAqa6/nQldg=
+github.com/couchbase/gocbcore/v10 v10.3.1/go.mod h1:lYQIIk+tzoMcwtwU5GzPbDdqEkwkH3isI2rkSpfL0oM=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230307083111-cc3960c624b1/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2TXy68EGEzIMHOx9UvczR5ApVecwCfQZ0LjkmwMI6g4=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw=
-github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
+github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw=
+github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo=
 github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
 github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -514,33 +1790,42 @@ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nb
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU=
-github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU=
+github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc=
-github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -553,11 +1838,23 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf
 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY=
+github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -567,215 +1864,233 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
 github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/favadi/protoc-go-inject-tag v1.3.0 h1:JPrmsmc/uBShG85uY5xGZIa5WJ0IaNZn6LZhQR9tIQE=
-github.com/favadi/protoc-go-inject-tag v1.3.0/go.mod h1:SSkUBgfqw2IJ2p7NPNKWk0Idwxt/qIt2LQgFPUgRGtc=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8=
 github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
-github.com/gabriel-vasile/mimetype v1.3.1 h1:qevA6c2MtE1RorlScnixeG0VA1H4xrXyhyX3oWBynNQ=
-github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
-github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8=
-github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI=
-github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc= -github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= +github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= -github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= -github.com/go-errors/errors v1.4.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= 
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.66.6/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-ldap/ldap/v3 v3.1.7/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= -github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= -github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI= +github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= +github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 h1:sfz1YppV05y4sYaW7kXZtrocU/+vimnIWt4cxAYh7+o= github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3/go.mod h1:ZXFhGda43Z2TVbfGZefXyMJzsDHhCh0go3bZUcwTx7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= 
-github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9 h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod 
h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= +github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod 
h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= -github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.22.2 h1:Lda8nadL/5kIvS5mdXCAIuZ7IVXvKFIppLnw+EZh+n0= +github.com/go-openapi/validate v0.22.2/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 
h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod 
h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c= github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -791,13 +2106,21 @@ github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -832,19 +2155,28 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -859,25 +2191,31 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= +github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 
h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -887,26 +2225,43 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I= github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY= +github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -914,58 +2269,88 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod 
h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod 
h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 h1:Vgv3jG0kicczshK+lOHWJ9OososZjnjSu1YslqofFYY= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= -github.com/hashicorp/consul-template v0.29.5 h1:tzEo93RqODAX2cgOe/ke8xcpdPdxg5rxl6d22wE3f6c= -github.com/hashicorp/consul-template v0.29.5/go.mod h1:SZGBPz/t0JaBwMOqM6q/mG66cBRA8IeDUjOwjO0Pa5M= -github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= -github.com/hashicorp/consul/api v1.15.2/go.mod h1:v6nvB10borjOuIwNRZYPZiHKrTM/AyrGtd0WVVodKM8= +github.com/hashicorp-forge/bbolt v1.3.8-hc3 h1:iTWR3RDPj0TGChAvJ8QjHFcNFWAUVgNQV73IE6gAX4E= +github.com/hashicorp-forge/bbolt v1.3.8-hc3/go.mod h1:sQBu5UIJ+rcUFU4Fo9rpTHNV935jwmGWS3dQ/MV8810= +github.com/hashicorp/cap v0.6.0 h1:uOSdbtXu8zsbRyjwpiTy6QiuX3+5paAbNkYlop7QexM= +github.com/hashicorp/cap v0.6.0/go.mod h1:DwzHkoG6pxSARiqwvAgxmCPUpTTCCw2wVuPrIFOzpe0= +github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7/go.mod h1:q+c9XV1VqloZFZMu+zdvfb0cm7UrvKbvtmTF5wX5Q9o= +github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227 h1:R5CMNyBNZqODw2DcGaSa2X96AgtLotXsH7aOa07zTTI= +github.com/hashicorp/cap/ldap v0.0.0-20240328153749-fcfe271d0227/go.mod h1:Ofp5fMLl1ImcwjNGu9FtEwNOdxA0LYoWpcWQE2vltuI= +github.com/hashicorp/cli v1.1.6 
h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= +github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= +github.com/hashicorp/consul-template v0.36.1-0.20240213145952-6c83e89b48af h1:DrkJy2yiqrHIVEqgtn4X0A7j5wjy5MxrJXvGNVwtSsY= +github.com/hashicorp/consul-template v0.36.1-0.20240213145952-6c83e89b48af/go.mod h1:bvidXKwpfXzJ1X4wDw68OXnVxy5k7HLOHhOf5gnQr3M= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.27.0 h1:gmJ6DPKQog1426xsdmgk5iqDyoRiNc+ipBdJOqKQFjc= +github.com/hashicorp/consul/api v1.27.0/go.mod h1:JkekNRSou9lANFdt+4IKx3Za7XY0JzzpQjEb4Ivo1c8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= -github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= -github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= -github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/consul/sdk v0.15.1 h1:kKIGxc7CZtflcF5DLfHeq7rOQmRq3vk7kwISN9bif8Q= +github.com/hashicorp/consul/sdk v0.15.1/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/eventlogger v0.2.8 h1:WITwN1QGMtxHQE7l69WBwLJIJA5vH5pcvX69peURU14= +github.com/hashicorp/eventlogger v0.2.8/go.mod h1://CHt6/j+Q2lc0NlUB5af4aS2M0c0aVBg9/JfcpAyhM= +github.com/hashicorp/go-bexpr v0.1.12 h1:XrdVhmwu+9iYxIUWxsGVG7NQwrhzJZ0vR6nbN5bLgrA= +github.com/hashicorp/go-bexpr v0.1.12/go.mod h1:ACktpcSySkFNpcxWSClFrut7wicd9WzisnvHuw+g9K8= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -973,63 +2358,69 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo= github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE= github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= +github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.8.0 h1:/2vGAbCU1v+BZ3YHXTCzTvxqma9WOJHYtADTfhZixLo= github.com/hashicorp/go-gcp-common v0.8.0/go.mod h1:Q7zYRy9ue9SuaEN2s9YLIQs4SoKHdoRmKRcImY3SLgs= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog 
v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=
+github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8=
github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
-github.com/hashicorp/go-kms-wrapping/v2 v2.0.5 h1:rOFDv+3k05mnW0oaDLffhVUwg03Csn0mvfO98Wdd2bE=
-github.com/hashicorp/go-kms-wrapping/v2 v2.0.5/go.mod h1:sDQAfwJGv25uGPZA04x87ERglCG6avnRcBT9wYoMII8=
-github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4 h1:ws2CPDuXMKwaBb2z/duBCdnB9pSxlN2nuDZWXcVj6RU=
-github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.4/go.mod h1:dDxt3GXi5QONVHYrJi2+EjsJLCUs59FktZQA8ZMnm+U=
-github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI=
-github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1/go.mod h1:Sl/ffzV57UAyjtSg1h5Km0rN5+dtzZJm1CUztkoCW2c=
-github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1 h1:WxpTuafkDjdeeu0Xtk9y3m9YAJhfFMb8+y6eTnxvV8A=
-github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.1/go.mod h1:3D5UB9fjot4oUTYGQ5gGmhLJKreyLZeI0XB+NxcLTKs=
-github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1 h1:6joKpqCFveaNMEwC3qna67usws6DjdxqfCuQEHSM0aM=
-github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.1/go.mod h1:sDmsWR/W2LqwU217o32RzdHMb/FywGLF72PVIhpZ3hE=
-github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1 h1:+paf/3ompzaXe07BdxkV1vTnqvhwtmZPE4yQnMPTThI=
-github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.1/go.mod h1:YRtkersQ2N3iHlPDG5B3xBQtBsNZ3bjmlCwnrl26jVE=
-github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0 h1:FnWV2E0NLj+yYdhToUQjU81ayCMgURiL2WbJ0V7u/XY=
-github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.0/go.mod h1:17twrc0lM8IpfGqIv69WQvwgDiu3nRwWlk5YfCSQduY=
-github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1 h1:72zlIBTJd2pvYmINqotpvcI4ZXLxhRq2cVPTuqv0xqY=
-github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.1/go.mod h1:JytRAxdJViV+unUUWedb7uzEy5pgu7OurbqX0eHEikE=
-github.com/hashicorp/go-memdb v1.3.3 h1:oGfEWrFuxtIUF3W2q/Jzt6G85TrMk9ey6XfYLvVe1Wo=
-github.com/hashicorp/go-memdb v1.3.3/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg=
+github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1 h1:KIge4FHZEDb2/xjaWgmBheCTgRL6HV4sgTfDsH876L8=
+github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.1/go.mod h1:aHO1EoFD0kBYLBedqxXgalfFT8lrWfP7kpuSoaqGjH0=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.16 h1:WZeXfD26QMWYC35at25KgE021SF9L3u9UMHK8fJAdV0=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.16/go.mod h1:ZiKZctjRTLEppuRwrttWkp71VYMbTTCkazK4xT7U/NQ=
+github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9 h1:HpGOHc0Vd3aacMAEtAUVe38zMcq7BfYQSjrGCmtRNx0=
+github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.9/go.mod h1:ygxw8l40DbAQQ587OzoB3bsBWVpB0e/BOWKlXgYkfG8=
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3 h1:36Pxy8BQd7DAJ2Mk6vuJlIjqQ80e20vlO7a4Ep3RTOg=
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.3/go.mod h1:heY2PS1SGU0cMamgv+zId/sKT+XFHaf61bLOSnP1Gb8=
+github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9 h1:qdxeZvDMRGZ3YSE4Oz0Pp7WUSUn5S6cWZguEOkEVL50=
+github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.9/go.mod h1:DcXbvVpgNWbxGmxgmu3QN64bEydMu14Cpe34RRR30HY=
+github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11 h1:/7SKkYIhA8cr3l8m1EKT6Q90bPoSVqqVBuQ6HgoMIkw=
+github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.11/go.mod h1:LepS5s6ESGE0qQMpYaui5lX+mQYeiYiy06VzwWRioO8=
+github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12 h1:PCqWzT/Hii0KL07JsBZ3lJbv/wx02IAHYlhWQq8rxRY=
+github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.12/go.mod h1:HSaOaX/lv3ShCdilUYbOTPnSvmoZ9xtQhgw+8hYcZkg=
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM=
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc=
+github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.11 h1:hdzSrDJ0CgHgGFx+1toaf7Z5bmQ2EYaFQ/dtWNXxu1I=
+github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.11/go.mod h1:ywjP17x2t88pT3GA8gCc2vEH1vhvU1R9d5XwRQ0d7PQ=
+github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
+github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
+github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I=
+github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo=
-github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
+github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=
+github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI=
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
+github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA=
-github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg=
+github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0 h1:I8bynUKMh9I7JdwtW9voJ0xmHvBpxQtLjrMFDYmhOxY=
+github.com/hashicorp/go-secure-stdlib/awsutil v0.3.0/go.mod h1:oKHSQs4ivIfZ3fbXGQOop1XuDfdSb8RIsWTGaAanSfg=
github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng=
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
@@ -1039,148 +2430,166 @@ github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 h1:9um9R8i0+HbRHS9d64kd
github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1/go.mod h1:6RoRTSMDK2H/rKh3P/JIsk1tK8aatKTt3JyvIopi3GQ=
github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 h1:NS6BHieb/pDfx3M9jDdaPpGyyVp+aD4A3DjX3dgRmzs=
github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2/go.mod h1:rf5JPE13wi+NwjgsmGkbg4b2CgHq8v7Htn/F0nDe/hg=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
-github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ=
github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 h1:kH3Rhiht36xhAfhuHyWJDgdXXEx9IIZhDGRk24CDhzg=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.3/go.mod h1:ov1Q0oEDjC3+A4BwsG2YdKltrmEw8sf9Pau4V9JQ4Vo=
+github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 h1:iJG9Q3iUme12yH+wzBMGYrw/Am4CfX3sDcA8m5OGfhQ=
+github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0/go.mod h1:s28ohJ0kU6tersf0it/WsBCyZSdziPlP+G1FRA3ar28=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0=
github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0 h1:KMWpBsC65ZBXDpoxJ0n2/zVfZaZIW73k2d8cy5Dv/Kk=
+github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0/go.mod h1:qKYwSZ2EOpppko5ud+Sh9TrUgiTAZSaQCr8XWIYXsbM=
github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI=
github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZd49JRgHYqHQjtEmTgGU2faufpVZb0=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY=
github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
-github.com/hashicorp/go-slug v0.7.0 h1:8HIi6oreWPtnhpYd8lIGQBgp4rXzDWQTOhfILZm+nok=
-github.com/hashicorp/go-slug v0.7.0/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3 h1:xbrxd0U9XQW8qL1BAz2XrAjAF/P2vcqUTAues9c24B8=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.3/go.mod h1:LWq2Sy8UoKKuK4lFuCNWSjJj57MhNNf2zzBWMtkAIX4=
+github.com/hashicorp/go-slug v0.13.4 h1:dIyjGKFVwbOVAqp0/s7tmONwCNr9D2UvmMuVE4mPfv0=
+github.com/hashicorp/go-slug v0.13.4/go.mod h1:THWVTAXwJEinbsp4/bBRcmbaO5EYNLTqxbG4tZ3gCYQ=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I=
+github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-tfe v0.20.0 h1:XUAhKoCX8ZUQfwBebC8hz7nkSSnqgNkaablIfxnZ0PQ=
-github.com/hashicorp/go-tfe v0.20.0/go.mod h1:gyXLXbpBVxA2F/6opah8XBsOkZJxHYQmghl0OWi8keI=
+github.com/hashicorp/go-tfe v1.44.0 h1:eQ9n2Ecfel6O5j03UW6B9LNsM1x6KbHErsjwSd9BLmg=
+github.com/hashicorp/go-tfe v1.44.0/go.mod h1:3ZGX+wxeyp/JnP8qEZo8m3s0ggJ7H+L2BvJRpkRdtVU=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/hcp-link v0.1.0 h1:F6F1cpADc+o5EBI5CbJn5RX4qdFSLpuA4fN69eeE5lQ=
-github.com/hashicorp/hcp-link v0.1.0/go.mod h1:BWVDuJDHrKJtWc5qI07bX5xlLjSgWq6kYLQUeG1g5dM=
-github.com/hashicorp/hcp-scada-provider v0.1.0 h1:FSjTw7EBl6GJFv5533harm1vw15OaEYodNGHde908MI=
-github.com/hashicorp/hcp-scada-provider v0.1.0/go.mod h1:8Pp3pBLzZ9DL56OHSbf55qhh+TpvmXBuR5cJx9jcdcA=
-github.com/hashicorp/hcp-sdk-go v0.22.0 h1:LWkLOkJFYWSojBM3IkwvYK6nrwrL+p4Fw8zEaoCQG10=
-github.com/hashicorp/hcp-sdk-go v0.22.0/go.mod h1:mM3nYdVHuv2X2tv88MGVKRf/o2k3zF8jUZSMkwICQ28=
-github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0=
-github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik=
+github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0=
+github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
+github.com/hashicorp/hcp-link v0.2.1 h1:8w4YVJxRb2C7oXN+hCPSyDbBeo7RQsIYTR6nQXJt6f8=
+github.com/hashicorp/hcp-link v0.2.1/go.mod h1:6otT7bD+nBW1cyzgz8Z4BPziZfwxTtAEkYUrF/MOT8o=
+github.com/hashicorp/hcp-scada-provider v0.2.2 h1:S4Kz+Vc02XOz/5Sm9Gug6ivfyfgchM6qv48cgz0uRls=
+github.com/hashicorp/hcp-scada-provider v0.2.2/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0=
+github.com/hashicorp/hcp-sdk-go v0.75.0 h1:5SLvNpcTeZnG7YnwWIaZlqCottFCGKldEIQnaYjOIq8=
+github.com/hashicorp/hcp-sdk-go v0.75.0/go.mod h1:5GwdT+HGhEQsh4n1yK+RADnQkfOo6vHgr2BpYUt2t9U=
+github.com/hashicorp/jsonapi v1.3.1 h1:GtPvnmcWgYwCuDGvYT5VZBHcUyFdq9lSyCzDjn1DdPo=
+github.com/hashicorp/jsonapi v1.3.1/go.mod h1:kWfdn49yCjQvbpnvY1dxxAuAFzISwrrMDQOcu6NsFoM=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
-github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=
-github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q=
-github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 h1:fo8EbQ6tc9hYqxik9CAdFMqy48TW8hh2I3znysPqf+0=
-github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28/go.mod h1:FslB+3eLbZgkuPWffqO1GeNzBFw1SuVqN2PXsMNe0Fg=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w=
+github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo=
+github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d h1:nvfutImOr3GgkMSMjfNdTil9e54vtyQxxyHZ+NHII3Y=
+github.com/hashicorp/nomad/api v0.0.0-20240213164230-c364cb57298d/go.mod h1:ijDwa6o1uG1jFSq6kERiX2PamKGpZzTmo0XOFNeFZgw=
github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
-github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
-github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw=
-github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4=
+github.com/hashicorp/raft v1.6.0 h1:tkIAORZy2GbJ2Trp5eUSggLXDPOJLXC+JJLNMMqtgtM=
+github.com/hashicorp/raft v1.6.0/go.mod h1:Xil5pDgeGwRWuX4uPUmwa+7Vagg4N804dz6mhNi6S7o=
github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y=
github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8=
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
-github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4=
-github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo=
+github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ=
+github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0=
+github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA=
+github.com/hashicorp/raft-boltdb/v2 v2.3.0/go.mod h1:YHukhB04ChJsLHLJEUD6vjFyLX2L3dsX3wPBZcX4tmc=
github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBuma1IYSow=
github.com/hashicorp/raft-snapshot v1.0.4/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic=
-github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY=
-github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
-github.com/hashicorp/vault-plugin-auth-alicloud v0.5.4-beta1.0.20221117202053-722c59caa2d0 h1:f4Ay9naDgZwW77q6Jpiy/zMlXC1MDWV2Kwop6uud3f8=
-github.com/hashicorp/vault-plugin-auth-alicloud v0.5.4-beta1.0.20221117202053-722c59caa2d0/go.mod h1:EjGPliIfEWITTGsi8KD/aZgIActKDfDVwStpqpCtrM0=
-github.com/hashicorp/vault-plugin-auth-azure v0.11.2-0.20221108185759-ac6743d5f0f2 h1:cVT7MJAl5uwXFtLMQBA7DDE5GDLEU+1BE03ew1ygY88=
-github.com/hashicorp/vault-plugin-auth-azure v0.11.2-0.20221108185759-ac6743d5f0f2/go.mod h1:AhoXnb4AfiYU9Q3k2njrdovIIftmyaQtdt6cm8Q6sQc=
-github.com/hashicorp/vault-plugin-auth-centrify v0.13.0 h1:IbtgJAY3EFyY+8n9A3QMn3MDGsvfQKDdH60r8G/C0nA=
-github.com/hashicorp/vault-plugin-auth-centrify v0.13.0/go.mod h1:3fDbIVdwA/hkOVhwktKHDX5lo4DqIUUVbBdwQNNvxHw=
-github.com/hashicorp/vault-plugin-auth-cf v0.13.0 h1:Iu4nRoZrkaLbW4vJ8t/wYS8z5BG4VQI7nKpBuwPTpOU=
-github.com/hashicorp/vault-plugin-auth-cf v0.13.0/go.mod h1:Tktv1OXUjFobzjAU5qNJA8t1KC0109eu6Pcgm1uiwHg=
-github.com/hashicorp/vault-plugin-auth-gcp v0.13.2-0.20221103133215-2fc20fb9fc44 h1:fs7xVuv/ZDWMY0vsOc4GX2gsjV/aLjbL6nmwBRqkZEI=
-github.com/hashicorp/vault-plugin-auth-gcp v0.13.2-0.20221103133215-2fc20fb9fc44/go.mod h1:jVm8AWcxaSZSxBXaQOdMLUXMY/NpeLnc2fVH5cO4sAs=
-github.com/hashicorp/vault-plugin-auth-jwt v0.14.0 h1:Wzg9qqAdEh1DQwsKf2ruggqaSbIdeTaZfDmO1Nn7YqA=
-github.com/hashicorp/vault-plugin-auth-jwt v0.14.0/go.mod h1:oWM7Naj8lo4J9vJ23S0kpNW9pmeiHRiG/9ghLlPu6N0=
-github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0 h1:5PiNahpVYFnQIg0Np3wLiFnfhHfnAHcWTl3VSzUVu/Y=
-github.com/hashicorp/vault-plugin-auth-kerberos v0.8.0/go.mod h1:eqjae8tMBpAWgJNk1NjV/vtJYXQRZnYudUkBFowz3bY=
-github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0 h1:Hz/CcpNYfi99cUUMg5Tfx3uElKuvQ0wGGpy0L2bqAzk=
-github.com/hashicorp/vault-plugin-auth-kubernetes v0.14.0/go.mod h1:rouq4XoBoCzXtECtxGCWHS++g6Nzw2HOms6p6N+Uzkw=
-github.com/hashicorp/vault-plugin-auth-oci v0.12.0 h1:7Tuj5q+rwyPm1aS1rsLg2TRo2QIrPTz1qNHGDkUvz18=
-github.com/hashicorp/vault-plugin-auth-oci v0.12.0/go.mod h1:oj2gh7qH2VzjelFeul8FzDmmYrJXnCuLUUeQAA6fMN8=
-github.com/hashicorp/vault-plugin-database-couchbase v0.8.0 h1:lDZ1OazKfSPIb1DXLbq7NCf1BZwB1cFN3OG3NedXB/s=
-github.com/hashicorp/vault-plugin-database-couchbase v0.8.0/go.mod h1:skmG6MgIG6fjIOlOEgVKOcNlr1PcgHPUb9q1YQ5+Q9k=
-github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0 h1:g+jD35qUZlDcS2YWQBqXbfpMNBTvGEvRzSYjwLgWOK4=
-github.com/hashicorp/vault-plugin-database-elasticsearch v0.12.0/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao=
-github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 h1:wx/9Dh9YGGU7GiijwRfwPFBlWdmBEdf6n2VhgTdRtJU=
-github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0/go.mod h1:eWwd1Ba7aLU1tIAtmFsEhu9E023jkkypHawxhnAbZfc=
-github.com/hashicorp/vault-plugin-database-redis v0.1.0 h1:fDT32ZphGdvVdenvieWb+ZjWmCOHFtZ1Qjv581BloHw=
-github.com/hashicorp/vault-plugin-database-redis v0.1.0/go.mod h1:bzrD2dQUClKcl89yYsaZqboFDEzst+TpXROWuhVxLEM=
-github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0 h1:qwDcp1vdlT0Io0x5YjtvhXtndfQB66jnDICg7NHxKQk=
-github.com/hashicorp/vault-plugin-database-redis-elasticache v0.1.0/go.mod h1:gB/SMtnIf0NdDyPSIo0KgSNp1ajTvLDiwP+lIAy8uHs=
-github.com/hashicorp/vault-plugin-database-snowflake v0.6.1 h1:jnWNKqRmRXNpXOC4FvAOXHxPKAmDZRVS+d5fcbRQ/Xw=
-github.com/hashicorp/vault-plugin-database-snowflake v0.6.1/go.mod h1:QJ8IL/Qlu4Me1KkL0OpaWO7aMFL0TNoSEKVB5F+lCiM=
+github.com/hashicorp/raft-wal v0.4.0 h1:oHCQLPa3gBTrfuBVHaDg2b/TVXpU0RIyeH/mU9ovk3Y=
+github.com/hashicorp/raft-wal v0.4.0/go.mod h1:A6vP5o8hGOs1LHfC1Okh9xPwWDcmb6Vvuz/QyqUXlOE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
+github.com/hashicorp/vault-hcp-lib v0.0.0-20240402205111-2312b38227ab h1:n1GzFf7LwpVebVIjh5XKW2IQa/BqI/zPlFg2mmB26dQ=
+github.com/hashicorp/vault-hcp-lib v0.0.0-20240402205111-2312b38227ab/go.mod h1:Nb41BTPvmFbKB73D/+XpxIw6Nf2Rt+AOUvLzlDxwAGQ=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.17.0 h1:0SOkYxjMjph3Tbtv37+pANJQnYDvlAdjKpdEbK6zzZs=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.17.0/go.mod h1:79KUWOxY6Ftoad7b+vEmyCmY6eYKdHiADTP0w0TunsE=
+github.com/hashicorp/vault-plugin-auth-azure v0.17.0 h1:nFsWQV+sMEdJCvKpVODNeTPP36n5bi6yiQpBOdBsQWw=
+github.com/hashicorp/vault-plugin-auth-azure v0.17.0/go.mod h1:bkIcQTa19JcR426IkSoGEmXX5Yi7nfLp/6M965zvuww=
+github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 h1:6StAr5tltpySNgyUwWC8czm9ZqkO7NIZfcRmxxtFwQ8=
+github.com/hashicorp/vault-plugin-auth-centrify v0.15.1/go.mod h1:xXs4I5yLxbQ5VHcpvSxkRhShCTXd8Zyrni8qnFrfQ4Y=
+github.com/hashicorp/vault-plugin-auth-cf v0.16.0 h1:t4+0LY6002NQvY6c0c43ikZjxqReCHUiy7+YXiMRbKo=
+github.com/hashicorp/vault-plugin-auth-cf v0.16.0/go.mod h1:q+Lt3FhtFlP+pulKSjrbnR8ecu4vY9TlgPvs+nnBey8=
+github.com/hashicorp/vault-plugin-auth-gcp v0.16.2 h1:HC1PpXxGNzfu7IUfN7Ok7dIMV29R8a/2EJ5uDnrpxz0=
+github.com/hashicorp/vault-plugin-auth-gcp v0.16.2/go.mod h1:8FWNvFElzQBWJGCZ3SBPqsSc/x9bge9Et+JuwVLlJPM=
+github.com/hashicorp/vault-plugin-auth-jwt v0.20.3 h1:mLsdorH4m43rBqybHDZKl33rrmc80ens4hSB6E7i9o0=
+github.com/hashicorp/vault-plugin-auth-jwt v0.20.3/go.mod h1:1IQjNAZ2z8GdTPM/XizC6eA4X9brnOXiwSoYEOfuDlM=
+github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0 h1:XjdH8nqosqgKeAwBptMS7DoXsdi8IKL2fbBSyvL/HRM=
+github.com/hashicorp/vault-plugin-auth-kerberos v0.11.0/go.mod h1:xupzh9O6Us6bqKLZ6wfRsjqlf1Mb1TRylKpxZPJd5rA=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.18.0 h1:mGVVdcTI55t/NrMefkLjnenAVunJiQZg5o0opuU7ixw=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.18.0/go.mod h1:ju7B2fxYr3EtC2jX0ft79mUMuEGozz1Ws/ABpVvtlto=
+github.com/hashicorp/vault-plugin-auth-oci v0.15.1 h1:frikend6vdC09I60qmFkRwBVgXLlBz2qe1869bC5J5s=
+github.com/hashicorp/vault-plugin-auth-oci v0.15.1/go.mod h1:i3KYRLQFpAIJuvbXHBMgXzw0563Sp/2mMpAFU5F6Z9I=
+github.com/hashicorp/vault-plugin-database-couchbase v0.10.1 h1:U+UPB8FIh5UJo8mziK36waZ0o6q8Ik6hgncFTuJ1Bwg=
+github.com/hashicorp/vault-plugin-database-couchbase v0.10.1/go.mod h1:yxvB4Ky2JhtUtZOp+7M8z9jupxfEBKIIyiBNs9qvXpA=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.14.0 h1:7v7+WTlQKG/ZikiW3Q4Hef6UBw9A2Q4xAB0ytOkXNdU=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.14.0/go.mod h1:JKcIsHm0bi9tdNnwyOQKGkt8vEz/oO3KjQIsisViu1s=
+github.com/hashicorp/vault-plugin-database-mongodbatlas v0.11.0 h1:DNIwrmviDOq/BdIhFaU6wMYolOl/0N54xYBCy41HN3U=
+github.com/hashicorp/vault-plugin-database-mongodbatlas v0.11.0/go.mod h1:DTrqLTHGxHVPudf4OUnxA3RPFDYwDzvTPuGinok/sH8=
+github.com/hashicorp/vault-plugin-database-redis v0.2.3 h1:Tp/gxLv1XysUgmaufzm9UbP82wZSp8D6nmLw7NqXS8E=
+github.com/hashicorp/vault-plugin-database-redis v0.2.3/go.mod h1:VdBXwRbeN597kcmBptvfPRD55BjrExEFEMPOK+NjH5M=
+github.com/hashicorp/vault-plugin-database-redis-elasticache v0.3.0 h1:8sNYuHOxpUxcq1pxhR4HdYfZIaeEVCiWV5lV25u10ic=
+github.com/hashicorp/vault-plugin-database-redis-elasticache v0.3.0/go.mod h1:IoJwgHiY1vvIbBWO/lidH6wN85jVGnY4k78tZe1jctU=
+github.com/hashicorp/vault-plugin-database-snowflake v0.10.0 h1:XmGY3YsEwhs/LHHO6I9MmiHcI0peL31cQCbHMCniMro=
+github.com/hashicorp/vault-plugin-database-snowflake v0.10.0/go.mod h1:COMbAUyRr1KgNLv0R3n0/olFoy3JkXq57VYd5+9ulPw=
github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
-github.com/hashicorp/vault-plugin-secrets-ad v0.14.0 h1:64qTDXSj3tw1li7lWh13OJoYIbJ/dp9F0kWdHo6vBiU=
-github.com/hashicorp/vault-plugin-secrets-ad v0.14.0/go.mod h1:5XIn6cw1+gG+WWxK0SdEAKCDOXTp+MX90PzZ7f3Eks0=
-github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0 h1:eWDAAvZsKHhnXF8uCiyF/wDqT57fflCs54PTIolONBo=
-github.com/hashicorp/vault-plugin-secrets-alicloud v0.13.0/go.mod h1:F4KWrlCQZbhP2dFXCkRvbHX2J6CTydlaY0cH+OrLHCE=
-github.com/hashicorp/vault-plugin-secrets-azure v0.6.3-0.20221109203402-f955aedc51bf h1:rT7FlTIQwohl9mLjoanAbFc0HbTrgM5au7dTsZQcP+o=
-github.com/hashicorp/vault-plugin-secrets-azure v0.6.3-0.20221109203402-f955aedc51bf/go.mod h1:e4xZZjNwmtiQZ/aneqVI1EDpiRFX2J5GBKuMjFQdhFs=
-github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20221101145740-55dbd0ccd1b8 h1:1RN7Fqc0gtXtL3tdJsmVOHywNs/oiLme9OhAxO5y/3A=
-github.com/hashicorp/vault-plugin-secrets-gcp v0.6.6-0.20221101145740-55dbd0ccd1b8/go.mod h1:fBS4EGykPm4AzgzeL6DdFOL7C0dNxVjzwL2E7h1rTC0=
-github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0 h1:R36pNaaN4tJyIrPJej7/355Qt5+Q5XUTB+Az6rGs5xg=
-github.com/hashicorp/vault-plugin-secrets-gcpkms v0.13.0/go.mod h1:n2VKlYDCuO8+OXN4S1Im8esIL53/ENRFa4gXrvhCVIM=
-github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0 h1:iPue19f7LW63lAo8YFsm0jmo49gox0oIYFPAtVtnzGg=
-github.com/hashicorp/vault-plugin-secrets-kubernetes v0.2.0/go.mod h1:WO0wUxGh1PxhwdBHD7mXU5XQTqLwMZiJrUwVuzx3tIg=
-github.com/hashicorp/vault-plugin-secrets-kv v0.13.3 h1:TUKpQY6fmTiUhaZ71/WTamEuK2JH7ESB92T4VZsq4+g=
-github.com/hashicorp/vault-plugin-secrets-kv v0.13.3/go.mod h1:ikPuEWi2rHaGQCHZuPdn/6D3Bq/25ElX3G9pGeDr0Yg=
-github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0 h1:VREm+cJGUXcPCakaYVxQt8wTVqTwJclsIIk2XuqpPbs=
-github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.8.0/go.mod h1:PLx2vxXukfsKsDRo/PlG4fxmJ1d+H2h82wT3vf4buuI=
-github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0 h1:/6FQzNB4zjep7O14pkVOapwRJvnQ4gINGAc1Ss1IYg8=
-github.com/hashicorp/vault-plugin-secrets-openldap v0.9.0/go.mod h1:o7mF9tWgDkAD5OvvXWM3bOCqN+n/cCpaMm1CrEUZkHc=
-github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0 h1:N5s1ojXyG8gBZlx6BdqE04LviR0rw4vX1dDDMdnEzX8=
-github.com/hashicorp/vault-plugin-secrets-terraform v0.6.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU=
-github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE=
-github.com/hashicorp/vault-testing-stepwise v0.1.2 h1:3obC/ziAPGnsz2IQxr5e4Ayb7tu7WL6pm6mmZ5gwhhs=
-github.com/hashicorp/vault-testing-stepwise v0.1.2/go.mod h1:TeU6B+5NqxUjto+Zey+QQEH1iywuHn0ciHZNYh4q3uI=
-github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221209165735-a2eed407e08d h1:U692VbDl6ww5GQsNFClJVFJDaPeuqtDt1Mwqf21KYek=
-github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20221209165735-a2eed407e08d/go.mod h1:a2crHoMWwY6aiL8GWT8hYj7vKD64uX0EdRPbnsHF5wU=
+github.com/hashicorp/vault-plugin-secrets-ad v0.17.0 h1:yXyjHkFduORBwI6g9GxIorXXKRb/wTwbMLkFEgnqzso=
+github.com/hashicorp/vault-plugin-secrets-ad v0.17.0/go.mod h1:HXT1QFK8wN+HYhWWPAIVYSXnNuBqUDM2TsRgiJT6qUc=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0 h1:rkMe/n9/VylQEm7QeNXgdUaESvLz5UjkokMH1WkFiKU=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.16.0/go.mod h1:xkGzU7LrkgoRhdN2NwLsshqCpjPz2aqkMVzqS6JKJeg=
+github.com/hashicorp/vault-plugin-secrets-azure v0.17.2 h1:k1IQ6T5I+AkeEw0HI1yRsulCqfMUVm/S7T/gYIogXp0=
+github.com/hashicorp/vault-plugin-secrets-azure v0.17.2/go.mod h1:R4SSIIC5/NPpeV7GO1ZQ9z0cLUNufAAVi+oO7bpguUM=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0 h1:RPKGn6Ai/t4QtdCWg9W7VYTe44cN3jDxgtobTsHHfqE=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.18.0/go.mod h1:b5ZdWNoPDo64g5mp16U6UVPTqCU3gKNIZ7Knc//uypg=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.16.0 h1:1wEYeplJl/9FLwBQSmfpqMdKKwmNz/b3e6K9ZOdJK/s=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.16.0/go.mod h1:dTyKQItGjGeCnjdWZtO+LPIpzi192uEwk8UxP/R3rMQ=
+github.com/hashicorp/vault-plugin-secrets-kubernetes v0.7.0 h1:IRsrGZyYjecQrAvpKXodQDus4oQRpGDUsQiyJ8Szlmk=
+github.com/hashicorp/vault-plugin-secrets-kubernetes v0.7.0/go.mod h1:StJ4o4D+ChaQ3lKzaWB5CAGDx62ONP9pDFr7iK8i9HU=
+github.com/hashicorp/vault-plugin-secrets-kv v0.17.0 h1:UNYINnuymcGRT+7UA0MciYAz06/6CB3Ll/P32nTXrNk=
+github.com/hashicorp/vault-plugin-secrets-kv v0.17.0/go.mod h1:2U8dr0BrVNIndr1lYrJ3Q92RCrJzpnDCkAmZe/JRyFo=
+github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.11.0 h1:NU7X28xzc/WBY0jMJNnan+elmKFWv/n5zbWXHfKf9/I=
+github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.11.0/go.mod h1:l6kmbSsAVTrzhsliH283dTo9LYZ4ClPMbBgEyWiUtz8=
+github.com/hashicorp/vault-plugin-secrets-openldap v0.12.1 h1:8BSRXpPplF15ZL77vIQFi9+8zUmbVWjHpdEmkIaqVLg=
+github.com/hashicorp/vault-plugin-secrets-openldap v0.12.1/go.mod h1:epAxjKFROBOI5rUg/8UgRmQlboR4l0AMoAPP5Mx9qkI=
+github.com/hashicorp/vault-plugin-secrets-terraform v0.7.5 h1:nyhdeSdkcb5ZT0drFaW3IePL0aUmcVTzuOToG7RjHwY=
+github.com/hashicorp/vault-plugin-secrets-terraform v0.7.5/go.mod h1:mVZiKjHtll1vqOvThL6F29W1DM2DK5FerAmO7SNz/VE=
+github.com/hashicorp/vault-testing-stepwise v0.1.4 h1:Lsv1KdpQyjhvmLgKeH65FG5MmY5hMkF5LoX3xIxurjg=
+github.com/hashicorp/vault-testing-stepwise v0.1.4/go.mod h1:Ym1T/kMM2sT6qgCIIJ3an7uaSWCJ8O7ohsWB9UiB5tI=
+github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 h1:Y/+BtwxmRak3Us9jrByARvYW6uNeqZlEpMylIdXVIjY=
+github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77/go.mod h1:a2crHoMWwY6aiL8GWT8hYj7vKD64uX0EdRPbnsHF5wU=
github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=
github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo=
-github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I=
-github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
-github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -1189,18 +2598,22 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
@@ -1208,8 +2621,9 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ=
-github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
+github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
@@ -1225,40 +2639,48 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns=
-github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
+github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
+github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
-github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38=
-github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
-github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgtype v1.14.3 h1:h6W9cPuHsRWQFTWUZMAKMgG5jSwQI0Zurzdvlx3Plus=
+github.com/jackc/pgtype v1.14.3/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA=
github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w=
-github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw=
+github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
+github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
+github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4=
github.com/jarcoal/httpmock v1.0.7 h1:d1a2VFpSdm5gtjhCPWsQHSnx8+5V3ms5431YwvmkuNk=
+github.com/jarcoal/httpmock v1.0.7/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
-github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
-github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
-github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
-github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=
@@ -1268,8 +2690,15 @@ github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2
github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=
github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
-github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI=
+github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ=
+github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E=
+github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
+github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
+github.com/jimlambrt/gldap v0.1.4/go.mod h1:ia/l4Jhm+tdupLvZe7tRCbpv+HyXr1B5QFirsewfWEA=
+github.com/jimlambrt/gldap v0.1.13 h1:jxmVQn0lfmFbM9jglueoau5LLF/IGRti0SKf0vB753M=
+github.com/jimlambrt/gldap v0.1.13/go.mod h1:nlC30c7xVphjImg6etk7vg7ZewHCCvl1dfAhO3ZJzPg=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -1279,10 +2708,15 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4=
+github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg=
+github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY=
+github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0=
github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago=
github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk=
@@ -1302,67 +2736,90 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs=
-github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=
github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
@@ -1371,8 +2828,9 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -1383,78 +2841,122 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
-github.com/mediocregopher/radix/v4 v4.1.1 h1:JkZBEp0y8pWGNZkmO3RR5oEO5huwd4zKKt4rh1C+P8s=
-github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE=
+github.com/mediocregopher/radix/v4 v4.1.4 h1:Uze6DEbEAvL+VHXUEu/EDBTkUk5CLct5h3nVSGpc6Ts=
+github.com/mediocregopher/radix/v4 v4.1.4/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE=
github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk=
github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0=
+github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk=
+github.com/microsoft/go-mssqldb v1.5.0/go.mod h1:lmWsjHD8XX/Txr0f8ZqgbEZSC+BZjmEQy/Ms+rLrvho=
+github.com/microsoft/kiota-abstractions-go v1.5.6 h1:3hd1sACWB2B9grv8KG1T8g/gGQ4A8kTLv91OUxHSxkE=
+github.com/microsoft/kiota-abstractions-go v1.5.6/go.mod h1:2WX7Oh8V9SAdZ80OGeE53rcbdys54Pd38rAeDUghrpM=
+github.com/microsoft/kiota-authentication-azure-go v1.0.1 h1:F4HH+2QQHSecQg50gVEZaUcxA8/XxCaC2oOMYv2gTIM=
+github.com/microsoft/kiota-authentication-azure-go v1.0.1/go.mod h1:IbifJeoi+sULI0vjnsWYSmDu5atFo/4FZ6WCoAkPjsc=
+github.com/microsoft/kiota-http-go v1.1.1 h1:W4Olo7Z/MwNZCfkcvH/5eLhnn7koRBMMRhLEnf5MPKo=
+github.com/microsoft/kiota-http-go v1.1.1/go.mod h1:QzhhfW5xkoUuT+/ohflpHJvumWeXIxa/Xl0GmQ2M6mY=
+github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
+github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
+github.com/microsoft/kiota-serialization-json-go v1.0.5 h1:DKLs/zcRlY+UrcmI8bCprqYeh3UKfbgbzwy/H2elrmM=
+github.com/microsoft/kiota-serialization-json-go v1.0.5/go.mod h1:SgAmhkzRPX1cjnzEWTv988IcBet7wbr4y6y014l5Y1w=
+github.com/microsoft/kiota-serialization-multipart-go v1.0.0 h1:3O5sb5Zj+moLBiJympbXNaeV07K0d46IfuEd5v9+pBs=
+github.com/microsoft/kiota-serialization-multipart-go v1.0.0/go.mod h1:yauLeBTpANk4L03XD985akNysG24SnRJGaveZf+p4so=
+github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
+github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
+github.com/microsoftgraph/msgraph-sdk-go v1.32.0 h1:nWhgHiCYAuctf9j/2S84um23tU1fAg+jBoAF5QefoxE=
+github.com/microsoftgraph/msgraph-sdk-go v1.32.0/go.mod h1:YDj5bLJkl2kMu50iwG5b3PKGmkUsF26Wto5heVGVfNQ=
+github.com/microsoftgraph/msgraph-sdk-go-core v1.0.1 h1:uq4qZD8VXLiNZY0t4NoRpLDoEiNYJvAQK3hc0ZMmdxs=
+github.com/microsoftgraph/msgraph-sdk-go-core v1.0.1/go.mod h1:HUITyuFN556+0QZ/IVfH5K4FyJM7kllV6ExKi2ImKhE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE=
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw=
-github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
-github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0=
github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
-github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
+github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw=
+github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
-github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
-github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1463,13 +2965,19 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/mongodb-forks/digest v1.0.3 h1:ZUK1vyZnBiRMvET0O1SzmnBmv935CkcOTjhfR4zIQ2s= -github.com/mongodb-forks/digest v1.0.3/go.mod h1:eHRfgovT+dvSFfltrOa27hy1oR/rcwyDdp5H1ZQxEMA= +github.com/mongodb-forks/digest v1.0.5 h1:EJu3wtLZcA0HCvsZpX5yuD193/sW9tHiNvrEM5apXMk= +github.com/mongodb-forks/digest v1.0.5/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1478,6 +2986,7 @@ github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Z github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY= github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -1490,11 +2999,13 @@ github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 
h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/okta/okta-sdk-golang/v2 v2.12.1 h1:U+smE7trkHSZO8Mval3Ow85dbxawO+pMAr692VZq9gM= github.com/okta/okta-sdk-golang/v2 v2.12.1/go.mod h1:KRoAArk1H216oiRnQT77UN6JAhBOnOWkK27yA1SM7FQ= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1504,13 +3015,22 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -1518,8 +3038,19 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod 
h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1528,39 +3059,54 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= 
-github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= -github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.2.0-rc.1 h1:SMjop2pxxYRTfKdsigna/8xRoaoCfIQfD2cVuOb64/o= +github.com/opencontainers/runc v1.2.0-rc.1/go.mod h1:m9JwxfHzXz5YTTXBQr7EY9KTuazFAGPyMQx2nRR3vTw= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openlyinc/pointy v1.1.2 h1:LywVV2BWC5Sp5v7FoP4bUD+2Yn5k0VNeRbU5vq9jUMY= -github.com/openlyinc/pointy v1.1.2/go.mod h1:w2Sytx+0FVuMKn37xpXIAyBNhFNBIJGR/v2m7ik1WtM= -github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= -github.com/oracle/oci-go-sdk v13.1.0+incompatible h1:inwbT0b/mMbnTfzYoW2xcU1cCMIlU6Fz973at5phRXM= -github.com/oracle/oci-go-sdk v13.1.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= +github.com/oracle/oci-go-sdk v24.3.0+incompatible 
h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU= +github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/oracle/oci-go-sdk/v59 v59.0.0 h1:+zTvWfj9ZK0OwLRyXjUkZ8dPN3WvkQSRd3iooaOxNVs= +github.com/oracle/oci-go-sdk/v59 v59.0.0/go.mod h1:PWyWRn+xkQxwwmLq/oO03X3tN1tk2vEIE2tFaJmldHM= github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM= github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/ory/dockertest/v3 v3.8.0/go.mod h1:9zPATATlWQru+ynXP+DytBQrsXV7Tmlx7K86H6fQaDo= -github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= @@ -1570,33 +3116,40 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.6.1+incompatible 
h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4= -github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw= github.com/pires/go-proxyproto v0.6.1/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= -github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1610,6 +3163,7 @@ github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8 github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -1619,14 +3173,20 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -1635,8 +3195,11 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1648,66 +3211,90 @@ github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= 
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk= +github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.2.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/segmentio/fasthash v1.0.3 
h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U= github.com/sethvargo/go-limiter v0.7.1/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ= github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= +github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= +github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -1715,22 +3302,36 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 
-github.com/snowflakedb/gosnowflake v1.6.3 h1:EJDdDi74YbYt1ty164ge3fMZ0eVZ6KA7b1zmAa/wnRo= -github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= +github.com/snowflakedb/gosnowflake v1.7.2 h1:HRSwva8YXC64WUppfmHcMNVVzSE1+EwXXaJxgS0EkTo= +github.com/snowflakedb/gosnowflake v1.7.2/go.mod h1:03tW856vc3ceM4rJuj7KO4dzqN7qoezTm+xw7aPIIFo= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1740,6 +3341,9 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/std-uritemplate/std-uritemplate/go v0.0.50 h1:LAE6WYRmLlDXPtEzr152BnD/MHxGCKmcp5D2Pw0NvmU= +github.com/std-uritemplate/std-uritemplate/go v0.0.50/go.mod h1:CLZ1543WRCuUQQjK0BvPM4QrG2toY8xNZUm8Vbt7vTc= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= @@ -1749,8 +3353,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1762,56 +3367,82 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 
h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tilinna/clock v1.0.2 h1:6BO2tyAC9JbPExKH/z9zl44FLu1lImh3nDNKA0kgrkI= github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= +github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vektah/gqlparser v1.1.2/go.mod 
h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= +github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1824,7 +3455,7 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1832,6 +3463,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= @@ -1841,33 +3473,47 @@ github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= +github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 
h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 h1:19vOZe7geDEympjWIVidGi6/psR5Y+aaKnF17PSpdXA= -go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672/go.mod h1:wSVAyLiSU4JOBlqGr29lZeKbllk31oCAXAdTa6MioWQ= -go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= +go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= +go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= +go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.mongodb.org/atlas v0.13.0/go.mod h1:wVCnHcm/7/IfTjEB6K8K35PLG70yGz8BdkRwX0oK9/M= -go.mongodb.org/atlas v0.15.0 h1:YyOBdBIuI//krRITf4r7PSirJ3YDNNUfNmapxwSyDow= -go.mongodb.org/atlas v0.15.0/go.mod h1:lQhRHIxc6jQHEK3/q9WLu/SdBkPj2fQYhjLGUF6Z3U8= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= +go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= 
+go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.mongodb.org/atlas v0.36.0 h1:m05S3AO7zkl+bcG1qaNsEKBnAqnKx2FDwLooHpIG3j4= +go.mongodb.org/atlas v0.36.0/go.mod h1:nfPldE9dSama6G2IbIzmEza02Ly7yFZjMMVscaM0uEc= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1875,43 +3521,114 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/metric v0.20.0 
h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= +go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +go.opentelemetry.io/otel/metric v1.19.0/go.mod 
h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= +go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/sdk v1.23.1 h1:O7JmZw0h76if63LQdsBMKQDWNb5oEcOThG9IrxscV+E= +go.opentelemetry.io/otel/sdk v1.23.1/go.mod h1:LzdEVR5am1uKOOwfBWFef2DCi1nu3SA8XQxx2IerWFk= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1919,50 +3636,83 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 h1:GIAS/yBem/gq2MUqgNIzUHW7cJMmx3TGZOrnyYaNQ6c= -golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod 
h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1974,7 +3724,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1987,14 +3736,22 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod 
h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2003,7 +3760,6 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2027,12 +3783,10 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2050,16 +3804,15 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -2068,9 +3821,31 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2091,15 +3866,27 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod 
h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2109,8 +3896,15 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2125,10 +3919,8 @@ golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2136,7 +3928,6 @@ golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2176,21 +3967,17 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2209,13 +3996,17 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2227,23 +4018,30 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025112917-711f33c9992c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2251,17 +4049,53 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0 
h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2273,40 +4107,51 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2315,10 +4160,12 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2338,8 +4185,6 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200416214402-fc959738d646/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2353,19 +4198,34 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2375,8 +4235,19 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod 
h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -2390,7 +4261,6 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= @@ -2417,23 +4287,48 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.101.0 h1:lJPPeEBIRxGpGLwnBTam1NPEM8Z2BmmXEd3z812pjwM= -google.golang.org/api v0.101.0/go.mod h1:CjxAAWWt3A3VrUE2IGDY2bgK5qhoG/OkyWVlYcP05MY= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= +google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.124.0/go.mod 
h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= +google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.163.0 h1:4BBDpPaSH+H28NhnX+WwjXxbRLQ7TWuEKp4BQyEjxvk= +google.golang.org/api v0.163.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2458,8 +4353,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200416231807-8751e049a2a0/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -2477,7 +4371,9 @@ google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2488,7 +4384,6 @@ google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxH google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= @@ -2504,6 +4399,7 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= @@ -2511,22 +4407,133 @@ google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto 
v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto 
v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto 
v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= +google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api 
v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230720185612-659f7aaaa771/go.mod h1:3QoBVwTHkXbY1oRGzlhwhOykfcATQN43LJ6iT8Wy8kE= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod 
h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2540,7 +4547,6 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -2559,14 +4565,34 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.50.1 
h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2582,8 +4608,14 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.2-0.20230222093303-bc1253ad3743/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 
v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2591,24 +4623,21 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= @@ -2621,6 +4650,8 @@ gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2642,7 +4673,10 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2650,39 +4684,57 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= -k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= -k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver 
v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= -k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= +k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU= +k8s.io/cri-api v0.27.1/go.mod h1:+Ts/AVYbIo04S86XbTD73UPp/DkTiYxtsFeOFEu32L0= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -2690,35 +4742,112 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1/go.mod 
h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= -layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +layeh.com/radius v0.0.0-20231213012653-1006025d24f8 h1:orYXpi6BJZdvgytfHH4ybOe4wHnLbbS71Cmd8mWdZjs= +layeh.com/radius v0.0.0-20231213012653-1006025d24f8/go.mod h1:QRf+8aRqXc019kHkpcs/CTgyWXFzf+bxlsyuo2nAl1o= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod 
h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod 
h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/gofumpt v0.2.1/go.mod h1:a/rvZPhsNaedOJBzqRD9omnwVwHZsBdJirXHa9Gh9Ig= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git 
a/helper/benchhelpers/benchhelpers.go b/helper/benchhelpers/benchhelpers.go index 9c0feac15c83..afb0d4a88a49 100644 --- a/helper/benchhelpers/benchhelpers.go +++ b/helper/benchhelpers/benchhelpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package benchhelpers import ( diff --git a/helper/builtinplugins/builtinplugins_test.go b/helper/builtinplugins/builtinplugins_test.go new file mode 100644 index 000000000000..8df20209c1d7 --- /dev/null +++ b/helper/builtinplugins/builtinplugins_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package builtinplugins + +import ( + "testing" + + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +// TestBuiltinPluginsWork exists to confirm that all the credential and secrets plugins in Registry can successfully be +// initialized. Database plugins are excluded as there is no general way to initialize them - they require +// plugin-specific configuration at the time of initialization. +// +// This detects coding errors which would cause the plugins to panic on initialization - various aspects of the +// configuration of a framework.Backend are checked during Backend.init(), which runs as a sync.Once function triggered +// upon first request. +// +// In this test, a help request is used to trigger that initialization, since it is valid for all plugins. +func TestBuiltinPluginsWork(t *testing.T) { + cluster := vault.NewTestCluster( + t, + &vault.CoreConfig{ + BuiltinRegistry: Registry, + LogicalBackends: map[string]logical.Factory{ + // This needs to be here for madly overcomplicated reasons, otherwise we end up mounting a KV v1 even + // when we try to explicitly mount a KV v2... + // + // vault.NewCore hardcodes "kv" to vault.PassthroughBackendFactory if no explicit entry is configured, + // and this hardcoding is re-overridden in command.logicalBackends to point back to the real KV plugin. + // As far as I can tell, nothing at all relies upon the definition of "kv" in builtinplugins.Registry, + // as it always gets resolved via the logicalBackends map and the pluginCatalog is never queried. + "kv": logicalKv.Factory, + // Semi-similarly, "database" is added in command.logicalBackends and not at all in + // builtinplugins.Registry, so we need to add it here to be able to test it! + "database": logicalDb.Factory, + }, + PendingRemovalMountsAllowed: true, + // Specifying at least one audit backend factory will prevent NewTestCluster + // from attempting to enable a noop audit, and audit isn't required for this test. 
+ AuditBackends: map[string]audit.Factory{ + "noop": corehelpers.NoopAuditFactory(nil), + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }, + ) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + for _, authType := range append( + Registry.Keys(consts.PluginTypeCredential), + "token", + ) { + deprecationStatus, _ := Registry.DeprecationStatus(authType, consts.PluginTypeCredential) + if deprecationStatus == consts.Removed { + continue + } + + t.Run("Auth Method "+authType, func(t *testing.T) { + // This builtin backend is automatically mounted and should not be mounted again + if authType != "token" { + if err := client.Sys().EnableAuthWithOptions(authType, &api.EnableAuthOptions{ + Type: authType, + }); err != nil { + t.Fatal(err) + } + } + + if _, err := client.Logical().ReadWithData( + "auth/"+authType, + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + } + + for _, secretsType := range append( + Registry.Keys(consts.PluginTypeSecrets), + "database", + "cubbyhole", + "identity", + "sys", + ) { + deprecationStatus, _ := Registry.DeprecationStatus(secretsType, consts.PluginTypeSecrets) + if deprecationStatus == consts.Removed { + continue + } + + t.Run("Secrets Engine "+secretsType, func(t *testing.T) { + switch secretsType { + // These three builtin backends are automatically mounted and should not be mounted again + case "cubbyhole": + case "identity": + case "sys": + + default: + if err := client.Sys().Mount(secretsType, &api.MountInput{ + Type: secretsType, + }); err != nil { + t.Fatal(err) + } + } + + if _, err := client.Logical().ReadWithData( + secretsType, + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + } + + t.Run("Secrets Engine kv v2", func(t *testing.T) { + if err := client.Sys().Mount("kv-v2", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().ReadWithData( + "kv-v2", + map[string][]string{"help": {"1"}}, + ); err != nil { + t.Fatal(err) + } + }) + + // This last part is not strictly necessary for original purpose of this test (checking the plugins initialize + // without errors), but whilst we have a test Vault with one of everything mounted, let's also test that the full + // OpenAPI document generation succeeds too. + t.Run("Whole OpenAPI document", func(t *testing.T) { + if _, err := client.Logical().Read("sys/internal/specs/openapi"); err != nil { + t.Fatal(err) + } + }) +} diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 9bd5c7c0289c..978321c8ad3c 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -1,6 +1,11 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package builtinplugins import ( + "context" + credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" credAzure "github.com/hashicorp/vault-plugin-auth-azure" credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" @@ -26,7 +31,6 @@ import ( logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" - credAppId "github.com/hashicorp/vault/builtin/credential/app-id" credAppRole "github.com/hashicorp/vault/builtin/credential/approle" credAws "github.com/hashicorp/vault/builtin/credential/aws" credCert "github.com/hashicorp/vault/builtin/credential/cert" @@ -36,14 +40,9 @@ import ( credRadius "github.com/hashicorp/vault/builtin/credential/radius" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" logicalAws "github.com/hashicorp/vault/builtin/logical/aws" - logicalCass "github.com/hashicorp/vault/builtin/logical/cassandra" logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" - logicalMongo "github.com/hashicorp/vault/builtin/logical/mongodb" - logicalMssql "github.com/hashicorp/vault/builtin/logical/mssql" - logicalMysql "github.com/hashicorp/vault/builtin/logical/mysql" logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad" logicalPki "github.com/hashicorp/vault/builtin/logical/pki" - logicalPostgres "github.com/hashicorp/vault/builtin/logical/postgresql" logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq" logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh" logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" @@ -56,6 +55,7 @@ import ( dbMysql "github.com/hashicorp/vault/plugins/database/mysql" dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" ) @@ -64,8 +64,6 @@ import ( // Thus, rather than creating multiple instances of it, we only need one. var Registry = newRegistry() -var addExternalPlugins = addExtPluginsImpl - // BuiltinFactory is the func signature that should be returned by // the plugin's New() func. 
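The registry stores every builtin under the uniform factory signature declared just after this aside. Where a builtin is already a constructed value rather than a constructor, the registry adapts it with the toFunc helper seen later in this hunk; a rough sketch of that adapter, assuming it simply closes over the value:

```
package main

import "fmt"

// BuiltinFactory mirrors the uniform factory signature the registry stores.
type BuiltinFactory func() (interface{}, error)

// toFunc wraps an already-constructed value in a BuiltinFactory, which is
// roughly what the registry's toFunc helper does for builtin backends.
func toFunc(ifc interface{}) BuiltinFactory {
	return func() (interface{}, error) {
		return ifc, nil
	}
}

func main() {
	f := toFunc("fake-backend")
	v, err := f()
	fmt.Println(v, err) // fake-backend <nil>
}
```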
type BuiltinFactory func() (interface{}, error) @@ -86,18 +84,31 @@ type logicalBackend struct { consts.DeprecationStatus } +type removedBackend struct { + *framework.Backend +} + +func removedFactory(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + removedBackend := &removedBackend{} + removedBackend.Backend = &framework.Backend{} + return removedBackend, nil +} + func newRegistry() *registry { reg := ®istry{ credentialBackends: map[string]credentialBackend{ "alicloud": {Factory: credAliCloud.Factory}, "app-id": { - Factory: credAppId.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "approle": {Factory: credAppRole.Factory}, + "aws": {Factory: credAws.Factory}, + "azure": {Factory: credAzure.Factory}, + "centrify": { + Factory: credCentrify.Factory, + DeprecationStatus: consts.Deprecated, }, - "approle": {Factory: credAppRole.Factory}, - "aws": {Factory: credAws.Factory}, - "azure": {Factory: credAzure.Factory}, - "centrify": {Factory: credCentrify.Factory}, "cert": {Factory: credCert.Factory}, "cf": {Factory: credCF.Factory}, "gcp": {Factory: credGcp.Factory}, @@ -139,13 +150,16 @@ func newRegistry() *registry { "snowflake-database-plugin": {Factory: dbSnowflake.New}, }, logicalBackends: map[string]logicalBackend{ - "ad": {Factory: logicalAd.Factory}, + "ad": { + Factory: logicalAd.Factory, + DeprecationStatus: consts.Deprecated, + }, "alicloud": {Factory: logicalAlicloud.Factory}, "aws": {Factory: logicalAws.Factory}, "azure": {Factory: logicalAzure.Factory}, "cassandra": { - Factory: logicalCass.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, }, "consul": {Factory: logicalConsul.Factory}, "gcp": {Factory: logicalGcp.Factory}, @@ -153,25 +167,27 @@ func newRegistry() *registry { "kubernetes": {Factory: logicalKube.Factory}, "kv": {Factory: logicalKv.Factory}, "mongodb": { - Factory: logicalMongo.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, }, + // The mongodbatlas secrets engine is not the same as the database plugin equivalent + // (`mongodbatlas-database-plugin`), and thus will not be deprecated at this time. 
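The hunk above swaps real factories for removedFactory and advances several builtins from PendingRemoval to Removed, the final stage of the deprecation lifecycle modeled by consts.DeprecationStatus (Supported, Deprecated, PendingRemoval, Removed). A sketch of how a caller might gate mounting on that status, mirroring what the registry changes imply; the DeprecationStatus type and canMount helper below are local stand-ins, not the SDK's definitions (the registry hunk continues below):

```
package main

import "fmt"

// DeprecationStatus mirrors the four-stage lifecycle used by
// sdk/helper/consts: Supported -> Deprecated -> PendingRemoval -> Removed.
type DeprecationStatus int

const (
	Supported DeprecationStatus = iota
	Deprecated
	PendingRemoval
	Removed
)

// canMount is a hypothetical gate: removed builtins keep a stub factory so
// existing references do not break, but must no longer be mountable.
func canMount(status DeprecationStatus) bool {
	return status != Removed
}

func main() {
	for name, status := range map[string]DeprecationStatus{
		"approle":  Supported,
		"centrify": Deprecated,
		"app-id":   Removed,
	} {
		fmt.Printf("%-8s mountable=%v\n", name, canMount(status))
	}
}
```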
"mongodbatlas": {Factory: logicalMongoAtlas.Factory}, "mssql": { - Factory: logicalMssql.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, }, "mysql": { - Factory: logicalMysql.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, }, "nomad": {Factory: logicalNomad.Factory}, "openldap": {Factory: logicalLDAP.Factory}, "ldap": {Factory: logicalLDAP.Factory}, "pki": {Factory: logicalPki.Factory}, "postgresql": { - Factory: logicalPostgres.Factory, - DeprecationStatus: consts.PendingRemoval, + Factory: removedFactory, + DeprecationStatus: consts.Removed, }, "rabbitmq": {Factory: logicalRabbit.Factory}, "ssh": {Factory: logicalSsh.Factory}, @@ -181,7 +197,7 @@ func newRegistry() *registry { }, } - addExternalPlugins(reg) + entAddExtPlugins(reg) return reg } @@ -222,16 +238,16 @@ func (r *registry) Keys(pluginType consts.PluginType) []string { var keys []string switch pluginType { case consts.PluginTypeDatabase: - for key := range r.databasePlugins { - keys = append(keys, key) + for key, backend := range r.databasePlugins { + keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) } case consts.PluginTypeCredential: - for key := range r.credentialBackends { - keys = append(keys, key) + for key, backend := range r.credentialBackends { + keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) } case consts.PluginTypeSecrets: - for key := range r.logicalBackends { - keys = append(keys, key) + for key, backend := range r.logicalBackends { + keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus) } } return keys @@ -273,3 +289,10 @@ func toFunc(ifc interface{}) func() (interface{}, error) { return ifc, nil } } + +func appendIfNotRemoved(keys []string, name string, status consts.DeprecationStatus) []string { + if status != consts.Removed { + return append(keys, name) + } + return keys +} diff --git a/helper/builtinplugins/registry_stubs_oss.go b/helper/builtinplugins/registry_stubs_oss.go new file mode 100644 index 000000000000..fa0dab8b6656 --- /dev/null +++ b/helper/builtinplugins/registry_stubs_oss.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package builtinplugins + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entAddExtPlugins(r *registry) { +} diff --git a/helper/builtinplugins/registry_test.go b/helper/builtinplugins/registry_test.go index 5e63ba3e701f..99bbd05e61ac 100644 --- a/helper/builtinplugins/registry_test.go +++ b/helper/builtinplugins/registry_test.go @@ -1,12 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package builtinplugins import ( + "bufio" + "fmt" + "os" "reflect" + "regexp" "testing" - credAppId "github.com/hashicorp/vault/builtin/credential/app-id" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/constants" dbMysql "github.com/hashicorp/vault/plugins/database/mysql" "github.com/hashicorp/vault/sdk/helper/consts" + + "golang.org/x/exp/slices" ) // Test_RegistryGet exercises the (registry).Get functionality by comparing @@ -35,9 +45,16 @@ func Test_RegistryGet(t *testing.T) { }, { name: "known builtin lookup", + builtin: "userpass", + pluginType: consts.PluginTypeCredential, + want: toFunc(credUserpass.Factory), + wantOk: true, + }, + { + name: "removed builtin lookup", builtin: "app-id", pluginType: consts.PluginTypeCredential, - want: toFunc(credAppId.Factory), + want: nil, wantOk: true, }, { @@ -71,6 +88,7 @@ func Test_RegistryKeyCounts(t *testing.T) { name string pluginType consts.PluginType want int // use slice length as test condition + entWant int wantOk bool }{ { @@ -81,7 +99,8 @@ { name: "number of auth plugins", pluginType: consts.PluginTypeCredential, - want: 20, + want: 19, + entWant: 1, }, { name: "number of database plugins", @@ -91,14 +110,19 @@ { name: "number of secrets plugins", pluginType: consts.PluginTypeSecrets, - want: 24, + want: 19, + entWant: 3, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { keys := Registry.Keys(tt.pluginType) - if len(keys) != tt.want { - t.Fatalf("got size: %d, want size: %d", len(keys), tt.want) + want := tt.want + if constants.IsEnterprise { + want += tt.entWant + } + if len(keys) != want { + t.Fatalf("got size: %d, want size: %d", len(keys), want) } }) } @@ -126,10 +150,16 @@ func Test_RegistryContains(t *testing.T) { }, { name: "known builtin lookup", - builtin: "app-id", + builtin: "approle", pluginType: consts.PluginTypeCredential, want: true, }, + { + name: "removed builtin lookup", + builtin: "app-id", + pluginType: consts.PluginTypeCredential, + want: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -186,10 +216,10 @@ func Test_RegistryStatus(t *testing.T) { wantOk: true, }, { - name: "pending removal builtin lookup", + name: "removed builtin lookup", builtin: "app-id", pluginType: consts.PluginTypeCredential, - want: consts.PendingRemoval, + want: consts.Removed, wantOk: true, }, } @@ -205,3 +235,116 @@ }) } } + +// Test_RegistryMatchesGenOpenapi ensures that the plugins mounted in gen_openapi.sh match registry.go +func Test_RegistryMatchesGenOpenapi(t *testing.T) { + const scriptPath = "../../scripts/gen_openapi.sh" + + // parseScript fetches the contents of the gen_openapi.sh script and extracts the relevant lines + parseScript := func(path string) ([]string, []string, error) { + f, err := os.Open(scriptPath) + if err != nil { + return nil, nil, fmt.Errorf("could not open gen_openapi.sh script: %w", err) + } + defer f.Close() + + // This is a hack: the gen_openapi script contains a conditional block to + // enable the enterprise plugins, whose lines are indented. Tweak the + // regexp to only include the indented lines on enterprise.
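Test_RegistryMatchesGenOpenapi scrapes `vault auth enable` and `vault secrets enable` lines out of the shell script using the regexps defined just after this aside. A standalone illustration of how a pattern of that shape captures both the quoted and unquoted name forms (the sample lines are invented):

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as the pattern in the test: optional flags, then a plugin
	// name that may or may not be quoted, anchored to the end of the line.
	re := regexp.MustCompile(`^vault secrets enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`)

	for _, line := range []string{
		`vault secrets enable "pki"`,
		`vault secrets enable -path=tf terraform`,
		`vault secrets list`, // no match
	} {
		if m := re.FindStringSubmatch(line); m != nil {
			// m[1] holds the quoted capture, m[2] the unquoted one.
			fmt.Printf("%-40s quoted=%q unquoted=%q\n", line, m[1], m[2])
		}
	}
}
```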
+ leading := "^" + if constants.IsEnterprise { + leading = "^ *" + } + + var ( + credentialBackends []string + credentialBackendsRe = regexp.MustCompile(leading + `vault auth enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + + secretsBackends []string + secretsBackendsRe = regexp.MustCompile(leading + `vault secrets enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + ) + + scanner := bufio.NewScanner(f) + + for scanner.Scan() { + line := scanner.Text() + + if m := credentialBackendsRe.FindStringSubmatch(line); m != nil { + credentialBackends = append(credentialBackends, m[1]) + } + if m := secretsBackendsRe.FindStringSubmatch(line); m != nil { + secretsBackends = append(secretsBackends, m[1]) + } + } + + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("error scanning gen_openapi.sh: %v", err) + } + + return credentialBackends, secretsBackends, nil + } + + // ensureInRegistry ensures that the given plugin is in registry and marked as "supported" + ensureInRegistry := func(t *testing.T, name string, pluginType consts.PluginType) { + t.Helper() + + // "database" will not be present in registry, it is represented as + // a list of database plugins instead + if name == "database" && pluginType == consts.PluginTypeSecrets { + return + } + + deprecationStatus, ok := Registry.DeprecationStatus(name, pluginType) + if !ok { + t.Errorf("%q %s backend is missing from registry.go; please remove it from gen_openapi.sh", name, pluginType) + } + + if deprecationStatus == consts.Removed { + t.Errorf("%q %s backend is marked 'removed' in registry.go; please remove it from gen_openapi.sh", name, pluginType) + } + } + + // ensureInScript ensures that the given plugin name is in gen_openapi.sh script + ensureInScript := func(t *testing.T, scriptBackends []string, name string) { + t.Helper() + + for _, excluded := range []string{ + "oidc", // alias for "jwt" + "openldap", // alias for "ldap" + } { + if name == excluded { + return + } + } + + if !slices.Contains(scriptBackends, name) { + t.Errorf("%q backend could not be found in gen_openapi.sh, please add it there", name) + } + } + + // test starts here + scriptCredentialBackends, scriptSecretsBackends, err := parseScript(scriptPath) + if err != nil { + t.Fatal(err) + } + + for _, name := range scriptCredentialBackends { + ensureInRegistry(t, name, consts.PluginTypeCredential) + } + + for _, name := range scriptSecretsBackends { + ensureInRegistry(t, name, consts.PluginTypeSecrets) + } + + for name, backend := range Registry.credentialBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptCredentialBackends, name) + } + } + + for name, backend := range Registry.logicalBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptSecretsBackends, name) + } + } +} diff --git a/helper/builtinplugins/registry_util.go b/helper/builtinplugins/registry_util.go new file mode 100644 index 000000000000..257bc855628d --- /dev/null +++ b/helper/builtinplugins/registry_util.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package builtinplugins + +import "github.com/hashicorp/vault/sdk/helper/consts" + +// IsBuiltinEntPlugin checks whether the plugin is an enterprise only builtin plugin +func (r *registry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + return false +} diff --git a/helper/constants/constants_oss.go b/helper/constants/constants_oss.go index 22b9928fd3c4..ac1179cc22e7 100644 --- a/helper/constants/constants_oss.go +++ b/helper/constants/constants_oss.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !enterprise package constants diff --git a/helper/constants/fips.go b/helper/constants/fips.go index 2a9f7ee7aae6..f5ecc66ce47e 100644 --- a/helper/constants/fips.go +++ b/helper/constants/fips.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !fips package constants diff --git a/helper/constants/fips_build_check.go b/helper/constants/fips_build_check.go index 1e865b499f62..cb2bf5edc836 100644 --- a/helper/constants/fips_build_check.go +++ b/helper/constants/fips_build_check.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build (!fips && (fips_140_2 || fips_140_3)) || (fips && !fips_140_2 && !fips_140_3) || (fips_140_2 && fips_140_3) package constants diff --git a/helper/constants/fips_cgo_check.go b/helper/constants/fips_cgo_check.go index 56eabb6c81e5..409b713afdc9 100644 --- a/helper/constants/fips_cgo_check.go +++ b/helper/constants/fips_cgo_check.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build (fips || fips_140_2 || fips_140_3) && !cgo package constants diff --git a/helper/dhutil/dhutil.go b/helper/dhutil/dhutil.go index a0ddde25bd43..364fc3c14ace 100644 --- a/helper/dhutil/dhutil.go +++ b/helper/dhutil/dhutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package dhutil import ( diff --git a/helper/dhutil/dhutil_test.go b/helper/dhutil/dhutil_test.go index 46e90196d15e..18cd2c064f8e 100644 --- a/helper/dhutil/dhutil_test.go +++ b/helper/dhutil/dhutil_test.go @@ -1 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package dhutil diff --git a/helper/experiments/experiments.go b/helper/experiments/experiments.go new file mode 100644 index 000000000000..7bbe8b6af1eb --- /dev/null +++ b/helper/experiments/experiments.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package experiments + +import "slices" + +const ( + VaultExperimentCoreAuditEventsAlpha1 = "core.audit.events.alpha1" + VaultExperimentSecretsImport = "secrets.import.alpha1" + + // Unused experiments. We keep them so that we don't break users who include them in their + // flags or configs, but they no longer have any effect. + VaultExperimentEventsAlpha1 = "events.alpha1" +) + +var validExperiments = []string{ + VaultExperimentEventsAlpha1, + VaultExperimentCoreAuditEventsAlpha1, + VaultExperimentSecretsImport, +} + +var unusedExperiments = []string{ + VaultExperimentEventsAlpha1, +} + +// ValidExperiments exposes the list of valid experiments without exposing a mutable +// global variable. Experiments can only be enabled when starting a server, and will +// typically enable pre-GA API functionality. 
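ValidExperiments, defined immediately below, returns slices.Clone(validExperiments) so callers receive their own copy rather than an alias of the package-level slice. A minimal demonstration of the aliasing hazard the clone avoids (the variable and function names are illustrative):

```
package main

import (
	"fmt"
	"slices"
)

var valid = []string{"events.alpha1", "secrets.import.alpha1"}

func leaky() []string  { return valid }               // caller can mutate the global
func cloned() []string { return slices.Clone(valid) } // caller gets a copy

func main() {
	l := leaky()
	l[0] = "oops"
	fmt.Println(valid[0]) // "oops": the package-level slice was corrupted

	valid[0] = "events.alpha1" // reset
	c := cloned()
	c[0] = "oops"
	fmt.Println(valid[0]) // "events.alpha1": the global is unaffected
}
```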
+func ValidExperiments() []string { + return slices.Clone(validExperiments) +} + +// IsUnused returns true if the given experiment is in the unused list. +func IsUnused(experiment string) bool { + return slices.Contains(unusedExperiments, experiment) +} diff --git a/helper/fairshare/fairshare_testing_util.go b/helper/fairshare/fairshare_testing_util.go index 1f65acd94d90..5aae025f5e92 100644 --- a/helper/fairshare/fairshare_testing_util.go +++ b/helper/fairshare/fairshare_testing_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fairshare import ( diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go index 75c7662fc765..086a549f647e 100644 --- a/helper/fairshare/jobmanager.go +++ b/helper/fairshare/jobmanager.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fairshare import ( @@ -11,6 +14,7 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/logging" ) @@ -122,7 +126,7 @@ func (j *JobManager) AddJob(job Job, queueID string) { } } -// GetCurrentJobCount returns the total number of pending jobs in the job manager +// GetPendingJobCount returns the total number of pending jobs in the job manager func (j *JobManager) GetPendingJobCount() int { j.l.RLock() defer j.l.RUnlock() @@ -139,7 +143,12 @@ func (j *JobManager) GetPendingJobCount() int { func (j *JobManager) GetWorkerCounts() map[string]int { j.l.RLock() defer j.l.RUnlock() - return j.workerCount + workerCounts := make(map[string]int, len(j.workerCount)) + for k, v := range j.workerCount { + workerCounts[k] = v + } + + return workerCounts } // GetWorkQueueLengths() returns a map of queue ID to number of jobs in the queue diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go index d90314782294..288f0d2f949a 100644 --- a/helper/fairshare/jobmanager_test.go +++ b/helper/fairshare/jobmanager_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fairshare import ( @@ -744,3 +747,23 @@ func TestFairshare_queueWorkersSaturated(t *testing.T) { j.l.RUnlock() } } + +func TestJobManager_GetWorkerCounts_RaceCondition(t *testing.T) { + j := NewJobManager("test-job-mgr", 20, nil, nil) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 10; i++ { + j.incrementWorkerCount("a") + } + }() + wcs := j.GetWorkerCounts() + wcs["foo"] = 10 + for worker, count := range wcs { + _ = worker + _ = count + } + + wg.Wait() +} diff --git a/helper/fairshare/workerpool.go b/helper/fairshare/workerpool.go index f5179ba4e408..ef6ee3ad7b5f 100644 --- a/helper/fairshare/workerpool.go +++ b/helper/fairshare/workerpool.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fairshare import ( diff --git a/helper/fairshare/workerpool_test.go b/helper/fairshare/workerpool_test.go index a3c3f68a1e3f..d347c6734662 100644 --- a/helper/fairshare/workerpool_test.go +++ b/helper/fairshare/workerpool_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fairshare import ( diff --git a/helper/flag-kv/flag.go b/helper/flag-kv/flag.go index 06ae27111a8a..f09332c3ff2c 100644 --- a/helper/flag-kv/flag.go +++ b/helper/flag-kv/flag.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
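Stepping back to the GetWorkerCounts change above: the method now copies the worker-count map under the read lock instead of returning the internal map, and the new test exercises exactly the read/write overlap that `go test -race` would flag. A distilled before-and-after with simplified types (this `manager` is a stand-in, not the actual JobManager):

```
package main

import (
	"fmt"
	"sync"
)

type manager struct {
	l      sync.RWMutex
	counts map[string]int
}

// racy returns the internal map itself; a caller writing to it races with
// the manager's own updates once the lock is released.
func (m *manager) racy() map[string]int {
	m.l.RLock()
	defer m.l.RUnlock()
	return m.counts
}

// safe returns a snapshot taken under the read lock, as the fixed
// GetWorkerCounts now does.
func (m *manager) safe() map[string]int {
	m.l.RLock()
	defer m.l.RUnlock()
	out := make(map[string]int, len(m.counts))
	for k, v := range m.counts {
		out[k] = v
	}
	return out
}

func main() {
	m := &manager{counts: map[string]int{"a": 1}}
	snap := m.safe()
	snap["b"] = 2              // mutating the snapshot...
	fmt.Println(len(m.counts)) // ...leaves the manager's map at 1 entry
	_ = m.racy
}
```

Returning a snapshot trades a small allocation for the guarantee that no caller can observe or corrupt in-flight updates.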
+// SPDX-License-Identifier: BUSL-1.1 + package kvFlag import ( diff --git a/helper/flag-kv/flag_test.go b/helper/flag-kv/flag_test.go index 2fc88aa5f3ed..91a344403975 100644 --- a/helper/flag-kv/flag_test.go +++ b/helper/flag-kv/flag_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kvFlag import ( diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go index da75149dc488..1824fc8477f2 100644 --- a/helper/flag-slice/flag.go +++ b/helper/flag-slice/flag.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package sliceflag import "strings" diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go index f72e1d9605d6..6662446df7d7 100644 --- a/helper/flag-slice/flag_test.go +++ b/helper/flag-slice/flag_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package sliceflag import ( diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index caf9270f55bc..fbd67a4933b5 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: helper/forwarding/types.proto package forwarding diff --git a/helper/forwarding/types.proto b/helper/forwarding/types.proto index 8f1376a18004..9dfce583c21c 100644 --- a/helper/forwarding/types.proto +++ b/helper/forwarding/types.proto @@ -1,49 +1,52 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 -option go_package = "github.com/hashicorp/vault/helper/forwarding"; +syntax = "proto3"; package forwarding; +option go_package = "github.com/hashicorp/vault/helper/forwarding"; + message Request { - // Not used right now but reserving in case it turns out that streaming - // makes things more economical on the gRPC side - //uint64 id = 1; - string method = 2; - URL url = 3; - map header_entries = 4; - bytes body = 5; - string host = 6; - string remote_addr = 7; - repeated bytes peer_certificates = 8; + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + string method = 2; + URL url = 3; + map header_entries = 4; + bytes body = 5; + string host = 6; + string remote_addr = 7; + repeated bytes peer_certificates = 8; } message URL { - string scheme = 1; - string opaque = 2; - // This isn't needed now but might be in the future, so we'll skip the - // number to keep the ordering in net/url - //UserInfo user = 3; - string host = 4; - string path = 5; - string raw_path = 6; - // This also isn't needed right now, but we'll reserve the number - //bool force_query = 7; - string raw_query = 8; - string fragment = 9; + string scheme = 1; + string opaque = 2; + // This isn't needed now but might be in the future, so we'll skip the + // number to keep the ordering in net/url + //UserInfo user = 3; + string host = 4; + string path = 5; + string raw_path = 6; + // This also isn't needed right now, but we'll reserve the number + //bool force_query = 7; + string raw_query = 8; + string fragment = 9; } message HeaderEntry { - repeated string values = 1; + repeated string values = 1; } message Response { - // Not used right now but reserving in case it turns out that streaming - // makes things more 
economical on the gRPC side - //uint64 id = 1; - uint32 status_code = 2; - bytes body = 3; - // Added in 0.6.2 to ensure that the content-type is set appropriately, as - // well as any other information - map header_entries = 4; - uint64 last_remote_wal = 5; + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + //uint64 id = 1; + uint32 status_code = 2; + bytes body = 3; + // Added in 0.6.2 to ensure that the content-type is set appropriately, as + // well as any other information + map header_entries = 4; + uint64 last_remote_wal = 5; } diff --git a/helper/forwarding/util.go b/helper/forwarding/util.go index de92639afbee..e9d06de04854 100644 --- a/helper/forwarding/util.go +++ b/helper/forwarding/util.go @@ -1,12 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package forwarding import ( "bytes" "crypto/tls" "crypto/x509" - "errors" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -60,19 +61,7 @@ func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request func GenerateForwardedRequest(req *http.Request) (*Request, error) { var reader io.Reader = req.Body - ctx := req.Context() - maxRequestSize := ctx.Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - reader = io.LimitReader(req.Body, max) - } - } - - body, err := ioutil.ReadAll(reader) + body, err := io.ReadAll(reader) if err != nil { return nil, err } diff --git a/helper/forwarding/util_test.go b/helper/forwarding/util_test.go index 0af2b89e989b..0bf4be76945e 100644 --- a/helper/forwarding/util_test.go +++ b/helper/forwarding/util_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package forwarding import ( diff --git a/helper/hostutil/hostinfo.go b/helper/hostutil/hostinfo.go index d35afb57d900..e892ae8a6292 100644 --- a/helper/hostutil/hostinfo.go +++ b/helper/hostutil/hostinfo.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !openbsd package hostutil diff --git a/helper/hostutil/hostinfo_error.go b/helper/hostutil/hostinfo_error.go index ca5d8a2941c0..2cce22cb7d52 100644 --- a/helper/hostutil/hostinfo_error.go +++ b/helper/hostutil/hostinfo_error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package hostutil import "fmt" diff --git a/helper/hostutil/hostinfo_openbsd.go b/helper/hostutil/hostinfo_openbsd.go index 8f01458afe1a..a73bb2df16f1 100644 --- a/helper/hostutil/hostinfo_openbsd.go +++ b/helper/hostutil/hostinfo_openbsd.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build openbsd package hostutil diff --git a/helper/hostutil/hostinfo_test.go b/helper/hostutil/hostinfo_test.go index c54893b17dbb..6862cacf790d 100644 --- a/helper/hostutil/hostinfo_test.go +++ b/helper/hostutil/hostinfo_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package hostutil import ( diff --git a/helper/identity/identity.go b/helper/identity/identity.go index 9a28be715679..2d625c4bcc6c 100644 --- a/helper/identity/identity.go +++ b/helper/identity/identity.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package identity import ( diff --git a/helper/identity/mfa/mfa.go b/helper/identity/mfa/mfa.go index d4bbf10b4846..a040563b502b 100644 --- a/helper/identity/mfa/mfa.go +++ b/helper/identity/mfa/mfa.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mfa import ( diff --git a/helper/identity/mfa/sentinel.go b/helper/identity/mfa/sentinel.go index f6d8c7b99422..02bc857ceb03 100644 --- a/helper/identity/mfa/sentinel.go +++ b/helper/identity/mfa/sentinel.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mfa func (c *Config) SentinelGet(key string) (interface{}, error) { diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go index 5a1d8e98e4f1..9d68aba1b507 100644 --- a/helper/identity/mfa/types.pb.go +++ b/helper/identity/mfa/types.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: helper/identity/mfa/types.proto package mfa diff --git a/helper/identity/mfa/types.proto b/helper/identity/mfa/types.proto index decade25b9af..f125a3d2f5f7 100644 --- a/helper/identity/mfa/types.proto +++ b/helper/identity/mfa/types.proto @@ -1,33 +1,36 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 -option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; +syntax = "proto3"; package mfa; +option go_package = "github.com/hashicorp/vault/helper/identity/mfa"; + // Config represents the configuration information used *along with* the MFA // secret tied to caller's identity, to verify the MFA credentials supplied. // Configuration information differs by type. Handler of each type should know // what to expect from the Config field. message Config { - // @inject_tag: sentinel:"-" - string type = 1; - // @inject_tag: sentinel:"-" - string name = 2; - // @inject_tag: sentinel:"-" - string id = 3; - // @inject_tag: sentinel:"-" - string mount_accessor = 4; - // @inject_tag: sentinel:"-" - string username_format = 5; - // @inject_tag: sentinel:"-" - oneof config { - TOTPConfig totp_config = 6; - OktaConfig okta_config = 7; - DuoConfig duo_config = 8; - PingIDConfig pingid_config = 9; - } - // @inject_tag: sentinel:"-" - string namespace_id = 10; + // @inject_tag: sentinel:"-" + string type = 1; + // @inject_tag: sentinel:"-" + string name = 2; + // @inject_tag: sentinel:"-" + string id = 3; + // @inject_tag: sentinel:"-" + string mount_accessor = 4; + // @inject_tag: sentinel:"-" + string username_format = 5; + // @inject_tag: sentinel:"-" + oneof config { + TOTPConfig totp_config = 6; + OktaConfig okta_config = 7; + DuoConfig duo_config = 8; + PingIDConfig pingid_config = 9; + } + // @inject_tag: sentinel:"-" + string namespace_id = 10; } // TOTPConfig represents the configuration information required to generate @@ -36,115 +39,115 @@ message Config { // by the information stored in the entity and not from the values in the // configuration. 
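The TOTPConfig fields that follow (period, digits, algorithm, skew) correspond directly to RFC 6238 parameters. A stdlib-only sketch of how those parameters drive code generation and validation, with skew widening the accepted window; this is illustrative, not Vault's MFA implementation:

```
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"time"
)

// hotp computes an RFC 4226 code for one counter value.
func hotp(key []byte, counter uint64, digits uint32) uint32 {
	var msg [8]byte
	binary.BigEndian.PutUint64(msg[:], counter)
	mac := hmac.New(sha1.New, key) // the "algorithm" field selects this hash
	mac.Write(msg[:])
	sum := mac.Sum(nil)
	off := sum[len(sum)-1] & 0x0f
	code := binary.BigEndian.Uint32(sum[off:off+4]) & 0x7fffffff
	mod := uint32(1)
	for i := uint32(0); i < digits; i++ {
		mod *= 10
	}
	return code % mod
}

// validate accepts the current time step plus or minus `skew` steps, which
// is what a TOTP "skew" parameter controls.
func validate(key []byte, code, period, digits, skew uint32) bool {
	step := uint64(time.Now().Unix()) / uint64(period)
	for d := -int64(skew); d <= int64(skew); d++ {
		if hotp(key, uint64(int64(step)+d), digits) == code {
			return true
		}
	}
	return false
}

func main() {
	key := []byte("12345678901234567890")
	step := uint64(time.Now().Unix()) / 30
	code := hotp(key, step, 6)
	fmt.Printf("code=%06d valid=%v\n", code, validate(key, code, 30, 6, 1))
}
```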
message TOTPConfig { - // @inject_tag: sentinel:"-" - string issuer = 1; - // @inject_tag: sentinel:"-" - uint32 period = 2; - // @inject_tag: sentinel:"-" - int32 algorithm = 3; - // @inject_tag: sentinel:"-" - int32 digits = 4; - // @inject_tag: sentinel:"-" - uint32 skew = 5; - // @inject_tag: sentinel:"-" - uint32 key_size = 6; - // @inject_tag: sentinel:"-" - int32 qr_size = 7; - // @inject_tag: sentinel:"-" - uint32 max_validation_attempts = 8; + // @inject_tag: sentinel:"-" + string issuer = 1; + // @inject_tag: sentinel:"-" + uint32 period = 2; + // @inject_tag: sentinel:"-" + int32 algorithm = 3; + // @inject_tag: sentinel:"-" + int32 digits = 4; + // @inject_tag: sentinel:"-" + uint32 skew = 5; + // @inject_tag: sentinel:"-" + uint32 key_size = 6; + // @inject_tag: sentinel:"-" + int32 qr_size = 7; + // @inject_tag: sentinel:"-" + uint32 max_validation_attempts = 8; } // DuoConfig represents the configuration information required to perform // Duo authentication. message DuoConfig { - // @inject_tag: sentinel:"-" - string integration_key = 1; - // @inject_tag: sentinel:"-" - string secret_key = 2; - // @inject_tag: sentinel:"-" - string api_hostname = 3; - // @inject_tag: sentinel:"-" - string push_info = 4; - // @inject_tag: sentinel:"-" - bool use_passcode = 5; + // @inject_tag: sentinel:"-" + string integration_key = 1; + // @inject_tag: sentinel:"-" + string secret_key = 2; + // @inject_tag: sentinel:"-" + string api_hostname = 3; + // @inject_tag: sentinel:"-" + string push_info = 4; + // @inject_tag: sentinel:"-" + bool use_passcode = 5; } // OktaConfig contains Okta configuration parameters required to perform Okta // authentication. message OktaConfig { - // @inject_tag: sentinel:"-" - string org_name = 1; - // @inject_tag: sentinel:"-" - string api_token = 2; - // @inject_tag: sentinel:"-" - bool production = 3; - // @inject_tag: sentinel:"-" - string base_url = 4; - // @inject_tag: sentinel:"-" - bool primary_email = 5; + // @inject_tag: sentinel:"-" + string org_name = 1; + // @inject_tag: sentinel:"-" + string api_token = 2; + // @inject_tag: sentinel:"-" + bool production = 3; + // @inject_tag: sentinel:"-" + string base_url = 4; + // @inject_tag: sentinel:"-" + bool primary_email = 5; } // PingIDConfig contains PingID configuration information message PingIDConfig { - // @inject_tag: sentinel:"-" - string use_base64_key = 1; - // @inject_tag: sentinel:"-" - bool use_signature = 2; - // @inject_tag: sentinel:"-" - string token = 3; - // @inject_tag: sentinel:"-" - string idp_url = 4; - // @inject_tag: sentinel:"-" - string org_alias = 5; - // @inject_tag: sentinel:"-" - string admin_url = 6; - // @inject_tag: sentinel:"-" - string authenticator_url = 7; + // @inject_tag: sentinel:"-" + string use_base64_key = 1; + // @inject_tag: sentinel:"-" + bool use_signature = 2; + // @inject_tag: sentinel:"-" + string token = 3; + // @inject_tag: sentinel:"-" + string idp_url = 4; + // @inject_tag: sentinel:"-" + string org_alias = 5; + // @inject_tag: sentinel:"-" + string admin_url = 6; + // @inject_tag: sentinel:"-" + string authenticator_url = 7; } // Secret represents all the types of secrets which the entity can hold. // Each MFA type should add a secret type to the oneof block in this message. 
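The Secret message below uses a oneof so each MFA type can contribute its own secret variant. In the Go that protoc-gen-go emits, a oneof surfaces as an interface-typed field handled with a type switch; this sketch uses hand-written stand-ins, since the real generated wrapper names may differ:

```
package main

import "fmt"

// Hand-rolled equivalents of what protoc-gen-go emits for a oneof: one
// wrapper struct per variant, all satisfying a private marker interface.
type isSecretValue interface{ isSecretValue() }

type TOTPSecret struct{ Issuer string }

type SecretTOTP struct{ TOTP *TOTPSecret }

func (*SecretTOTP) isSecretValue() {}

type Secret struct {
	MethodName string
	Value      isSecretValue
}

func describe(s *Secret) string {
	switch v := s.Value.(type) {
	case *SecretTOTP:
		return "totp secret issued by " + v.TOTP.Issuer
	case nil:
		return "no secret set"
	default:
		return fmt.Sprintf("unknown variant %T", v)
	}
}

func main() {
	s := &Secret{MethodName: "totp", Value: &SecretTOTP{TOTP: &TOTPSecret{Issuer: "vault"}}}
	fmt.Println(describe(s))
}
```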
message Secret { - // @inject_tag: sentinel:"-" - string method_name = 1; - oneof value { - // @inject_tag: sentinel:"-" - TOTPSecret totp_secret = 2; - } + // @inject_tag: sentinel:"-" + string method_name = 1; + oneof value { + // @inject_tag: sentinel:"-" + TOTPSecret totp_secret = 2; + } } // TOTPSecret represents the secret that gets stored in the entity about a // particular MFA method. This information is used to validate the MFA // credential supplied over the API during request time. message TOTPSecret { - // @inject_tag: sentinel:"-" - string issuer = 1; - // @inject_tag: sentinel:"-" - uint32 period = 2; - // @inject_tag: sentinel:"-" - int32 algorithm = 3; - // @inject_tag: sentinel:"-" - int32 digits = 4; - // @inject_tag: sentinel:"-" - uint32 skew = 5; - // @inject_tag: sentinel:"-" - uint32 key_size = 6; - // reserving 7 here just to keep parity with the config message above - // @inject_tag: sentinel:"-" - string account_name = 8; - // @inject_tag: sentinel:"-" - string key = 9; + // @inject_tag: sentinel:"-" + string issuer = 1; + // @inject_tag: sentinel:"-" + uint32 period = 2; + // @inject_tag: sentinel:"-" + int32 algorithm = 3; + // @inject_tag: sentinel:"-" + int32 digits = 4; + // @inject_tag: sentinel:"-" + uint32 skew = 5; + // @inject_tag: sentinel:"-" + uint32 key_size = 6; + // reserving 7 here just to keep parity with the config message above + // @inject_tag: sentinel:"-" + string account_name = 8; + // @inject_tag: sentinel:"-" + string key = 9; } // MFAEnforcementConfig is what the user provides to the // mfa/login_enforcement endpoint. message MFAEnforcementConfig { - string name = 1; - string namespace_id = 2; - repeated string mfa_method_ids = 3; - repeated string auth_method_accessors = 4; - repeated string auth_method_types = 5; - repeated string identity_group_ids = 6; - repeated string identity_entity_ids = 7; - string id = 8; + string name = 1; + string namespace_id = 2; + repeated string mfa_method_ids = 3; + repeated string auth_method_accessors = 4; + repeated string auth_method_types = 5; + repeated string identity_group_ids = 6; + repeated string identity_entity_ids = 7; + string id = 8; } diff --git a/helper/identity/sentinel.go b/helper/identity/sentinel.go index 2c2bc4b940f2..a7ff44c91d79 100644 --- a/helper/identity/sentinel.go +++ b/helper/identity/sentinel.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package identity import "github.com/golang/protobuf/ptypes" diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go index c4a4dadf676f..031581da705c 100644 --- a/helper/identity/types.pb.go +++ b/helper/identity/types.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: helper/identity/types.proto package identity diff --git a/helper/identity/types.proto b/helper/identity/types.proto index 0ea7525eb0f5..7c6c49bd6050 100644 --- a/helper/identity/types.proto +++ b/helper/identity/types.proto @@ -1,261 +1,264 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 -option go_package = "github.com/hashicorp/vault/helper/identity"; +syntax = "proto3"; package identity; import "google/protobuf/timestamp.proto"; import "helper/identity/mfa/types.proto"; +option go_package = "github.com/hashicorp/vault/helper/identity"; + // Group represents an identity group. message Group { - // ID is the unique identifier for this group - // @inject_tag: sentinel:"-" - string id = 1; - - // Name is the unique name for this group - // @inject_tag: sentinel:"-" - string name = 2; - - // Policies are the vault policies to be granted to members of this group - // @inject_tag: sentinel:"-" - repeated string policies = 3; - - // ParentGroupIDs are the identifiers of those groups to which this group is a - // member of. These will serve as references to the parent group in the - // hierarchy. - // @inject_tag: sentinel:"-" - repeated string parent_group_ids = 4; - - // MemberEntityIDs are the identifiers of entities which are members of this - // group - // @inject_tag: sentinel:"-" - repeated string member_entity_ids = 5; - - // Metadata represents the custom data tied with this group - // @inject_tag: sentinel:"-" - map metadata = 6; - - // CreationTime is the time at which this group was created - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 7; - - // LastUpdateTime is the time at which this group was last modified - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time= 8; - - // ModifyIndex tracks the number of updates to the group. It is useful to detect - // updates to the groups. - // @inject_tag: sentinel:"-" - uint64 modify_index = 9; - - // BucketKey is the path of the storage packer key into which this group is - // stored. - // @inject_tag: sentinel:"-" - string bucket_key = 10; - - // Alias is used to mark this group as an internal mapping of a group that - // is external to the identity store. Alias can only be set if the 'type' - // is set to 'external'. - // @inject_tag: sentinel:"-" - Alias alias = 11; - - // Type indicates if this group is an internal group or an external group. - // Memberships of the internal groups can be managed over the API whereas - // the memberships on the external group --for which a corresponding alias - // will be set-- will be managed automatically. - // @inject_tag: sentinel:"-" - string type = 12; - - // NamespaceID is the identifier of the namespace to which this group - // belongs to. Do not return this value over the API when reading the - // group. - // @inject_tag: sentinel:"-" - string namespace_id = 13; + // ID is the unique identifier for this group + // @inject_tag: sentinel:"-" + string id = 1; + + // Name is the unique name for this group + // @inject_tag: sentinel:"-" + string name = 2; + + // Policies are the vault policies to be granted to members of this group + // @inject_tag: sentinel:"-" + repeated string policies = 3; + + // ParentGroupIDs are the identifiers of those groups to which this group is a + // member of. These will serve as references to the parent group in the + // hierarchy. 
+ // @inject_tag: sentinel:"-" + repeated string parent_group_ids = 4; + + // MemberEntityIDs are the identifiers of entities which are members of this + // group + // @inject_tag: sentinel:"-" + repeated string member_entity_ids = 5; + + // Metadata represents the custom data tied with this group + // @inject_tag: sentinel:"-" + map metadata = 6; + + // CreationTime is the time at which this group was created + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 7; + + // LastUpdateTime is the time at which this group was last modified + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 8; + + // ModifyIndex tracks the number of updates to the group. It is useful to detect + // updates to the groups. + // @inject_tag: sentinel:"-" + uint64 modify_index = 9; + + // BucketKey is the path of the storage packer key into which this group is + // stored. + // @inject_tag: sentinel:"-" + string bucket_key = 10; + + // Alias is used to mark this group as an internal mapping of a group that + // is external to the identity store. Alias can only be set if the 'type' + // is set to 'external'. + // @inject_tag: sentinel:"-" + Alias alias = 11; + + // Type indicates if this group is an internal group or an external group. + // Memberships of the internal groups can be managed over the API whereas + // the memberships on the external group --for which a corresponding alias + // will be set-- will be managed automatically. + // @inject_tag: sentinel:"-" + string type = 12; + + // NamespaceID is the identifier of the namespace to which this group + // belongs to. Do not return this value over the API when reading the + // group. + // @inject_tag: sentinel:"-" + string namespace_id = 13; } // LocalAliases holds the aliases belonging to an entity that are local to the // cluster. message LocalAliases { - repeated Alias aliases = 1; + repeated Alias aliases = 1; } // Entity represents an entity that gets persisted and indexed. // Entity is fundamentally composed of zero or many aliases. message Entity { - // Aliases are the identities that this entity is made of. This can be - // empty as well to favor being able to create the entity first and then - // incrementally adding aliases. - // @inject_tag: sentinel:"-" - repeated Alias aliases = 1; - - // ID is the unique identifier of the entity which always be a UUID. This - // should never be allowed to be updated. - // @inject_tag: sentinel:"-" - string id = 2; - - // Name is a unique identifier of the entity which is intended to be - // human-friendly. The default name might not be human friendly since it - // gets suffixed by a UUID, but it can optionally be updated, unlike the ID - // field. - // @inject_tag: sentinel:"-" - string name = 3; - - // Metadata represents the explicit metadata which is set by the - // clients. This is useful to tie any information pertaining to the - // aliases. This is a non-unique field of entity, meaning multiple - // entities can have the same metadata set. Entities will be indexed based - // on this explicit metadata. This enables virtual groupings of entities - // based on its metadata. - // @inject_tag: sentinel:"-" - map metadata = 4; - - // CreationTime is the time at which this entity is first created. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 5; - - // LastUpdateTime is the most recent time at which the properties of this - // entity got modified. 
This is helpful in filtering out entities based on - // its age and to take action on them, if desired. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time= 6; - - // MergedEntityIDs are the entities which got merged to this one. Entities - // will be indexed based on all the entities that got merged into it. This - // helps to apply the actions on this entity on the tokens that are merged - // to the merged entities. Merged entities will be deleted entirely and - // this is the only trackable trail of its earlier presence. - // @inject_tag: sentinel:"-" - repeated string merged_entity_ids = 7; - - // Policies the entity is entitled to - // @inject_tag: sentinel:"-" - repeated string policies = 8; - - // BucketKey is the path of the storage packer key into which this entity is - // stored. - // @inject_tag: sentinel:"-" - string bucket_key = 9; - - // MFASecrets holds the MFA secrets indexed by the identifier of the MFA - // method configuration. - // @inject_tag: sentinel:"-" - map mfa_secrets = 10; - - // Disabled indicates whether tokens associated with the account should not - // be able to be used - // @inject_tag: sentinel:"-" - bool disabled = 11; - - // NamespaceID is the identifier of the namespace to which this entity - // belongs to. Do not return this value over the API when reading the - // entity. - // @inject_tag: sentinel:"-" - string namespace_id = 12; + // Aliases are the identities that this entity is made of. This can be + // empty as well to favor being able to create the entity first and then + // incrementally adding aliases. + // @inject_tag: sentinel:"-" + repeated Alias aliases = 1; + + // ID is the unique identifier of the entity which always be a UUID. This + // should never be allowed to be updated. + // @inject_tag: sentinel:"-" + string id = 2; + + // Name is a unique identifier of the entity which is intended to be + // human-friendly. The default name might not be human friendly since it + // gets suffixed by a UUID, but it can optionally be updated, unlike the ID + // field. + // @inject_tag: sentinel:"-" + string name = 3; + + // Metadata represents the explicit metadata which is set by the + // clients. This is useful to tie any information pertaining to the + // aliases. This is a non-unique field of entity, meaning multiple + // entities can have the same metadata set. Entities will be indexed based + // on this explicit metadata. This enables virtual groupings of entities + // based on its metadata. + // @inject_tag: sentinel:"-" + map metadata = 4; + + // CreationTime is the time at which this entity is first created. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 5; + + // LastUpdateTime is the most recent time at which the properties of this + // entity got modified. This is helpful in filtering out entities based on + // its age and to take action on them, if desired. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 6; + + // MergedEntityIDs are the entities which got merged to this one. Entities + // will be indexed based on all the entities that got merged into it. This + // helps to apply the actions on this entity on the tokens that are merged + // to the merged entities. Merged entities will be deleted entirely and + // this is the only trackable trail of its earlier presence. 
+ // @inject_tag: sentinel:"-" + repeated string merged_entity_ids = 7; + + // Policies the entity is entitled to + // @inject_tag: sentinel:"-" + repeated string policies = 8; + + // BucketKey is the path of the storage packer key into which this entity is + // stored. + // @inject_tag: sentinel:"-" + string bucket_key = 9; + + // MFASecrets holds the MFA secrets indexed by the identifier of the MFA + // method configuration. + // @inject_tag: sentinel:"-" + map mfa_secrets = 10; + + // Disabled indicates whether tokens associated with the account should not + // be able to be used + // @inject_tag: sentinel:"-" + bool disabled = 11; + + // NamespaceID is the identifier of the namespace to which this entity + // belongs to. Do not return this value over the API when reading the + // entity. + // @inject_tag: sentinel:"-" + string namespace_id = 12; } // Alias represents the alias that gets stored inside of the // entity object in storage and also represents in an in-memory index of an // alias object. message Alias { - // ID is the unique identifier that represents this alias - // @inject_tag: sentinel:"-" - string id = 1; - - // CanonicalID is the entity identifier to which this alias belongs to - // @inject_tag: sentinel:"-" - string canonical_id = 2; - - // MountType is the backend mount's type to which this alias belongs to. - // This enables categorically querying aliases of specific backend types. - // @inject_tag: sentinel:"-" - string mount_type = 3; - - // MountAccessor is the backend mount's accessor to which this alias - // belongs to. - // @inject_tag: sentinel:"-" - string mount_accessor = 4; - - // MountPath is the backend mount's path to which the Maccessor belongs to. This - // field is not used for any operational purposes. This is only returned when - // alias is read, only as a nicety. - // @inject_tag: sentinel:"-" - string mount_path = 5; - - // Metadata is the explicit metadata that clients set against an entity - // which enables virtual grouping of aliases. Aliases will be indexed - // against their metadata. - // @inject_tag: sentinel:"-" - map metadata = 6; - - // Name is the identifier of this alias in its authentication source. - // This does not uniquely identify an alias in Vault. This in conjunction - // with MountAccessor form to be the factors that represent an alias in a - // unique way. Aliases will be indexed based on this combined uniqueness - // factor. - // @inject_tag: sentinel:"-" - string name = 7; - - // CreationTime is the time at which this alias was first created - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp creation_time = 8; - - // LastUpdateTime is the most recent time at which the properties of this - // alias got modified. This is helpful in filtering out aliases based - // on its age and to take action on them, if desired. - // @inject_tag: sentinel:"-" - google.protobuf.Timestamp last_update_time = 9; - - // MergedFromCanonicalIDs is the FIFO history of merging activity - // @inject_tag: sentinel:"-" - repeated string merged_from_canonical_ids = 10; - - // NamespaceID is the identifier of the namespace to which this alias - // belongs. - // @inject_tag: sentinel:"-" - string namespace_id = 11; - - // Custom Metadata represents the custom data tied to this alias - // @inject_tag: sentinel:"-" - map custom_metadata = 12; - - // Local indicates if the alias only belongs to the cluster where it was - // created. If true, the alias will be stored in a location that is ignored by - // the performance replication subsystem. 
- // @inject_tag: sentinel:"-" - bool local = 13; - - // LocalBucketKey is the identifying element of the location where this alias - // is stored in the storage packer. This helps in querying local aliases - // during invalidation of local aliases in performance standbys. - // @inject_tag: sentinel:"-" - string local_bucket_key = 14; + // ID is the unique identifier that represents this alias + // @inject_tag: sentinel:"-" + string id = 1; + + // CanonicalID is the entity identifier to which this alias belongs + // @inject_tag: sentinel:"-" + string canonical_id = 2; + + // MountType is the backend mount's type to which this alias belongs. + // This enables categorically querying aliases of specific backend types. + // @inject_tag: sentinel:"-" + string mount_type = 3; + + // MountAccessor is the backend mount's accessor to which this alias + // belongs. + // @inject_tag: sentinel:"-" + string mount_accessor = 4; + + // MountPath is the backend mount's path to which the MountAccessor belongs. This + // field is not used for any operational purposes. This is only returned when + // the alias is read, only as a nicety. + // @inject_tag: sentinel:"-" + string mount_path = 5; + + // Metadata is the explicit metadata that clients set against an entity + // which enables virtual grouping of aliases. Aliases will be indexed + // against their metadata. + // @inject_tag: sentinel:"-" + map<string, string> metadata = 6; + + // Name is the identifier of this alias in its authentication source. + // This does not uniquely identify an alias in Vault. This, in conjunction + // with MountAccessor, forms the factors that represent an alias in a + // unique way. Aliases will be indexed based on this combined uniqueness + // factor. + // @inject_tag: sentinel:"-" + string name = 7; + + // CreationTime is the time at which this alias was first created + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp creation_time = 8; + + // LastUpdateTime is the most recent time at which the properties of this + // alias got modified. This is helpful in filtering out aliases based + // on their age and to take action on them, if desired. + // @inject_tag: sentinel:"-" + google.protobuf.Timestamp last_update_time = 9; + + // MergedFromCanonicalIDs is the FIFO history of merging activity + // @inject_tag: sentinel:"-" + repeated string merged_from_canonical_ids = 10; + + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + // @inject_tag: sentinel:"-" + string namespace_id = 11; + + // CustomMetadata represents the custom data tied to this alias + // @inject_tag: sentinel:"-" + map<string, string> custom_metadata = 12; + + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that is ignored by + // the performance replication subsystem. + // @inject_tag: sentinel:"-" + bool local = 13; + + // LocalBucketKey is the identifying element of the location where this alias + // is stored in the storage packer. This helps in querying local aliases + // during invalidation of local aliases in performance standbys. + // @inject_tag: sentinel:"-" + string local_bucket_key = 14; } // Deprecated. Retained for backwards compatibility.
message EntityStorageEntry { - repeated PersonaIndexEntry personas = 1; - string id = 2; - string name = 3; - map<string, string> metadata = 4; - google.protobuf.Timestamp creation_time = 5; - google.protobuf.Timestamp last_update_time= 6; - repeated string merged_entity_ids = 7; - repeated string policies = 8; - string bucket_key_hash = 9; - map<string, mfa.Secret> mfa_secrets = 10; + repeated PersonaIndexEntry personas = 1; + string id = 2; + string name = 3; + map<string, string> metadata = 4; + google.protobuf.Timestamp creation_time = 5; + google.protobuf.Timestamp last_update_time = 6; + repeated string merged_entity_ids = 7; + repeated string policies = 8; + string bucket_key_hash = 9; + map<string, mfa.Secret> mfa_secrets = 10; } // Deprecated. Retained for backwards compatibility. message PersonaIndexEntry { - string id = 1; - string entity_id = 2; - string mount_type = 3; - string mount_accessor = 4; - string mount_path = 5; - map<string, string> metadata = 6; - string name = 7; - google.protobuf.Timestamp creation_time = 8; - google.protobuf.Timestamp last_update_time = 9; - repeated string merged_from_entity_ids = 10; + string id = 1; + string entity_id = 2; + string mount_type = 3; + string mount_accessor = 4; + string mount_path = 5; + map<string, string> metadata = 6; + string name = 7; + google.protobuf.Timestamp creation_time = 8; + google.protobuf.Timestamp last_update_time = 9; + repeated string merged_from_entity_ids = 10; } diff --git a/helper/locking/deadlock.go b/helper/locking/deadlock.go deleted file mode 100644 index e250abd1aecb..000000000000 --- a/helper/locking/deadlock.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build deadlock - -package locking - -import ( - "github.com/sasha-s/go-deadlock" -) - -// DeadlockMutex, when the build tag `deadlock` is present, behaves like a -// sync.Mutex but does periodic checking to see if outstanding locks and requests -// look like a deadlock. If it finds a deadlock candidate it will output it -// prefixed with "POTENTIAL DEADLOCK", as described at -// https://github.com/sasha-s/go-deadlock -type DeadlockMutex struct { - deadlock.Mutex -} - -// DeadlockRWMutex is the RW version of DeadlockMutex. -type DeadlockRWMutex struct { - deadlock.RWMutex -} diff --git a/helper/locking/lock.go b/helper/locking/lock.go index 1b1fae3af9ec..cc2e50345229 100644 --- a/helper/locking/lock.go +++ b/helper/locking/lock.go @@ -1,19 +1,49 @@ -//go:build !deadlock +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 package locking import ( "sync" + + "github.com/sasha-s/go-deadlock" ) -// DeadlockMutex is just a sync.Mutex when the build tag `deadlock` is absent. -// See its other definition in the corresponding deadlock-build-tag-constrained -// file for more details. +// Common mutex interface to allow either built-in or imported deadlock use +type Mutex interface { + Lock() + Unlock() +} + +// Common r/w mutex interface to allow either built-in or imported deadlock use +type RWMutex interface { + Lock() + RLock() + RLocker() sync.Locker + RUnlock() + Unlock() +} + +// DeadlockMutex (used when requested via config option `detect_deadlocks`), +// behaves like a sync.Mutex but does periodic checking to see if outstanding +// locks and requests look like a deadlock. If it finds a deadlock candidate it +// will output it prefixed with "POTENTIAL DEADLOCK", as described at +// https://github.com/sasha-s/go-deadlock type DeadlockMutex struct { - sync.Mutex + deadlock.Mutex } // DeadlockRWMutex is the RW version of DeadlockMutex. type DeadlockRWMutex struct { + deadlock.RWMutex +} +
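As an editorial aside: here is a minimal sketch (not part of the diff) of how the Mutex interface above lets callers pick between the deadlock-detecting mutex and the plain wrapper defined just below. The newMutex helper and the comma-separated detect_deadlocks value are illustrative assumptions, not code from this change; both concrete types satisfy locking.Mutex.

```go
// Sketch only: selecting a locking.Mutex implementation at runtime.
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/vault/helper/locking"
)

// newMutex is a hypothetical helper: it returns the deadlock-detecting mutex
// when the (assumed) comma-separated detect_deadlocks value names this
// subsystem, and the plain sync.Mutex wrapper otherwise.
func newMutex(detectDeadlocks, subsystem string) locking.Mutex {
	for _, name := range strings.Split(detectDeadlocks, ",") {
		if strings.TrimSpace(name) == subsystem {
			// Periodically checks for deadlock candidates, per go-deadlock.
			return &locking.DeadlockMutex{}
		}
	}
	return &locking.SyncMutex{}
}

func main() {
	m := newMutex("expiration,core", "core")
	m.Lock()
	defer m.Unlock()
	fmt.Printf("locked with %T\n", m)
}
```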
+// SyncMutex is a regular sync.Mutex. +type SyncMutex struct { + sync.Mutex +} + +// SyncRWMutex is the RW version of SyncMutex. +type SyncRWMutex struct { sync.RWMutex } diff --git a/helper/logging/logfile.go b/helper/logging/logfile.go index 93d84a8ddd08..2f2eb8fbc46d 100644 --- a/helper/logging/logfile.go +++ b/helper/logging/logfile.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package logging import ( @@ -54,7 +57,8 @@ func (l *LogFile) Write(b []byte) (n int, err error) { if err := l.openNew(); err != nil { return 0, err } - } else if err := l.rotate(); err != nil { // Check for the last contact and rotate if necessary + } + if err := l.rotate(); err != nil { // Check for the last contact and rotate if necessary return 0, err } @@ -79,21 +83,20 @@ func (l *LogFile) fileNamePattern() string { } func (l *LogFile) openNew() error { - fileNamePattern := l.fileNamePattern() - - createTime := now() - newFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + newFileName := l.fileName newFilePath := filepath.Join(l.logPath, newFileName) - // Try creating a file. We truncate the file because we are the only authority to write the logs - filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) + // Try creating or opening the active log file. Since the active log file + // always has the same name, append log entries to prevent overwriting + // previous log data. + filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o640) if err != nil { return err } - // New file, new 'bytes' tracker, new creation time :) :) + // New file, new bytes tracker, new creation time :) l.fileInfo = filePointer - l.lastCreated = createTime + l.lastCreated = now() l.bytesWritten = 0 return nil } @@ -106,6 +109,9 @@ func (l *LogFile) rotate() error { if err := l.fileInfo.Close(); err != nil { return err } + if err := l.renameCurrentFile(); err != nil { + return err + } if err := l.pruneFiles(); err != nil { return err } @@ -145,3 +151,13 @@ func removeFiles(files []string) (err error) { } return err } + +func (l *LogFile) renameCurrentFile() error { + fileNamePattern := l.fileNamePattern() + createTime := now() + currentFilePath := filepath.Join(l.logPath, l.fileName) + oldFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + oldFilePath := filepath.Join(l.logPath, oldFileName) + + return os.Rename(currentFilePath, oldFilePath) +} diff --git a/helper/logging/logfile_test.go b/helper/logging/logfile_test.go index 86153f17e258..8cb66693d8ee 100644 --- a/helper/logging/logfile_test.go +++ b/helper/logging/logfile_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package logging import ( diff --git a/helper/logging/logger.go b/helper/logging/logger.go index 05ef205ee9a5..b37134b93cb4 100644 --- a/helper/logging/logger.go +++ b/helper/logging/logger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package logging import ( @@ -8,7 +11,7 @@ import ( "strings" "time" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" ) @@ -29,7 +32,7 @@ type LogConfig struct { Name string // LogLevel is the minimum level to be logged. - LogLevel log.Level + LogLevel hclog.Level // LogFormat is the log format to use, supported formats are 'standard' and 'json'.
LogFormat LogFormat @@ -45,10 +48,27 @@ type LogConfig struct { // LogRotateMaxFiles is the maximum number of past archived log files to keep LogRotateMaxFiles int + + // DefaultFileName should be set to the value to be used if the LogFilePath + // ends in a path separator such as '/var/log/' + // Examples of the default name are as follows: 'vault', 'agent' or 'proxy'. + // The creator of this struct *must* ensure that it is assigned before doing + // anything with LogConfig! + DefaultFileName string +} + +// NewLogConfig should be used to initialize the LogConfig struct. +func NewLogConfig(defaultFileName string) (*LogConfig, error) { + defaultFileName = strings.TrimSpace(defaultFileName) + if defaultFileName == "" { + return nil, errors.New("default file name is required") + } + + return &LogConfig{DefaultFileName: defaultFileName}, nil } func (c *LogConfig) isLevelInvalid() bool { - return c.LogLevel == log.NoLevel || c.LogLevel == log.Off || c.LogLevel.String() == "unknown" + return c.LogLevel == hclog.NoLevel || c.LogLevel == hclog.Off || c.LogLevel.String() == "unknown" } func (c *LogConfig) isFormatJson() bool { @@ -101,7 +121,7 @@ func parseFullPath(fullPath string) (directory, fileName string, err error) { } // Setup creates a new logger with the specified configuration and writer -func Setup(config *LogConfig, w io.Writer) (log.InterceptLogger, error) { +func Setup(config *LogConfig, w io.Writer) (hclog.InterceptLogger, error) { // Validate the log level if config.isLevelInvalid() { return nil, fmt.Errorf("invalid log level: %v", config.LogLevel) @@ -118,10 +138,13 @@ func Setup(config *LogConfig, w io.Writer) (hclog.InterceptLogger, error) { if err != nil { return nil, err } - + if fileName == "" { + fileName = fmt.Sprintf("%s.log", config.DefaultFileName) + } if config.LogRotateDuration == 0 { config.LogRotateDuration = defaultRotateDuration } + logFile := &LogFile{ fileName: fileName, logPath: dir, @@ -138,7 +161,7 @@ func Setup(config *LogConfig, w io.Writer) (hclog.InterceptLogger, error) { writers = append(writers, logFile) } - logger := log.NewInterceptLogger(&log.LoggerOptions{ + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: config.Name, Level: config.LogLevel, IndependentLevels: true, @@ -165,21 +188,21 @@ func ParseLogFormat(format string) (LogFormat, error) { // ParseLogLevel returns the hclog.Level that corresponds with the provided level string. // This differs from hclog.LevelFromString in that it supports additional level strings. -func ParseLogLevel(logLevel string) (log.Level, error) { - var result log.Level +func ParseLogLevel(logLevel string) (hclog.Level, error) { + var result hclog.Level logLevel = strings.ToLower(strings.TrimSpace(logLevel)) switch logLevel { case "trace": - result = log.Trace + result = hclog.Trace case "debug": - result = log.Debug + result = hclog.Debug case "notice", "info", "": - result = log.Info + result = hclog.Info case "warn", "warning": - result = log.Warn + result = hclog.Warn case "err", "error": - result = log.Error + result = hclog.Error default: return -1, errors.New(fmt.Sprintf("unknown log level: %s", logLevel)) } @@ -188,11 +211,11 @@ func ParseLogLevel(logLevel string) (log.Level, error) { } // TranslateLoggerLevel returns the string that corresponds with logging level of the hclog.Logger.
-func TranslateLoggerLevel(logger log.Logger) (string, error) { +func TranslateLoggerLevel(logger hclog.Logger) (string, error) { logLevel := logger.GetLevel() switch logLevel { - case log.Trace, log.Debug, log.Info, log.Warn, log.Error: + case hclog.Trace, hclog.Debug, hclog.Info, hclog.Warn, hclog.Error: return logLevel.String(), nil default: return "", fmt.Errorf("unknown log level") diff --git a/helper/logging/logger_test.go b/helper/logging/logger_test.go index efeabc5d5658..c5f7ec50d9bc 100644 --- a/helper/logging/logger_test.go +++ b/helper/logging/logger_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package logging import ( @@ -5,15 +8,17 @@ import ( "encoding/json" "errors" "os" + "path/filepath" "testing" - log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestLogger_SetupBasic(t *testing.T) { - cfg := &LogConfig{Name: "test-system", LogLevel: log.Info} + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info logger, err := Setup(cfg, nil) require.NoError(t, err) @@ -23,16 +28,15 @@ func TestLogger_SetupBasic(t *testing.T) { } func TestLogger_SetupInvalidLogLevel(t *testing.T) { - cfg := &LogConfig{} + cfg := newTestLogConfig(t) _, err := Setup(cfg, nil) assert.Containsf(t, err.Error(), "invalid log level", "expected error %s", err) } func TestLogger_SetupLoggerErrorLevel(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Error, - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Error var buf bytes.Buffer @@ -45,15 +49,16 @@ func TestLogger_SetupLoggerErrorLevel(t *testing.T) { output := buf.String() - require.Contains(t, output, "[ERROR] test error msg") - require.NotContains(t, output, "[INFO] test info msg") + require.Contains(t, output, "[ERROR] test-system: test error msg") + require.NotContains(t, output, "[INFO] test-system: test info msg") } func TestLogger_SetupLoggerDebugLevel(t *testing.T) { - cfg := LogConfig{LogLevel: log.Debug} + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug var buf bytes.Buffer - logger, err := Setup(&cfg, &buf) + logger, err := Setup(cfg, &buf) require.NoError(t, err) require.NotNil(t, logger) @@ -62,15 +67,14 @@ func TestLogger_SetupLoggerDebugLevel(t *testing.T) { output := buf.String() - require.Contains(t, output, "[INFO] test info msg") - require.Contains(t, output, "[DEBUG] test debug msg") + require.Contains(t, output, "[INFO] test-system: test info msg") + require.Contains(t, output, "[DEBUG] test-system: test debug msg") } -func TestLogger_SetupLoggerWithName(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name: "test-system", - } +func TestLogger_SetupLoggerWithoutName(t *testing.T) { + cfg := newTestLogConfig(t) + cfg.Name = "" + cfg.LogLevel = hclog.Info var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -79,15 +83,13 @@ func TestLogger_SetupLoggerWithName(t *testing.T) { logger.Warn("test warn msg") - require.Contains(t, buf.String(), "[WARN] test-system: test warn msg") + require.Contains(t, buf.String(), "[WARN] test warn msg") } func TestLogger_SetupLoggerWithJSON(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - LogFormat: JSONFormat, - Name: "test-system", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug + cfg.LogFormat = JSONFormat var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -105,13 +107,68 @@ func TestLogger_SetupLoggerWithJSON(t *testing.T) { require.Equal(t, jsonOutput["@message"], "test warn msg") } 
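As an editorial aside before the remaining test changes: the logfile.go hunks above rework rotation so the active log file keeps a stable name (openNew opens it in append mode rather than truncating) and rotate renames it to a timestamped archive via renameCurrentFile. Below is a minimal sketch of that flow under stated assumptions: the helper name rotateSketch and the "vault.log" file name are illustrative, not code from this diff.

```go
// Sketch only: the rename-then-reopen rotation flow from logfile.go above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// rotateSketch mirrors rotate/renameCurrentFile/openNew: rename the stable
// active file to a timestamped archive, then re-create it in append mode.
func rotateSketch(dir, fileName string) error {
	active := filepath.Join(dir, fileName)
	ext := filepath.Ext(fileName)
	base := fileName[:len(fileName)-len(ext)]
	archive := filepath.Join(dir, fmt.Sprintf("%s-%d%s", base, time.Now().UnixNano(), ext))
	if err := os.Rename(active, archive); err != nil {
		return err
	}
	// Append mode ensures reopening never truncates previously written data.
	f, err := os.OpenFile(active, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o640)
	if err != nil {
		return err
	}
	return f.Close()
}

func main() {
	dir, err := os.MkdirTemp("", "rotate-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Seed an active log file, then rotate it once.
	if err := os.WriteFile(filepath.Join(dir, "vault.log"), []byte("entry\n"), 0o640); err != nil {
		panic(err)
	}
	if err := rotateSketch(dir, "vault.log"); err != nil {
		panic(err)
	}
	archives, _ := filepath.Glob(filepath.Join(dir, "vault-*.log"))
	fmt.Println("archives:", archives) // expect exactly one timestamped archive
}
```

This is the behavior TestLogger_SetupLoggerWithValidLogPathFileNameRotate below asserts: after rotation, one juan-{timestamp}.log archive exists alongside the active juan.log.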
-func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { +func TestLogger_SetupLoggerWithValidLogPathMissingFileName(t *testing.T) { tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir + var buf bytes.Buffer - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir, //+ "/", - } + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + + m, err := filepath.Glob(cfg.LogFilePath + "*") + require.NoError(t, err) + require.Truef(t, len(m) == 1, "no files were found") +} + +func TestLogger_SetupLoggerWithValidLogPathFileName(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = filepath.Join(tmpDir, "juan.log") + var buf bytes.Buffer + + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + f, err := os.Stat(cfg.LogFilePath) + require.NoError(t, err) + require.NotNil(t, f) +} + +func TestLogger_SetupLoggerWithValidLogPathFileNameRotate(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = filepath.Join(tmpDir, "juan.log") + cfg.LogRotateBytes = 1 // set a tiny number of bytes to force rotation + var buf bytes.Buffer + + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + logger.Info("juan?") + logger.Info("john?") + f, err := os.Stat(cfg.LogFilePath) + require.NoError(t, err) + require.NotNil(t, f) + m, err := filepath.Glob(tmpDir + "/juan-*") // look for juan-{timestamp}.log + require.NoError(t, err) + require.Truef(t, len(m) == 1, "no files were found") +} + +func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { + tmpDir := t.TempDir() + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -120,10 +177,10 @@ func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) { } func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: "nonexistentdir/", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = "nonexistentdir/" var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -139,10 +196,9 @@ func TestLogger_SetupLoggerWithInValidLogPathPermission(t *testing.T) { assert.NoError(t, err, "unexpected error testing with invalid log path permission") defer os.RemoveAll(tmpDir) - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tmpDir + "/", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tmpDir + "/" var buf bytes.Buffer logger, err := Setup(cfg, &buf) @@ -185,10 +241,10 @@ func TestLogger_SetupLoggerWithInvalidLogFilePath(t *testing.T) { for name, tc := range cases { name := name tc := tc - cfg := &LogConfig{ - LogLevel: log.Info, - LogFilePath: tc.path, - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tc.path + _, err := Setup(cfg, &bytes.Buffer{}) assert.Error(t, err, "%s: expected error due to *", name) assert.Contains(t, err.Error(), tc.message, "%s: error message does not match: %s", name, err.Error()) @@ -196,26 +252,34 @@ } func TestLogger_ChangeLogLevels(t *testing.T) { - cfg := &LogConfig{ - LogLevel: log.Debug, - Name:
"test-system", - } + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug var buf bytes.Buffer logger, err := Setup(cfg, &buf) require.NoError(t, err) require.NotNil(t, logger) - assert.Equal(t, log.Debug, logger.GetLevel()) + assert.Equal(t, hclog.Debug, logger.GetLevel()) // Create new named loggers from the base logger and change the levels logger2 := logger.Named("test2") logger3 := logger.Named("test3") - logger2.SetLevel(log.Info) - logger3.SetLevel(log.Error) + logger2.SetLevel(hclog.Info) + logger3.SetLevel(hclog.Error) + + assert.Equal(t, hclog.Debug, logger.GetLevel()) + assert.Equal(t, hclog.Info, logger2.GetLevel()) + assert.Equal(t, hclog.Error, logger3.GetLevel()) +} + +func newTestLogConfig(t *testing.T) *LogConfig { + t.Helper() + + cfg, err := NewLogConfig("test") + require.NoError(t, err) + cfg.Name = "test-system" - assert.Equal(t, log.Debug, logger.GetLevel()) - assert.Equal(t, log.Info, logger2.GetLevel()) - assert.Equal(t, log.Error, logger3.GetLevel()) + return cfg } diff --git a/helper/metricsutil/bucket.go b/helper/metricsutil/bucket.go index 9cbb2cdc2daf..f25df4d28178 100644 --- a/helper/metricsutil/bucket.go +++ b/helper/metricsutil/bucket.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( diff --git a/helper/metricsutil/bucket_test.go b/helper/metricsutil/bucket_test.go index f37584781627..19b6636ed404 100644 --- a/helper/metricsutil/bucket_test.go +++ b/helper/metricsutil/bucket_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go index 0ad0e9d876cf..bb61f24ddc09 100644 --- a/helper/metricsutil/gauge_process.go +++ b/helper/metricsutil/gauge_process.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( @@ -8,24 +11,9 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/timeutil" ) -// This interface allows unit tests to substitute in a simulated clock. -type clock interface { - Now() time.Time - NewTicker(time.Duration) *time.Ticker -} - -type defaultClock struct{} - -func (_ defaultClock) Now() time.Time { - return time.Now() -} - -func (_ defaultClock) NewTicker(d time.Duration) *time.Ticker { - return time.NewTicker(d) -} - // GaugeLabelValues is one gauge in a set sharing a single key, that // are measured in a batch. 
type GaugeLabelValues struct { @@ -73,7 +61,7 @@ type GaugeCollectionProcess struct { maxGaugeCardinality int // time source - clock clock + clock timeutil.Clock } // NewGaugeCollectionProcess creates a new collection process for the callback @@ -98,7 +86,7 @@ func NewGaugeCollectionProcess( gaugeInterval, maxGaugeCardinality, logger, - defaultClock{}, + timeutil.DefaultClock{}, ) } @@ -121,7 +109,7 @@ func (m *ClusterMetricSink) NewGaugeCollectionProcess( m.GaugeInterval, m.MaxGaugeCardinality, logger, - defaultClock{}, + timeutil.DefaultClock{}, ) } @@ -134,7 +122,7 @@ func newGaugeCollectionProcessWithClock( gaugeInterval time.Duration, maxGaugeCardinality int, logger log.Logger, - clock clock, + clock timeutil.Clock, ) (*GaugeCollectionProcess, error) { process := &GaugeCollectionProcess{ stop: make(chan struct{}, 1), diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go index 9971714e04e3..e5e1c6145b5c 100644 --- a/helper/metricsutil/gauge_process_test.go +++ b/helper/metricsutil/gauge_process_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( @@ -12,6 +15,7 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/timeutil" ) // SimulatedTime maintains a virtual clock so the test isn't @@ -21,9 +25,10 @@ import ( type SimulatedTime struct { now time.Time tickerBarrier chan *SimulatedTicker + timeutil.DefaultClock } -var _ clock = &SimulatedTime{} +var _ timeutil.Clock = &SimulatedTime{} type SimulatedTicker struct { ticker *time.Ticker @@ -118,7 +123,7 @@ func TestGauge_Creation(t *testing.T) { t.Fatalf("Error creating collection process: %v", err) } - if _, ok := p.clock.(defaultClock); !ok { + if _, ok := p.clock.(timeutil.DefaultClock); !ok { t.Error("Default clock not installed.") } diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go index de85c7e4628e..9654d5234277 100644 --- a/helper/metricsutil/metricsutil.go +++ b/helper/metricsutil/metricsutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( diff --git a/helper/metricsutil/metricsutil_test.go b/helper/metricsutil/metricsutil_test.go index 1b817ddad192..ffe77b56cb0a 100644 --- a/helper/metricsutil/metricsutil_test.go +++ b/helper/metricsutil/metricsutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index 67deb3bee1cd..8b33c8802003 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( @@ -34,12 +37,14 @@ type ClusterMetricSink struct { } type TelemetryConstConfig struct { - LeaseMetricsEpsilon time.Duration - NumLeaseMetricsTimeBuckets int - LeaseMetricsNameSpaceLabels bool + LeaseMetricsEpsilon time.Duration + NumLeaseMetricsTimeBuckets int + LeaseMetricsNameSpaceLabels bool + RollbackMetricsIncludeMountPoint bool } type Metrics interface { + SetGauge(key []string, val float32) SetGaugeWithLabels(key []string, val float32, labels []Label) IncrCounterWithLabels(key []string, val float32, labels []Label) AddSampleWithLabels(key []string, val float32, labels []Label) diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go index c0fb2c386677..34c5cdda8c87 100644 --- a/helper/metricsutil/wrapped_metrics_test.go +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package metricsutil import ( diff --git a/helper/monitor/monitor.go b/helper/monitor/monitor.go index 490e2fa08bba..ea4799ff09f5 100644 --- a/helper/monitor/monitor.go +++ b/helper/monitor/monitor.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package monitor import ( diff --git a/helper/monitor/monitor_test.go b/helper/monitor/monitor_test.go index 0133a351b99d..e281952fe686 100644 --- a/helper/monitor/monitor_test.go +++ b/helper/monitor/monitor_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package monitor import ( diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go index 93d68622dec5..1a2346511dd5 100644 --- a/helper/namespace/namespace.go +++ b/helper/namespace/namespace.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package namespace import ( @@ -61,11 +64,8 @@ func RootContext(ctx context.Context) context.Context { return ContextWithNamespace(ctx, RootNamespace) } -// This function caches the ns to avoid doing a .Value lookup over and over, -// because it's called a *lot* in the request critical path. .Value is -// concurrency-safe so uses some kind of locking/atomicity, but it should never -// be read before first write, plus we don't believe this will be called from -// different goroutines, so it should be safe. +// FromContext retrieves the namespace from a context, or an error +// if there is no namespace in the context. func FromContext(ctx context.Context) (*Namespace, error) { if ctx == nil { return nil, errors.New("context was nil") diff --git a/helper/namespace/namespace_test.go b/helper/namespace/namespace_test.go index 442b46b90447..10ee981b91d7 100644 --- a/helper/namespace/namespace_test.go +++ b/helper/namespace/namespace_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package namespace import ( diff --git a/helper/osutil/fileinfo.go b/helper/osutil/fileinfo.go index 4b6ba7910f50..dffcc0f05917 100644 --- a/helper/osutil/fileinfo.go +++ b/helper/osutil/fileinfo.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package osutil import ( @@ -64,3 +67,17 @@ func OwnerPermissionsMatch(path string, uid int, permissions int) error { return nil } + +// OwnerPermissionsMatchFile checks if the vault user is the owner and the permissions are secure for the input file +func OwnerPermissionsMatchFile(file *os.File, uid int, permissions int) error { + info, err := file.Stat() + if err != nil { + return fmt.Errorf("file stat error on path %q: %w", file.Name(), err) + } + err = checkPathInfo(info, file.Name(), uid, permissions) + if err != nil { + return err + } + + return nil +} diff --git a/helper/osutil/fileinfo_test.go b/helper/osutil/fileinfo_test.go index 0c77d4873ed1..edf7c50c9d85 100644 --- a/helper/osutil/fileinfo_test.go +++ b/helper/osutil/fileinfo_test.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package osutil import ( "io/fs" "os" "os/user" + "path/filepath" "runtime" "strconv" "testing" @@ -82,3 +86,98 @@ func TestCheckPathInfo(t *testing.T) { } } } + +// TestOwnerPermissionsMatchFile creates a file and verifies that the current user of the process is the owner of the +// file +func TestOwnerPermissionsMatchFile(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} + +// TestOwnerPermissionsMatchFile_OtherUser creates a file using the user that started the current process and verifies +// that a different user is not the owner of the file +func TestOwnerPermissionsMatchFile_OtherUser(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid)+1, int(info.Mode())); err == nil { + t.Fatalf("expected error but got none") + } +} + +// TestOwnerPermissionsMatchFile_Symlink creates a file and a symlink to that file.
The test verifies that the current +// user of the process is the owner of the file +func TestOwnerPermissionsMatchFile_Symlink(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + symlink := filepath.Join(dir, "symlink") + err = os.Symlink(path, symlink) + if err != nil { + t.Fatal("failed to symlink file", err) + } + symlinkedFile, err := os.Open(symlink) + if err != nil { + t.Fatal("failed to open file", err) + } + info, err := os.Stat(symlink) + if err != nil { + t.Fatal("failed to stat test file", err) + } + if err := OwnerPermissionsMatchFile(symlinkedFile, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} diff --git a/helper/osutil/fileinfo_unix.go b/helper/osutil/fileinfo_unix.go index c49a591ce9c6..da7b58d61c0d 100644 --- a/helper/osutil/fileinfo_unix.go +++ b/helper/osutil/fileinfo_unix.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !windows package osutil diff --git a/helper/osutil/fileinfo_unix_test.go b/helper/osutil/fileinfo_unix_test.go index c31ca5bdc372..65ed863febb2 100644 --- a/helper/osutil/fileinfo_unix_test.go +++ b/helper/osutil/fileinfo_unix_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !windows package osutil diff --git a/helper/osutil/fileinfo_windows.go b/helper/osutil/fileinfo_windows.go index 0869c97e7d98..9292b4613af1 100644 --- a/helper/osutil/fileinfo_windows.go +++ b/helper/osutil/fileinfo_windows.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build windows package osutil diff --git a/helper/parseip/parseip.go b/helper/parseip/parseip.go index 414a3f05c81b..f4e6e0fc3e4d 100644 --- a/helper/parseip/parseip.go +++ b/helper/parseip/parseip.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package parseip import ( diff --git a/helper/parseip/parseip_test.go b/helper/parseip/parseip_test.go index 5d2b3645b853..fd8169a55311 100644 --- a/helper/parseip/parseip_test.go +++ b/helper/parseip/parseip_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package parseip import ( diff --git a/helper/pgpkeys/encrypt_decrypt.go b/helper/pgpkeys/encrypt_decrypt.go index 554013d6af49..44738caa0eaa 100644 --- a/helper/pgpkeys/encrypt_decrypt.go +++ b/helper/pgpkeys/encrypt_decrypt.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys import ( @@ -53,7 +56,6 @@ func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, er if entities == nil { var err error entities, err = GetEntities(pgpKeys) - if err != nil { return nil, err } diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go index e107bc994338..7749e5c8bffb 100644 --- a/helper/pgpkeys/flag.go +++ b/helper/pgpkeys/flag.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys import ( diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go index ec6402d5eb2f..f8447b61c588 100644 --- a/helper/pgpkeys/flag_test.go +++ b/helper/pgpkeys/flag_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys import ( diff --git a/helper/pgpkeys/keybase.go b/helper/pgpkeys/keybase.go index b2571b451d9b..541841720f5d 100644 --- a/helper/pgpkeys/keybase.go +++ b/helper/pgpkeys/keybase.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys import ( diff --git a/helper/pgpkeys/keybase_test.go b/helper/pgpkeys/keybase_test.go index 3faa3f5d8db2..2c8c229cc8cb 100644 --- a/helper/pgpkeys/keybase_test.go +++ b/helper/pgpkeys/keybase_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys import ( diff --git a/helper/pgpkeys/test_keys.go b/helper/pgpkeys/test_keys.go index c10a9055ed00..cccda6e2536b 100644 --- a/helper/pgpkeys/test_keys.go +++ b/helper/pgpkeys/test_keys.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package pgpkeys const ( diff --git a/builtin/credential/aws/pkcs7/LICENSE b/helper/pkcs7/LICENSE similarity index 100% rename from builtin/credential/aws/pkcs7/LICENSE rename to helper/pkcs7/LICENSE diff --git a/builtin/credential/aws/pkcs7/README.md b/helper/pkcs7/README.md similarity index 100% rename from builtin/credential/aws/pkcs7/README.md rename to helper/pkcs7/README.md diff --git a/builtin/credential/aws/pkcs7/ber.go b/helper/pkcs7/ber.go similarity index 98% rename from builtin/credential/aws/pkcs7/ber.go rename to helper/pkcs7/ber.go index 0b18a6c8d361..eb6b1d0af660 100644 --- a/builtin/credential/aws/pkcs7/ber.go +++ b/helper/pkcs7/ber.go @@ -149,14 +149,14 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { for ber[offset] >= 0x80 { tag = tag*128 + ber[offset] - 0x80 offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } } // jvehent 20170227: this doesn't appear to be used anywhere... 
// tag = tag*128 + ber[offset] - 0x80 offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } } @@ -172,7 +172,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { var length int l := ber[offset] offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } indefinite := false @@ -192,7 +192,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) { for i := 0; i < numberOfBytes; i++ { length = length*256 + (int)(ber[offset]) offset++ - if offset > berLen { + if offset >= berLen { return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") } } diff --git a/builtin/credential/aws/pkcs7/ber_test.go b/helper/pkcs7/ber_test.go similarity index 85% rename from builtin/credential/aws/pkcs7/ber_test.go rename to helper/pkcs7/ber_test.go index 169c78ab701e..d3908f6bc32c 100644 --- a/builtin/credential/aws/pkcs7/ber_test.go +++ b/helper/pkcs7/ber_test.go @@ -9,6 +9,38 @@ import ( "testing" ) +// FuzzReadObject is a fuzz test that will generate random input data in an +// attempt to find crash-causing inputs +// https://go.dev/doc/security/fuzz +func FuzzReadObject(f *testing.F) { + // seed corpus used to guide the fuzzing engine + seedCorpus := []struct { + input []byte + offset int + }{ + {[]byte{0x30, 0x85}, 0}, + {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, 0}, + {[]byte{0x30, 0x82, 0x0, 0x1}, 0}, + {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, 0}, + {[]byte{0x30, 0x80, 0x1, 0x2}, 0}, + {[]byte{0x30, 0x03, 0x01, 0x02}, 0}, + {[]byte{0x30}, 0}, + {[]byte("?0"), 0}, + } + for _, tc := range seedCorpus { + f.Add(tc.input, tc.offset) // Use f.Add to provide a seed corpus + } + f.Fuzz(func(t *testing.T, ber []byte, offset int) { + if offset < 0 { + return + } + _, _, err := readObject(ber, offset) + if err != nil { + t.Log(ber, offset) + } + }) +} + func TestBer2Der(t *testing.T) { // indefinite length fixture ber := []byte{0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00} @@ -44,13 +76,14 @@ func TestBer2Der_Negatives(t *testing.T) { Input []byte ErrorContains string }{ - {[]byte{0x30, 0x85}, "tag length too long"}, + {[]byte{0x30, 0x85}, "end of ber data reached"}, {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, "length is negative"}, {[]byte{0x30, 0x82, 0x0, 0x1}, "length has leading zero"}, {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, "Invalid BER format"}, - {[]byte{0x30, 0x80, 0x1, 0x2}, "BER tag length is more than available data"}, + {[]byte{0x30, 0x80, 0x1, 0x2}, "end of ber data reached"}, {[]byte{0x30, 0x03, 0x01, 0x02}, "length is more than available data"}, {[]byte{0x30}, "end of ber data reached"}, + {[]byte("?0"), "end of ber data reached"}, } for _, fixture := range fixtures { diff --git a/builtin/credential/aws/pkcs7/decrypt.go b/helper/pkcs7/decrypt.go similarity index 100% rename from builtin/credential/aws/pkcs7/decrypt.go rename to helper/pkcs7/decrypt.go diff --git a/builtin/credential/aws/pkcs7/decrypt_test.go b/helper/pkcs7/decrypt_test.go similarity index 100% rename from builtin/credential/aws/pkcs7/decrypt_test.go rename to helper/pkcs7/decrypt_test.go diff --git a/builtin/credential/aws/pkcs7/encrypt.go b/helper/pkcs7/encrypt.go similarity index 100% rename from builtin/credential/aws/pkcs7/encrypt.go rename to helper/pkcs7/encrypt.go diff --git a/builtin/credential/aws/pkcs7/encrypt_test.go b/helper/pkcs7/encrypt_test.go similarity index 100% rename from 
builtin/credential/aws/pkcs7/encrypt_test.go rename to helper/pkcs7/encrypt_test.go diff --git a/builtin/credential/aws/pkcs7/pkcs7.go b/helper/pkcs7/pkcs7.go similarity index 100% rename from builtin/credential/aws/pkcs7/pkcs7.go rename to helper/pkcs7/pkcs7.go diff --git a/builtin/credential/aws/pkcs7/pkcs7_test.go b/helper/pkcs7/pkcs7_test.go similarity index 100% rename from builtin/credential/aws/pkcs7/pkcs7_test.go rename to helper/pkcs7/pkcs7_test.go diff --git a/builtin/credential/aws/pkcs7/sign.go b/helper/pkcs7/sign.go similarity index 99% rename from builtin/credential/aws/pkcs7/sign.go rename to helper/pkcs7/sign.go index 72b99388548e..b64fcb11da47 100644 --- a/builtin/credential/aws/pkcs7/sign.go +++ b/helper/pkcs7/sign.go @@ -12,14 +12,8 @@ import ( "fmt" "math/big" "time" - - "github.com/hashicorp/vault/internal" ) -func init() { - internal.PatchSha1() -} - // SignedData is an opaque data structure for creating signed data payloads type SignedData struct { sd signedData diff --git a/builtin/credential/aws/pkcs7/sign_test.go b/helper/pkcs7/sign_test.go similarity index 88% rename from builtin/credential/aws/pkcs7/sign_test.go rename to helper/pkcs7/sign_test.go index 641cb0465fd0..e08d737563f8 100644 --- a/builtin/credential/aws/pkcs7/sign_test.go +++ b/helper/pkcs7/sign_test.go @@ -89,6 +89,27 @@ func TestSign(t *testing.T) { } func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { + dsaPublicCert := []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD +Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE +exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + content := []byte("Hello World") // write the content to a temp file tmpContentFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_content") diff --git a/builtin/credential/aws/pkcs7/verify.go b/helper/pkcs7/verify.go similarity index 99% rename from builtin/credential/aws/pkcs7/verify.go rename to helper/pkcs7/verify.go index 002e77f6e16e..8dff1eaecf15 100644 --- a/builtin/credential/aws/pkcs7/verify.go +++ b/helper/pkcs7/verify.go @@ -118,7 +118,7 @@ func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPo } } -// dsaSignature verifies the DSA signature on a PKCS7 document. DSA support was +// dsaCheckSignature verifies the DSA signature on a PKCS7 document. DSA support was // removed from Go's crypto/x509 support prior to Go 1.16. This allows // verifying legacy signatures until affected applications can be migrated off // of DSA. 
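Before the helper/policies changes, an editorial note on the pkcs7/ber.go hunks above: they tighten the bounds checks from `offset > berLen` to `offset >= berLen`. Indexing ber[offset] is only valid while offset < len(ber), so the old check admitted a read one byte past the end, which the new fuzz seeds (for example {0x30, 0x85}) exercise. Below is a small sketch of the advance-then-check pattern; the helper name readLengthByte is hypothetical, not from the diff.

```go
// Sketch only: why ber.go now uses >= when advancing through BER input.
package main

import (
	"errors"
	"fmt"
)

// readLengthByte mimics readObject's advance-then-check pattern: consume one
// byte, then verify a further read at the new offset would still be in range.
func readLengthByte(ber []byte, offset int) (byte, int, error) {
	b := ber[offset]
	offset++
	// With the old `offset > len(ber)` check, offset == len(ber) slipped
	// through and a later ber[offset] read panicked; `>=` rejects that case.
	if offset >= len(ber) {
		return 0, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
	}
	return b, offset, nil
}

func main() {
	ber := []byte{0x30, 0x85} // a truncated long-form length, per the fuzz seeds
	if _, _, err := readLengthByte(ber, 1); err != nil {
		fmt.Println(err) // a clean error instead of an index-out-of-range panic
	}
}
```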
diff --git a/helper/policies/policies.go b/helper/policies/policies.go index 729ce10b2fc6..ea180b4e90e4 100644 --- a/helper/policies/policies.go +++ b/helper/policies/policies.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package policies import "sort" diff --git a/helper/policies/policies_test.go b/helper/policies/policies_test.go index ba9b0a8f70b0..09c104d21ea4 100644 --- a/helper/policies/policies_test.go +++ b/helper/policies/policies_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package policies import "testing" diff --git a/helper/proxyutil/proxyutil.go b/helper/proxyutil/proxyutil.go index fdb20973e003..7724dde2fd3a 100644 --- a/helper/proxyutil/proxyutil.go +++ b/helper/proxyutil/proxyutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package proxyutil import ( diff --git a/helper/random/parser.go b/helper/random/parser.go index 3184db8aa5c6..f3523226eafd 100644 --- a/helper/random/parser.go +++ b/helper/random/parser.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/parser_test.go b/helper/random/parser_test.go index 59cdb8143043..7ca05fd00b14 100644 --- a/helper/random/parser_test.go +++ b/helper/random/parser_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/random_api.go b/helper/random/random_api.go index 9bc89f9af7e5..0dea4a0a6566 100644 --- a/helper/random/random_api.go +++ b/helper/random/random_api.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/registry.go b/helper/random/registry.go index efdcf5c302f5..59393561a0fb 100644 --- a/helper/random/registry.go +++ b/helper/random/registry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/registry_test.go b/helper/random/registry_test.go index 3d7060650b4f..10e6af0de2c8 100644 --- a/helper/random/registry_test.go +++ b/helper/random/registry_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/rules.go b/helper/random/rules.go index fead5b4ffe65..240437a2f865 100644 --- a/helper/random/rules.go +++ b/helper/random/rules.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/rules_test.go b/helper/random/rules_test.go index 18aa00879826..535bac80670e 100644 --- a/helper/random/rules_test.go +++ b/helper/random/rules_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/serializing.go b/helper/random/serializing.go index 93371df02878..d3a51ca2d079 100644 --- a/helper/random/serializing.go +++ b/helper/random/serializing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/serializing_test.go b/helper/random/serializing_test.go index 171053742993..b05afd66029e 100644 --- a/helper/random/serializing_test.go +++ b/helper/random/serializing_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/random/string_generator.go b/helper/random/string_generator.go index 621930eb66f2..62a5ab9061d5 100644 --- a/helper/random/string_generator.go +++ b/helper/random/string_generator.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( @@ -7,6 +10,7 @@ import ( "io" "math" "sort" + "sync" "time" "unicode" @@ -66,7 +70,7 @@ func sortCharset(chars string) string { return string(r) } -// StringGenerator generats random strings from the provided charset & adhering to a set of rules. The set of rules +// StringGenerator generates random strings from the provided charset & adhering to a set of rules. The set of rules // are things like CharsetRule which requires a certain number of characters from a sub-charset. type StringGenerator struct { // Length of the string to generate. @@ -76,7 +80,8 @@ type StringGenerator struct { Rules serializableRules `mapstructure:"-" json:"rule"` // This is "rule" in JSON so it matches the HCL property type // CharsetRule to choose runes from. This is computed from the rules, not directly configurable - charset runes + charset runes + charsetLock sync.RWMutex } // Generate a random string from the charset and adhering to the provided rules. @@ -116,7 +121,10 @@ func (g *StringGenerator) generate(rng io.Reader) (str string, err error) { // If performance improvements need to be made, this can be changed to read a batch of // potential strings at once rather than one at a time. This will significantly // improve performance, but at the cost of added complexity. - candidate, err := randomRunes(rng, g.charset, g.Length) + g.charsetLock.RLock() + charset := g.charset + g.charsetLock.RUnlock() + candidate, err := randomRunes(rng, charset, g.Length) if err != nil { return "", fmt.Errorf("unable to generate random characters: %w", err) } @@ -229,6 +237,8 @@ func (g *StringGenerator) validateConfig() (err error) { merr = multierror.Append(merr, fmt.Errorf("specified rules require at least %d characters but %d is specified", minLen, g.Length)) } + g.charsetLock.Lock() + defer g.charsetLock.Unlock() // Ensure we have a charset & all characters are printable if len(g.charset) == 0 { // Yes this is mutating the generator but this is done so we don't have to compute this on every generation diff --git a/helper/random/string_generator_test.go b/helper/random/string_generator_test.go index af4e7da14962..c8ab3b6ace35 100644 --- a/helper/random/string_generator_test.go +++ b/helper/random/string_generator_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package random import ( diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index 4633d523bea5..219049b1bb0a 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package storagepacker import ( diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go index cc2448b2bc90..d1f4f66e7415 100644 --- a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package storagepacker import ( @@ -11,6 +14,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/protobuf/types/known/anypb" ) func BenchmarkStoragePacker(b *testing.B) { @@ -151,7 +155,7 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { Policies: []string{"policy1", "policy2"}, } - marshaledEntity, err := ptypes.MarshalAny(entity) + marshaledEntity, err := anypb.New(entity) if err != nil { t.Fatal(err) } diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index c602303af6f7..778ebea1b826 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: helper/storagepacker/types.proto package storagepacker diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 4edfaf4f8572..062f602bf1ee 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -1,21 +1,24 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 -option go_package = "github.com/hashicorp/vault/helper/storagepacker"; +syntax = "proto3"; package storagepacker; import "google/protobuf/any.proto"; +option go_package = "github.com/hashicorp/vault/helper/storagepacker"; + // Item represents an entry that gets inserted into the storage packer message Item { - // ID must be provided by the caller; the same value, if used with GetItem, - // can be used to fetch the item. However, when iterating through a bucket, - // this ID will be an internal ID. In other words, outside of the use-case - // described above, the caller *must not* rely on this value to be - // consistent with what they passed in. - string id = 1; - // message is the contents of the item - google.protobuf.Any message = 2; + // ID must be provided by the caller; the same value, if used with GetItem, + // can be used to fetch the item. However, when iterating through a bucket, + // this ID will be an internal ID. In other words, outside of the use-case + // described above, the caller *must not* rely on this value to be + // consistent with what they passed in. + string id = 1; + // message is the contents of the item + google.protobuf.Any message = 2; } // Bucket is a construct to hold multiple items within itself. This @@ -25,10 +28,10 @@ message Item { // to become independent buckets. Hence, this can grow infinitely in terms of // storage space for items that get inserted. message Bucket { - // Key is the storage path where the bucket gets stored - string key = 1; - // Items holds the items contained within this bucket. Used by v1. - repeated Item items = 2; - // ItemMap stores a mapping of item ID to message. Used by v2. - map<string, google.protobuf.Any> item_map = 3; + // Key is the storage path where the bucket gets stored + string key = 1; + // Items holds the items contained within this bucket. Used by v1. + repeated Item items = 2; + // ItemMap stores a mapping of item ID to message. Used by v2. + map<string, google.protobuf.Any> item_map = 3; }
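The storagepacker test change above (ptypes.MarshalAny to anypb.New) reflects the move to the google.golang.org/protobuf API. Here is a minimal sketch of wrapping an arbitrary message into an Item as types.proto describes; the Go field casing (ID, Message) is assumed to follow Vault's generated types, and the timestamp payload is purely illustrative.

```go
// Sketch only: packing an arbitrary proto message into a storagepacker Item.
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/storagepacker"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Any proto message works as a payload; a timestamp stands in here.
	packed, err := anypb.New(timestamppb.Now())
	if err != nil {
		panic(err)
	}
	item := &storagepacker.Item{
		ID:      "example-item", // caller-provided, per the Item doc comment
		Message: packed,
	}
	fmt.Println(item.ID, item.Message.TypeUrl)
}
```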
diff --git a/helper/syncmap/syncmap.go b/helper/syncmap/syncmap.go new file mode 100644 index 000000000000..0734d953858a --- /dev/null +++ b/helper/syncmap/syncmap.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package syncmap + +import "sync" + +// SyncMap implements a map similar to sync.Map, but with generics and with +// equality of values determined by an "ID()" method. +type SyncMap[K comparable, V IDer] struct { + // lock is used to synchronize access to the map + lock sync.RWMutex + // data holds the actual data + data map[K]V +} + +// NewSyncMap returns a new, empty SyncMap. +func NewSyncMap[K comparable, V IDer]() *SyncMap[K, V] { + return &SyncMap[K, V]{ + data: make(map[K]V), + } +} + +// Get returns the value for the given key. +func (m *SyncMap[K, V]) Get(k K) V { + m.lock.RLock() + defer m.lock.RUnlock() + return m.data[k] +} + +// Pop deletes and returns the value for the given key, if it exists. +func (m *SyncMap[K, V]) Pop(k K) V { + m.lock.Lock() + defer m.lock.Unlock() + v, ok := m.data[k] + if ok { + delete(m.data, k) + } + return v +} + +// PopIfEqual deletes and returns the value for the given key, if it exists +// and only if the ID is equal to the provided string. +func (m *SyncMap[K, V]) PopIfEqual(k K, id string) V { + m.lock.Lock() + defer m.lock.Unlock() + v, ok := m.data[k] + if ok && v.ID() == id { + delete(m.data, k) + return v + } + var zero V + return zero +} + +// Put adds the given key-value pair to the map and returns the previous value, if any. +func (m *SyncMap[K, V]) Put(k K, v V) V { + m.lock.Lock() + defer m.lock.Unlock() + oldV := m.data[k] + m.data[k] = v + return oldV +} + +// PutIfEmpty adds the given key-value pair to the map only if there is no value already in it, +// and returns the new value and true if so. +// If there is already a value, it returns the existing value and false. +func (m *SyncMap[K, V]) PutIfEmpty(k K, v V) (V, bool) { + m.lock.Lock() + defer m.lock.Unlock() + oldV, ok := m.data[k] + if ok { + return oldV, false + } + m.data[k] = v + return v, true +} + +// Clear deletes all entries from the map, and returns the previous map. +func (m *SyncMap[K, V]) Clear() map[K]V { + m.lock.Lock() + defer m.lock.Unlock() + old := m.data + m.data = make(map[K]V) + return old +} + +// Values returns a copy of all values in the map. +func (m *SyncMap[K, V]) Values() []V { + m.lock.RLock() + defer m.lock.RUnlock() + + values := make([]V, 0, len(m.data)) + for _, v := range m.data { + values = append(values, v) + } + return values +} + +// IDer is used to extract an ID that SyncMap uses for equality checking. +type IDer interface { + ID() string +} diff --git a/helper/syncmap/syncmap_test.go b/helper/syncmap/syncmap_test.go new file mode 100644 index 000000000000..c66a131df4c1 --- /dev/null +++ b/helper/syncmap/syncmap_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package syncmap + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +type stringID struct { + val string + id string +} + +func (s stringID) ID() string { + return s.id +} + +var _ IDer = stringID{"", ""} + +// TestSyncMap_Get tests that basic getting and putting works.
+func TestSyncMap_Get(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, stringID{"b", "b"}, m.Get("a")) + assert.Equal(t, stringID{"", ""}, m.Get("c")) +} + +// TestSyncMap_Pop tests that basic Pop operations work. +func TestSyncMap_Pop(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, stringID{"b", "b"}, m.Pop("a")) + assert.Equal(t, stringID{"", ""}, m.Pop("a")) + assert.Equal(t, stringID{"", ""}, m.Pop("c")) +} + +// TestSyncMap_PopIfEqual tests that basic PopIfEqual operations pop only if the IDs are equal. +func TestSyncMap_PopIfEqual(t *testing.T) { + m := NewSyncMap[string, stringID]() + m.Put("a", stringID{"b", "c"}) + assert.Equal(t, stringID{"", ""}, m.PopIfEqual("a", "b")) + assert.Equal(t, stringID{"b", "c"}, m.PopIfEqual("a", "c")) + assert.Equal(t, stringID{"", ""}, m.PopIfEqual("a", "c")) +} + +// TestSyncMap_Clear checks that clearing works as expected and returns a copy of the original map. +func TestSyncMap_Clear(t *testing.T) { + m := NewSyncMap[string, stringID]() + assert.Equal(t, map[string]stringID{}, m.data) + oldMap := m.Clear() + assert.Equal(t, map[string]stringID{}, m.data) + assert.Equal(t, map[string]stringID{}, oldMap) + + m.Put("a", stringID{"b", "b"}) + m.Put("c", stringID{"d", "d"}) + oldMap = m.Clear() + + assert.Equal(t, map[string]stringID{"a": {"b", "b"}, "c": {"d", "d"}}, oldMap) + assert.Equal(t, map[string]stringID{}, m.data) +} + +// TestSyncMap_Values checks that the Values method returns an array of the values. +func TestSyncMap_Values(t *testing.T) { + m := NewSyncMap[string, stringID]() + assert.Equal(t, []stringID{}, m.Values()) + m.Put("a", stringID{"b", "b"}) + assert.Equal(t, []stringID{{"b", "b"}}, m.Values()) + m.Put("c", stringID{"d", "d"}) + values := m.Values() + sort.Slice(values, func(i, j int) bool { + return values[i].val < values[j].val + }) + assert.Equal(t, []stringID{{"b", "b"}, {"d", "d"}}, values) +} diff --git a/helper/testhelpers/azurite/azurite.go b/helper/testhelpers/azurite/azurite.go index 13d65750d4ce..a9b291744a7d 100644 --- a/helper/testhelpers/azurite/azurite.go +++ b/helper/testhelpers/azurite/azurite.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package azurite import ( @@ -7,7 +10,7 @@ import ( "testing" "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { diff --git a/helper/testhelpers/cassandra/cassandrahelper.go b/helper/testhelpers/cassandra/cassandrahelper.go index 899136f16c9a..b774a1690e70 100644 --- a/helper/testhelpers/cassandra/cassandrahelper.go +++ b/helper/testhelpers/cassandra/cassandrahelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( @@ -6,11 +9,13 @@ import ( "net" "os" "path/filepath" + "runtime" + "strings" "testing" "time" "github.com/gocql/gocql" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type containerConfig struct { @@ -77,6 +82,12 @@ func (h Host) ConnectionURL() string { func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) { t.Helper() + + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv("CASSANDRA_HOSTS") != "" { host, port, err := net.SplitHostPort(os.Getenv("CASSANDRA_HOSTS")) if err != nil { diff --git a/helper/testhelpers/certhelpers/cert_helpers.go b/helper/testhelpers/certhelpers/cert_helpers.go index b84bbf961e5a..d9c89735c618 100644 --- a/helper/testhelpers/certhelpers/cert_helpers.go +++ b/helper/testhelpers/certhelpers/cert_helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package certhelpers import ( diff --git a/helper/testhelpers/consul/cluster_storage.go b/helper/testhelpers/consul/cluster_storage.go new file mode 100644 index 000000000000..9ca1080c6f9c --- /dev/null +++ b/helper/testhelpers/consul/cluster_storage.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package consul + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/testcluster" +) + +type ClusterStorage struct { + // Set these after calling `NewConsulClusterStorage` but before `Start` (or + // passing in to NewDockerCluster) to control Consul version specifically in + // your test. Leave empty for latest OSS (defined in consulhelper.go). + ConsulVersion string + ConsulEnterprise bool + + cleanup func() + config *Config +} + +var _ testcluster.ClusterStorage = &ClusterStorage{} + +func NewClusterStorage() *ClusterStorage { + return &ClusterStorage{} +} + +func (s *ClusterStorage) Start(ctx context.Context, opts *testcluster.ClusterOptions) error { + prefix := "" + if opts != nil && opts.ClusterName != "" { + prefix = fmt.Sprintf("%s-", opts.ClusterName) + } + cleanup, config, err := RunContainer(ctx, prefix, s.ConsulVersion, s.ConsulEnterprise, true) + if err != nil { + return err + } + s.cleanup = cleanup + s.config = config + + return nil +} + +func (s *ClusterStorage) Cleanup() error { + if s.cleanup != nil { + s.cleanup() + s.cleanup = nil + } + return nil +} + +func (s *ClusterStorage) Opts() map[string]interface{} { + if s.config == nil { + return nil + } + return map[string]interface{}{ + "address": s.config.ContainerHTTPAddr, + "token": s.config.Token, + "max_parallel": "32", + } +} + +func (s *ClusterStorage) Type() string { + return "consul" +} + +func (s *ClusterStorage) Config() *Config { + return s.config +} diff --git a/helper/testhelpers/consul/consulhelper.go b/helper/testhelpers/consul/consulhelper.go index 7d7984b17495..d6ab5d72b5c1 100644 --- a/helper/testhelpers/consul/consulhelper.go +++ b/helper/testhelpers/consul/consulhelper.go @@ -1,19 +1,30 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package consul import ( "context" + "fmt" "os" "strings" "testing" consulapi "github.com/hashicorp/consul/api" goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) +// LatestConsulVersion is the most recent version of Consul which is used unless +// another version is specified in the test config or environment. This will +// probably go stale as we don't always update it on every release, but we rarely +// rely on specific new Consul functionality, so that's probably not a problem. +const LatestConsulVersion = "1.15.3" + type Config struct { docker.ServiceHostPort - Token string + Token string + ContainerHTTPAddr string } func (c *Config) APIConfig() *consulapi.Config { @@ -23,19 +34,39 @@ func (c *Config) APIConfig() *consulapi.Config { return apiConfig } -// PrepareTestContainer creates a Consul docker container. If version is empty, -// the Consul version used will be given by the environment variable -// CONSUL_DOCKER_VERSION, or if that's empty, whatever we've hardcoded as the -// the latest Consul version. +// PrepareTestContainer is a test helper that creates a Consul docker container +// or fails the test if unsuccessful. See RunContainer for more details on the +// configuration. func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config) { t.Helper() + cleanup, config, err := RunContainer(context.Background(), "", version, isEnterprise, doBootstrapSetup) + if err != nil { + t.Fatalf("failed starting consul: %s", err) + } + return cleanup, config +} + +// RunContainer runs Consul in a Docker container unless CONSUL_HTTP_ADDR is +// already found in the environment. The Consul version is determined by the version +// argument. If version is the empty string, the CONSUL_DOCKER_VERSION environment +// variable is used, and if that is empty too, LatestConsulVersion is used +// (defined above). If namePrefix is provided, we assume you have chosen a unique +// enough prefix to avoid collision with other tests that may be running in +// parallel, and so we _do not_ add an additional unique ID suffix. We will also +// ensure previous instances are deleted and leave the container running for +// debugging. This is useful for using Consul as part of a testcluster (i.e. +// when Vault is in Docker too). If namePrefix is empty then a unique suffix is +// added, since many older tests rely on a unique instance of the container. This +// is used by `PrepareTestContainer`, which is typically used in tests that rely +// on Consul but run the tested code within the test process.
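// [Editorial sketch, not part of the patch] A plausible direct call of the new
// RunContainer entry point from test-cluster setup code; the prefix and flag
// values below are illustrative only:
//
//	cleanup, cfg, err := RunContainer(ctx, "mycluster-", "", false, true)
//	if err != nil {
//		return err
//	}
//	defer cleanup()
//	apiCfg := cfg.APIConfig() // ready-to-use consulapi client config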
+func RunContainer(ctx context.Context, namePrefix, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config, error) { if retAddress := os.Getenv("CONSUL_HTTP_ADDR"); retAddress != "" { shp, err := docker.NewServiceHostPortParse(retAddress) if err != nil { - t.Fatal(err) + return nil, nil, err } - return func() {}, &Config{ServiceHostPort: *shp, Token: os.Getenv("CONSUL_HTTP_TOKEN")} + return func() {}, &Config{ServiceHostPort: *shp, Token: os.Getenv("CONSUL_HTTP_TOKEN")}, nil } config := `acl { enabled = true default_policy = "deny" }` @@ -44,7 +75,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo if consulVersion != "" { version = consulVersion } else { - version = "1.11.3" // Latest Consul version, update as new releases come out + version = LatestConsulVersion } } if strings.HasPrefix(version, "1.3") { @@ -52,7 +83,7 @@ } name := "consul" - repo := "consul" + repo := "docker.mirror.hashicorp.services/library/consul" var envVars []string // If running the enterprise container, set the appropriate values below. if isEnterprise { @@ -63,15 +94,18 @@ envVars = append(envVars, "CONSUL_LICENSE="+license) if !hasLicense { - t.Fatalf("Failed to find enterprise license") + return nil, nil, fmt.Errorf("Failed to find enterprise license") } } + if namePrefix != "" { + name = namePrefix + name + } if dockerRepo, hasEnvRepo := os.LookupEnv("CONSUL_DOCKER_REPO"); hasEnvRepo { repo = dockerRepo } - runner, err := docker.NewServiceRunner(docker.RunOptions{ + dockerOpts := docker.RunOptions{ ContainerName: name, ImageRepo: repo, ImageTag: version, @@ -80,12 +114,25 @@ Ports: []string{"8500/tcp"}, AuthUsername: os.Getenv("CONSUL_DOCKER_USERNAME"), AuthPassword: os.Getenv("CONSUL_DOCKER_PASSWORD"), - }) + } + + // Add a unique suffix if there is no per-test prefix provided + addSuffix := true + if namePrefix != "" { + // Don't add a suffix if the caller already provided a prefix + addSuffix = false + // Also enable predelete and non-removal to make debugging easier for test + // cases with named containers.
+ dockerOpts.PreDelete = true + dockerOpts.DoNotAutoRemove = true + } + + runner, err := docker.NewServiceRunner(dockerOpts) if err != nil { - t.Fatalf("Could not start docker Consul: %s", err) + return nil, nil, fmt.Errorf("Could not start docker Consul: %s", err) } - svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + svc, _, err := runner.StartNewService(ctx, addSuffix, false, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { shp := docker.NewServiceHostPort(host, port) apiConfig := consulapi.DefaultNonPooledConfig() apiConfig.Address = shp.Address() @@ -162,7 +209,7 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo } } - // Configure a namespace and parition if testing enterprise Consul + // Configure a namespace and partition if testing enterprise Consul if isEnterprise { // Namespaces require Consul 1.7 or newer namespaceVersion, _ := goversion.NewVersion("1.7") @@ -226,8 +273,20 @@ func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBoo }, nil }) if err != nil { - t.Fatalf("Could not start docker Consul: %s", err) + return nil, nil, err } - return svc.Cleanup, svc.Config.(*Config) + // Find the container network info. + if len(svc.Container.NetworkSettings.Networks) < 1 { + svc.Cleanup() + return nil, nil, fmt.Errorf("failed to find any network settings for container") + } + cfg := svc.Config.(*Config) + for _, eps := range svc.Container.NetworkSettings.Networks { + // Just pick the first network, we assume only one for now. + // Pull out the real container IP and set that up + cfg.ContainerHTTPAddr = fmt.Sprintf("http://%s:8500", eps.IPAddress) + break + } + return svc.Cleanup, cfg, nil } diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go new file mode 100644 index 000000000000..e3071653b5b1 --- /dev/null +++ b/helper/testhelpers/corehelpers/corehelpers.go @@ -0,0 +1,631 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +// Package corehelpers contains testhelpers that don't depend on package vault, +// and thus can be used within vault (as well as elsewhere.) +package corehelpers + +import ( + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/plugins/database/mysql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/go-testing-interface" +) + +var ( + _ audit.Backend = (*NoopAudit)(nil) + _ eventlogger.Node = (*noopWrapper)(nil) +) + +var externalPlugins = []string{"transform", "kmip", "keymgmt"} + +// RetryUntil runs f until it returns a nil result or the timeout is reached. +// If a nil result hasn't been obtained by timeout, calls t.Fatal. 
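// [Editorial sketch, not part of the patch] RetryUntil is aimed at
// eventually-consistent assertions. A hypothetical use, polling until a mount
// becomes visible through an api.Client (the client is assumed, not part of
// this package):
//
//	RetryUntil(t, 10*time.Second, func() error {
//		mounts, err := client.Sys().ListMounts()
//		if err != nil {
//			return err
//		}
//		if _, ok := mounts["secret/"]; !ok {
//			return fmt.Errorf("mount not visible yet")
//		}
//		return nil
//	})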
+func RetryUntil(t testing.T, timeout time.Duration, f func() error) { + t.Helper() + deadline := time.Now().Add(timeout) + var err error + for time.Now().Before(deadline) { + if err = f(); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("did not complete before deadline, err: %v", err) +} + +// MakeTestPluginDir creates a temporary directory suitable for holding plugins. +// This helper also resolves symlinks to make tests happy on OS X. +func MakeTestPluginDir(t testing.T) string { + t.Helper() + + dir, err := os.MkdirTemp("", "") + if err != nil { + t.Fatal(err) + } + + // OSX tempdirs live under /var, which is actually symlinked to /private/var + dir, err = filepath.EvalSymlinks(dir) + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + t.Fatal(err) + } + }) + + return dir +} + +func NewMockBuiltinRegistry() *mockBuiltinRegistry { + return &mockBuiltinRegistry{ + forTesting: map[string]mockBackend{ + "mysql-database-plugin": {PluginType: consts.PluginTypeDatabase}, + "postgresql-database-plugin": {PluginType: consts.PluginTypeDatabase}, + "approle": {PluginType: consts.PluginTypeCredential}, + "pending-removal-test-plugin": { + PluginType: consts.PluginTypeCredential, + DeprecationStatus: consts.PendingRemoval, + }, + "aws": {PluginType: consts.PluginTypeCredential}, + "consul": {PluginType: consts.PluginTypeSecrets}, + }, + } +} + +type mockBackend struct { + consts.PluginType + consts.DeprecationStatus +} + +type mockBuiltinRegistry struct { + forTesting map[string]mockBackend +} + +func toFunc(f logical.Factory) func() (interface{}, error) { + return func() (interface{}, error) { + return f, nil + } +} + +func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) { + testBackend, ok := m.forTesting[name] + if !ok { + return nil, false + } + testPluginType := testBackend.PluginType + if pluginType != testPluginType { + return nil, false + } + + switch name { + case "approle", "pending-removal-test-plugin": + return toFunc(approle.Factory), true + case "aws": + return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := new(framework.Backend) + b.Setup(ctx, config) + b.BackendType = logical.TypeCredential + return b, nil + }), true + case "postgresql-database-plugin": + return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := new(framework.Backend) + b.Setup(ctx, config) + b.BackendType = logical.TypeLogical + return b, nil + }), true + case "mysql-database-plugin": + return mysql.New(mysql.DefaultUserNameTemplate), true + case "consul": + return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := new(framework.Backend) + b.Setup(ctx, config) + b.BackendType = logical.TypeLogical + return b, nil + }), true + default: + return nil, false + } +} + +// Keys only supports getting a realistic list of the keys for database plugins +// and approle. +func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string { + switch pluginType { + case consts.PluginTypeDatabase: + // This is a hard-coded reproduction of the db plugin keys in + // helper/builtinplugins/registry.go. The registry isn't directly used + // because it causes import cycles.
+ return []string{ + "mysql-database-plugin", + "mysql-aurora-database-plugin", + "mysql-rds-database-plugin", + "mysql-legacy-database-plugin", + + "cassandra-database-plugin", + "couchbase-database-plugin", + "elasticsearch-database-plugin", + "hana-database-plugin", + "influxdb-database-plugin", + "mongodb-database-plugin", + "mongodbatlas-database-plugin", + "mssql-database-plugin", + "postgresql-database-plugin", + "redis-elasticache-database-plugin", + "redshift-database-plugin", + "redis-database-plugin", + "snowflake-database-plugin", + } + case consts.PluginTypeCredential: + return []string{ + "pending-removal-test-plugin", + "approle", + } + + case consts.PluginTypeSecrets: + return append(externalPlugins, "kv") + } + + return []string{} +} + +func (r *mockBuiltinRegistry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + for _, i := range externalPlugins { + if i == name { + return true + } + } + return false +} + +func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool { + for _, key := range m.Keys(pluginType) { + if key == name { + return true + } + } + return false +} + +func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) { + if m.Contains(name, pluginType) { + return m.forTesting[name].DeprecationStatus, true + } + + return consts.Unknown, false +} + +func TestNoopAudit(t testing.T, path string, config map[string]string) *NoopAudit { + cfg := &audit.BackendConfig{ + Config: config, + MountPath: path, + Logger: NewTestLogger(t), + } + n, err := NewNoopAudit(cfg) + if err != nil { + t.Fatal(err) + } + return n +} + +// NoopHeaderFormatter can be used within no-op audit devices to do nothing when +// it comes to only allow configured headers to appear in the result. +// Whatever is passed in will be returned (nil becomes an empty map) in lowercase. +type NoopHeaderFormatter struct{} + +// ApplyConfig implements the relevant interface to make NoopHeaderFormatter an audit.HeaderFormatter. +func (f *NoopHeaderFormatter) ApplyConfig(_ context.Context, headers map[string][]string, _ audit.Salter) (result map[string][]string, retErr error) { + if len(headers) < 1 { + return map[string][]string{}, nil + } + + // Make a copy of the incoming headers with everything lower so we can + // case-insensitively compare + lowerHeaders := make(map[string][]string, len(headers)) + for k, v := range headers { + lowerHeaders[strings.ToLower(k)] = v + } + + return lowerHeaders, nil +} + +// NewNoopAudit should be used to create a NoopAudit as it handles creation of a +// predictable salt and wraps eventlogger nodes so information can be retrieved on +// what they've seen or formatted. +func NewNoopAudit(config *audit.BackendConfig) (*NoopAudit, error) { + view := &logical.InmemStorage{} + + // Create the salt with a known key for predictable hmac values. + se := &logical.StorageEntry{Key: "salt", Value: []byte("foo")} + err := view.Put(context.Background(), se) + if err != nil { + return nil, err + } + + // Override the salt related config settings. 
+ backendConfig := &audit.BackendConfig{ + SaltView: view, + SaltConfig: &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }, + Config: config.Config, + MountPath: config.MountPath, + } + + noopBackend := &NoopAudit{ + Config: backendConfig, + nodeIDList: make([]eventlogger.NodeID, 2), + nodeMap: make(map[eventlogger.NodeID]eventlogger.Node, 2), + } + + cfg, err := audit.NewFormatterConfig(&NoopHeaderFormatter{}) + if err != nil { + return nil, err + } + + formatterNodeID, err := event.GenerateNodeID() + if err != nil { + return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err) + } + + formatterNode, err := audit.NewEntryFormatter(config.MountPath, cfg, noopBackend, config.Logger) + if err != nil { + return nil, fmt.Errorf("error creating formatter: %w", err) + } + + // Wrap the formatting node, so we can get any bytes that were formatted etc. + wrappedFormatter := &noopWrapper{format: "json", node: formatterNode, backend: noopBackend} + + noopBackend.nodeIDList[0] = formatterNodeID + noopBackend.nodeMap[formatterNodeID] = wrappedFormatter + + sinkNode := event.NewNoopSink() + sinkNodeID, err := event.GenerateNodeID() + if err != nil { + return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err) + } + + noopBackend.nodeIDList[1] = sinkNodeID + noopBackend.nodeMap[sinkNodeID] = sinkNode + + return noopBackend, nil +} + +// NoopAuditFactory should be used when the test needs a way to access bytes that +// have been formatted by the pipeline during audit requests. +// The records parameter will be repointed to the one used within the pipeline. +func NoopAuditFactory(records **[][]byte) audit.Factory { + return func(_ context.Context, config *audit.BackendConfig, _ audit.HeaderFormatter) (audit.Backend, error) { + n, err := NewNoopAudit(config) + if err != nil { + return nil, err + } + if records != nil { + *records = &n.records + } + + return n, nil + } +} + +// noopWrapper is designed to wrap a formatter node in order to allow access to +// bytes formatted, headers formatted and parts of the logical.LogInput. +// Some older tests relied on being able to query this information so while those +// tests stick around we should look after them. +type noopWrapper struct { + format string + node eventlogger.Node + backend *NoopAudit +} + +// NoopAuditEventListener is a callback used by noopWrapper.Process() to notify +// of each received audit event. +type NoopAuditEventListener func(*audit.AuditEvent) + +type NoopAudit struct { + Config *audit.BackendConfig + + ReqErr error + ReqAuth []*logical.Auth + Req []*logical.Request + ReqHeaders []map[string][]string + ReqNonHMACKeys []string + ReqErrs []error + + RespErr error + RespAuth []*logical.Auth + RespReq []*logical.Request + Resp []*logical.Response + RespNonHMACKeys [][]string + RespReqNonHMACKeys [][]string + RespErrs []error + records [][]byte + l sync.RWMutex + salt *salt.Salt + saltMutex sync.RWMutex + + nodeIDList []eventlogger.NodeID + nodeMap map[eventlogger.NodeID]eventlogger.Node + + listener NoopAuditEventListener +} + +// Process handles the contortions required by older test code to ensure behavior. +// It will attempt to do some pre/post processing of the logical.LogInput that should +// form part of the event's payload data, as well as capturing the resulting headers +// that were formatted and track the overall bytes that a formatted event uses when +// it's ready to head down the pipeline to the sink node (a noop for us). 
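// [Editorial sketch, not part of the patch] For orientation, the smallest
// useful eventlogger.Node wrapper follows the same delegation shape as the
// Process implementation below; countingNode is hypothetical:
//
//	type countingNode struct {
//		inner eventlogger.Node
//		seen  int
//	}
//
//	func (c *countingNode) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
//		c.seen++                       // observe the event...
//		return c.inner.Process(ctx, e) // ...then delegate to the wrapped node
//	}
//
//	func (c *countingNode) Reopen() error              { return c.inner.Reopen() }
//	func (c *countingNode) Type() eventlogger.NodeType { return c.inner.Type() }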
+func (n *noopWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + n.backend.l.Lock() + defer n.backend.l.Unlock() + + var err error + + // We're expecting audit events since this is an audit device. + a, ok := e.Payload.(*audit.AuditEvent) + if !ok { + return nil, errors.New("cannot parse payload as an audit event") + } + + if n.backend.listener != nil { + n.backend.listener(a) + } + + in := a.Data + + // Depending on the type of the audit event (request or response) we need to + // track different things. + switch a.Subtype { + case audit.RequestType: + n.backend.ReqAuth = append(n.backend.ReqAuth, in.Auth) + n.backend.Req = append(n.backend.Req, in.Request) + n.backend.ReqNonHMACKeys = in.NonHMACReqDataKeys + n.backend.ReqErrs = append(n.backend.ReqErrs, in.OuterErr) + + if n.backend.ReqErr != nil { + return nil, n.backend.ReqErr + } + case audit.ResponseType: + n.backend.RespAuth = append(n.backend.RespAuth, in.Auth) + n.backend.RespReq = append(n.backend.RespReq, in.Request) + n.backend.Resp = append(n.backend.Resp, in.Response) + n.backend.RespErrs = append(n.backend.RespErrs, in.OuterErr) + + if in.Response != nil { + n.backend.RespNonHMACKeys = append(n.backend.RespNonHMACKeys, in.NonHMACRespDataKeys) + n.backend.RespReqNonHMACKeys = append(n.backend.RespReqNonHMACKeys, in.NonHMACReqDataKeys) + } + + if n.backend.RespErr != nil { + return nil, n.backend.RespErr + } + default: + return nil, fmt.Errorf("unknown audit event type: %q", a.Subtype) + } + + // Once we've taken note of the relevant properties of the event, we get the + // underlying (wrapped) node to process it as normal. + e, err = n.node.Process(ctx, e) + if err != nil { + return nil, fmt.Errorf("error processing wrapped node: %w", err) + } + + // Once processing has been carried out, the underlying node (a formatter node) + // should contain the output ready for the sink node. We'll get that in order + // to track how many bytes we formatted. + b, ok := e.Format(n.format) + if ok { + n.backend.records = append(n.backend.records, b) + } + + // Finally, the last bit of post-processing is to make sure that we track the + // formatted headers that would have made it to the logs via the sink node. + // They only appear in requests. + if a.Subtype == audit.RequestType { + reqEntry := &audit.RequestEntry{} + err = json.Unmarshal(b, &reqEntry) + if err != nil { + return nil, fmt.Errorf("unable to parse formatted audit entry data: %w", err) + } + + n.backend.ReqHeaders = append(n.backend.ReqHeaders, reqEntry.Request.Headers) + } + + // Return the event and no error in order to let the pipeline continue on. + return e, nil +} + +func (n *noopWrapper) Reopen() error { + return n.node.Reopen() +} + +func (n *noopWrapper) Type() eventlogger.NodeType { + return n.node.Type() +} + +// LogTestMessage will manually crank the handle on the nodes associated with this backend. 
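// [Editorial sketch, not part of the patch] Exercising the no-op device
// without a broker by pushing a request entry through its nodes manually; the
// path and operation are illustrative:
//
//	in := &logical.LogInput{
//		Request: &logical.Request{
//			Operation: logical.ReadOperation,
//			Path:      "secret/foo",
//		},
//	}
//	err := n.LogTestMessage(ctx, in)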
+func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput) error { + if len(n.nodeIDList) > 0 { + return audit.ProcessManual(ctx, in, n.nodeIDList, n.nodeMap) + } + + return nil +} + +func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + s, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = s + return s, nil +} + +func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { + s, err := n.Salt(ctx) + if err != nil { + return "", err + } + return s.GetIdentifiedHMAC(data), nil +} + +func (n *NoopAudit) Reload(_ context.Context) error { + return nil +} + +func (n *NoopAudit) Invalidate(_ context.Context) { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + +// RegisterNodesAndPipeline registers the nodes and a pipeline as required by +// the audit.Backend interface. +func (n *NoopAudit) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error { + for id, node := range n.nodeMap { + if err := broker.RegisterNode(id, node); err != nil { + return err + } + } + + pipeline := eventlogger.Pipeline{ + PipelineID: eventlogger.PipelineID(name), + EventType: event.AuditType.AsEventType(), + NodeIDs: n.nodeIDList, + } + + return broker.RegisterPipeline(pipeline) +} + +func (n *NoopAudit) SetListener(listener NoopAuditEventListener) { + n.listener = listener +} + +type TestLogger struct { + hclog.InterceptLogger + Path string + File *os.File + sink hclog.SinkAdapter +} + +func NewTestLogger(t testing.T) *TestLogger { + return NewTestLoggerWithSuffix(t, "") +} + +func NewTestLoggerWithSuffix(t testing.T, logFileSuffix string) *TestLogger { + var logFile *os.File + var logPath string + output := os.Stderr + + logDir := os.Getenv("VAULT_TEST_LOG_DIR") + if logDir != "" { + if logFileSuffix != "" && !strings.HasPrefix(logFileSuffix, "_") { + logFileSuffix = "_" + logFileSuffix + } + logPath = filepath.Join(logDir, t.Name()+logFileSuffix+".log") + // t.Name may include slashes. + dir, _ := filepath.Split(logPath) + err := os.MkdirAll(dir, 0o755) + if err != nil { + t.Fatal(err) + } + logFile, err = os.Create(logPath) + if err != nil { + t.Fatal(err) + } + output = logFile + } + + // We send nothing on the regular logger, that way we can later deregister + // the sink to stop logging during cluster cleanup. 
+ logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Output: io.Discard, + IndependentLevels: true, + Name: t.Name(), + }) + sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{ + Output: output, + Level: hclog.Trace, + IndependentLevels: true, + }) + logger.RegisterSink(sink) + + testLogger := &TestLogger{ + Path: logPath, + File: logFile, + InterceptLogger: logger, + sink: sink, + } + + t.Cleanup(func() { + testLogger.StopLogging() + if t.Failed() { + _ = testLogger.File.Close() + } else { + _ = os.Remove(testLogger.Path) + } + }) + return testLogger +} + +func (tl *TestLogger) StopLogging() { + tl.InterceptLogger.DeregisterSink(tl.sink) +} + +func (n *NoopAudit) EventType() eventlogger.EventType { + return event.AuditType.AsEventType() +} + +func (n *NoopAudit) HasFiltering() bool { + return false +} + +func (n *NoopAudit) Name() string { + return n.Config.MountPath +} + +func (n *NoopAudit) Nodes() map[eventlogger.NodeID]eventlogger.Node { + return n.nodeMap +} + +func (n *NoopAudit) NodeIDs() []eventlogger.NodeID { + return n.nodeIDList +} + +func (n *NoopAudit) IsFallback() bool { + return false +} diff --git a/helper/testhelpers/docker/testhelpers.go b/helper/testhelpers/docker/testhelpers.go deleted file mode 100644 index 51b35cb16319..000000000000 --- a/helper/testhelpers/docker/testhelpers.go +++ /dev/null @@ -1,715 +0,0 @@ -package docker - -import ( - "archive/tar" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/cenkalti/backoff/v3" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/go-connections/nat" - "github.com/hashicorp/go-uuid" -) - -type Runner struct { - DockerAPI *client.Client - RunOptions RunOptions -} - -type RunOptions struct { - ImageRepo string - ImageTag string - ContainerName string - Cmd []string - Entrypoint []string - Env []string - NetworkID string - CopyFromTo map[string]string - Ports []string - DoNotAutoRemove bool - AuthUsername string - AuthPassword string - LogConsumer func(string) -} - -func NewServiceRunner(opts RunOptions) (*Runner, error) { - dapi, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.39")) - if err != nil { - return nil, err - } - - if opts.NetworkID == "" { - opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") - } - if opts.ContainerName == "" { - if strings.Contains(opts.ImageRepo, "/") { - return nil, fmt.Errorf("ContainerName is required for non-library images") - } - // If there's no slash in the repo it's almost certainly going to be - // a good container name. 
- opts.ContainerName = opts.ImageRepo - } - return &Runner{ - DockerAPI: dapi, - RunOptions: opts, - }, nil -} - -type ServiceConfig interface { - Address() string - URL() *url.URL -} - -func NewServiceHostPort(host string, port int) *ServiceHostPort { - return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)} -} - -func NewServiceHostPortParse(s string) (*ServiceHostPort, error) { - pieces := strings.Split(s, ":") - if len(pieces) != 2 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - port, err := strconv.Atoi(pieces[1]) - if err != nil || port < 1 { - return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) - } - - return &ServiceHostPort{s}, nil -} - -type ServiceHostPort struct { - address string -} - -func (s ServiceHostPort) Address() string { - return s.address -} - -func (s ServiceHostPort) URL() *url.URL { - return &url.URL{Host: s.address} -} - -func NewServiceURLParse(s string) (*ServiceURL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - return &ServiceURL{u: *u}, nil -} - -func NewServiceURL(u url.URL) *ServiceURL { - return &ServiceURL{u: u} -} - -type ServiceURL struct { - u url.URL -} - -func (s ServiceURL) Address() string { - return s.u.Host -} - -func (s ServiceURL) URL() *url.URL { - return &s.u -} - -// ServiceAdapter verifies connectivity to the service, then returns either the -// connection string (typically a URL) and nil, or empty string and an error. -type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error) - -// StartService will start the runner's configured docker container with a -// random UUID suffix appended to the name to make it unique and will return -// either a hostname or local address depending on if a Docker network was given. -// -// Most tests can default to using this. -func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) { - serv, _, err := d.StartNewService(ctx, true, false, connect) - - return serv, err -} - -// StartNewService will start the runner's configured docker container but with the -// ability to control adding a name suffix or forcing a local address to be returned. -// 'addSuffix' will add a random UUID to the end of the container name. -// 'forceLocalAddr' will force the container address returned to be in the -// form of '127.0.0.1:1234' where 1234 is the mapped container port. 
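// [Editorial sketch, not part of the patch] The classic call pattern for this
// helper; the same API now lives in sdk/helper/docker, so existing callers
// change only their import path. Image and port values are illustrative.
func sketchStartService(ctx context.Context) error {
	runner, err := NewServiceRunner(RunOptions{
		ImageRepo: "postgres",
		ImageTag:  "13",
		Ports:     []string{"5432/tcp"},
	})
	if err != nil {
		return err
	}
	svc, err := runner.StartService(ctx, func(ctx context.Context, host string, port int) (ServiceConfig, error) {
		// Probe the service here; returning an error makes the runner retry.
		return NewServiceHostPort(host, port), nil
	})
	if err != nil {
		return err
	}
	defer svc.Cleanup()
	return nil
}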
-func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) { - container, hostIPs, containerID, err := d.Start(context.Background(), addSuffix, forceLocalAddr) - if err != nil { - return nil, "", err - } - - cleanup := func() { - if d.RunOptions.LogConsumer != nil { - rc, err := d.DockerAPI.ContainerLogs(ctx, container.ID, types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Timestamps: true, - Details: true, - }) - if err == nil { - b, err := ioutil.ReadAll(rc) - if err != nil { - d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs, err=%v, read: %s", err, string(b))) - } else { - d.RunOptions.LogConsumer(string(b)) - } - } - } - - for i := 0; i < 10; i++ { - err := d.DockerAPI.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}) - if err == nil { - return - } - time.Sleep(1 * time.Second) - } - } - - bo := backoff.NewExponentialBackOff() - bo.MaxInterval = time.Second * 5 - bo.MaxElapsedTime = 2 * time.Minute - - pieces := strings.Split(hostIPs[0], ":") - portInt, err := strconv.Atoi(pieces[1]) - if err != nil { - return nil, "", err - } - - var config ServiceConfig - err = backoff.Retry(func() error { - c, err := connect(ctx, pieces[0], portInt) - if err != nil { - return err - } - if c == nil { - return fmt.Errorf("service adapter returned nil error and config") - } - config = c - return nil - }, bo) - - if err != nil { - if !d.RunOptions.DoNotAutoRemove { - cleanup() - } - return nil, "", err - } - - return &Service{ - Config: config, - Cleanup: cleanup, - Container: container, - }, containerID, nil -} - -type Service struct { - Config ServiceConfig - Cleanup func() - Container *types.ContainerJSON -} - -func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*types.ContainerJSON, []string, string, error) { - name := d.RunOptions.ContainerName - if addSuffix { - suffix, err := uuid.GenerateUUID() - if err != nil { - return nil, nil, "", err - } - name += "-" + suffix - } - - cfg := &container.Config{ - Hostname: name, - Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), - Env: d.RunOptions.Env, - Cmd: d.RunOptions.Cmd, - } - if len(d.RunOptions.Ports) > 0 { - cfg.ExposedPorts = make(map[nat.Port]struct{}) - for _, p := range d.RunOptions.Ports { - cfg.ExposedPorts[nat.Port(p)] = struct{}{} - } - } - if len(d.RunOptions.Entrypoint) > 0 { - cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) - } - - hostConfig := &container.HostConfig{ - AutoRemove: !d.RunOptions.DoNotAutoRemove, - PublishAllPorts: true, - } - - netConfig := &network.NetworkingConfig{} - if d.RunOptions.NetworkID != "" { - netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - d.RunOptions.NetworkID: {}, - } - } - - // best-effort pull - var opts types.ImageCreateOptions - if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { - var buf bytes.Buffer - auth := map[string]string{ - "username": d.RunOptions.AuthUsername, - "password": d.RunOptions.AuthPassword, - } - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, nil, "", err - } - opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) - if resp != nil { - _, _ = ioutil.ReadAll(resp) - } - - c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) - if err != nil { - return nil, nil, "", fmt.Errorf("container create failed: 
%v", err) - } - - for from, to := range d.RunOptions.CopyFromTo { - if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", err - } - } - - err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", fmt.Errorf("container start failed: %v", err) - } - - inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID) - if err != nil { - _ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{}) - return nil, nil, "", err - } - - var addrs []string - for _, port := range d.RunOptions.Ports { - pieces := strings.Split(port, "/") - if len(pieces) < 2 { - return nil, nil, "", fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) - } - if d.RunOptions.NetworkID != "" && !forceLocalAddr { - addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) - } else { - mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] - if !ok || len(mapped) == 0 { - return nil, nil, "", fmt.Errorf("no port mapping found for %s", port) - } - addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) - } - } - - return &inspect, addrs, c.ID, nil -} - -func (d *Runner) Stop(ctx context.Context, containerID string) error { - if d.RunOptions.NetworkID != "" { - if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { - return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) - } - } - - timeout := 5 * time.Second - if err := d.DockerAPI.ContainerStop(ctx, containerID, &timeout); err != nil { - return fmt.Errorf("error stopping container: %v", err) - } - - return nil -} - -func (d *Runner) Restart(ctx context.Context, containerID string) error { - if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil { - return err - } - - ends := &network.EndpointSettings{ - NetworkID: d.RunOptions.NetworkID, - } - - return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) -} - -func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { - srcInfo, err := archive.CopyInfoSourcePath(from, false) - if err != nil { - return fmt.Errorf("error copying from source %q: %v", from, err) - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return fmt.Errorf("error creating tar from source %q: %v", from, err) - } - defer srcArchive.Close() - - dstInfo := archive.CopyInfo{Path: to} - - dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) - } - defer content.Close() - err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) - if err != nil { - return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) - } - - return nil -} - -type RunCmdOpt interface { - Apply(cfg *types.ExecConfig) error -} - -type RunCmdUser string - -var _ RunCmdOpt = (*RunCmdUser)(nil) - -func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { - cfg.User = string(u) - return nil -} - -func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for 
index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg) - } - - resp, err := d.DockerAPI.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - defer resp.Close() - - var stdoutB bytes.Buffer - var stderrB bytes.Buffer - if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil { - return nil, nil, -1, fmt.Errorf("error reading command output: %v", err) - } - - stdout := stdoutB.Bytes() - stderr := stderrB.Bytes() - - // Fetch return code. - info, err := d.DockerAPI.ContainerExecInspect(ctx, ret.ID) - if err != nil { - return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err) - } - - return stdout, stderr, info.ExitCode, nil -} - -func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { - runCfg := types.ExecConfig{ - AttachStdout: true, - AttachStderr: true, - Cmd: cmd, - } - - for index, opt := range opts { - if err := opt.Apply(&runCfg); err != nil { - return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) - } - } - - ret, err := d.DockerAPI.ContainerExecCreate(ctx, container, runCfg) - if err != nil { - return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg) - } - - err = d.DockerAPI.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{}) - if err != nil { - return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret) - } - - return ret.ID, nil -} - -// Mapping of path->contents -type PathContents interface { - UpdateHeader(header *tar.Header) error - Get() ([]byte, error) -} - -type FileContents struct { - Data []byte - Mode int64 - UID int - GID int -} - -func (b FileContents) UpdateHeader(header *tar.Header) error { - header.Mode = b.Mode - header.Uid = b.UID - header.Gid = b.GID - return nil -} - -func (b FileContents) Get() ([]byte, error) { - return b.Data, nil -} - -func PathContentsFromBytes(data []byte) PathContents { - return FileContents{ - Data: data, - Mode: 0o644, - } -} - -func PathContentsFromString(data string) PathContents { - return PathContentsFromBytes([]byte(data)) -} - -type BuildContext map[string]PathContents - -func NewBuildContext() BuildContext { - return BuildContext{} -} - -func BuildContextFromTarball(reader io.Reader) (BuildContext, error) { - archive := tar.NewReader(reader) - bCtx := NewBuildContext() - - for true { - header, err := archive.Next() - if err != nil { - if err == io.EOF { - break - } - - return nil, fmt.Errorf("failed to parse provided tarball: %v", err) - } - - data := make([]byte, int(header.Size)) - read, err := archive.Read(data) - if err != nil { - return nil, fmt.Errorf("failed to parse read from provided tarball: %v", err) - } - - if read != int(header.Size) { - return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size) - } - - bCtx[header.Name] = FileContents{ - Data: data, - Mode: header.Mode, - UID: header.Uid, - GID: header.Gid, - } - } - - return bCtx, nil -} - -func (bCtx *BuildContext) ToTarball() (io.Reader, error) { - var err error - buffer := 
new(bytes.Buffer) - tarBuilder := tar.NewWriter(buffer) - defer tarBuilder.Close() - - for filepath, contents := range *bCtx { - fileHeader := &tar.Header{Name: filepath} - if contents == nil && !strings.HasSuffix(filepath, "/") { - return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath) - } - - if err := contents.UpdateHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err) - } - - var rawContents []byte - if contents != nil { - rawContents, err = contents.Get() - if err != nil { - return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err) - } - - fileHeader.Size = int64(len(rawContents)) - } - - if err := tarBuilder.WriteHeader(fileHeader); err != nil { - return nil, fmt.Errorf("failed to write tar header entry for %v: %w", filepath, err) - } - - if contents != nil { - if _, err := tarBuilder.Write(rawContents); err != nil { - return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err) - } - } - } - - return bytes.NewReader(buffer.Bytes()), nil -} - -type BuildOpt interface { - Apply(cfg *types.ImageBuildOptions) error -} - -type BuildRemove bool - -var _ BuildOpt = (*BuildRemove)(nil) - -func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.Remove = bool(u) - return nil -} - -type BuildForceRemove bool - -var _ BuildOpt = (*BuildForceRemove)(nil) - -func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error { - cfg.ForceRemove = bool(u) - return nil -} - -type BuildPullParent bool - -var _ BuildOpt = (*BuildPullParent)(nil) - -func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error { - cfg.PullParent = bool(u) - return nil -} - -type BuildArgs map[string]*string - -var _ BuildOpt = (*BuildArgs)(nil) - -func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error { - cfg.BuildArgs = u - return nil -} - -type BuildTags []string - -var _ BuildOpt = (*BuildTags)(nil) - -func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error { - cfg.Tags = u - return nil -} - -const containerfilePath = "_containerfile" - -func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { - var cfg types.ImageBuildOptions - - // Build container context tarball, provisioning containerfile in. - containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) - tar, err := containerContext.ToTarball() - if err != nil { - return nil, fmt.Errorf("failed to create build image context tarball: %w", err) - } - cfg.Dockerfile = "/" + containerfilePath - - // Apply all given options - for index, opt := range opts { - if err := opt.Apply(&cfg); err != nil { - return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) - } - } - - resp, err := d.DockerAPI.ImageBuild(ctx, tar, cfg) - if err != nil { - return nil, fmt.Errorf("failed to build image: %v", err) - } - - output, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("failed to read image build output: %w", err) - } - - return output, nil -} - -func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { - // XXX: currently we use the default options but we might want to allow - // modifying cfg.CopyUIDGID in the future. - var cfg types.CopyToContainerOptions - - // Convert our provided contents to a tarball to ship up. 
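// [Editorial sketch, not part of the patch] How a BuildContext is typically
// assembled before being shipped into a container with CopyTo; the file name,
// contents, and container name are illustrative:
//
//	bCtx := NewBuildContext()
//	bCtx["app.conf"] = PathContentsFromString("listen = :8080\n")
//	if err := d.CopyTo("my-container", "/etc/app", bCtx); err != nil {
//		// handle error
//	}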
- tar, err := contents.ToTarball() - if err != nil { - return fmt.Errorf("failed to build contents into tarball: %v", err) - } - - return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) -} - -func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { - reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) - if err != nil { - return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) - } - - result, err := BuildContextFromTarball(reader) - if err != nil { - return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) - } - - return result, &stat, nil -} - -func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { - response, err := d.DockerAPI.ContainerInspect(context.Background(), container) - if err != nil { - return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) - } - - if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { - return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) - } - - ret := make(map[string]string) - ns := response.NetworkSettings.Networks - for network, data := range ns { - if data == nil { - continue - } - - ret[network] = data.IPAddress - } - - if len(ret) == 0 { - return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) - } - - return ret, nil -} diff --git a/helper/testhelpers/etcd/etcdhelper.go b/helper/testhelpers/etcd/etcdhelper.go index 1051dd1405a4..dc8f796e1564 100644 --- a/helper/testhelpers/etcd/etcdhelper.go +++ b/helper/testhelpers/etcd/etcdhelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package etcd import ( @@ -8,7 +11,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" clientv3 "go.etcd.io/etcd/client/v3" ) diff --git a/helper/testhelpers/fakegcsserver/fake-gcs-server.go b/helper/testhelpers/fakegcsserver/fake-gcs-server.go index ed83970d17af..e3f6c7021e96 100644 --- a/helper/testhelpers/fakegcsserver/fake-gcs-server.go +++ b/helper/testhelpers/fakegcsserver/fake-gcs-server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package fakegcsserver import ( @@ -9,7 +12,7 @@ import ( "testing" "cloud.google.com/go/storage" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "google.golang.org/api/iterator" "google.golang.org/api/option" ) diff --git a/helper/testhelpers/generaterootkind_enumer.go b/helper/testhelpers/generaterootkind_enumer.go new file mode 100644 index 000000000000..496b4eb98e76 --- /dev/null +++ b/helper/testhelpers/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=GenerateRootKind -trimprefix=GenerateRoot"; DO NOT EDIT. 
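// [Editorial sketch, not part of the generated file] The generated helpers
// below give string round-tripping for the GenerateRootKind constants
// ("Regular", "DR", and "GenerateRecovery" after prefix trimming):
//
//	kind, err := GenerateRootKindString("DR")
//	if err != nil {
//		// not a valid name
//	}
//	s := kind.String() // "DR"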
+ +package testhelpers + +import ( + "fmt" +) + +const _GenerateRootKindName = "RegularDRGenerateRecovery" + +var _GenerateRootKindIndex = [...]uint8{0, 7, 9, 25} + +func (i GenerateRootKind) String() string { + if i < 0 || i >= GenerateRootKind(len(_GenerateRootKindIndex)-1) { + return fmt.Sprintf("GenerateRootKind(%d)", i) + } + return _GenerateRootKindName[_GenerateRootKindIndex[i]:_GenerateRootKindIndex[i+1]] +} + +var _GenerateRootKindValues = []GenerateRootKind{0, 1, 2} + +var _GenerateRootKindNameToValueMap = map[string]GenerateRootKind{ + _GenerateRootKindName[0:7]: 0, + _GenerateRootKindName[7:9]: 1, + _GenerateRootKindName[9:25]: 2, +} + +// GenerateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func GenerateRootKindString(s string) (GenerateRootKind, error) { + if val, ok := _GenerateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to GenerateRootKind values", s) +} + +// GenerateRootKindValues returns all values of the enum +func GenerateRootKindValues() []GenerateRootKind { + return _GenerateRootKindValues +} + +// IsAGenerateRootKind returns "true" if the value is listed in the enum definition. "false" otherwise +func (i GenerateRootKind) IsAGenerateRootKind() bool { + for _, v := range _GenerateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go index b248c0294fda..e206d10795cb 100644 --- a/helper/testhelpers/ldap/ldaphelper.go +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -1,16 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package ldap import ( "context" "fmt" + "runtime" + "strings" "testing" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/cap/ldap" + + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/ldaputil" ) func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ldaputil.ConfigEntry) { + // note: this image isn't supported on arm64 architecture in CI. + // but if you're running on Apple Silicon, feel free to comment out the code below locally. + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ // Currently set to "michelvocks" until https://github.com/rroemhild/docker-test-openldap/pull/14 // has been merged. @@ -33,23 +45,21 @@ func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ld cfg.GroupDN = "ou=people,dc=planetexpress,dc=com" cfg.GroupAttr = "cn" cfg.RequestTimeout = 60 + cfg.MaximumPageSize = 1000 svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { connURL := fmt.Sprintf("ldap://%s:%d", host, port) cfg.Url = connURL - logger := hclog.New(nil) - client := ldaputil.Client{ - LDAP: ldaputil.NewLDAP(), - Logger: logger, - } - conn, err := client.DialLDAP(cfg) + client, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg)) if err != nil { return nil, err } - defer conn.Close() - if _, err := client.GetUserBindDN(cfg, conn, "Philip J. Fry"); err != nil { + defer client.Close(ctx) + + _, err = client.Authenticate(ctx, "Philip J. 
Fry", "fry") + if err != nil { return nil, err } diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go index 4740ff6be370..ad8149c1ec18 100644 --- a/helper/testhelpers/logical/testing.go +++ b/helper/testhelpers/logical/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package testing import ( @@ -9,11 +12,11 @@ import ( "sort" "testing" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" @@ -166,7 +169,7 @@ func Test(tt TestT, c TestCase) { config := &vault.CoreConfig{ Physical: phys, DisableMlock: true, - BuiltinRegistry: vault.NewMockBuiltinRegistry(), + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), } if c.LogicalBackend != nil || c.LogicalFactory != nil { diff --git a/helper/testhelpers/logical/testing_test.go b/helper/testhelpers/logical/testing_test.go index 5a4096bfc74b..a73b91ecd05a 100644 --- a/helper/testhelpers/logical/testing_test.go +++ b/helper/testhelpers/logical/testing_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package testing import ( diff --git a/helper/testhelpers/minimal/minimal.go b/helper/testhelpers/minimal/minimal.go new file mode 100644 index 000000000000..55e5617f554f --- /dev/null +++ b/helper/testhelpers/minimal/minimal.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package minimal + +import ( + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" + auditFile "github.com/hashicorp/vault/builtin/audit/file" + auditSocket "github.com/hashicorp/vault/builtin/audit/socket" + auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/go-testing-interface" +) + +// NewTestSoloCluster is a simpler version of NewTestCluster that only creates +// single-node clusters. It is intentionally minimalist; if you need something +// from vault.TestClusterOptions, use NewTestCluster instead. It should work fine +// with a nil config argument. There is no need to call Start or Cleanup or +// TestWaitActive on the resulting cluster. +func NewTestSoloCluster(t testing.T, config *vault.CoreConfig) *vault.TestCluster { + logger := corehelpers.NewTestLogger(t) + + mycfg := &vault.CoreConfig{} + + if config != nil { + // It's rude to modify an input argument as a side-effect + copy, err := copystructure.Copy(config) + if err != nil { + t.Fatal(err) + } + mycfg = copy.(*vault.CoreConfig) + } + if mycfg.Physical == nil { + // Don't use NewTransactionalInmem because that would enable replication, + // which we don't care about in our case (use NewTestCluster for that.)
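// [Editorial sketch, not part of the patch] Intended usage of the constructor
// defined here; cleanup is registered automatically via t.Cleanup:
//
//	cluster := minimal.NewTestSoloCluster(t, nil)
//	client := cluster.Cores[0].Client // drive the Vault API under test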
+ inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + mycfg.Physical = inm + } + if mycfg.CredentialBackends == nil { + mycfg.CredentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + } + if mycfg.LogicalBackends == nil { + mycfg.LogicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } + } + if mycfg.AuditBackends == nil { + mycfg.AuditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + "socket": auditSocket.Factory, + "syslog": auditSyslog.Factory, + } + } + if mycfg.BuiltinRegistry == nil { + mycfg.BuiltinRegistry = builtinplugins.Registry + } + + cluster := vault.NewTestCluster(t, mycfg, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: http.Handler, + Logger: logger, + }) + t.Cleanup(cluster.Cleanup) + return cluster +} diff --git a/helper/testhelpers/minio/miniohelper.go b/helper/testhelpers/minio/miniohelper.go index 2969ce21546a..67d611e40a4c 100644 --- a/helper/testhelpers/minio/miniohelper.go +++ b/helper/testhelpers/minio/miniohelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package minio import ( @@ -11,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { diff --git a/helper/testhelpers/mongodb/mongodbhelper.go b/helper/testhelpers/mongodb/mongodbhelper.go index c4288a4b5513..7ca214fcc081 100644 --- a/helper/testhelpers/mongodb/mongodbhelper.go +++ b/helper/testhelpers/mongodb/mongodbhelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( @@ -7,7 +10,7 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readpref" diff --git a/helper/testhelpers/mssql/mssqlhelper.go b/helper/testhelpers/mssql/mssqlhelper.go index 01bfd54b5387..94b34a4f3d85 100644 --- a/helper/testhelpers/mssql/mssqlhelper.go +++ b/helper/testhelpers/mssql/mssqlhelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mssqlhelper import ( @@ -6,9 +9,11 @@ import ( "fmt" "net/url" "os" + "runtime" + "strings" "testing" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) const mssqlPassword = "yourStrong(!)Password" @@ -19,6 +24,10 @@ const mssqlPassword = "yourStrong(!)Password" const numRetries = 3 func PrepareMSSQLTestContainer(t *testing.T) (cleanup func(), retURL string) { + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + if os.Getenv("MSSQL_URL") != "" { return func() {}, os.Getenv("MSSQL_URL") } diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go index 82b47f07e1e7..93b2cd551e05 100644 --- a/helper/testhelpers/mysql/mysqlhelper.go +++ b/helper/testhelpers/mysql/mysqlhelper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mysqlhelper import ( @@ -5,10 +8,11 @@ import ( "database/sql" "fmt" "os" + "runtime" "strings" "testing" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) type Config struct { @@ -23,6 +27,12 @@ func PrepareTestContainer(t *testing.T, legacy bool, pw string) (func(), string) return func() {}, os.Getenv("MYSQL_URL") } + // ARM64 is only supported on MySQL 8.0 and above. If we update + // our image and support to 8.0, we can unskip these tests. + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as MySQL 5.7 is not supported on ARM architectures") + } + imageVersion := "5.7" if legacy { imageVersion = "5.6" diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go new file mode 100644 index 000000000000..40035fc59f38 --- /dev/null +++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +// Package pluginhelpers contains testhelpers that don't depend on package +// vault, and thus can be used within vault (as well as elsewhere.) +package pluginhelpers + +import ( + "crypto/sha256" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/go-testing-interface" +) + +var ( + testPluginCacheLock sync.Mutex + testPluginCache = map[string][]byte{} +) + +type TestPlugin struct { + Name string + Typ consts.PluginType + Version string + FileName string + Sha256 string + Image string + ImageSha256 string +} + +func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, string) { + t.Helper() + var pluginName string + var pluginType string + var pluginMain string + var pluginVersionLocation string + + switch typ { + case consts.PluginTypeCredential: + pluginType = "approle" + pluginName = "vault-plugin-auth-" + pluginType + pluginMain = filepath.Join("builtin", "credential", pluginType, "cmd", pluginType, "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/credential/%s.ReportedVersion", pluginType) + case consts.PluginTypeSecrets: + pluginType = "consul" + pluginName = "vault-plugin-secrets-" + pluginType + pluginMain = filepath.Join("builtin", "logical", pluginType, "cmd", pluginType, "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/logical/%s.ReportedVersion", pluginType) + case consts.PluginTypeDatabase: + pluginType = "postgresql" + pluginName = "vault-plugin-database-" + pluginType + pluginMain = filepath.Join("plugins", "database", pluginType, fmt.Sprintf("%s-database-plugin", pluginType), "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/plugins/database/%s.ReportedVersion", pluginType) + default: + t.Fatal(typ.String()) + } + return pluginName, pluginType, pluginMain, pluginVersionLocation +} + +// to mount a plugin, we need a working binary plugin, so we compile one here. 
+
+// pluginVersion is used to override the plugin's self-reported version
+func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin {
+	t.Helper()
+
+	pluginName, pluginType, pluginMain, pluginVersionLocation := GetPlugin(t, typ)
+
+	testPluginCacheLock.Lock()
+	defer testPluginCacheLock.Unlock()
+
+	var pluginBytes []byte
+
+	dir := ""
+	var err error
+	pluginRootDir := "builtin"
+	if typ == consts.PluginTypeDatabase {
+		pluginRootDir = "plugins"
+	}
+	for {
+		dir, err = os.Getwd()
+		if err != nil {
+			t.Fatal(err)
+		}
+		// detect if we are in a subdirectory or the root directory and compensate
+		if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) {
+			err := os.Chdir("..")
+			if err != nil {
+				t.Fatal(err)
+			}
+		} else {
+			break
+		}
+	}
+
+	pluginPath := path.Join(pluginDir, pluginName)
+	if pluginVersion != "" {
+		pluginPath += "-" + pluginVersion
+	}
+
+	key := fmt.Sprintf("%s %s %s", pluginName, pluginType, pluginVersion)
+	// cache the compilation to only run once
+	var ok bool
+	pluginBytes, ok = testPluginCache[key]
+	if !ok {
+		// we need to compile
+		line := []string{"build"}
+		if pluginVersion != "" {
+			line = append(line, "-ldflags", fmt.Sprintf("-X %s=%s", pluginVersionLocation, pluginVersion))
+		}
+		line = append(line, "-o", pluginPath, pluginMain)
+		cmd := exec.Command("go", line...)
+		cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
+		cmd.Dir = dir
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Fatal(fmt.Errorf("error running go build %v output: %s", err, output))
+		}
+		testPluginCache[key], err = os.ReadFile(pluginPath)
+		if err != nil {
+			t.Fatal(err)
+		}
+		pluginBytes = testPluginCache[key]
+	}
+
+	// write the cached plugin if necessary
+	if _, err := os.Stat(pluginPath); os.IsNotExist(err) {
+		err = os.WriteFile(pluginPath, pluginBytes, 0o755)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sha := sha256.New()
+	_, err = sha.Write(pluginBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return TestPlugin{
+		Name:     pluginName,
+		Typ:      typ,
+		Version:  pluginVersion,
+		FileName: path.Base(pluginPath),
+		Sha256:   fmt.Sprintf("%x", sha.Sum(nil)),
+	}
+}
+
+func BuildPluginContainerImage(t testing.T, plugin TestPlugin, pluginDir string) (image string, sha256 string) {
+	t.Helper()
+	ref := plugin.Name
+	if plugin.Version != "" {
+		ref += ":" + strings.TrimPrefix(plugin.Version, "v")
+	} else {
+		ref += ":latest"
+	}
+	args := []string{"build", "--tag=" + ref, "--build-arg=plugin=" + plugin.FileName, "--file=vault/testdata/Dockerfile", pluginDir}
+	cmd := exec.Command("docker", args...)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatal(fmt.Errorf("error running docker build %v output: %s", err, output))
+	}
+
+	cmd = exec.Command("docker", "images", ref, "--format={{ .ID }}", "--no-trunc")
+	id, err := cmd.CombinedOutput()
+	if err != nil {
+		// Report the command that actually failed and its own output.
+		t.Fatal(fmt.Errorf("error running docker images %v output: %s", err, id))
+	}
+
+	return plugin.Name, strings.TrimSpace(strings.TrimPrefix(string(id), "sha256:"))
+}
diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go
index 17b2151abb52..7e5f25c626af 100644
--- a/helper/testhelpers/postgresql/postgresqlhelper.go
+++ b/helper/testhelpers/postgresql/postgresqlhelper.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package postgresql import ( @@ -8,7 +11,7 @@ import ( "os" "testing" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" ) func PrepareTestContainer(t *testing.T, version string) (func(), string) { @@ -22,6 +25,26 @@ func PrepareTestContainer(t *testing.T, version string) (func(), string) { return cleanup, url } +// PrepareTestContainerWithVaultUser will setup a test container with a Vault +// admin user configured so that we can safely call rotate-root without +// rotating the root DB credentials +func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) { + env := []string{ + "POSTGRES_PASSWORD=secret", + "POSTGRES_DB=database", + } + + runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) + + cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"} + _, err := runner.RunCmdInBackground(ctx, id, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + } + + return cleanup, url +} + func PrepareTestContainerWithPassword(t *testing.T, version, password string) (func(), string) { env := []string{ "POSTGRES_PASSWORD=" + password, diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go index 4087f6fc0d92..580f42e9e081 100644 --- a/helper/testhelpers/seal/sealhelper.go +++ b/helper/testhelpers/seal/sealhelper.go @@ -1,16 +1,18 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package sealhelper import ( "path" "strconv" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/testhelpers/teststorage" "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" @@ -30,7 +32,7 @@ func NewTransitSealServer(t testing.T, idx int) *TransitSealServer { opts := &vault.TestClusterOptions{ NumCores: 1, HandlerFunc: http.Handler, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(idx)), + Logger: corehelpers.NewTestLogger(t).Named("transit-seal" + strconv.Itoa(idx)), } teststorage.InmemBackendSetup(conf, opts) cluster := vault.NewTestCluster(t, conf, opts) @@ -66,12 +68,14 @@ func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, err "key_name": key, "tls_ca_cert": tss.CACertPEMFile, } - transitSeal, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + transitSealWrapper, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) if err != nil { t.Fatalf("error setting wrapper config: %v", err) } - return vault.NewAutoSeal(&seal.Access{ - Wrapper: transitSeal, - }) + access, err := seal.NewAccessFromWrapper(tss.Logger, transitSealWrapper, vault.SealConfigTypeTransit.String()) + if err != nil { + return nil, err + } + return vault.NewAutoSeal(access), nil } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index 6899dd4cbe55..904bdc3e7f6f 100644 --- a/helper/testhelpers/testhelpers.go +++ 
b/helper/testhelpers/testhelpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package testhelpers import ( @@ -8,10 +11,8 @@ import ( "fmt" "io/ioutil" "math/rand" - "net/url" "os" "strings" - "sync/atomic" "time" "github.com/armon/go-metrics" @@ -25,6 +26,7 @@ import ( "github.com/mitchellh/go-testing-interface" ) +//go:generate enumer -type=GenerateRootKind -trimprefix=GenerateRoot type GenerateRootKind int const ( @@ -33,7 +35,7 @@ const ( GenerateRecovery ) -// Generates a root token on the target cluster. +// GenerateRoot generates a root token on the target cluster. func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string { t.Helper() token, err := GenerateRootWithError(t, cluster, kind) @@ -53,6 +55,9 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind Generat keys = cluster.BarrierKeys } client := cluster.Cores[0].Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() var err error var status *api.GenerateRootStatusResponse @@ -174,6 +179,10 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error } client := core.Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() + client.Sys().ResetUnsealProcess() for j := 0; j < len(c.BarrierKeys); j++ { statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) @@ -242,7 +251,10 @@ func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestCluste t.Helper() for i := 0; i < 60; i++ { for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -260,7 +272,10 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl t.Helper() cores := make([]*vault.TestClusterCore, 0, 2) for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -419,57 +434,19 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by return newKeys } -// TestRaftServerAddressProvider is a ServerAddressProvider that uses the -// ClusterAddr() of each node to provide raft addresses. -// -// Note that TestRaftServerAddressProvider should only be used in cases where -// cores that are part of a raft configuration have already had -// startClusterListener() called (via either unsealing or raft joining). 
-type TestRaftServerAddressProvider struct {
-	Cluster *vault.TestCluster
-}
-
-func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
-	for _, core := range p.Cluster.Cores {
-		if core.NodeID == string(id) {
-			parsed, err := url.Parse(core.ClusterAddr())
-			if err != nil {
-				return "", err
-			}
-
-			return raftlib.ServerAddress(parsed.Host), nil
-		}
-	}
-
-	return "", errors.New("could not find cluster addr")
-}
-
 func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
-	addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
-
-	atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)
-
 	leader := cluster.Cores[0]
 
-	// Seal the leader so we can install an address provider
-	{
-		EnsureCoreSealed(t, leader)
-		leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
-		cluster.UnsealCore(t, leader)
-		vault.TestWaitActive(t, leader.Core)
-	}
-
 	leaderInfos := []*raft.LeaderJoinInfo{
 		{
 			LeaderAPIAddr: leader.Client.Address(),
-			TLSConfig:     leader.TLSConfig,
+			TLSConfig:     leader.TLSConfig(),
 		},
 	}
 
 	// Join followers
 	for i := 1; i < len(cluster.Cores); i++ {
 		core := cluster.Cores[i]
-		core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
 		_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
 		if err != nil {
 			t.Fatal(err)
@@ -596,18 +573,16 @@ func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} {
 	t.Helper()
 
 	stopCh := make(chan struct{})
-	ticker := time.NewTicker(time.Second)
-	var err error
 
 	go func() {
+		ticker := time.NewTicker(time.Second)
+		defer ticker.Stop()
 		for {
 			select {
 			case <-stopCh:
-				ticker.Stop()
-				stopCh <- struct{}{}
 				return
 			case <-ticker.C:
-				err = client.Sys().Mount("foo", &api.MountInput{
+				err := client.Sys().Mount("foo", &api.MountInput{
 					Type: "kv",
 					Options: map[string]string{
 						"version": "1",
@@ -767,9 +742,19 @@ func SetNonRootToken(client *api.Client) error {
 	return nil
 }
 
-// RetryUntil runs f until it returns a nil result or the timeout is reached.
+// RetryUntilAtCadence runs f until it returns a nil result or the timeout is reached.
 // If a nil result hasn't been obtained by timeout, calls t.Fatal.
-func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
+func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() error) {
+	t.Helper()
+	fail := func(err error) {
+		t.Fatalf("did not complete before deadline, err: %v", err)
+	}
+	RetryUntilAtCadenceWithHandler(t, timeout, sleepTime, fail, f)
+}
+
+// RetryUntilAtCadenceWithHandler runs f until it returns a nil result or the timeout is reached.
+// If a nil result hasn't been obtained by timeout, onFailure is called.
+func RetryUntilAtCadenceWithHandler(t testing.T, timeout, sleepTime time.Duration, onFailure func(error), f func() error) {
 	t.Helper()
 	deadline := time.Now().Add(timeout)
 	var err error
@@ -777,14 +762,27 @@
 		if err = f(); err == nil {
 			return
 		}
-		time.Sleep(100 * time.Millisecond)
+		time.Sleep(sleepTime)
 	}
-	t.Fatalf("did not complete before deadline, err: %v", err)
+	onFailure(err)
+}
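A minimal sketch of how the two new retry entry points compose from a test, assuming a cluster client; `waitUntilLeader` and `logIfNeverLeader` are illustrative names, not part of this change:

```
package testhelpers_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/testhelpers"
)

// waitUntilLeader polls at a 250ms cadence for up to 10s and fails the test
// via t.Fatal if the node never becomes leader.
func waitUntilLeader(t *testing.T, client *api.Client) {
	testhelpers.RetryUntilAtCadence(t, 10*time.Second, 250*time.Millisecond, func() error {
		resp, err := client.Sys().Leader()
		if err != nil {
			return err
		}
		if !resp.IsSelf {
			return fmt.Errorf("node is not leader yet")
		}
		return nil
	})
}

// logIfNeverLeader runs the same loop, but only logs on timeout instead of
// failing the test, via the WithHandler variant.
func logIfNeverLeader(t *testing.T, client *api.Client) {
	testhelpers.RetryUntilAtCadenceWithHandler(t, 10*time.Second, 250*time.Millisecond,
		func(err error) { t.Logf("never became leader: %v", err) },
		func() error {
			resp, err := client.Sys().Leader()
			if err != nil {
				return err
			}
			if !resp.IsSelf {
				return fmt.Errorf("node is not leader yet")
			}
			return nil
		})
}
```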
+
+// RetryUntil runs f until it returns a nil result or the timeout is reached.
+// If a nil result hasn't been obtained by timeout, calls t.Fatal.
+func RetryUntil(t testing.T, timeout time.Duration, f func() error) {
+	t.Helper()
+	RetryUntilAtCadence(t, timeout, 100*time.Millisecond, f)
 }
 
-// CreateEntityAndAlias clones an existing client and creates an entity/alias.
+// CreateEntityAndAlias clones an existing client and creates an entity/alias using the userpass mount path.
 // It returns the cloned client, entityID, and aliasID.
 func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) {
+	return CreateEntityAndAliasWithinMount(t, client, mountAccessor, "userpass", entityName, aliasName)
+}
+
+// CreateEntityAndAliasWithinMount clones an existing client and creates an entity/alias within the specified mountPath.
+// It returns the cloned client, entityID, and aliasID.
+func CreateEntityAndAliasWithinMount(t testing.T, client *api.Client, mountAccessor, mountPath, entityName, aliasName string) (*api.Client, string, string) {
 	t.Helper()
 	userClient, err := client.Clone()
 	if err != nil {
@@ -812,7 +810,8 @@
 	if aliasID == "" {
 		t.Fatal("Alias ID not present in response")
 	}
-	_, err = client.Logical().WriteWithContext(context.Background(), fmt.Sprintf("auth/userpass/users/%s", aliasName), map[string]interface{}{
+	path := fmt.Sprintf("auth/%s/users/%s", mountPath, aliasName)
+	_, err = client.Logical().WriteWithContext(context.Background(), path, map[string]interface{}{
 		"password": "testpassword",
 	})
 	if err != nil {
@@ -942,7 +941,7 @@ func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) s
 
 // SetupLoginMFATOTP sets up a TOTP MFA using some basic configuration and
 // returns all relevant information to the client.
-func SetupLoginMFATOTP(t testing.T, client *api.Client) (*api.Client, string, string) {
+func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) {
 	t.Helper()
 	// Mount the totp secrets engine
 	SetupTOTPMount(t, client)
@@ -956,13 +955,14 @@
 	// Configure a default TOTP method
 	totpConfig := map[string]interface{}{
 		"issuer":                  "yCorp",
-		"period":                  20,
+		"period":                  waitPeriod,
 		"algorithm":               "SHA256",
 		"digits":                  6,
 		"skew":                    1,
 		"key_size":                20,
 		"qr_size":                 200,
 		"max_validation_attempts": 5,
+		"method_name":             methodName,
 	}
 
 	methodID := SetupTOTPMethod(t, client, totpConfig)
@@ -986,3 +986,40 @@ func SkipUnlessEnvVarsSet(t testing.T, envVars []string) {
 		}
 	}
 }
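A short call sketch of the old and new entity/alias and MFA helpers, assuming a running test cluster client; the accessors, the "userpass2" mount path, and all names here are hypothetical:

```
package testhelpers_test

import (
	"testing"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/testhelpers"
)

func exampleEntityAliases(t *testing.T, client *api.Client, accessor, accessor2 string) {
	// Existing callers keep the old signature; the alias lands on the
	// auth method mounted at the default "userpass" path.
	userClient, entityID, aliasID := testhelpers.CreateEntityAndAlias(t, client, accessor, "entity1", "testuser1")

	// New: target a userpass method mounted at a custom path ("userpass2"
	// must match the mount that accessor2 belongs to).
	userClient2, entityID2, aliasID2 := testhelpers.CreateEntityAndAliasWithinMount(t, client, accessor2, "userpass2", "entity2", "testuser2")

	// SetupLoginMFATOTP now takes the TOTP method name and period explicitly
	// (the period was previously hard-coded to 20); it returns a cloned
	// client and two identifiers, per the helper's body above.
	mfaClient, id1, id2 := testhelpers.SetupLoginMFATOTP(t, client, "my-totp-method", 20)

	_, _, _ = userClient, entityID, aliasID
	_, _, _ = userClient2, entityID2, aliasID2
	_, _, _ = mfaClient, id1, id2
}
```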
+
+// WaitForNodesExcludingSelectedStandbys is a variation on WaitForActiveNodeAndStandbys.
+// It waits for the active node before waiting for standby nodes, however
+// it will not wait for cores with indexes that match those specified as arguments.
+// Whilst you could specify index 0, which is likely to be the leader node, the function
+// checks for the leader first regardless of the indexes to skip, so it would be redundant to do so.
+// The intention/use case for this function is to allow a cluster to start and become active with one
+// or more nodes not joined, so that we can test scenarios where a node joins later.
+// e.g. 4 nodes in the cluster, only 3 nodes in cluster 'active', 1 node can be joined later in tests.
+func WaitForNodesExcludingSelectedStandbys(t testing.T, cluster *vault.TestCluster, indexesToSkip ...int) {
+	WaitForActiveNode(t, cluster)
+
+	contains := func(elems []int, e int) bool {
+		for _, v := range elems {
+			if v == e {
+				return true
+			}
+		}
+
+		return false
+	}
+	for i, core := range cluster.Cores {
+		if contains(indexesToSkip, i) {
+			continue
+		}
+
+		if standby, _ := core.Core.Standby(); standby {
+			WaitForStandbyNode(t, core)
+		}
+	}
+}
+
+// IsLocalOrRegressionTests returns true when the tests are running locally (not in CI), or when
+// the regression test env var (VAULT_REGRESSION_TESTS) is provided.
+func IsLocalOrRegressionTests() bool {
+	return os.Getenv("CI") == "" || os.Getenv("VAULT_REGRESSION_TESTS") == "true"
+}
diff --git a/helper/testhelpers/testhelpers_oss.go b/helper/testhelpers/testhelpers_oss.go
index 912d50fdec3b..8965ed9169a3 100644
--- a/helper/testhelpers/testhelpers_oss.go
+++ b/helper/testhelpers/testhelpers_oss.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 //go:build !enterprise
 
 package testhelpers
diff --git a/helper/testhelpers/teststorage/consul/consul.go b/helper/testhelpers/teststorage/consul/consul.go
index 47ec99f294b7..26a2175dac2e 100644
--- a/helper/testhelpers/teststorage/consul/consul.go
+++ b/helper/testhelpers/teststorage/consul/consul.go
@@ -1,11 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package consul
 
 import (
+	"sync"
 	realtesting "testing"
 
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/vault/helper/testhelpers/consul"
-	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
 	physConsul "github.com/hashicorp/vault/physical/consul"
 	"github.com/hashicorp/vault/vault"
 	"github.com/mitchellh/go-testing-interface"
@@ -30,5 +33,93 @@ func MakeConsulBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendB
 }
 
 func ConsulBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
-	opts.PhysicalFactory = teststorage.SharedPhysicalFactory(MakeConsulBackend)
+	m := &consulContainerManager{}
+	opts.PhysicalFactory = m.Backend
+}
+
+// consulContainerManager exposes Backend, which matches the PhysicalFactory func
+// type. When called, it ensures that a separate Consul container is started
+// for each distinct vault cluster that calls it, and that each Vault core gets
+// a separate Consul backend instance, since that contains state related to
+// lock sessions. The test framework doesn't have a concept of "cluster names"
+// outside of the prefix attached to the logger; other backend factories,
+// mostly via SharedPhysicalFactory, currently implicitly rely on being called
+// in a sequence of core 0, 1, 2,... on one cluster and then core 0, 1, 2... on
+// the next and so on. Refactoring lots of things to make first-class cluster
+// identifiers a thing seems like a heavy lift given that we already rely on
+// the sequence of calls everywhere else anyway, so we do the same here: each
+// time the Backend method is called with coreIdx == 0 we create a whole new
+// Consul and assume subsequent non-0 index cores are in the same cluster.
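+// (All cores of one cluster thus share a single refcounted container, while
+// each core still gets its own physConsul backend instance; see the factory
+// below.)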
+type consulContainerManager struct { + mu sync.Mutex + current *consulContainerBackendFactory +} + +func (m *consulContainerManager) Backend(t testing.T, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + m.mu.Lock() + if coreIdx == 0 || m.current == nil { + // Create a new consul container factory + m.current = &consulContainerBackendFactory{} + } + f := m.current + m.mu.Unlock() + + return f.Backend(t, coreIdx, logger, conf) +} + +type consulContainerBackendFactory struct { + mu sync.Mutex + refCount int + cleanupFn func() + config map[string]string +} + +func (f *consulContainerBackendFactory) Backend(t testing.T, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount == 0 { + f.startContainerLocked(t) + logger.Debug("started consul container", "clusterID", conf["cluster_id"], + "address", f.config["address"]) + } + + f.refCount++ + consulBackend, err := physConsul.NewConsulBackend(f.config, logger.Named("consul")) + if err != nil { + t.Fatal(err) + } + return &vault.PhysicalBackendBundle{ + Backend: consulBackend, + Cleanup: f.cleanup, + } +} + +func (f *consulContainerBackendFactory) startContainerLocked(t testing.T) { + cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true) + f.config = map[string]string{ + "address": config.Address(), + "token": config.Token, + "max_parallel": "32", + } + f.cleanupFn = cleanup +} + +func (f *consulContainerBackendFactory) cleanup() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount < 1 || f.cleanupFn == nil { + return + } + f.refCount-- + if f.refCount == 0 { + f.cleanupFn() + f.cleanupFn = nil + } } diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index bf342e11ebf7..1c8aab2445af 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -1,15 +1,29 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package teststorage import ( "fmt" "io/ioutil" + "math/rand" "os" "time" "github.com/hashicorp/go-hclog" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" + auditFile "github.com/hashicorp/vault/builtin/audit/file" + auditSocket "github.com/hashicorp/vault/builtin/audit/socket" + auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/plugin" "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" physFile "github.com/hashicorp/vault/sdk/physical/file" "github.com/hashicorp/vault/sdk/physical/inmem" @@ -33,6 +47,17 @@ func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBu } } +func MakeLatentInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + jitter := r.Intn(20) + latency := time.Duration(r.Intn(15)) * time.Millisecond + + pbb := MakeInmemBackend(t, logger) + latencyInjector := physical.NewTransactionalLatencyInjector(pbb.Backend, latency, jitter, logger) + pbb.Backend = latencyInjector + return pbb +} + func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { inm, err := inmem.NewInmem(nil, logger) if err != nil { @@ -79,7 +104,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun } } -func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle { +func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) raftDir, err := ioutil.TempDir("", "vault-raft-") if err != nil { @@ -92,10 +117,25 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma logger.Info("raft dir", "dir", raftDir) + backend, err := makeRaftBackend(logger, nodeID, raftDir, extraConf, bridge) + if err != nil { + cleanupFunc() + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: backend, + Cleanup: cleanupFunc, + } +} + +func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) (physical.Backend, error) { conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", + "path": raftDir, + "node_id": nodeID, + "performance_multiplier": "8", + "autopilot_reconcile_interval": "300ms", + "autopilot_update_interval": "100ms", } for k, v := range extraConf { val, ok := v.(string) @@ -106,14 +146,13 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma backend, err := raft.NewRaftBackend(conf, logger.Named("raft")) if err != nil { - cleanupFunc() - t.Fatal(err) + return nil, err } - - return &vault.PhysicalBackendBundle{ - Backend: backend, - Cleanup: cleanupFunc, + if bridge != nil { + backend.(*raft.RaftBackend).SetServerAddressProvider(bridge) } + + return backend, nil } // RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend @@ -182,6 +221,10 @@ func 
InmemBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemBackend) } +func InmemLatentBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeLatentInmemBackend) +} + func InmemNonTransactionalBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemNonTransactionalBackend) } @@ -192,7 +235,21 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.KeepStandbysSealed = true - opts.PhysicalFactory = MakeRaftBackend + var bridge *raft.ClusterAddrBridge + opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + // The same PhysicalFactory can be shared across multiple clusters. + // The coreIdx == 0 check ensures that each time a new cluster is setup, + // when setting up its first node we create a new ClusterAddrBridge. + if !opts.InmemClusterLayers && opts.ClusterLayers == nil && coreIdx == 0 { + bridge = raft.NewClusterAddrBridge() + } + bundle := MakeRaftBackend(t, coreIdx, logger, conf, bridge) + bundle.MutateCoreConfig = func(conf *vault.CoreConfig) { + logger.Trace("setting bridge", "idx", coreIdx, "bridge", fmt.Sprintf("%p", bridge)) + conf.ClusterAddrBridge = bridge + } + return bundle + } opts.SetupFunc = func(t testing.T, c *vault.TestCluster) { if opts.NumCores != 1 { testhelpers.RaftClusterJoinNodes(t, c) @@ -202,7 +259,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { } func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) { - opts.KeepStandbysSealed = true + opts.InmemClusterLayers = true opts.PhysicalFactory = RaftHAFactory(bundler) } @@ -214,6 +271,9 @@ func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup } localOpts := vault.TestClusterOptions{ HandlerFunc: vaulthttp.Handler, + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{}, + }, } if opts != nil { localOpts = *opts @@ -222,5 +282,28 @@ func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup setup = InmemBackendSetup } setup(&localConf, &localOpts) + if localConf.CredentialBackends == nil { + localConf.CredentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + } + if localConf.LogicalBackends == nil { + localConf.LogicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } + } + if localConf.AuditBackends == nil { + localConf.AuditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + "socket": auditSocket.Factory, + "syslog": auditSyslog.Factory, + "noop": corehelpers.NoopAuditFactory(nil), + } + } + return &localConf, &localOpts } diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 257a5a0184c5..89642cf61fd7 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package teststorage import ( @@ -6,7 +9,6 @@ import ( "os" hclog "github.com/hashicorp/go-hclog" - raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" @@ -71,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica // MakeReusableRaftStorage makes a physical raft backend that can be re-used // across multiple test clusters in sequence. -func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) { +func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) { raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { raftDirs[i] = makeRaftDir(t) @@ -84,7 +86,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add conf.DisablePerformanceStandby = true opts.KeepStandbysSealed = true opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false) + return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false) } }, @@ -121,9 +123,10 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b storage := ReusableStorage{ Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.InmemClusterLayers = true opts.KeepStandbysSealed = true opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true) + haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true) return &vault.PhysicalBackendBundle{ Backend: bundle.Backend, @@ -165,25 +168,13 @@ func makeRaftDir(t testing.T) string { return raftDir } -func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle { +func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) - conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", - "autopilot_reconcile_interval": "300ms", - "autopilot_update_interval": "100ms", - } - - backend, err := raft.NewRaftBackend(conf, logger) + backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil) if err != nil { t.Fatal(err) } - if addressProvider != nil { - backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - } - bundle := new(vault.PhysicalBackendBundle) if ha { diff --git a/helper/timeutil/timeutil.go b/helper/timeutil/timeutil.go index a65d3cf908bc..56a20615afd3 100644 --- a/helper/timeutil/timeutil.go +++ b/helper/timeutil/timeutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
 package timeutil
 
 import (
@@ -14,6 +17,20 @@ func StartOfPreviousMonth(t time.Time) time.Time {
 	return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()).AddDate(0, -1, 0)
 }
 
+func StartOfDay(t time.Time) time.Time {
+	year, month, day := t.Date()
+	return time.Date(year, month, day, 0, 0, 0, 0, t.Location())
+}
+
+// IsCurrentDay checks if :t: is in the current day, as defined by :compare:.
+// Generally, pass in time.Now().UTC() as :compare:.
+func IsCurrentDay(t, compare time.Time) bool {
+	thisDayStart := StartOfDay(compare)
+	queryDayStart := StartOfDay(t)
+
+	return queryDayStart.Equal(thisDayStart)
+}
+
 func StartOfMonth(t time.Time) time.Time {
 	year, month, _ := t.Date()
 	return time.Date(year, month, 1, 0, 0, 0, 0, t.Location())
@@ -139,3 +156,26 @@ func SkipAtEndOfMonth(t *testing.T) {
 		t.Skip("too close to end of month")
 	}
 }
+
+// Clock allows unit tests to substitute in a simulated clock.
+type Clock interface {
+	Now() time.Time
+	NewTicker(time.Duration) *time.Ticker
+	NewTimer(time.Duration) *time.Timer
+}
+
+type DefaultClock struct{}
+
+var _ Clock = (*DefaultClock)(nil)
+
+func (_ DefaultClock) Now() time.Time {
+	return time.Now()
+}
+
+func (_ DefaultClock) NewTicker(d time.Duration) *time.Ticker {
+	return time.NewTicker(d)
+}
+
+func (_ DefaultClock) NewTimer(d time.Duration) *time.Timer {
+	return time.NewTimer(d)
+}
diff --git a/helper/timeutil/timeutil_test.go b/helper/timeutil/timeutil_test.go
index 5cef2d2061fe..df14a6fd1777 100644
--- a/helper/timeutil/timeutil_test.go
+++ b/helper/timeutil/timeutil_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package timeutil
 
 import (
@@ -220,6 +223,47 @@ func TestTimeutil_IsCurrentMonth(t *testing.T) {
 	}
 }
 
+// TestTimeutil_IsCurrentDay checks whether each test time falls on the current day.
+func TestTimeutil_IsCurrentDay(t *testing.T) {
+	now := time.Now()
+	testCases := []struct {
+		input    time.Time
+		expected bool
+	}{
+		{
+			input:    now,
+			expected: true,
+		},
+		{
+			input:    StartOfDay(now).AddDate(0, 0, -1),
+			expected: false,
+		},
+		{
+			input:    StartOfDay(now).AddDate(-1, 0, 0),
+			expected: false,
+		},
+		{
+			input:    StartOfDay(now).Add(1 * time.Second),
+			expected: true,
+		},
+		{
+			input:    StartOfDay(now).Add(-1 * time.Second),
+			expected: false,
+		},
+		{
+			input:    StartOfDay(now).Add(86400), // a bare 86400 is 86400 nanoseconds, not seconds, so this stays within the current day
+			expected: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		result := IsCurrentDay(tc.input, now)
+		if result != tc.expected {
+			t.Errorf("invalid result. expected %t for %v", tc.expected, tc.input)
+		}
+	}
+}
+
 func TestTimeUtil_ContiguousMonths(t *testing.T) {
 	testCases := []struct {
 		input []time.Time
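The Clock seam added to helper/timeutil/timeutil.go exists so unit tests can substitute a simulated clock. A minimal sketch of such a substitute; `fakeClock` is illustrative, not part of this change:

```
package timeutil_test

import (
	"time"

	"github.com/hashicorp/vault/helper/timeutil"
)

// fakeClock is a hypothetical test double for timeutil.Clock that serves a
// fixed "now".
type fakeClock struct {
	now time.Time
}

// Compile-time check that fakeClock satisfies the interface.
var _ timeutil.Clock = (*fakeClock)(nil)

func (c *fakeClock) Now() time.Time { return c.now }

// Tickers and timers are delegated to the real time package here; a fuller
// fake would return channels the test itself controls.
func (c *fakeClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }
func (c *fakeClock) NewTimer(d time.Duration) *time.Timer   { return time.NewTimer(d) }
```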
diff --git a/helper/useragent/useragent.go b/helper/useragent/useragent.go
index b2fa40a30c8e..0becfe9e9fd6 100644
--- a/helper/useragent/useragent.go
+++ b/helper/useragent/useragent.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package useragent
 
 import (
@@ -29,3 +32,79 @@ func String() string {
 	return fmt.Sprintf("Vault/%s (+%s; %s)",
 		versionFunc(), projectURL, rt)
 }
+
+// AgentString returns the consistent user-agent string for Vault Agent.
+//
+// e.g. Vault Agent/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentString() string {
+	return fmt.Sprintf("Vault Agent/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentTemplatingString returns the consistent user-agent string for Vault Agent Templating.
+//
+// e.g. Vault Agent Templating/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentTemplatingString() string {
+	return fmt.Sprintf("Vault Agent Templating/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentProxyString returns the consistent user-agent string for Vault Agent API Proxying.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentProxyString() string {
+	return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Agent API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func AgentProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+	return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s); %s",
+		versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// AgentAutoAuthString returns the consistent user-agent string for Vault Agent Auto-Auth.
+//
+// e.g. Vault Agent Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentAutoAuthString() string {
+	return fmt.Sprintf("Vault Agent Auto-Auth/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyString returns the consistent user-agent string for Vault Proxy.
+//
+// e.g. Vault Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyString() string {
+	return fmt.Sprintf("Vault Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyAPIProxyString returns the consistent user-agent string for Vault Proxy API Proxying.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyAPIProxyString() string {
+	return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Proxy API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func ProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+	return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s); %s",
+		versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// ProxyAutoAuthString returns the consistent user-agent string for Vault Proxy Auto-Auth.
+//
+// e.g. Vault Proxy Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyAutoAuthString() string {
+	return fmt.Sprintf("Vault Proxy Auto-Auth/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
diff --git a/helper/useragent/useragent_test.go b/helper/useragent/useragent_test.go
index cb0cf32942c9..f58363a8e913 100644
--- a/helper/useragent/useragent_test.go
+++ b/helper/useragent/useragent_test.go
@@ -1,7 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package useragent import ( "testing" + + "github.com/stretchr/testify/require" ) func TestUserAgent(t *testing.T) { @@ -12,7 +17,124 @@ func TestUserAgent(t *testing.T) { act := String() exp := "Vault/1.2.3 (+https://vault-test.com; go5.0)" - if exp != act { - t.Errorf("expected %q to be %q", act, exp) - } + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgent tests the AgentString() function works +// as expected +func TestUserAgent_VaultAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentString() + + exp := "Vault Agent/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentTemplating tests the AgentTemplatingString() function works +// as expected +func TestUserAgent_VaultAgentTemplating(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentTemplatingString() + + exp := "Vault Agent Templating/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxy tests the AgentProxyString() function works +// as expected +func TestUserAgent_VaultAgentProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentProxyString() + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxyWithProxiedUserAgent tests the AgentProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultAgentProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := AgentProxyStringWithProxiedUserAgent(userAgent) + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentAutoAuth tests the AgentAutoAuthString() function works +// as expected +func TestUserAgent_VaultAgentAutoAuth(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentAutoAuthString() + + exp := "Vault Agent Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxy tests the ProxyString() function works +// as expected +func TestUserAgent_VaultProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyString() + + exp := "Vault Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyAPIProxy tests the ProxyAPIProxyString() function works +// as expected +func TestUserAgent_VaultProxyAPIProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyAPIProxyString() + + exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyWithProxiedUserAgent tests the ProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := 
ProxyStringWithProxiedUserAgent(userAgent)
+
+	exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent"
+	require.Equal(t, exp, act)
+}
+
+// TestUserAgent_VaultProxyAutoAuth tests the ProxyAutoAuthString() function works
+// as expected
+func TestUserAgent_VaultProxyAutoAuth(t *testing.T) {
+	projectURL = "https://vault-test.com"
+	rt = "go5.0"
+	versionFunc = func() string { return "1.2.3" }
+
+	act := ProxyAutoAuthString()
+
+	exp := "Vault Proxy Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)"
+	require.Equal(t, exp, act)
 }
diff --git a/helper/versions/version.go b/helper/versions/version.go
index b64dd3d26034..590e25ec0ada 100644
--- a/helper/versions/version.go
+++ b/helper/versions/version.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package versions
 
 import (
diff --git a/helper/versions/version_test.go b/helper/versions/version_test.go
index cc1b3e1c20f2..c6d31f4dc00e 100644
--- a/helper/versions/version_test.go
+++ b/helper/versions/version_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package versions
 
 import "testing"
diff --git a/http/assets.go b/http/assets.go
index c401f9491087..f1f080c27284 100644
--- a/http/assets.go
+++ b/http/assets.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 //go:build ui
 
 package http
diff --git a/http/assets_stub.go b/http/assets_stub.go
index 1989a09d9860..de29ee972100 100644
--- a/http/assets_stub.go
+++ b/http/assets_stub.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 //go:build !ui
 
 package http
diff --git a/http/auth_token_test.go b/http/auth_token_test.go
index 552a32cbdd93..37903a418583 100644
--- a/http/auth_token_test.go
+++ b/http/auth_token_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
diff --git a/http/cors.go b/http/cors.go
index 74cfeeaef072..2689a007dbec 100644
--- a/http/cors.go
+++ b/http/cors.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
@@ -15,6 +18,7 @@ var allowedMethods = []string{
 	http.MethodOptions,
 	http.MethodPost,
 	http.MethodPut,
+	http.MethodPatch,
 	"LIST", // LIST is not an official HTTP method, but Vault supports it.
 }
diff --git a/http/custom_header_test.go b/http/custom_header_test.go
index 8c204584aeb7..36227cc781dc 100644
--- a/http/custom_header_test.go
+++ b/http/custom_header_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
diff --git a/http/events_stubs_oss.go b/http/events_stubs_oss.go
new file mode 100644
index 000000000000..c1a4a673598f
--- /dev/null
+++ b/http/events_stubs_oss.go
@@ -0,0 +1,19 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+//go:build !enterprise
+
+package http
+
+import (
+	"net/http"
+
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+)
+
+//go:generate go run github.com/hashicorp/vault/tools/stubmaker
+
+func entHandleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler {
+	return nil
+}
diff --git a/http/forwarded_for_test.go b/http/forwarded_for_test.go
index b7060c6671e4..c0409bab30f0 100644
--- a/http/forwarded_for_test.go
+++ b/http/forwarded_for_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/forwarding_bench_test.go b/http/forwarding_bench_test.go index f7334058d6a8..287a6c9ff49a 100644 --- a/http/forwarding_bench_test.go +++ b/http/forwarding_bench_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -45,7 +48,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { host := fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[0].Listeners[0].Address.Port) transport := &http.Transport{ - TLSClientConfig: cores[0].TLSConfig, + TLSClientConfig: cores[0].TLSConfig(), } if err := http2.ConfigureTransport(transport); err != nil { b.Fatal(err) @@ -56,7 +59,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { } req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + bytes.NewBufferString("{\"type\": \"transit\"}")) if err != nil { b.Fatal(err) } @@ -89,7 +92,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { numOps++ } - doReq(b, "POST", host+"keys/test1", bytes.NewBuffer([]byte("{}"))) + doReq(b, "POST", host+"keys/test1", bytes.NewBufferString("{}")) keyUrl := host + "encrypt/test1" reqBuf := []byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)) diff --git a/http/forwarding_test.go b/http/forwarding_test.go index f0225a42230e..c2fdfcb25646 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -53,7 +56,7 @@ func TestHTTP_Fallback_Bad_Address(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() client, err := api.NewClient(config) if err != nil { @@ -101,7 +104,7 @@ func TestHTTP_Fallback_Disabled(t *testing.T) { for _, addr := range addrs { config := api.DefaultConfig() config.Address = addr - config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() client, err := api.NewClient(config) if err != nil { @@ -161,7 +164,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) } transport := &http.Transport{ - TLSClientConfig: cores[0].TLSConfig, + TLSClientConfig: cores[0].TLSConfig(), } if err := http2.ConfigureTransport(transport); err != nil { t.Fatal(err) @@ -176,7 +179,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // core.Logger().Printf("[TRACE] mounting transit") req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + bytes.NewBufferString("{\"type\": \"transit\"}")) if err != nil { t.Fatal(err) } @@ -269,7 +272,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) for _, chosenHost := range hosts { for _, chosenKey := range keys { // Try to write the key to make sure it exists - _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), 
bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -280,7 +283,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) chosenHost = hosts[id%len(hosts)] chosenKey = fmt.Sprintf("key-%t-%d", parallel, id) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -317,7 +320,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Encrypt our plaintext and store the result case "encrypt": // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)))) + resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBufferString(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64))) if err != nil { panic(err) } @@ -344,7 +347,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) } // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct)))) + resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBufferString(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct))) if err != nil { panic(err) } @@ -373,7 +376,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // Rotate to a new key version case "rotate": // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}"))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBufferString("{}")) if err != nil { panic(err) } @@ -410,7 +413,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) // core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) - _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion)))) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBufferString(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion))) if err != nil { panic(err) } @@ -459,7 +462,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { vault.TestWaitActive(t, core) transport := cleanhttp.DefaultTransport() - transport.TLSClientConfig = cores[0].TLSConfig + transport.TLSClientConfig = cores[0].TLSConfig() if err := http2.ConfigureTransport(transport); err != nil { t.Fatal(err) } @@ -469,7 +472,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { } req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/auth/cert", cores[0].Listeners[0].Address.Port), - bytes.NewBuffer([]byte("{\"type\": \"cert\"}"))) + bytes.NewBufferString("{\"type\": \"cert\"}")) if err != nil { t.Fatal(err) } @@ -511,7 +514,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) { // be to a different address transport = cleanhttp.DefaultTransport() // i starts at zero but cores in addrs start at 1 - transport.TLSClientConfig = cores[i+1].TLSConfig + transport.TLSClientConfig = cores[i+1].TLSConfig() if err := http2.ConfigureTransport(transport); err != nil { t.Fatal(err) } diff --git a/http/handler.go b/http/handler.go index 15885ac3518c..2ee5f287444e 100644 --- 
a/http/handler.go +++ b/http/handler.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -20,7 +23,6 @@ import ( "strings" "time" - "github.com/NYTimes/gziphandler" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-secure-stdlib/parseutil" @@ -28,11 +30,13 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/limits" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/pathmanager" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + gziphandler "github.com/klauspost/compress/gzhttp" ) const ( @@ -81,6 +85,7 @@ var ( // the always forward list perfStandbyAlwaysForwardPaths = pathmanager.New() alwaysRedirectPaths = pathmanager.New() + websocketPaths = pathmanager.New() injectDataIntoTopRoutes = []string{ "/v1/sys/audit", @@ -106,7 +111,9 @@ var ( "/v1/sys/rotate", "/v1/sys/wrapping/wrap", } - + websocketRawPaths = []string{ + "/v1/sys/events/subscribe", + } oidcProtectedPathRegex = regexp.MustCompile(`^identity/oidc/provider/\w(([\w-.]+)?\w)?/userinfo$`) ) @@ -116,6 +123,10 @@ func init() { "sys/storage/raft/snapshot-force", "!sys/storage/raft/snapshot-auto/config", }) + websocketPaths.AddPaths(websocketRawPaths) + for _, path := range websocketRawPaths { + alwaysRedirectPaths.AddPaths([]string{strings.TrimPrefix(path, "/v1/")}) + } } type HandlerAnchor struct{} @@ -141,6 +152,10 @@ func handler(props *vault.HandlerProperties) http.Handler { // Create the muxer to handle the actual endpoints mux := http.NewServeMux() + var chrootNamespace string + if props.ListenerConfig != nil { + chrootNamespace = props.ListenerConfig.ChrootNamespace + } switch { case props.RecoveryMode: @@ -151,17 +166,23 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy)) default: // Handle non-forwarded paths - mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core)) - mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core, chrootNamespace)) + mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core, chrootNamespace)) mux.Handle("/v1/sys/init", handleSysInit(core)) - mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core)) + mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core, + WithRedactClusterName(props.ListenerConfig.RedactClusterName), + WithRedactVersion(props.ListenerConfig.RedactVersion))) + mux.Handle("/v1/sys/seal-backend-status", handleSysSealBackendStatus(core)) mux.Handle("/v1/sys/seal", handleSysSeal(core)) mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core))) mux.Handle("/v1/sys/unseal", handleSysUnseal(core)) - mux.Handle("/v1/sys/leader", handleSysLeader(core)) - mux.Handle("/v1/sys/health", handleSysHealth(core)) - mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/leader", handleSysLeader(core, + WithRedactAddresses(props.ListenerConfig.RedactAddresses))) + mux.Handle("/v1/sys/health", handleSysHealth(core, + WithRedactClusterName(props.ListenerConfig.RedactClusterName), + WithRedactVersion(props.ListenerConfig.RedactVersion))) + mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core, chrootNamespace)) 
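The handler.go hunks above swap github.com/NYTimes/gziphandler for github.com/klauspost/compress/gzhttp under the same import alias, so existing gziphandler.GzipHandler call sites keep compiling. A minimal sketch of that drop-in usage, with an illustrative handler and address:

```go
package main

import (
	"fmt"
	"net/http"

	gziphandler "github.com/klauspost/compress/gzhttp"
)

func main() {
	ui := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "static UI asset contents")
	})

	// gzhttp keeps the old package's GzipHandler entry point: the response
	// is gzip-compressed only when the client sends Accept-Encoding: gzip.
	mux := http.NewServeMux()
	mux.Handle("/ui/", gziphandler.GzipHandler(ui))

	_ = http.ListenAndServe("127.0.0.1:8200", mux)
}
```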
mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy)))) mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, @@ -175,11 +196,12 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core)) mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core)) + for _, path := range injectDataIntoTopRoutes { - mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) + mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core, chrootNamespace))) } - mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core))) - mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core))) + mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core, chrootNamespace))) + mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core, chrootNamespace))) if core.UIEnabled() { if uiBuiltIn { mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))) @@ -196,7 +218,7 @@ func handler(props *vault.HandlerProperties) http.Handler { if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess { mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core)) } else { - mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core, chrootNamespace)) } if props.ListenerConfig != nil && props.ListenerConfig.Profiling.UnauthenticatedPProfAccess { @@ -209,31 +231,50 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/pprof/symbol", http.HandlerFunc(pprof.Symbol)) mux.Handle("/v1/sys/pprof/trace", http.HandlerFunc(pprof.Trace)) } else { - mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core, chrootNamespace)) } if props.ListenerConfig != nil && props.ListenerConfig.InFlightRequestLogging.UnauthenticatedInFlightAccess { mux.Handle("/v1/sys/in-flight-req", handleUnAuthenticatedInFlightRequest(core)) } else { - mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core, chrootNamespace)) } - additionalRoutes(mux, core) + entAdditionalRoutes(mux, core) } - // Wrap the handler in another handler to trigger all help paths. - helpWrappedHandler := wrapHelpHandler(mux, core) - corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core) - quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core) - genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props) + // Build up a chain of wrapping handlers. + wrappedHandler := wrapHelpHandler(mux, core) + wrappedHandler = wrapCORSHandler(wrappedHandler, core) + wrappedHandler = rateLimitQuotaWrapping(wrappedHandler, core) + wrappedHandler = entWrapGenericHandler(core, wrappedHandler, props) + wrappedHandler = wrapMaxRequestSizeHandler(wrappedHandler, props) - // Wrap the handler with PrintablePathCheckHandler to check for non-printable - // characters in the request path. 
- printablePathCheckHandler := genericWrappedHandler + // Unless the DisablePrintableCheck listener setting is true, add a + // wrapping handler that checks for non-printable characters in the + // request path. if !props.DisablePrintableCheck { - printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(genericWrappedHandler, nil) + wrappedHandler = cleanhttp.PrintablePathCheckHandler(wrappedHandler, nil) } - return printablePathCheckHandler + // If the DisableReplicationStatusEndpoints setting is true, add a + // wrapping handler that gives each request a context value indicating + // that the replication status endpoints are disabled. + if props.ListenerConfig != nil && props.ListenerConfig.DisableReplicationStatusEndpoints { + wrappedHandler = disableReplicationStatusEndpointWrapping(wrappedHandler) + } + + // If any of the redaction settings are true, add a wrapping handler + // that gives each request a context containing those redaction settings. + if props.ListenerConfig != nil && (props.ListenerConfig.RedactAddresses || props.ListenerConfig.RedactClusterName || props.ListenerConfig.RedactVersion) { + wrappedHandler = redactionSettingsWrapping(wrappedHandler, props.ListenerConfig.RedactVersion, props.ListenerConfig.RedactAddresses, props.ListenerConfig.RedactClusterName) + } + + if props.ListenerConfig != nil && props.ListenerConfig.DisableRequestLimiter { + wrappedHandler = wrapRequestLimiterHandler(wrappedHandler, props) + } + + return wrappedHandler } type copyResponseWriter struct { @@ -271,14 +312,14 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { origBody := new(bytes.Buffer) reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody)) r.Body = reader - req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) + req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r) if err != nil || status != 0 { respondError(w, status, err) return } - if origBody != nil { - r.Body = ioutil.NopCloser(origBody) - } + + r.Body = io.NopCloser(origBody) + input := &logical.LogInput{ Request: req, } @@ -290,17 +331,16 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { cw := newCopyResponseWriter(w) h.ServeHTTP(cw, r) data := make(map[string]interface{}) - err = jsonutil.DecodeJSON(cw.body.Bytes(), &data) - if err != nil { - // best effort, ignore - } + + // Decoding the response body is best effort; the returned error is + // deliberately discarded. + jsonutil.DecodeJSON(cw.body.Bytes(), &data) + httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()} input.Response = logical.HTTPResponseToLogicalResponse(httpResp) err = core.AuditLogger().AuditResponse(r.Context(), input) if err != nil { respondError(w, status, err) } - return }) } @@ -309,23 +349,18 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler { // are performed. func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler { var maxRequestDuration time.Duration - var maxRequestSize int64 if props.ListenerConfig != nil { maxRequestDuration = props.ListenerConfig.MaxRequestDuration - maxRequestSize = props.ListenerConfig.MaxRequestSize } if maxRequestDuration == 0 { maxRequestDuration = vault.DefaultMaxRequestDuration } - if maxRequestSize == 0 { - maxRequestSize = DefaultMaxRequestSize - } - // Swallow this error since we don't want to pollute the logs and we also don't want to // return an HTTP error here.
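disableReplicationStatusEndpointWrapping and redactionSettingsWrapping are not defined in this diff; judging by the comments above, they stamp listener settings into the request context for downstream handlers to read. A hedged sketch of that technique with illustrative names (the context key and settings struct are assumptions, not Vault's types):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// redactionSettings and ctxKey are illustrative only.
type redactionSettings struct {
	Version, Addresses, ClusterName bool
}

type ctxKey struct{}

func withRedaction(next http.Handler, rs redactionSettings) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Stamp the settings into the request context once, up front.
		r = r.WithContext(context.WithValue(r.Context(), ctxKey{}, rs))
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Downstream handlers consult the context instead of the listener.
		rs, _ := r.Context().Value(ctxKey{}).(redactionSettings)
		fmt.Fprintf(w, "redact version: %v\n", rs.Version)
	})
	_ = http.ListenAndServe("127.0.0.1:8200", withRedaction(h, redactionSettings{Version: true}))
}
```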
This information is best effort. hostname, _ := os.Hostname() - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var hf func(w http.ResponseWriter, r *http.Request) + hf = func(w http.ResponseWriter, r *http.Request) { // This block needs to be here so that upon sending SIGHUP, custom response // headers are also reloaded into the handlers. var customHeaders map[string][]*logical.CustomHeader @@ -348,18 +383,14 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr // Start with the request context ctx := r.Context() var cancelFunc context.CancelFunc - // Add our timeout, but not for the monitor endpoint, as it's streaming - if strings.HasSuffix(r.URL.Path, "sys/monitor") { + // Add our timeout, but not for the monitor or events endpoints, as they are streaming + if strings.HasSuffix(r.URL.Path, "sys/monitor") || strings.Contains(r.URL.Path, "sys/events") { ctx, cancelFunc = context.WithCancel(ctx) } else { ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration) } - // if maxRequestSize < 0, no need to set context value - // Add a size limiter if desired - if maxRequestSize > 0 { - ctx = context.WithValue(ctx, "max_request_size", maxRequestSize) - } - ctx = context.WithValue(ctx, "original_request_path", r.URL.Path) + + ctx = logical.CreateContextOriginalRequestPath(ctx, r.URL.Path) r = r.WithContext(ctx) r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace)) @@ -375,18 +406,48 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr nw.Header().Set("X-Vault-Hostname", hostname) } + // Extract the namespace from the header before we modify it + ns := r.Header.Get(consts.NamespaceHeaderName) switch { case strings.HasPrefix(r.URL.Path, "/v1/"): - newR, status := adjustRequest(core, r) + // Setting the namespace in the header to be included in the error message + newR, status, err := adjustRequest(core, props.ListenerConfig, r) if status != 0 { - respondError(nw, status, nil) + respondError(nw, status, err) cancelFunc() return } r = newR case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/": - default: + // RFC 5785 + case strings.HasPrefix(r.URL.Path, "/.well-known/"): + perfStandby := core.PerfStandby() + standby, err := core.Standby() + if err != nil { + core.Logger().Warn("error resolving standby status handling .well-known path", "error", err) + } else if standby && !perfStandby { + // Standby nodes, not performance standbys, don't start plugins + // so registration can not happen, instead redirect to active + respondStandby(core, w, r.URL) + cancelFunc() + return + } else { + redir, err := core.GetWellKnownRedirect(r.Context(), r.URL.Path) + if err != nil { + core.Logger().Warn("error resolving potential API redirect", "error", err) + } else { + if redir != "" { + newReq := r.Clone(ctx) + // Save the original path for audit logging. 
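For .well-known paths, the code above re-enters the handler function hf with a cloned request whose URL.Path has been rewritten, while RequestURI keeps the original path for audit logging, avoiding an extra HTTP redirect round trip. A standalone sketch of that internal-rewrite idea, assuming a simple lookup table in place of core.GetWellKnownRedirect:

```go
package main

import (
	"fmt"
	"net/http"
)

// redirects is a hypothetical lookup table standing in for
// core.GetWellKnownRedirect.
var redirects = map[string]string{
	"/.well-known/example": "/v1/example",
}

func main() {
	var hf func(w http.ResponseWriter, r *http.Request)
	hf = func(w http.ResponseWriter, r *http.Request) {
		if target, ok := redirects[r.URL.Path]; ok {
			newReq := r.Clone(r.Context())
			newReq.RequestURI = newReq.URL.Path // keep the original path for logging
			newReq.URL.Path = target
			hf(w, newReq) // re-enter the handler; no 3xx round trip
			return
		}
		fmt.Fprintf(w, "handled %s (request URI %s)\n", r.URL.Path, r.RequestURI)
	}
	_ = http.ListenAndServe("127.0.0.1:8200", http.HandlerFunc(hf))
}
```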
+ newReq.RequestURI = newReq.URL.Path + newReq.URL.Path = redir + hf(w, newReq) + cancelFunc() + return + } + } + } respondError(nw, http.StatusNotFound, nil) cancelFunc() return @@ -430,7 +491,6 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr }() // Setting the namespace in the header to be included in the error message - ns := r.Header.Get(consts.NamespaceHeaderName) if ns != "" { nw.Header().Set(consts.NamespaceHeaderName, ns) } @@ -438,8 +498,8 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerPr h.ServeHTTP(nw, r) cancelFunc() - return - }) + } + return http.HandlerFunc(hf) } func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler { @@ -530,25 +590,9 @@ func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handle r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port) h.ServeHTTP(w, r) - return }) } -// stripPrefix is a helper to strip a prefix from the path. It will -// return false from the second return value if it the prefix doesn't exist. -func stripPrefix(prefix, path string) (string, bool) { - if !strings.HasPrefix(path, prefix) { - return "", false - } - - path = path[len(prefix):] - if path == "" { - return "", false - } - - return path, true -} - func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { header := w.Header() @@ -558,12 +602,12 @@ func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler { respondError(w, http.StatusInternalServerError, err) return } - if userHeaders != nil { - for k := range userHeaders { - v := userHeaders.Get(k) - header.Set(k, v) - } + + for k := range userHeaders { + v := userHeaders.Get(k) + header.Set(k, v) } + h.ServeHTTP(w, req) }) } @@ -575,7 +619,6 @@ func handleUI(h http.Handler) http.Handler { // here. req.URL.Path = strings.TrimSuffix(req.URL.Path, "/") h.ServeHTTP(w, req) - return }) } @@ -653,8 +696,7 @@ func handleUIStub() http.Handler { func handleUIRedirect() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - http.Redirect(w, req, "/ui/", 307) - return + http.Redirect(w, req, "/ui/", http.StatusTemporaryRedirect) }) } @@ -702,26 +744,9 @@ func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, // Limit the maximum number of bytes to MaxRequestSize to protect // against an indefinite amount of data being read. reader := r.Body - ctx := r.Context() - maxRequestSize := ctx.Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - // MaxBytesReader won't do all the internal stuff it must unless it's - // given a ResponseWriter that implements the internal http interface - // requestTooLarger. So we let it have access to the underlying - // ResponseWriter. - inw := w - if myw, ok := inw.(logical.WrappingResponseWriter); ok { - inw = myw.Wrapped() - } - reader = http.MaxBytesReader(inw, r.Body, max) - } - } + var origBody io.ReadWriter + if perfStandby { // Since we're checking PerfStandby here we key on origBody being nil // or not later, so we need to always allocate so it's non-nil @@ -742,16 +767,6 @@ func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, // // A nil map will be returned if the format is empty or invalid. 
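The deletions above stop smuggling max_request_size through the request context; the limit is instead applied once by wrapMaxRequestSizeHandler earlier in the chain. A sketch of that wrapper idea using http.MaxBytesReader, assuming a fixed limit rather than per-listener configuration:

```go
package main

import (
	"io"
	"net/http"
)

// wrapMaxRequestSize mirrors the idea behind wrapMaxRequestSizeHandler;
// the real wrapper reads its limit from the listener configuration.
func wrapMaxRequestSize(h http.Handler, max int64) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// MaxBytesReader fails the read after max bytes, so downstream
		// parsers no longer need to fish a limit out of the context.
		r.Body = http.MaxBytesReader(w, r.Body, max)
		h.ServeHTTP(w, r)
	})
}

func main() {
	echo := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
			return
		}
		w.Write(body)
	})
	_ = http.ListenAndServe("127.0.0.1:8200", wrapMaxRequestSize(echo, 1024))
}
```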
func parseFormRequest(r *http.Request) (map[string]interface{}, error) { - maxRequestSize := r.Context().Value("max_request_size") - if maxRequestSize != nil { - max, ok := maxRequestSize.(int64) - if !ok { - return nil, errors.New("could not parse max_request_size from request context") - } - if max > 0 { - r.Body = ioutil.NopCloser(io.LimitReader(r.Body, max)) - } - } if err := r.ParseForm(); err != nil { return nil, err } @@ -828,16 +843,9 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle return } path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) - switch { - case !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path): + if !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path) { handler.ServeHTTP(w, r) return - case strings.HasPrefix(path, "auth/token/create/"): - isBatch, err := core.IsBatchTokenCreationRequest(r.Context(), path) - if err == nil && isBatch { - handler.ServeHTTP(w, r) - return - } } } @@ -866,7 +874,6 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle } forwardRequest(core, w, r) - return }) } @@ -910,10 +917,8 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { return } - if header != nil { - for k, v := range header { - w.Header()[k] = v - } + for k, v := range header { + w.Header()[k] = v } w.WriteHeader(statusCode) @@ -923,8 +928,35 @@ func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { // request is a helper to perform a request and properly exit in the // case of an error. func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) { + lim := &limits.HTTPLimiter{ + Method: rawReq.Method, + PathLimited: r.PathLimited, + LookupFunc: core.GetRequestLimiter, + } + lsnr, ok := lim.Acquire(rawReq.Context()) + if !ok { + resp := &logical.Response{} + logical.RespondWithStatusCode(resp, r, http.StatusServiceUnavailable) + respondError(w, http.StatusServiceUnavailable, limits.ErrCapacity) + return resp, false, false + } + + // To guard against leaking RequestListener slots, we should ignore Limiter + // measurements on panic. OnIgnore will check to see if a RequestListener + // slot has been acquired and not released, which could happen on + // recoverable panics. + defer lsnr.OnIgnore() + resp, err := core.HandleRequest(rawReq.Context(), r) - if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) { + + // Do the limiter measurement + if err != nil { + lsnr.OnDropped() + } else { + lsnr.OnSuccess() + } + + if r.LastRemoteWAL() > 0 && !core.EntWaitUntilWALShipped(rawReq.Context(), r.LastRemoteWAL()) { if resp == nil { resp = &logical.Response{} } @@ -1017,6 +1049,15 @@ func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) { RawQuery: reqURL.RawQuery, } + // WebSockets schemas are ws or wss + if websocketPaths.HasPath(reqURL.Path) { + if finalURL.Scheme == "http" { + finalURL.Scheme = "ws" + } else { + finalURL.Scheme = "wss" + } + } + // Ensure there is a scheme, default to https if finalURL.Scheme == "" { finalURL.Scheme = "https" diff --git a/http/handler_stubs_oss.go b/http/handler_stubs_oss.go new file mode 100644 index 000000000000..f59a637eb58a --- /dev/null +++ b/http/handler_stubs_oss.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. 
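The new bookkeeping in request() above follows an acquire/measure protocol: acquire a limiter slot, guarantee cleanup with a deferred OnIgnore, then record exactly one of OnSuccess or OnDropped. A self-contained sketch of that protocol; the listener type and capacity error here are stand-ins, since limits.HTTPLimiter's internals are not part of this diff:

```go
package main

import (
	"errors"
	"fmt"
)

var errCapacity = errors.New("at capacity") // stand-in for limits.ErrCapacity

// listener mimics the measurement handle returned by Acquire.
type listener struct{ settled bool }

func (l *listener) OnSuccess() { l.settled = true }
func (l *listener) OnDropped() { l.settled = true }

// OnIgnore discards the measurement when neither outcome was recorded,
// e.g. if a recoverable panic skipped the bookkeeping below.
func (l *listener) OnIgnore() {
	if !l.settled {
		fmt.Println("slot released without a measurement")
	}
}

func handle(work func() error) error {
	lsnr := &listener{}
	defer lsnr.OnIgnore() // never leak the acquired slot

	if err := work(); err != nil {
		lsnr.OnDropped() // errors feed back into the limiter as drops
		return err
	}
	lsnr.OnSuccess()
	return nil
}

func main() {
	fmt.Println(handle(func() error { return nil }))
	fmt.Println(handle(func() error { return errCapacity }))
}
```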
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func adjustRequest(c *vault.Core, listener *configutil.Listener, r *http.Request) (*http.Request, int, error) { + return r, 0, nil +} + +func handleEntPaths(nsPath string, core *vault.Core, r *http.Request) http.Handler { + return nil +} diff --git a/http/handler_test.go b/http/handler_test.go index 49565b41e235..b27cff867951 100644 --- a/http/handler_test.go +++ b/http/handler_test.go @@ -1,6 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( + "bytes" "context" "crypto/tls" "encoding/json" @@ -11,9 +15,13 @@ import ( "net/textproto" "net/url" "reflect" + "runtime" "strings" "testing" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/stretchr/testify/require" + "github.com/go-test/deep" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/helper/namespace" @@ -110,6 +118,28 @@ func TestHandler_parseMFAHandler(t *testing.T) { } } +// TestHandler_CORS_Patch verifies that http PATCH is included in the list of +// allowed request methods +func TestHandler_CORS_Patch(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + corsConfig := core.CORSConfig() + err := corsConfig.Enable(context.Background(), []string{addr}, nil) + require.NoError(t, err) + req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil) + require.NoError(t, err) + + req.Header.Set("Origin", addr) + req.Header.Set("Access-Control-Request-Method", http.MethodPatch) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) +} + func TestHandler_cors(t *testing.T) { core, _, _ := vault.TestCoreUnsealed(t) ln, addr := TestServer(t, core) @@ -400,6 +430,7 @@ func TestSysMounts_headerAuth(t *testing.T) { "lease_duration": json.Number("0"), "wrap_info": nil, "warnings": nil, + "mount_type": "system", "auth": nil, "data": map[string]interface{}{ "secret/": map[string]interface{}{ @@ -588,8 +619,9 @@ func TestSysMounts_headerAuth_Wrapped(t *testing.T) { "wrap_info": map[string]interface{}{ "ttl": json.Number("60"), }, - "warnings": nil, - "auth": nil, + "warnings": nil, + "auth": nil, + "mount_type": "", } testResponseStatus(t, resp, 200) @@ -801,6 +833,7 @@ func testNonPrintable(t *testing.T, disable bool) { props := &vault.HandlerProperties{ Core: core, DisablePrintableCheck: disable, + ListenerConfig: &configutil.Listener{}, } TestServerWithListenerAndProperties(t, ln, addr, core, props) defer ln.Close() @@ -884,3 +917,59 @@ func TestHandler_Parse_Form(t *testing.T) { t.Fatal(diff) } } + +// TestHandler_MaxRequestSize verifies that a request larger than the +// MaxRequestSize fails +func TestHandler_MaxRequestSize(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + MaxRequestSize: 1024, + }, + }, + HandlerFunc: Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + _, err := client.KVv2("secret").Put(context.Background(), "foo", map[string]interface{}{ + "bar": strings.Repeat("a", 1025), + }) + + 
require.ErrorContains(t, err, "error parsing JSON") +} + +// TestHandler_MaxRequestSize_Memory sets the max request size to 1024 bytes, +// and creates a 1MB request. The test verifies that less than 1MB of memory is +// allocated when the request is sent. This test shouldn't be run in parallel, +// because it modifies GOMAXPROCS +func TestHandler_MaxRequestSize_Memory(t *testing.T) { + ln, addr := TestListener(t) + core, _, token := vault.TestCoreUnsealed(t) + TestServerWithListenerAndProperties(t, ln, addr, core, &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + Address: addr, + MaxRequestSize: 1024, + }, + }) + defer ln.Close() + + data := bytes.Repeat([]byte{0x1}, 1024*1024) + + req, err := http.NewRequest("POST", addr+"/v1/sys/unseal", bytes.NewReader(data)) + require.NoError(t, err) + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + var start, end runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&start) + client.Do(req) + runtime.ReadMemStats(&end) + require.Less(t, end.TotalAlloc-start.TotalAlloc, uint64(1024*1024)) +} diff --git a/http/help.go b/http/help.go index 7ec6fb6131aa..e4d03b261c9f 100644 --- a/http/help.go +++ b/http/help.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/help_test.go b/http/help_test.go index ec9a67dd1c58..5fa96e50ddad 100644 --- a/http/help_test.go +++ b/http/help_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/http_test.go b/http/http_test.go index 692aef0d8287..addd423b6181 100644 --- a/http/http_test.go +++ b/http/http_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
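TestHandler_MaxRequestSize_Memory earlier pins GOMAXPROCS to one, forces a GC, and diffs runtime.MemStats.TotalAlloc around the request to bound allocations. The same measurement technique in isolation, with a plain buffer standing in for the request:

```go
package main

import (
	"fmt"
	"runtime"
)

var sink []byte // package-level sink so the allocation escapes to the heap

func main() {
	// Pin to one P so allocations from other goroutines don't pollute the
	// numbers, restoring the old value when done.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	var start, end runtime.MemStats
	runtime.GC() // settle the heap so the TotalAlloc delta is meaningful
	runtime.ReadMemStats(&start)

	sink = make([]byte, 1<<20) // the allocation under test

	runtime.ReadMemStats(&end)
	fmt.Printf("allocated ~%d bytes (len %d)\n", end.TotalAlloc-start.TotalAlloc, len(sink))
}
```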
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -11,7 +14,7 @@ import ( "testing" "time" - cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" ) @@ -22,41 +25,61 @@ func testHttpGet(t *testing.T, token string, addr string) *http.Response { loggedToken = "" } t.Logf("Token is %s", loggedToken) - return testHttpData(t, "GET", token, addr, nil, false, 0) + return testHttpData(t, "GET", token, addr, "", nil, false, 0, false) } func testHttpDelete(t *testing.T, token string, addr string) *http.Response { - return testHttpData(t, "DELETE", token, addr, nil, false, 0) + return testHttpData(t, "DELETE", token, addr, "", nil, false, 0, false) } // Go 1.8+ clients redirect automatically which breaks our 307 standby testing func testHttpDeleteDisableRedirect(t *testing.T, token string, addr string) *http.Response { - return testHttpData(t, "DELETE", token, addr, nil, true, 0) + return testHttpData(t, "DELETE", token, addr, "", nil, true, 0, false) } func testHttpPostWrapped(t *testing.T, token string, addr string, body interface{}, wrapTTL time.Duration) *http.Response { - return testHttpData(t, "POST", token, addr, body, false, wrapTTL) + return testHttpData(t, "POST", token, addr, "", body, false, wrapTTL, false) } func testHttpPost(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "POST", token, addr, body, false, 0) + return testHttpData(t, "POST", token, addr, "", body, false, 0, false) +} + +func testHttpPostBinaryData(t *testing.T, token string, addr string, body interface{}) *http.Response { + return testHttpData(t, "POST", token, addr, "", body, false, 0, true) +} + +func testHttpPostNamespace(t *testing.T, token string, addr string, namespace string, body interface{}) *http.Response { + return testHttpData(t, "POST", token, addr, namespace, body, false, 0, false) } func testHttpPut(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "PUT", token, addr, body, false, 0) + return testHttpData(t, "PUT", token, addr, "", body, false, 0, false) +} + +func testHttpPutBinaryData(t *testing.T, token string, addr string, body interface{}) *http.Response { + return testHttpData(t, "PUT", token, addr, "", body, false, 0, true) } // Go 1.8+ clients redirect automatically which breaks our 307 standby testing func testHttpPutDisableRedirect(t *testing.T, token string, addr string, body interface{}) *http.Response { - return testHttpData(t, "PUT", token, addr, body, true, 0) + return testHttpData(t, "PUT", token, addr, "", body, true, 0, false) } -func testHttpData(t *testing.T, method string, token string, addr string, body interface{}, disableRedirect bool, wrapTTL time.Duration) *http.Response { +func testHttpData(t *testing.T, method string, token string, addr string, namespace string, body interface{}, disableRedirect bool, wrapTTL time.Duration, binaryBody bool) *http.Response { bodyReader := new(bytes.Buffer) if body != nil { - enc := json.NewEncoder(bodyReader) - if err := enc.Encode(body); err != nil { - t.Fatalf("err:%s", err) + if binaryBody { + bodyAsBytes, ok := body.([]byte) + if !ok { + t.Fatalf("binary body was true, but body was not a []byte was %T", body) + } + bodyReader = bytes.NewBuffer(bodyAsBytes) + } else { + enc := json.NewEncoder(bodyReader) + if err := enc.Encode(body); err != nil { + t.Fatalf("err:%s", err) + } } } @@ -75,6 +98,9 @@ func 
testHttpData(t *testing.T, method string, token string, addr string, body i if wrapTTL > 0 { req.Header.Set("X-Vault-Wrap-TTL", wrapTTL.String()) } + if namespace != "" { + req.Header.Set("X-Vault-Namespace", namespace) + } if len(token) != 0 { req.Header.Set(consts.AuthHeaderName, token) diff --git a/http/logical.go b/http/logical.go index 6cdf6bb07110..cf80df2b0f2c 100644 --- a/http/logical.go +++ b/http/logical.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( "bufio" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "mime" @@ -13,8 +17,9 @@ import ( "strings" "time" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/limits" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -41,7 +46,7 @@ func (b *bufferedReader) Close() error { const MergePatchContentTypeHeader = "application/merge-patch+json" -func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { +func buildLogicalRequestNoAuth(perfStandby bool, ra *vault.RouterAccess, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { ns, err := namespace.FromContext(r.Context()) if err != nil { return nil, nil, http.StatusBadRequest, nil @@ -106,7 +111,9 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. // is der encoded) we don't want to parse it. Instead, we will simply // add the HTTP request to the logical request object for later consumption. contentType := r.Header.Get("Content-Type") - if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" || isOcspRequest(contentType) { + + if (ra != nil && ra.IsBinaryPath(r.Context(), path)) || + path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" { passHTTPReq = true origBody = r.Body } else { @@ -179,12 +186,19 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. } data = parseQuery(r.URL.Query()) - - case "OPTIONS", "HEAD": + case "HEAD": + op = logical.HeaderOperation + data = parseQuery(r.URL.Query()) + case "OPTIONS": default: return nil, nil, http.StatusMethodNotAllowed, nil } + // RFC 5785 Redirect, keep the request for auditing purposes + if r.URL.Path != r.RequestURI { + passHTTPReq = true + } + requestId, err := uuid.GenerateUUID() if err != nil { return nil, nil, http.StatusInternalServerError, fmt.Errorf("failed to generate identifier for the request: %w", err) @@ -199,6 +213,10 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. Headers: r.Header, } + if ra != nil && ra.IsLimitedPath(r.Context(), path) { + req.PathLimited = true + } + if passHTTPReq { req.HTTPRequest = r } @@ -209,15 +227,6 @@ func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http. 
return req, origBody, 0, nil } -func isOcspRequest(contentType string) bool { - contentType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return false - } - - return contentType == "application/ocsp-request" -} - func buildLogicalPath(r *http.Request) (string, int, error) { ns, err := namespace.FromContext(r.Context()) if err != nil { @@ -257,11 +266,12 @@ func buildLogicalPath(r *http.Request) (string, int, error) { return path, 0, nil } -func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { - req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) +func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request, chrootNamespace string) (*logical.Request, io.ReadCloser, int, error) { + req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r) if err != nil || status != 0 { return nil, nil, status, err } + req.ChrootNamespace = chrootNamespace req.SetRequiredState(r.Header.Values(VaultIndexHeaderName)) requestAuth(r, req) @@ -290,27 +300,27 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques // - Perf standby and token with limited use count. // - Perf standby and token re-validation needed (e.g. due to invalid token). // - Perf standby and control group error. -func handleLogical(core *vault.Core) http.Handler { - return handleLogicalInternal(core, false, false) +func handleLogical(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, false, false, chrootNamespace) } // handleLogicalWithInjector returns a handler for processing logical requests // that also have their logical response data injected into the top-level payload. // All forwarding behavior remains the same as `handleLogical`. -func handleLogicalWithInjector(core *vault.Core) http.Handler { - return handleLogicalInternal(core, true, false) +func handleLogicalWithInjector(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, true, false, chrootNamespace) } // handleLogicalNoForward returns a handler for processing logical local-only // requests. These types of requests are never forwarded, and return a // `vault.ErrCannotForwardLocalOnly` error if forwarding is attempted. -func handleLogicalNoForward(core *vault.Core) http.Handler { - return handleLogicalInternal(core, false, true) +func handleLogicalNoForward(core *vault.Core, chrootNamespace string) http.Handler { + return handleLogicalInternal(core, false, true, chrootNamespace) } func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req, _, statusCode, err := buildLogicalRequestNoAuth(false, w, r) + req, _, statusCode, err := buildLogicalRequestNoAuth(false, nil, w, r) if err != nil || statusCode != 0 { respondError(w, statusCode, err) return @@ -338,14 +348,37 @@ func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Han // handleLogicalInternal is a common helper that returns a handler for // processing logical requests. The behavior depends on the various boolean // toggles. Refer to the wrapper functions above for the possible behaviors.
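With buildLogicalRequestNoAuth now consulting RouterAccess.IsBinaryPath (above), a plugin can opt a path out of JSON parsing and receive the raw HTTP request instead, as the TestLogical_BinaryPath test later in this diff exercises via PathsSpecial. A hedged sketch of such a backend; the package, path name, and handler are illustrative:

```go
package mybackend

import (
	"context"
	"io"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// Backend registers an "upload" path and marks it binary, so the handler
// receives the untouched HTTP request instead of pre-parsed JSON data.
func Backend() *framework.Backend {
	b := &framework.Backend{BackendType: logical.TypeLogical}
	b.Paths = []*framework.Path{{
		Pattern: "upload",
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.UpdateOperation: &framework.PathOperation{Callback: handleUpload},
		},
	}}
	b.PathsSpecial = &logical.Paths{Binary: []string{"upload"}}
	return b
}

func handleUpload(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
	// For binary paths, req.HTTPRequest carries the raw body.
	blob, err := io.ReadAll(req.HTTPRequest.Body)
	if err != nil {
		return nil, err
	}
	return &logical.Response{Data: map[string]interface{}{"bytes": len(blob)}}, nil
}
```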
-func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool) http.Handler { +func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool, chrootNamespace string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req, origBody, statusCode, err := buildLogicalRequest(core, w, r) + req, origBody, statusCode, err := buildLogicalRequest(core, w, r, chrootNamespace) if err != nil || statusCode != 0 { respondError(w, statusCode, err) return } + // Websockets need to be handled at HTTP layer instead of logical requests. + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + nsPath := ns.Path + if ns.ID == namespace.RootNamespaceID { + nsPath = "" + } + if strings.HasPrefix(r.URL.Path, fmt.Sprintf("/v1/%ssys/events/subscribe/", nsPath)) { + handler := entHandleEventsSubscribe(core, req) + if handler != nil { + handler.ServeHTTP(w, r) + return + } + } + handler := handleEntPaths(nsPath, core, r) + if handler != nil { + handler.ServeHTTP(w, r) + return + } + // Make the internal request. We attach the connection info // as well in case this is an authentication request that requires // it. Vault core handles stripping this if we need to. This also @@ -353,6 +386,9 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForw // success. resp, ok, needsForward := request(core, w, r, req) switch { + case errors.Is(resp.Error(), limits.ErrCapacity): + respondError(w, http.StatusServiceUnavailable, limits.ErrCapacity) + return case needsForward && noForward: respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly) return @@ -425,7 +461,7 @@ func respondLogical(core *vault.Core, w http.ResponseWriter, r *http.Request, re } } - adjustResponse(core, w, req) + entAdjustResponse(core, w, req) // Respond respondOk(w, ret) diff --git a/http/logical_test.go b/http/logical_test.go index fc6fc765811e..bd90ac4ea266 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -8,26 +11,28 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "os" "reflect" "strconv" "strings" "testing" "time" + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" kv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/api" auditFile "github.com/hashicorp/vault/builtin/audit/file" credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/go-test/deep" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/vault" @@ -59,9 +64,10 @@ func TestLogical(t *testing.T) { "data": map[string]interface{}{ "data": "bar", }, - "auth": nil, - "wrap_info": nil, - "warnings": nilWarnings, + "auth": nil, + "wrap_info": nil, + "warnings": nilWarnings, + "mount_type": "kv", } testResponseStatus(t, resp, 200) testResponseBody(t, resp, &actual) @@ -176,9 +182,10 @@ func TestLogical_StandbyRedirect(t *testing.T) { "entity_id": "", "type": "service", }, - "warnings": nilWarnings, - "wrap_info": nil, - "auth": nil, + "warnings": nilWarnings, + "wrap_info": nil, + "auth": nil, + "mount_type": "token", } testResponseStatus(t, resp, 200) @@ -217,6 +224,7 @@ func TestLogical_CreateToken(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "data": nil, + "mount_type": "token", "wrap_info": nil, "auth": map[string]interface{}{ "policies": []interface{}{"root"}, @@ -315,7 +323,7 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err := buildLogicalRequest(core, nil, req) + lreq, _, status, err := buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -330,7 +338,7 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err = buildLogicalRequest(core, nil, req) + lreq, _, status, err = buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -345,12 +353,12 @@ func TestLogical_ListSuffix(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - _, _, status, err = buildLogicalRequestNoAuth(core.PerfStandby(), nil, req) + _, _, status, err = buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), nil, req) if err != nil || status != 0 { t.Fatal(err) } - lreq, _, status, err = buildLogicalRequest(core, nil, req) + lreq, _, status, err = buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -362,6 +370,94 @@ func TestLogical_ListSuffix(t *testing.T) { } } +// TestLogical_BinaryPath tests the legacy behavior passing in binary data to a +// path that isn't explicitly marked by a plugin as a binary path to fail, along +// with making sure we pass through when marked as a binary path +func TestLogical_BinaryPath(t *testing.T) { + t.Parallel() + + testHandler := func(ctx context.Context, l *logical.Request, data *framework.FieldData) (*logical.Response, error) 
{ + return nil, nil + } + operations := map[logical.Operation]framework.OperationHandler{ + logical.PatchOperation: &framework.PathOperation{Callback: testHandler}, + logical.UpdateOperation: &framework.PathOperation{Callback: testHandler}, + } + + conf := &vault.CoreConfig{ + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + LogicalBackends: map[string]logical.Factory{ + "bintest": func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + b := new(framework.Backend) + b.BackendType = logical.TypeLogical + b.Paths = []*framework.Path{ + {Pattern: "binary", Operations: operations}, + {Pattern: "binary/" + framework.MatchAllRegex("test"), Operations: operations}, + } + b.PathsSpecial = &logical.Paths{Binary: []string{"binary", "binary/*"}} + err := b.Setup(ctx, config) + return b, err + }, + }, + } + + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + mountReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: token, + Path: "sys/mounts/bintest", + Data: map[string]interface{}{ + "type": "bintest", + }, + } + mountResp, err := core.HandleRequest(namespace.RootContext(nil), mountReq) + if err != nil { + t.Fatalf("failed mounting bin-test engine: %v", err) + } + if mountResp.IsError() { + t.Fatalf("failed mounting bin-test error in response: %v", mountResp.Error()) + } + + tests := []struct { + name string + op string + url string + expectedReturn int + }{ + {name: "PUT non-binary", op: "PUT", url: addr + "/v1/bintest/non-binary", expectedReturn: http.StatusBadRequest}, + {name: "POST non-binary", op: "POST", url: addr + "/v1/bintest/non-binary", expectedReturn: http.StatusBadRequest}, + {name: "PUT binary", op: "PUT", url: addr + "/v1/bintest/binary", expectedReturn: http.StatusNoContent}, + {name: "POST binary", op: "POST", url: addr + "/v1/bintest/binary/sub-path", expectedReturn: http.StatusNoContent}, + } + for _, test := range tests { + t.Run(test.name, func(st *testing.T) { + var resp *http.Response + switch test.op { + case "PUT": + resp = testHttpPutBinaryData(st, token, test.url, make([]byte, 100)) + case "POST": + resp = testHttpPostBinaryData(st, token, test.url, make([]byte, 100)) + default: + t.Fatalf("unsupported operation: %s", test.op) + } + testResponseStatus(st, resp, test.expectedReturn) + if test.expectedReturn != http.StatusNoContent { + all, err := io.ReadAll(resp.Body) + if err != nil { + st.Fatalf("failed reading error response body: %v", err) + } + if !strings.Contains(string(all), "error parsing JSON") { + st.Fatalf("error response body did not contain expected error: %v", all) + } + } + }) + } +} + func TestLogical_ListWithQueryParameters(t *testing.T) { core, _, rootToken := vault.TestCoreUnsealed(t) @@ -421,7 +517,7 @@ func TestLogical_ListWithQueryParameters(t *testing.T) { req = req.WithContext(namespace.RootContext(nil)) req.Header.Add(consts.AuthHeaderName, rootToken) - lreq, _, status, err := buildLogicalRequest(core, nil, req) + lreq, _, status, err := buildLogicalRequest(core, nil, req, "") if err != nil { t.Fatal(err) } @@ -460,12 +556,12 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { t.Fatalf("Bad Status code: %d", w.Code) } - bodyRaw, err := ioutil.ReadAll(w.Body) + bodyRaw, err := io.ReadAll(w.Body) if err != nil { t.Fatal(err) } - expected := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,"data":{"test-data":"foo"},"wrap_info":null,"warnings":null,"auth":null}` + 
expected := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,"data":{"test-data":"foo"},"wrap_info":null,"warnings":null,"auth":null,"mount_type":""}` if string(bodyRaw[:]) != strings.Trim(expected, "\n") { t.Fatalf("bad response: %s", string(bodyRaw[:])) @@ -474,13 +570,10 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { func TestLogical_Audit_invalidWrappingToken(t *testing.T) { // Create a noop audit backend - var noop *vault.NoopAudit + noop := corehelpers.TestNoopAudit(t, "noop/", nil) c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ AuditBackends: map[string]audit.Factory{ - "noop": func(ctx context.Context, config *audit.BackendConfig) (audit.Backend, error) { - noop = &vault.NoopAudit{ - Config: config, - } + "noop": func(ctx context.Context, config *audit.BackendConfig, _ audit.HeaderFormatter) (audit.Backend, error) { return noop, nil }, }, @@ -489,7 +582,6 @@ func TestLogical_Audit_invalidWrappingToken(t *testing.T) { defer ln.Close() // Enable the audit backend - resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{ "type": "noop", }) @@ -636,7 +728,7 @@ func TestLogical_AuditPort(t *testing.T) { // workaround kv-v2 initialization upgrade errors numFailures := 0 - vault.RetryUntil(t, 10*time.Second, func() error { + corehelpers.RetryUntil(t, 10*time.Second, func() error { resp, err := c.Logical().Write("kv/data/foo", writeData) if err != nil { if strings.Contains(err.Error(), "Upgrading from non-versioned to versioned data") { @@ -759,3 +851,180 @@ func TestLogical_ErrRelativePath(t *testing.T) { t.Errorf("expected response for write to include %q", logical.ErrRelativePath.Error()) } } + +func testBuiltinPluginMetadataAuditLog(t *testing.T, log map[string]interface{}, expectedMountClass string) { + if mountClass, ok := log["mount_class"].(string); !ok { + t.Fatalf("mount_class should be a string, not %T", log["mount_class"]) + } else if mountClass != expectedMountClass { + t.Fatalf("bad: mount_class should be %s, not %s", expectedMountClass, mountClass) + } + + if _, ok := log["mount_running_version"].(string); !ok { + t.Fatalf("mount_running_version should be a string, not %T", log["mount_running_version"]) + } + + if _, ok := log["mount_running_sha256"].(string); ok { + t.Fatalf("mount_running_sha256 should be nil, not %T", log["mount_running_sha256"]) + } + + if mountIsExternalPlugin, ok := log["mount_is_external_plugin"].(bool); ok && mountIsExternalPlugin { + t.Fatalf("mount_is_external_plugin should be nil or false, not %T", log["mount_is_external_plugin"]) + } +} + +// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth tests that we have plugin metadata of a builtin auth plugin +// in audit log when it is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth(t *testing.T) { + coreConfig := &vault.CoreConfig{ + AuditBackends: map[string]audit.Factory{ + "file": auditFile.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + if err != nil { + 
t.Fatal(err) + } + + _, err = c.Logical().Write("auth/token/create", map[string]interface{}{ + "ttl": "10s", + }) + if err != nil { + t.Fatal(err) + } + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + var auditRecord map[string]interface{} + for decoder.Decode(&auditRecord) == nil { + auditRequest := map[string]interface{}{} + if req, ok := auditRecord["request"]; ok { + auditRequest = req.(map[string]interface{}) + if auditRequest["path"] != "auth/token/create" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeCredential.String()) + + auditResponse := map[string]interface{}{} + if resp, ok := auditRecord["response"]; ok { + auditResponse = resp.(map[string]interface{}) + } else { + continue + } + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeCredential.String()) + } +} + +// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret tests that we have plugin metadata of a builtin secret plugin +// in audit log when it is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + AuditBackends: map[string]audit.Factory{ + "file": auditFile.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + if err := c.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + if err != nil { + t.Fatal(err) + } + + { + writeData := map[string]interface{}{ + "data": map[string]interface{}{ + "bar": "a", + }, + } + corehelpers.RetryUntil(t, 10*time.Second, func() error { + resp, err := c.Logical().Write("kv/data/foo", writeData) + if err != nil { + t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) + } + return nil + }) + } + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + var auditRecord map[string]interface{} + for decoder.Decode(&auditRecord) == nil { + auditRequest := map[string]interface{}{} + if req, ok := auditRecord["request"]; ok { + auditRequest = req.(map[string]interface{}) + if auditRequest["path"] != "kv/data/foo" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeSecrets.String()) + + auditResponse := map[string]interface{}{} + if resp, ok := auditRecord["response"]; ok { + auditResponse = resp.(map[string]interface{}) + } else { + continue + } + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeSecrets.String()) + } +} diff --git a/http/options.go b/http/options.go new file mode 100644 index 000000000000..b1200c018e72 --- /dev/null +++ b/http/options.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc.
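The new http/options.go below builds listener-handler configuration from functional options. Since getOpts applies options in argument order, a repeated option's last value wins; a usage sketch as it might appear in a test in this package (test name and assertions are illustrative):

```go
package http

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestGetOpts_LastWriteWins sketches typical usage of the option helpers
// defined below.
func TestGetOpts_LastWriteWins(t *testing.T) {
	opts, err := getOpts(
		WithRedactionValue("<redacted>"), // overrides the default ""
		WithRedactAddresses(true),
		WithRedactAddresses(false), // applied later, so it wins
	)
	require.NoError(t, err)
	require.Equal(t, "<redacted>", opts.withRedactionValue)
	require.False(t, opts.withRedactAddresses)
}
```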
+// SPDX-License-Identifier: BUSL-1.1 + +package http + +// ListenerConfigOption is how listenerConfigOptions are passed as arguments. +type ListenerConfigOption func(*listenerConfigOptions) error + +// listenerConfigOptions are used to represent configuration of listeners for http handlers. +type listenerConfigOptions struct { + withRedactionValue string + withRedactAddresses bool + withRedactClusterName bool + withRedactVersion bool +} + +// getDefaultOptions returns listenerConfigOptions with their default values. +func getDefaultOptions() listenerConfigOptions { + return listenerConfigOptions{ + withRedactionValue: "", // Redacted values will be set to an empty string by default. + } +} + +// getOpts applies each supplied ListenerConfigOption and returns the fully configured listenerConfigOptions. +// Each ListenerConfigOption is applied in the order it appears in the argument list, so it is +// possible to supply the same ListenerConfigOption numerous times and the 'last write wins'. +func getOpts(opt ...ListenerConfigOption) (listenerConfigOptions, error) { + opts := getDefaultOptions() + for _, o := range opt { + if o == nil { + continue + } + if err := o(&opts); err != nil { + return listenerConfigOptions{}, err + } + } + return opts, nil +} + +// WithRedactionValue provides a ListenerConfigOption to represent the value used to redact +// values which require redaction. +func WithRedactionValue(r string) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactionValue = r + return nil + } +} + +// WithRedactAddresses provides a ListenerConfigOption to represent whether redaction of addresses is required. +func WithRedactAddresses(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactAddresses = r + return nil + } +} + +// WithRedactClusterName provides a ListenerConfigOption to represent whether redaction of cluster names is required. +func WithRedactClusterName(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactClusterName = r + return nil + } +} + +// WithRedactVersion provides a ListenerConfigOption to represent whether redaction of version is required. +func WithRedactVersion(r bool) ListenerConfigOption { + return func(o *listenerConfigOptions) error { + o.withRedactVersion = r + return nil + } +} diff --git a/http/options_test.go b/http/options_test.go new file mode 100644 index 000000000000..5d52a6e42dc8 --- /dev/null +++ b/http/options_test.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package http + +import ( + "net/http" + "strings" + "testing" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/version" + "github.com/stretchr/testify/require" +) + +// TestOptions_Default ensures that the default values are as expected. +func TestOptions_Default(t *testing.T) { + opts := getDefaultOptions() + require.NotNil(t, opts) + require.Equal(t, "", opts.withRedactionValue) +} + +// TestOptions_WithRedactionValue ensures that we set the correct value to use for +// redaction when required.
+func TestOptions_WithRedactionValue(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value string + ExpectedValue string + IsErrorExpected bool + }{ + "empty": { + Value: "", + ExpectedValue: "", + IsErrorExpected: false, + }, + "whitespace": { + Value: " ", + ExpectedValue: " ", + IsErrorExpected: false, + }, + "value": { + Value: "*****", + ExpectedValue: "*****", + IsErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactionValue(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactionValue) + } + }) + } +} + +// TestOptions_WithRedactAddresses ensures that the option works as intended. +func TestOptions_WithRedactAddresses(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactAddresses(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactAddresses) + }) + } +} + +// TestOptions_WithRedactClusterName ensures that the option works as intended. +func TestOptions_WithRedactClusterName(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactClusterName(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactClusterName) + }) + } +} + +// TestOptions_WithRedactVersion ensures that the option works as intended. +func TestOptions_WithRedactVersion(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Value bool + ExpectedValue bool + }{ + "true": { + Value: true, + ExpectedValue: true, + }, + "false": { + Value: false, + ExpectedValue: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &listenerConfigOptions{} + applyOption := WithRedactVersion(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withRedactVersion) + }) + } +} + +// TestRedactVersionListener tests that the version will be redacted +// from e.g. sys/health and the OpenAPI response if `redact_version` +// is set on the listener. 
+func TestRedactVersionListener(t *testing.T) { + conf := &vault.CoreConfig{ + EnableUI: false, + EnableRaw: true, + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + + // Set up a listener without redaction + ln, addr := TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + RedactVersion: false, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + testRedactVersionEndpoints(t, addr, token, version.Version) + + // Set up a listener with redaction + ln, addr = TestListener(t) + props.ListenerConfig.RedactVersion = true + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + testRedactVersionEndpoints(t, addr, token, "") +} + +// testRedactVersionEndpoints tests that the endpoints which report a version +// contain the expected version +func testRedactVersionEndpoints(t *testing.T, addr, token, expectedVersion string) { + client := cleanhttp.DefaultClient() + req, err := http.NewRequest("GET", addr+"/v1/auth/token?help=1", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, token) + resp, err := client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["openapi"]) + openAPI, ok := actual["openapi"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, openAPI["info"]) + info, ok := openAPI["info"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, info["version"]) + version, ok := info["version"].(string) + require.True(t, ok) + require.Equal(t, expectedVersion, version) + + req, err = http.NewRequest("GET", addr+"/v1/sys/internal/specs/openapi", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, "") + resp, err = client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["info"]) + info, ok = actual["info"].(map[string]interface{}) + require.True(t, ok) + + require.NotNil(t, info["version"]) + version, ok = info["version"].(string) + require.True(t, ok) + require.Equal(t, expectedVersion, version) + + req, err = http.NewRequest("GET", addr+"/v1/sys/health", nil) + require.NoError(t, err) + + req.Header.Set(consts.AuthHeaderName, "") + resp, err = client.Do(req) + require.NoError(t, err) + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + require.NotNil(t, actual["version"]) + version, ok = actual["version"].(string) + require.True(t, ok) + + // sys/health is special and uses a different format from the OpenAPI response: + // version.GetVersion().VersionNumber() instead of version.Version. + // We use a prefix check to make sure the comparison works either way. + // In practice, version.GetVersion().VersionNumber() will give something like 1.17.0-beta1 + // and version.Version gives something like 1.17.0 + require.Truef(t, strings.HasPrefix(version, expectedVersion), "version was not as expected, version=%s, expectedVersion=%s", + version, expectedVersion) +} diff --git a/http/plugin_test.go b/http/plugin_test.go index 164a3d25f664..b215a6b1c6bc 100644 --- a/http/plugin_test.go +++ b/http/plugin_test.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( "encoding/json" + "fmt" "io/ioutil" "os" "reflect" @@ -12,6 +16,7 @@ import ( "github.com/hashicorp/vault/api" bplugin "github.com/hashicorp/vault/builtin/plugin" "github.com/hashicorp/vault/helper/benchhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" @@ -22,7 +27,8 @@ import ( "github.com/hashicorp/vault/vault" ) -func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { +func getPluginClusterAndCore(t *testing.T, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { + t.Helper() inm, err := inmem.NewTransactionalInmem(nil, logger) if err != nil { t.Fatal(err) @@ -32,27 +38,27 @@ func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluste t.Fatal(err) } + pluginDir := corehelpers.MakeTestPluginDir(t) coreConfig := &vault.CoreConfig{ Physical: inm, HAPhysical: inmha.(physical.HABackend), LogicalBackends: map[string]logical.Factory{ "plugin": bplugin.Factory, }, + PluginDirectory: pluginDir, } cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), coreConfig, &vault.TestClusterOptions{ HandlerFunc: Handler, - Logger: logger.Named("testclusteroptions"), }) cluster.Start() cores := cluster.Cores core := cores[0] - os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) - vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core) - vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", []string{}, "") + vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", + []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}) // Mount the mock plugin err = core.Client.Sys().Mount("mock", &api.MountInput{ diff --git a/http/sys_audit_test.go b/http/sys_audit_test.go index 58873bfb12aa..d620a291e775 100644 --- a/http/sys_audit_test.go +++ b/http/sys_audit_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -29,6 +32,7 @@ func TestSysAudit(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "noop/": map[string]interface{}{ "path": "noop/", @@ -80,6 +84,7 @@ func TestSysDisableAudit(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{}, } @@ -114,6 +119,7 @@ func TestSysAuditHash(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "wrap_info": nil, + "mount_type": "system", "warnings": nil, "auth": nil, "data": map[string]interface{}{ diff --git a/http/sys_auth_test.go b/http/sys_auth_test.go index 2d1fdf8144cb..fe6c4e27390f 100644 --- a/http/sys_auth_test.go +++ b/http/sys_auth_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -8,6 +11,7 @@ import ( "time" "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" @@ -27,6 +31,7 @@ func TestSysAuth(t *testing.T) { "renewable": false, "lease_duration": json.Number("0"), "wrap_info": nil, + "mount_type": "system", "warnings": nil, "auth": nil, "data": map[string]interface{}{ @@ -111,6 +116,7 @@ func TestSysEnableAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -232,6 +238,7 @@ func TestSysDisableAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "token/": map[string]interface{}{ "config": map[string]interface{}{ @@ -322,6 +329,7 @@ func TestSysTuneAuth_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -367,6 +375,7 @@ func TestSysTuneAuth_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -405,6 +414,7 @@ func TestSysTuneAuth_showUIMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -443,6 +453,7 @@ func TestSysTuneAuth_showUIMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "token based credentials", "default_lease_ttl": json.Number("2764800"), @@ -485,7 +496,7 @@ func TestSysRemountAuth(t *testing.T) { // Poll until the remount succeeds var remountResp map[string]interface{} testResponseBody(t, resp, &remountResp) - vault.RetryUntil(t, 5*time.Second, func() error { + corehelpers.RetryUntil(t, 5*time.Second, func() error { resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string)) testResponseStatus(t, resp, 200) @@ -509,6 +520,7 @@ func TestSysRemountAuth(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "bar/": map[string]interface{}{ "description": "foo", diff --git a/http/sys_config_cors_test.go b/http/sys_config_cors_test.go index 3ad0e810a2c5..3f69888f81b1 100644 --- a/http/sys_config_cors_test.go +++ b/http/sys_config_cors_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -56,6 +59,7 @@ func TestSysConfigCors(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "enabled": true, "allowed_origins": []interface{}{addr}, diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go index 4cd2aae8b827..8081aaf642c9 100644 --- a/http/sys_config_state_test.go +++ b/http/sys_config_state_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -6,68 +9,197 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) func TestSysConfigState_Sanitized(t *testing.T) { - var resp *http.Response - - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) - - resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") - testResponseStatus(t, resp, 200) - - var actual map[string]interface{} - var expected map[string]interface{} - - configResp := map[string]interface{}{ - "api_addr": "", - "cache_size": json.Number("0"), - "cluster_addr": "", - "cluster_cipher_suites": "", - "cluster_name": "", - "default_lease_ttl": json.Number("0"), - "default_max_request_duration": json.Number("0"), - "disable_cache": false, - "disable_clustering": false, - "disable_indexing": false, - "disable_mlock": false, - "disable_performance_standby": false, - "disable_printable_check": false, - "disable_sealwrap": false, - "raw_storage_endpoint": false, - "introspection_endpoint": false, - "disable_sentinel_trace": false, - "enable_ui": false, - "log_format": "", - "log_level": "", - "max_lease_ttl": json.Number("0"), - "pid_file": "", - "plugin_directory": "", - "plugin_file_uid": json.Number("0"), - "plugin_file_permissions": json.Number("0"), - "enable_response_header_hostname": false, - "enable_response_header_raft_node_id": false, - "log_requests_level": "", + cases := []struct { + name string + storageConfig *server.Storage + haStorageConfig *server.Storage + expectedStorageOutput map[string]interface{} + expectedHAStorageOutput map[string]interface{} + }{ + { + name: "raft storage", + storageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, no HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, raft HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + 
"disable_clustering": false, + }, + expectedHAStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + }, } - expected = map[string]interface{}{ - "lease_id": "", - "renewable": false, - "lease_duration": json.Number("0"), - "wrap_info": nil, - "warnings": nil, - "auth": nil, - "data": configResp, - } + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var resp *http.Response + confRaw := &server.Config{ + Storage: tc.storageConfig, + HAStorage: tc.haStorageConfig, + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1", + }, + }, + }, + } + + conf := &vault.CoreConfig{ + RawConfig: confRaw, + } + + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + configResp := map[string]interface{}{ + "api_addr": "", + "cache_size": json.Number("0"), + "cluster_addr": "", + "cluster_cipher_suites": "", + "cluster_name": "", + "default_lease_ttl": json.Number("0"), + "default_max_request_duration": json.Number("0"), + "disable_cache": false, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": false, + "disable_performance_standby": false, + "disable_printable_check": false, + "disable_sealwrap": false, + "experiments": nil, + "raw_storage_endpoint": false, + "detect_deadlocks": "", + "introspection_endpoint": false, + "disable_sentinel_trace": false, + "enable_ui": false, + "log_format": "", + "log_level": "", + "max_lease_ttl": json.Number("0"), + "pid_file": "", + "plugin_directory": "", + "plugin_tmpdir": "", + "plugin_file_uid": json.Number("0"), + "plugin_file_permissions": json.Number("0"), + "enable_response_header_hostname": false, + "enable_response_header_raft_node_id": false, + "log_requests_level": "", + "listeners": []interface{}{ + map[string]interface{}{ + "config": nil, + "type": "tcp", + }, + }, + "storage": tc.expectedStorageOutput, + "administrative_namespace_path": "", + "imprecise_lease_role_tracking": false, + } + + if tc.expectedHAStorageOutput != nil { + configResp["ha_storage"] = tc.expectedHAStorageOutput + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": configResp, + "mount_type": "system", + } - testResponseBody(t, resp, &actual) - expected["request_id"] = actual["request_id"] + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] - if diff := deep.Equal(actual, expected); len(diff) > 0 { - t.Fatalf("bad mismatch response body: diff: %v", diff) + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad mismatch response body: diff: %v", diff) + } + }) } } diff --git a/http/sys_feature_flags.go b/http/sys_feature_flags.go index 11ece32795b7..9e7244da750b 100644 --- a/http/sys_feature_flags.go +++ b/http/sys_feature_flags.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_generate_root.go b/http/sys_generate_root.go index db2da6f7f3b1..ffe0c14ec154 100644 --- a/http/sys_generate_root.go +++ b/http/sys_generate_root.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go index f226d0042b1d..8358d18a537a 100644 --- a/http/sys_generate_root_test.go +++ b/http/sys_generate_root_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -12,8 +15,10 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/helper/xor" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -224,9 +229,11 @@ func enableNoopAudit(t *testing.T, token string, core *vault.Core) { func testCoreUnsealedWithAudit(t *testing.T, records **[][]byte) (*vault.Core, [][]byte, string) { conf := &vault.CoreConfig{ - BuiltinRegistry: vault.NewMockBuiltinRegistry(), + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + AuditBackends: map[string]audit.Factory{ + "noop": corehelpers.NoopAuditFactory(records), + }, } - vault.AddNoopAudit(conf, records) core, keys, token := vault.TestCoreUnsealedWithConfig(t, conf) return core, keys, token } diff --git a/http/sys_health.go b/http/sys_health.go index a1f2bb8bd347..11123243f9c3 100644 --- a/http/sys_health.go +++ b/http/sys_health.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -9,16 +12,17 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/version" ) -func handleSysHealth(core *vault.Core) http.Handler { +func handleSysHealth(core *vault.Core, opt ...ListenerConfigOption) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": - handleSysHealthGet(core, w, r) + handleSysHealthGet(core, w, r, opt...) case "HEAD": handleSysHealthHead(core, w, r) default: @@ -40,7 +44,7 @@ func fetchStatusCode(r *http.Request, field string) (int, bool, bool) { return statusCode, false, true } -func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { +func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) { code, body, err := getSysHealth(core, r) if err != nil { core.Logger().Error("error checking health", "error", err) @@ -53,6 +57,29 @@ func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request return } + var tokenPresent bool + token := r.Header.Get(consts.AuthHeaderName) + + if token != "" { + // We don't care about the error, we just want to know if the token exists + lock := core.HALock() + lock.Lock() + tokenEntry, err := core.LookupToken(r.Context(), token) + lock.Unlock() + tokenPresent = err == nil && tokenEntry != nil + } + opts, _ := getOpts(opt...) 
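+ // Note: the redaction below applies only to unauthenticated callers; a request + // that presented a valid token always sees the real version and cluster name.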
+ + if !tokenPresent { + if opts.withRedactVersion { + body.Version = opts.withRedactionValue + } + + if opts.withRedactClusterName { + body.ClusterName = opts.withRedactionValue + } + } + w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) @@ -191,11 +218,14 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro ReplicationDRMode: replicationState.GetDRString(), ServerTimeUTC: time.Now().UTC().Unix(), Version: version.GetVersion().VersionNumber(), + Enterprise: constants.IsEnterprise, ClusterName: clusterName, ClusterID: clusterID, + ClockSkewMillis: core.ActiveNodeClockSkewMillis(), + EchoDurationMillis: core.EchoDuration().Milliseconds(), } - licenseState, err := vault.LicenseSummary(core) + licenseState, err := core.EntGetLicenseState() if err != nil { return http.StatusInternalServerError, nil, err } @@ -211,7 +241,7 @@ func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, erro } if init && !sealed && !standby { - body.LastWAL = vault.LastWAL(core) + body.LastWAL = core.EntLastWAL() } return code, body, nil @@ -232,8 +262,11 @@ type HealthResponse struct { ReplicationDRMode string `json:"replication_dr_mode"` ServerTimeUTC int64 `json:"server_time_utc"` Version string `json:"version"` + Enterprise bool `json:"enterprise"` ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` LastWAL uint64 `json:"last_wal,omitempty"` License *HealthResponseLicense `json:"license,omitempty"` + EchoDurationMillis int64 `json:"echo_duration_ms"` + ClockSkewMillis int64 `json:"clock_skew_ms"` } diff --git a/http/sys_health_test.go b/http/sys_health_test.go index 68ef11b9e2f5..bd64ea853232 100644 --- a/http/sys_health_test.go +++ b/http/sys_health_test.go @@ -1,12 +1,18 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( "io/ioutil" "net/http" "net/url" - "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/vault" ) @@ -16,71 +22,54 @@ func TestSysHealth_get(t *testing.T) { ln, addr := TestServer(t, core) defer ln.Close() - resp, err := http.Get(addr + "/v1/sys/health") + // Test without the client first since we want to verify the response code + raw, err := http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 501) - var actual map[string]interface{} - expected := map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": false, - "sealed": true, - "standby": true, - "performance_standby": false, + // Test with the client because it's a bit easier to work with structs + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) } - testResponseStatus(t, resp, 501) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] + + resp, err := client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + + expected := &api.HealthResponse{ + Enterprise: constants.IsEnterprise, + Initialized: false, + Sealed: true, + Standby: true, + PerformanceStandby: false, + ReplicationPerformanceMode: consts.ReplicationUnknown.GetPerformanceString(), + ReplicationDRMode: consts.ReplicationUnknown.GetDRString(), } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + ignore := cmpopts.IgnoreFields(*expected, "ClusterName", "ClusterID", "ServerTimeUTC", "Version") + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } keys, _ := vault.TestCoreInit(t, core) - resp, err = http.Get(addr + "/v1/sys/health") + raw, err = http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 503) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": true, - "sealed": true, - "standby": true, - "performance_standby": false, - } - testResponseStatus(t, resp, 503) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, 
actual) + expected.Initialized = true + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } for _, key := range keys { @@ -88,37 +77,22 @@ func TestSysHealth_get(t *testing.T) { t.Fatalf("unseal err: %s", err) } } - resp, err = http.Get(addr + "/v1/sys/health") + raw, err = http.Get(addr + "/v1/sys/health") if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 200) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), - "initialized": true, - "sealed": false, - "standby": false, - "performance_standby": false, - } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Sealed = false + expected.Standby = false + expected.ReplicationPerformanceMode = consts.ReplicationPerformanceDisabled.GetPerformanceString() + expected.ReplicationDRMode = consts.ReplicationDRDisabled.GetDRString() + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } } @@ -131,73 +105,53 @@ func TestSysHealth_customcodes(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - resp, err := http.Get(queryurl.String()) + raw, err := http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 581) - var actual map[string]interface{} - expected := map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": false, - "sealed": true, - "standby": true, - "performance_standby": false, + // Test with the client because it's a bit easier to work with structs + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) } - testResponseStatus(t, resp, 581) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] + resp, err := client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + + expected := &api.HealthResponse{ + Enterprise: constants.IsEnterprise, + Initialized: false, + Sealed: true, + Standby: true, + PerformanceStandby: false, + ReplicationPerformanceMode: consts.ReplicationUnknown.GetPerformanceString(), + ReplicationDRMode: consts.ReplicationUnknown.GetDRString(), } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + ignore := cmpopts.IgnoreFields(*expected, "ClusterName", "ClusterID", "ServerTimeUTC", 
"Version") + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } keys, _ := vault.TestCoreInit(t, core) - resp, err = http.Get(queryurl.String()) + raw, err = http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 523) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), - "initialized": true, - "sealed": true, - "standby": true, - "performance_standby": false, - } - testResponseStatus(t, resp, 523) - testResponseBody(t, resp, &actual) - - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Initialized = true + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } for _, key := range keys { @@ -205,37 +159,22 @@ func TestSysHealth_customcodes(t *testing.T) { t.Fatalf("unseal err: %s", err) } } - resp, err = http.Get(queryurl.String()) + raw, err = http.Get(queryurl.String()) if err != nil { t.Fatalf("err: %s", err) } + testResponseStatus(t, raw, 202) - actual = map[string]interface{}{} - expected = map[string]interface{}{ - "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), - "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), - "initialized": true, - "sealed": false, - "standby": false, - "performance_standby": false, - } - testResponseStatus(t, resp, 202) - testResponseBody(t, resp, &actual) - expected["server_time_utc"] = actual["server_time_utc"] - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] + resp, err = client.Sys().Health() + if err != nil { + t.Fatalf("err: %s", err) } - delete(actual, "license") - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + expected.Sealed = false + expected.Standby = false + expected.ReplicationPerformanceMode = consts.ReplicationPerformanceDisabled.GetPerformanceString() + expected.ReplicationDRMode = consts.ReplicationDRDisabled.GetDRString() + if diff := cmp.Diff(resp, expected, ignore); len(diff) > 0 { + t.Fatal(diff) } } diff --git a/http/sys_hostinfo_test.go b/http/sys_hostinfo_test.go index af313a382b2b..2df641ea8403 100644 --- a/http/sys_hostinfo_test.go +++ b/http/sys_hostinfo_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_in_flight_requests.go b/http/sys_in_flight_requests.go index b38156f38ebc..a31ae2ffc2f8 100644 --- a/http/sys_in_flight_requests.go +++ b/http/sys_in_flight_requests.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_in_flight_requests_test.go b/http/sys_in_flight_requests_test.go index de64d708c68f..880a9ad61560 100644 --- a/http/sys_in_flight_requests_test.go +++ b/http/sys_in_flight_requests_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_init.go b/http/sys_init.go index ae3059462bef..fee10d972a44 100644 --- a/http/sys_init.go +++ b/http/sys_init.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -164,11 +167,11 @@ func validateInitParameters(core *vault.Core, req InitRequest) error { switch core.SealAccess().RecoveryKeySupported() { case true: if len(barrierFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierType()) + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierSealConfigType()) } default: if len(recoveryFlags) > 0 { - return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierType()) + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierSealConfigType()) } } diff --git a/http/sys_init_test.go b/http/sys_init_test.go index 4953c4244ce8..e9957f9a8f83 100644 --- a/http/sys_init_test.go +++ b/http/sys_init_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -7,9 +10,8 @@ import ( "strconv" "testing" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" @@ -150,12 +152,8 @@ func TestSysInit_Put_ValidateParams(t *testing.T) { } func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { - testSeal := seal.NewTestSeal(nil) - autoSeal, err := vault.NewAutoSeal(testSeal) - if err != nil { - t.Fatal(err) - } - autoSeal.SetType("transit") + testSeal, _ := seal.NewTestSeal(&seal.TestSealOpts{Name: "transit"}) + autoSeal := vault.NewAutoSeal(testSeal) // Create the transit server. 
conf := &vault.CoreConfig{ @@ -167,7 +165,7 @@ func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { opts := &vault.TestClusterOptions{ NumCores: 1, HandlerFunc: Handler, - Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(0)), + Logger: corehelpers.NewTestLogger(t).Named("transit-seal" + strconv.Itoa(0)), } cluster := vault.NewTestCluster(t, conf, opts) cluster.Start() @@ -188,7 +186,8 @@ func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { testResponseStatus(t, resp, http.StatusBadRequest) body := map[string][]string{} testResponseBody(t, resp, &body) - if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" { + if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" && + body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type test-auto" { t.Fatal(body) } } diff --git a/http/sys_internal_test.go b/http/sys_internal_test.go index d3c066f70c84..0be213672acd 100644 --- a/http/sys_internal_test.go +++ b/http/sys_internal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -30,6 +33,7 @@ func TestSysInternal_UIMounts(t *testing.T) { "auth": map[string]interface{}{}, "secret": map[string]interface{}{}, }, + "mount_type": "", } testResponseBody(t, resp, &actual) expected["request_id"] = actual["request_id"] @@ -59,6 +63,7 @@ func TestSysInternal_UIMounts(t *testing.T) { "auth": nil, "lease_id": "", "renewable": false, + "mount_type": "", "lease_duration": json.Number("0"), "data": map[string]interface{}{ "secret": map[string]interface{}{ diff --git a/http/sys_leader.go b/http/sys_leader.go index 8c2ce21e5001..b6e0f55e9325 100644 --- a/http/sys_leader.go +++ b/http/sys_leader.go @@ -1,29 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( "net/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) // This endpoint is needed to answer queries before Vault unseals // or becomes the leader. -func handleSysLeader(core *vault.Core) http.Handler { +func handleSysLeader(core *vault.Core, opt ...ListenerConfigOption) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": - handleSysLeaderGet(core, w, r) + handleSysLeaderGet(core, w, r, opt...) 
default: respondError(w, http.StatusMethodNotAllowed, nil) } }) } -func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { - resp, err := core.GetLeaderStatus() +func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) { + var tokenPresent bool + token := r.Header.Get(consts.AuthHeaderName) + ctx := r.Context() + + if token != "" { + // We don't care about the error, we just want to know if the token exists + lock := core.HALock() + lock.Lock() + tokenEntry, err := core.LookupToken(ctx, token) + lock.Unlock() + tokenPresent = err == nil && tokenEntry != nil + } + + if tokenPresent { + ctx = logical.CreateContextRedactionSettings(ctx, false, false, false) + } + + resp, err := core.GetLeaderStatus(ctx) if err != nil { respondError(w, http.StatusInternalServerError, err) return } + respondOk(w, resp) } diff --git a/http/sys_leader_test.go b/http/sys_leader_test.go index 974b3a7b7e40..e495e118703a 100644 --- a/http/sys_leader_test.go +++ b/http/sys_leader_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_lease_test.go b/http/sys_lease_test.go index a254be71c211..2b1025b06ad9 100644 --- a/http/sys_lease_test.go +++ b/http/sys_lease_test.go @@ -1,14 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( "testing" "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) func TestSysRenew(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, coreConfig) ln, addr := TestServer(t, core) defer ln.Close() TestServerAuth(t, addr, token) diff --git a/http/sys_metrics.go b/http/sys_metrics.go index 012417282e5f..27cb45f2560d 100644 --- a/http/sys_metrics.go +++ b/http/sys_metrics.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_metrics_test.go b/http/sys_metrics_test.go index 241caac2db8f..1198815a5533 100644 --- a/http/sys_metrics_test.go +++ b/http/sys_metrics_test.go @@ -1,9 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( "testing" "time" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/armon/go-metrics" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/internalshared/configutil" @@ -14,7 +19,7 @@ func TestSysMetricsUnauthenticated(t *testing.T) { inm := metrics.NewInmemSink(10*time.Second, time.Minute) metrics.DefaultInmemSignal(inm) conf := &vault.CoreConfig{ - BuiltinRegistry: vault.NewMockBuiltinRegistry(), + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), MetricsHelper: metricsutil.NewMetricsHelper(inm, true), } core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) diff --git a/http/sys_monitor_test.go b/http/sys_monitor_test.go index 733862ec448f..5e6b49d0d14d 100644 --- a/http/sys_monitor_test.go +++ b/http/sys_monitor_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -7,15 +10,16 @@ import ( "testing" "time" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) func TestSysMonitorUnknownLogLevel(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler}) - cluster.Start() + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, + }) defer cluster.Cleanup() client := cluster.Cores[0].Client @@ -37,8 +41,11 @@ func TestSysMonitorUnknownLogLevel(t *testing.T) { } func TestSysMonitorUnknownLogFormat(t *testing.T) { - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler}) - cluster.Start() + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, + }) defer cluster.Cleanup() client := cluster.Cores[0].Client @@ -60,64 +67,61 @@ func TestSysMonitorUnknownLogFormat(t *testing.T) { } func TestSysMonitorStreamingLogs(t *testing.T) { - logger := log.NewInterceptLogger(&log.LoggerOptions{ - Output: log.DefaultOutput, - Level: log.Debug, - JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat, + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, }) - - lf := logging.ParseEnvLogFormat().String() - - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{HandlerFunc: Handler, Logger: logger}) - cluster.Start() defer cluster.Cleanup() client := cluster.Cores[0].Client stopCh := testhelpers.GenerateDebugLogs(t, client) + defer close(stopCh) - debugCount := 0 - ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) - defer cancel() - logCh, err := client.Sys().Monitor(ctx, "DEBUG", lf) - if err != nil { - t.Fatal(err) - } + for _, lf := range []string{"standard", "json"} { + t.Run(lf, func(t *testing.T) { + debugCount := 0 - type jsonlog struct { - Level string `json:"@level"` - Message string `json:"@message"` - TimeStamp string `json:"@timestamp"` - } - jsonLog := &jsonlog{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - timeCh := time.After(5 * time.Second) + logCh, err := client.Sys().Monitor(ctx, "DEBUG", lf) + if err != nil { + t.Fatal(err) + } - for { - select { - case log := <-logCh: - if lf == "json" { - err := json.Unmarshal([]byte(log), jsonLog) - if err != nil { - t.Fatal("Expected JSON log from channel") - } - if strings.Contains(jsonLog.Level, "debug") { - debugCount++ - } + type jsonlog struct { + Level string `json:"@level"` + Message string `json:"@message"` + TimeStamp string `json:"@timestamp"` } - if strings.Contains(log, "[DEBUG]") { - debugCount++ + jsonLog := &jsonlog{} + + timeCh := time.After(5 * time.Second) + + for { + select { + case log := <-logCh: + if lf == "json" { + err := json.Unmarshal([]byte(log), jsonLog) + if err != nil { + t.Fatal("Expected JSON log from channel") + } + if strings.Contains(jsonLog.Level, "debug") { + debugCount++ + } + } else if strings.Contains(log, "[DEBUG]") { + debugCount++ + } + if debugCount > 3 { + // If we've seen multiple lines that match what we want, + // it's probably safe to assume streaming is working + return + } + case <-timeCh: + t.Fatal("Failed to get a DEBUG message after 5 seconds") + } } - case <-timeCh: - t.Fatal("Failed to get a DEBUG message after 5 seconds") - } - - 
// If we've seen multiple lines that match what we want, - // it's probably safe to assume streaming is working - if debugCount > 3 { - stopCh <- struct{}{} - break - } + }) } - - <-stopCh } diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index 09ca8c8c3505..9db7fed3749f 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -9,8 +12,10 @@ import ( "github.com/fatih/structs" "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/versions" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) @@ -30,6 +35,7 @@ func TestSysMounts(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -211,6 +217,7 @@ func TestSysMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -415,6 +422,72 @@ func TestSysMount_put(t *testing.T) { // for more info. } +// TestSysRemountSpacesFrom ensure we succeed in a remount where the 'from' mount has spaces in the name +func TestSysRemountSpacesFrom(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": "baz", + }) + testResponseStatus(t, resp, 200) +} + +// TestSysRemountSpacesTo ensure we succeed in a remount where the 'to' mount has spaces in the name +func TestSysRemountSpacesTo(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": "bar baz", + }) + testResponseStatus(t, resp, 200) +} + +// TestSysRemountTrailingSpaces ensures we fail on trailing spaces +func TestSysRemountTrailingSpaces(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": " baz ", + }) + testResponseStatus(t, resp, 400) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": " foo bar ", + "to": "baz", + }) + testResponseStatus(t, resp, 400) +} + func TestSysRemount(t *testing.T) { core, _, token := vault.TestCoreUnsealed(t) ln, addr := TestServer(t, core) @@ -436,7 +509,7 @@ func TestSysRemount(t *testing.T) { // Poll until the remount succeeds var remountResp map[string]interface{} testResponseBody(t, 
resp, &remountResp) - vault.RetryUntil(t, 5*time.Second, func() error { + corehelpers.RetryUntil(t, 5*time.Second, func() error { resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string)) testResponseStatus(t, resp, 200) @@ -459,6 +532,7 @@ func TestSysRemount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "bar/": map[string]interface{}{ "description": "foo", @@ -672,6 +746,7 @@ func TestSysUnmount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "secret/": map[string]interface{}{ "description": "key/value secret storage", @@ -860,6 +935,7 @@ func TestSysTuneMount_Options(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", "default_lease_ttl": json.Number("2764800"), @@ -898,6 +974,7 @@ func TestSysTuneMount_Options(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", "default_lease_ttl": json.Number("2764800"), @@ -918,7 +995,12 @@ func TestSysTuneMount_Options(t *testing.T) { } func TestSysTuneMount(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, coreConfig) ln, addr := TestServer(t, core) defer ln.Close() TestServerAuth(t, addr, token) @@ -939,6 +1021,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -1178,6 +1261,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "foo/": map[string]interface{}{ "description": "foo", @@ -1376,6 +1460,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foo", "default_lease_ttl": json.Number("259196400"), @@ -1414,6 +1499,7 @@ func TestSysTuneMount(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "foobar", "default_lease_ttl": json.Number("40"), @@ -1511,6 +1597,7 @@ func TestSysTuneMount_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1557,6 +1644,7 @@ func TestSysTuneMount_nonHMACKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1595,6 +1683,7 @@ func TestSysTuneMount_listingVisibility(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1632,6 +1721,7 @@ func TestSysTuneMount_listingVisibility(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value 
secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1678,6 +1768,7 @@ func TestSysTuneMount_passthroughRequestHeaders(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1717,6 +1808,7 @@ func TestSysTuneMount_passthroughRequestHeaders(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1761,6 +1853,7 @@ func TestSysTuneMount_allowedManagedKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), @@ -1800,6 +1893,7 @@ func TestSysTuneMount_allowedManagedKeys(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "description": "key/value secret storage", "default_lease_ttl": json.Number("2764800"), diff --git a/http/sys_mounts_test.go b/http/sys_mounts_test.go index 7c113d987950..4f597f474a26 100644 --- a/http/sys_mounts_test.go +++ b/http/sys_mounts_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_policy_test.go b/http/sys_policy_test.go index 6844a5321d6a..bf797cede8c3 100644 --- a/http/sys_policy_test.go +++ b/http/sys_policy_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -24,6 +27,7 @@ func TestSysPolicies(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "root"}, "keys": []interface{}{"default", "root"}, @@ -55,6 +59,7 @@ func TestSysReadPolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "name": "root", "rules": "", @@ -91,6 +96,7 @@ func TestSysWritePolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "foo", "root"}, "keys": []interface{}{"default", "foo", "root"}, @@ -140,6 +146,7 @@ func TestSysDeletePolicy(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "policies": []interface{}{"default", "root"}, "keys": []interface{}{"default", "root"}, diff --git a/http/sys_raft.go b/http/sys_raft.go index 428aad4f7da3..e209f0a6f4ea 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_rekey.go b/http/sys_rekey.go index d1cec653a628..0968076246c9 100644 --- a/http/sys_rekey.go +++ b/http/sys_rekey.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( diff --git a/http/sys_rekey_test.go b/http/sys_rekey_test.go index fd068ba48bda..e39664447693 100644 --- a/http/sys_rekey_test.go +++ b/http/sys_rekey_test.go @@ -1,46 +1,57 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( "encoding/hex" "encoding/json" "fmt" - "net/http" "reflect" "testing" "github.com/go-test/deep" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "github.com/hashicorp/vault/vault" ) // Test to check if the API errors out when wrong number of PGP keys are // supplied for rekey func TestSysRekey_Init_pgpKeysEntriesForRekey(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + cl := cluster.Cores[0].Client - resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + _, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, "pgp_keys": []string{"pgpkey1"}, }) - testResponseStatus(t, resp, 400) + if err == nil { + t.Fatal("should have failed to write pgp key entry due to mismatched keys", err) + } } func TestSysRekey_Init_Status(t *testing.T) { t.Run("status-barrier-default", func(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + cl := cluster.Cores[0].Client - resp, err := http.Get(addr + "/v1/sys/rekey/init") + resp, err := cl.Logical().Read("sys/rekey/init") if err != nil { t.Fatalf("err: %s", err) } - var actual map[string]interface{} + actual := resp.Data expected := map[string]interface{}{ "started": false, "t": json.Number("0"), @@ -52,8 +63,7 @@ func TestSysRekey_Init_Status(t *testing.T) { "nonce": "", "verification_required": false, } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) + if !reflect.DeepEqual(actual, expected) { t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) } @@ -62,19 +72,24 @@ func TestSysRekey_Init_Status(t *testing.T) { func TestSysRekey_Init_Setup(t *testing.T) { t.Run("init-barrier-barrier-key", func(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + cl := cluster.Cores[0].Client // Start rekey - resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + resp, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, }) - testResponseStatus(t, resp, 200) + if err != nil { + t.Fatalf("err: %s", err) + } - var actual map[string]interface{} + actual := resp.Data expected := map[string]interface{}{ "started": true, "t": json.Number("3"), @@ -85,8 +100,7 @@ func TestSysRekey_Init_Setup(t *testing.T) { "backup": false, "verification_required": false, } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) + if actual["nonce"].(string) == "" { t.Fatalf("nonce was empty") } @@ -96,9 +110,12 @@ func TestSysRekey_Init_Setup(t *testing.T) { } // Get rekey status - resp = testHttpGet(t, token, 
addr+"/v1/sys/rekey/init") + resp, err = cl.Logical().Read("sys/rekey/init") + if err != nil { + t.Fatalf("err: %s", err) + } - actual = map[string]interface{}{} + actual = resp.Data expected = map[string]interface{}{ "started": true, "t": json.Number("3"), @@ -109,8 +126,6 @@ func TestSysRekey_Init_Setup(t *testing.T) { "backup": false, "verification_required": false, } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) if actual["nonce"].(string) == "" { t.Fatalf("nonce was empty") } @@ -126,26 +141,33 @@ func TestSysRekey_Init_Setup(t *testing.T) { func TestSysRekey_Init_Cancel(t *testing.T) { t.Run("cancel-barrier-barrier-key", func(t *testing.T) { - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + cl := cluster.Cores[0].Client - resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + _, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ "secret_shares": 5, "secret_threshold": 3, }) - testResponseStatus(t, resp, 200) + if err != nil { + t.Fatalf("err: %s", err) + } - resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init") - testResponseStatus(t, resp, 204) + _, err = cl.Logical().Delete("sys/rekey/init") + if err != nil { + t.Fatalf("err: %s", err) + } - resp, err := http.Get(addr + "/v1/sys/rekey/init") + resp, err := cl.Logical().Read("sys/rekey/init") if err != nil { t.Fatalf("err: %s", err) } - var actual map[string]interface{} + actual := resp.Data expected := map[string]interface{}{ "started": false, "t": json.Number("0"), @@ -157,8 +179,6 @@ func TestSysRekey_Init_Cancel(t *testing.T) { "nonce": "", "verification_required": false, } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) if !reflect.DeepEqual(actual, expected) { t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) } diff --git a/http/sys_rotate_test.go b/http/sys_rotate_test.go index 81597c7008e0..6be03ce3759a 100644 --- a/http/sys_rotate_test.go +++ b/http/sys_rotate_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -27,6 +30,7 @@ func TestSysRotate(t *testing.T) { "wrap_info": nil, "warnings": nil, "auth": nil, + "mount_type": "system", "data": map[string]interface{}{ "term": json.Number("2"), }, diff --git a/http/sys_seal.go b/http/sys_seal.go index 24f491b65d1d..4852d57d5e4c 100644 --- a/http/sys_seal.go +++ b/http/sys_seal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
@@ -15,7 +18,7 @@ import (
 
 func handleSysSeal(core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		req, _, statusCode, err := buildLogicalRequest(core, w, r, "")
 		if err != nil || statusCode != 0 {
 			respondError(w, statusCode, err)
 			return
@@ -45,7 +48,7 @@ func handleSysSeal(core *vault.Core) http.Handler {
 
 func handleSysStepDown(core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		req, _, statusCode, err := buildLogicalRequest(core, w, r, "")
 		if err != nil || statusCode != 0 {
 			respondError(w, statusCode, err)
 			return
@@ -149,25 +152,62 @@ func handleSysUnseal(core *vault.Core) http.Handler {
 	})
 }
 
-func handleSysSealStatus(core *vault.Core) http.Handler {
+func handleSysSealStatus(core *vault.Core, opt ...ListenerConfigOption) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if r.Method != "GET" {
 			respondError(w, http.StatusMethodNotAllowed, nil)
 			return
 		}
 
-		handleSysSealStatusRaw(core, w, r)
+		handleSysSealStatusRaw(core, w, r, opt...)
 	})
 }
 
-func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-	status, err := core.GetSealStatus(ctx)
+func handleSysSealBackendStatus(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != "GET" {
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+		handleSysSealBackendStatusRaw(core, w, r)
+	})
+}
+
+func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request, opt ...ListenerConfigOption) {
+	ctx := r.Context()
+
+	var tokenPresent bool
+	token := r.Header.Get(consts.AuthHeaderName)
+	if token != "" {
+		// We don't care about the error; we just want to know if the token exists
+		lock := core.HALock()
+		lock.Lock()
+		tokenEntry, err := core.LookupToken(ctx, token)
+		lock.Unlock()
+		tokenPresent = err == nil && tokenEntry != nil
+	}
+
+	// If a valid token is present, disable redaction of the specified values
+	if tokenPresent {
+		ctx = logical.CreateContextRedactionSettings(ctx, false, false, false)
+	}
+
+	status, err := core.GetSealStatus(ctx, true)
 	if err != nil {
 		respondError(w, http.StatusInternalServerError, err)
 		return
 	}
+	respondOk(w, status)
+}
+
+func handleSysSealBackendStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+	ctx := context.Background()
+	status, err := core.GetSealBackendStatus(ctx)
+	if err != nil {
+		respondError(w, http.StatusInternalServerError, err)
+		return
+	}
 	respondOk(w, status)
 }
diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go
index 26796d02cc72..866ef4609234 100644
--- a/http/sys_seal_test.go
+++ b/http/sys_seal_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
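The new `handleSysSealStatusRaw` above decides whether to redact purely on token validity: listener-level redaction settings stay in force unless the caller presents a token that resolves to a live token entry. Extracted as a standalone sketch (the helper function itself is hypothetical; the calls are the ones used above):

```
// Hypothetical helper isolating the gating logic from handleSysSealStatusRaw.
func redactionContext(core *vault.Core, r *http.Request) context.Context {
	ctx := r.Context()

	token := r.Header.Get(consts.AuthHeaderName)
	if token == "" {
		return ctx // no token: listener-level redaction stays in effect
	}

	// Only the token's validity matters, not the lookup error itself.
	lock := core.HALock()
	lock.Lock()
	entry, err := core.LookupToken(ctx, token)
	lock.Unlock()

	if err == nil && entry != nil {
		// Valid token: disable redaction of version, addresses and cluster name.
		ctx = logical.CreateContextRedactionSettings(ctx, false, false, false)
	}

	return ctx
}
```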
+// SPDX-License-Identifier: BUSL-1.1 + package http import ( @@ -12,11 +15,18 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/audit" + auditFile "github.com/hashicorp/vault/builtin/audit/file" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/seal" "github.com/hashicorp/vault/version" + "github.com/stretchr/testify/assert" ) func TestSysSealStatus(t *testing.T) { @@ -64,80 +74,6 @@ func TestSysSealStatus(t *testing.T) { } } -func TestSysSealStatus_Warnings(t *testing.T) { - core := vault.TestCore(t) - vault.TestCoreInit(t, core) - ln, addr := TestServer(t, core) - defer ln.Close() - - // Manually configure DisableSSCTokens to be true - core.GetCoreConfigInternal().DisableSSCTokens = true - - resp, err := http.Get(addr + "/v1/sys/seal-status") - if err != nil { - t.Fatalf("err: %s", err) - } - - var actual map[string]interface{} - expected := map[string]interface{}{ - "sealed": true, - "t": json.Number("3"), - "n": json.Number("3"), - "progress": json.Number("0"), - "nonce": "", - "type": "shamir", - "recovery_seal": false, - "initialized": true, - "migration": false, - "build_date": version.BuildDate, - } - testResponseStatus(t, resp, 200) - testResponseBody(t, resp, &actual) - if actual["version"] == nil { - t.Fatalf("expected version information") - } - expected["version"] = actual["version"] - if actual["cluster_name"] == nil { - delete(expected, "cluster_name") - } else { - expected["cluster_name"] = actual["cluster_name"] - } - if actual["cluster_id"] == nil { - delete(expected, "cluster_id") - } else { - expected["cluster_id"] = actual["cluster_id"] - } - actualWarnings := actual["warnings"] - if actualWarnings == nil { - t.Fatalf("expected warnings about SSCToken disabling") - } - - actualWarningsArray, ok := actualWarnings.([]interface{}) - if !ok { - t.Fatalf("expected warnings about SSCToken disabling were not in the right format") - } - if len(actualWarningsArray) != 1 { - t.Fatalf("too many warnings were given") - } - actualWarning, ok := actualWarningsArray[0].(string) - if !ok { - t.Fatalf("expected warning about SSCToken disabling was not in the right format") - } - - expectedWarning := "Server Side Consistent Tokens are disabled, due to the " + - "VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS environment variable being set. " + - "It is not recommended to run Vault for an extended period of time with this configuration." - if actualWarning != expectedWarning { - t.Fatalf("actual warning was not as expected. 
Expected %s, but got %s", expectedWarning, actualWarning)
-	}
-
-	expected["warnings"] = actual["warnings"]
-
-	if diff := deep.Equal(actual, expected); diff != nil {
-		t.Fatal(diff)
-	}
-}
-
 func TestSysSealStatus_uninit(t *testing.T) {
 	core := vault.TestCore(t)
 	ln, addr := TestServer(t, core)
@@ -627,3 +563,64 @@ func TestSysStepDown(t *testing.T) {
 	resp := testHttpPut(t, token, addr+"/v1/sys/step-down", nil)
 	testResponseStatus(t, resp, 204)
 }
+
+// TestSysSealStatusRedaction tests that the response from a request to
+// sys/seal-status is redacted only if no valid token is provided with
+// the request
+func TestSysSealStatusRedaction(t *testing.T) {
+	conf := &vault.CoreConfig{
+		EnableUI:        false,
+		EnableRaw:       true,
+		BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(),
+		AuditBackends: map[string]audit.Factory{
+			"file": auditFile.Factory,
+		},
+	}
+	core, _, token := vault.TestCoreUnsealedWithConfig(t, conf)
+
+	// Setup new custom listener
+	ln, addr := TestListener(t)
+	props := &vault.HandlerProperties{
+		Core: core,
+		ListenerConfig: &configutil.Listener{
+			RedactVersion: true,
+		},
+	}
+	TestServerWithListenerAndProperties(t, ln, addr, core, props)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	client := cleanhttp.DefaultClient()
+
+	// Check seal-status
+	req, err := http.NewRequest("GET", addr+"/v1/sys/seal-status", nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	req.Header.Set(consts.AuthHeaderName, token)
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	testResponseStatus(t, resp, 200)
+
+	// Verify that version exists when provided a valid token
+	var actual map[string]interface{}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &actual)
+	assert.NotEmpty(t, actual["version"])
+
+	// Verify that version is redacted when no token is provided
+	req, err = http.NewRequest("GET", addr+"/v1/sys/seal-status", nil)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	req.Header.Set(consts.AuthHeaderName, "")
+	resp, err = client.Do(req)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &actual)
+	assert.Empty(t, actual["version"])
+}
diff --git a/http/sys_wrapping_test.go b/http/sys_wrapping_test.go
index 17520e78cf32..d059e5830f4b 100644
--- a/http/sys_wrapping_test.go
+++ b/http/sys_wrapping_test.go
@@ -1,7 +1,11 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
 	"encoding/json"
+	"errors"
 	"reflect"
 	"testing"
 	"time"
@@ -366,4 +370,20 @@ func TestHTTP_Wrapping(t *testing.T) {
 	}) {
 		t.Fatalf("secret data did not match expected: %#v", secret.Data)
 	}
+
+	// Ensure that wrapping lookup without a client token responds correctly
+	client.ClearToken()
+	secret, err = client.Logical().Read("sys/wrapping/lookup")
+	if secret != nil {
+		t.Fatalf("expected no response: %#v", secret)
+	}
+
+	if err == nil {
+		t.Fatal("expected error")
+	}
+
+	var respError *api.ResponseError
+	if errors.As(err, &respError); respError.StatusCode != 403 {
+		t.Fatalf("expected 403 response, actual: %d", respError.StatusCode)
+	}
 }
diff --git a/http/testing.go b/http/testing.go
index 9bb3970a6c32..5797e4dc5b86 100644
--- a/http/testing.go
+++ b/http/testing.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
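From a client's point of view, the behaviour `TestSysSealStatusRedaction` pins down looks roughly as follows. This sketch assumes the standard `api` client and a listener with `redact_version` enabled; it is illustrative, not part of the change:

```
// Sketch: observing version redaction on sys/seal-status with the api client.
func demoRedaction(client *api.Client) error {
	// Authenticated: version is populated.
	status, err := client.Sys().SealStatus()
	if err != nil {
		return err
	}
	fmt.Println("with token:", status.Version)

	// Unauthenticated: the same field comes back empty (redacted).
	client.ClearToken()
	status, err = client.Sys().SealStatus()
	if err != nil {
		return err
	}
	fmt.Println("without token:", status.Version) // ""
	return nil
}
```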
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
diff --git a/http/unwrapping_raw_body_test.go b/http/unwrapping_raw_body_test.go
index 6ba24b7c9098..de145486dbaa 100644
--- a/http/unwrapping_raw_body_test.go
+++ b/http/unwrapping_raw_body_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
diff --git a/http/util.go b/http/util.go
index b4c8923cc3ee..e067c29ce427 100644
--- a/http/util.go
+++ b/http/util.go
@@ -1,14 +1,19 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
 package http
 
 import (
 	"bytes"
-	"errors"
+	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"strings"
 
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/limits"
 	"github.com/hashicorp/vault/sdk/logical"
 
 	"github.com/hashicorp/vault/helper/namespace"
@@ -16,23 +21,41 @@ import (
 	"github.com/hashicorp/vault/vault/quotas"
 )
 
-var (
-	adjustRequest = func(c *vault.Core, r *http.Request) (*http.Request, int) {
-		return r, 0
-	}
-
-	genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler {
-		// Wrap the help wrapped handler with another layer with a generic
-		// handler
-		return wrapGenericHandler(core, in, props)
-	}
+var nonVotersAllowed = false
 
-	additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {}
+func wrapMaxRequestSizeHandler(handler http.Handler, props *vault.HandlerProperties) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var maxRequestSize int64
+		if props.ListenerConfig != nil {
+			maxRequestSize = props.ListenerConfig.MaxRequestSize
+		}
+		if maxRequestSize == 0 {
+			maxRequestSize = DefaultMaxRequestSize
+		}
+		ctx := r.Context()
+		originalBody := r.Body
+		if maxRequestSize > 0 {
+			r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize)
+		}
+		ctx = logical.CreateContextOriginalBody(ctx, originalBody)
+		r = r.WithContext(ctx)
 
-	nonVotersAllowed = false
+		handler.ServeHTTP(w, r)
+	})
+}
 
-	adjustResponse = func(core *vault.Core, w http.ResponseWriter, req *logical.Request) {}
-)
+func wrapRequestLimiterHandler(handler http.Handler, props *vault.HandlerProperties) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		request := r.WithContext(
+			context.WithValue(
+				r.Context(),
+				limits.CtxKeyDisableRequestLimiter{},
+				props.ListenerConfig.DisableRequestLimiter,
+			),
+		)
+		handler.ServeHTTP(w, request)
+	})
+}
 
 func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -52,25 +75,46 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler
 		}
 		mountPath := strings.TrimPrefix(core.MatchingMount(r.Context(), path), ns.Path)
 
-		// Clone body, so we do not close the request body reader
-		bodyBytes, err := ioutil.ReadAll(r.Body)
-		if err != nil {
-			respondError(w, http.StatusInternalServerError, errors.New("failed to read request body"))
-			return
-		}
-		r.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
-
-		quotaResp, err := core.ApplyRateLimitQuota(r.Context(), &quotas.Request{
+		quotaReq := &quotas.Request{
 			Type:          quotas.TypeRateLimit,
 			Path:          path,
 			MountPath:     mountPath,
-			Role:          core.DetermineRoleFromLoginRequestFromBytes(mountPath, bodyBytes, r.Context()),
 			NamespacePath: ns.Path,
 			ClientAddress: parseRemoteIPAddress(r),
-		})
+		}
+
+		// This checks if any role-based quota is required (LCQ or RLQ).
+ requiresResolveRole, err := core.ResolveRoleForQuotas(r.Context(), quotaReq) + if err != nil { + core.Logger().Error("failed to lookup quotas", "path", path, "error", err) + respondError(w, http.StatusInternalServerError, err) + return + } + + // If any role-based quotas are enabled for this namespace/mount, just + // do the role resolution once here. + if requiresResolveRole { + buf := bytes.Buffer{} + teeReader := io.TeeReader(r.Body, &buf) + role := core.DetermineRoleFromLoginRequestFromReader(r.Context(), mountPath, teeReader) + + // Reset the body if it was read + if buf.Len() > 0 { + r.Body = io.NopCloser(&buf) + originalBody, ok := logical.ContextOriginalBodyValue(r.Context()) + if ok { + r = r.WithContext(logical.CreateContextOriginalBody(r.Context(), newMultiReaderCloser(&buf, originalBody))) + } + } + // add an entry to the context to prevent recalculating request role unnecessarily + r = r.WithContext(context.WithValue(r.Context(), logical.CtxKeyRequestRole{}, role)) + quotaReq.Role = role + } + + quotaResp, err := core.ApplyRateLimitQuota(r.Context(), quotaReq) if err != nil { core.Logger().Error("failed to apply quota", "path", path, "error", err) - respondError(w, http.StatusUnprocessableEntity, err) + respondError(w, http.StatusInternalServerError, err) return } @@ -89,7 +133,7 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler } if core.RateLimitAuditLoggingEnabled() { - req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) + req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), core.RouterAccess(), w, r) if err != nil || status != 0 { respondError(w, status, err) return @@ -112,6 +156,22 @@ func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler }) } +func disableReplicationStatusEndpointWrapping(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + request := r.WithContext(logical.CreateContextDisableReplicationStatusEndpoints(r.Context(), true)) + + h.ServeHTTP(w, request) + }) +} + +func redactionSettingsWrapping(h http.Handler, redactVersion, redactAddresses, redactClusterName bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + request := r.WithContext(logical.CreateContextRedactionSettings(r.Context(), redactVersion, redactAddresses, redactClusterName)) + + h.ServeHTTP(w, request) + }) +} + func parseRemoteIPAddress(r *http.Request) string { ip, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { @@ -120,3 +180,25 @@ func parseRemoteIPAddress(r *http.Request) string { return ip } + +type multiReaderCloser struct { + readers []io.Reader + io.Reader +} + +func newMultiReaderCloser(readers ...io.Reader) *multiReaderCloser { + return &multiReaderCloser{ + readers: readers, + Reader: io.MultiReader(readers...), + } +} + +func (m *multiReaderCloser) Close() error { + var err error + for _, r := range m.readers { + if c, ok := r.(io.Closer); ok { + err = multierror.Append(err, c.Close()) + } + } + return err +} diff --git a/http/util_stubs_oss.go b/http/util_stubs_oss.go new file mode 100644 index 000000000000..7bffec7924e7 --- /dev/null +++ b/http/util_stubs_oss.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. 
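The quota wrapper above only pays for role resolution when a role-based quota actually exists, and it reads the login body through an `io.TeeReader` so the consumed bytes can be spliced back in front of the unread remainder. The same buffering trick in isolation (names here are illustrative):

```
// Sketch of the TeeReader body-preservation trick used above.
func peekBody(r *http.Request, limit int64) ([]byte, error) {
	var buf bytes.Buffer
	orig := r.Body
	tee := io.TeeReader(orig, &buf)

	// Read just enough of the body (e.g. to extract a login "role" field);
	// every byte read is mirrored into buf by the TeeReader.
	peeked, err := io.ReadAll(io.LimitReader(tee, limit))
	if err != nil {
		return nil, err
	}

	// Downstream handlers see the buffered bytes first, then the remainder.
	r.Body = io.NopCloser(io.MultiReader(&buf, orig))
	return peeked, nil
}
```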
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +//go:generate go run github.com/hashicorp/vault/tools/stubmaker + +func entWrapGenericHandler(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler { + // Wrap the help wrapped handler with another layer with a generic + // handler + return wrapGenericHandler(core, in, props) +} + +func entAdditionalRoutes(mux *http.ServeMux, core *vault.Core) {} + +func entAdjustResponse(core *vault.Core, w http.ResponseWriter, req *logical.Request) { +} diff --git a/internal/go118_sha1_patch.go b/internal/go118_sha1_patch.go deleted file mode 100644 index f3b3cea68847..000000000000 --- a/internal/go118_sha1_patch.go +++ /dev/null @@ -1,56 +0,0 @@ -package internal - -import ( - "fmt" - "os" - "sync" - _ "unsafe" // for go:linkname - - goversion "github.com/hashicorp/go-version" - "github.com/hashicorp/vault/sdk/version" -) - -const sha1PatchVersionsBefore = "1.12.0" - -var patchSha1 sync.Once - -//go:linkname debugAllowSHA1 crypto/x509.debugAllowSHA1 -var debugAllowSHA1 bool - -// PatchSha1 patches Go 1.18+ to allow certificates with signatures containing SHA-1 hashes to be allowed. -// It is safe to call this function multiple times. -// This is necessary to allow Vault 1.10 and 1.11 to work with Go 1.18+ without breaking backwards compatibility -// with these certificates. See https://go.dev/doc/go1.18#sha1 and -// https://developer.hashicorp.com/vault/docs/deprecation/faq#q-what-is-the-impact-of-removing-support-for-x-509-certificates-with-signatures-that-use-sha-1 -// for more details. -// TODO: remove when Vault <=1.11 is no longer supported -func PatchSha1() { - patchSha1.Do(func() { - // for Go 1.19.4 and later - godebug := os.Getenv("GODEBUG") - if godebug != "" { - godebug += "," - } - godebug += "x509sha1=1" - os.Setenv("GODEBUG", godebug) - - // for Go 1.19.3 and earlier, patch the variable - patchBefore, err := goversion.NewSemver(sha1PatchVersionsBefore) - if err != nil { - panic(err) - } - - patch := false - v, err := goversion.NewSemver(version.GetVersion().Version) - if err == nil { - patch = v.LessThan(patchBefore) - } else { - fmt.Fprintf(os.Stderr, "Cannot parse version %s; going to apply SHA-1 deprecation patch workaround\n", version.GetVersion().Version) - patch = true - } - - if patch { - debugAllowSHA1 = true - } - }) -} diff --git a/internal/observability/event/errors.go b/internal/observability/event/errors.go new file mode 100644 index 000000000000..a8ad7516fd7c --- /dev/null +++ b/internal/observability/event/errors.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "errors" +) + +var ErrInvalidParameter = errors.New("invalid parameter") diff --git a/internal/observability/event/event_type.go b/internal/observability/event/event_type.go new file mode 100644 index 000000000000..16a2f7674bb8 --- /dev/null +++ b/internal/observability/event/event_type.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "fmt" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-uuid" +) + +// EventType represents the event's type +type EventType string + +const ( + AuditType EventType = "audit" // AuditType represents audit events +) + +// Validate ensures that EventType is one of the set of allowed event types. 
+func (t EventType) Validate() error {
+	switch t {
+	case AuditType:
+		return nil
+	default:
+		return fmt.Errorf("invalid event type %q: %w", t, ErrInvalidParameter)
+	}
+}
+
+// GenerateNodeID generates a new UUID and casts it to the eventlogger.NodeID
+// type.
+func GenerateNodeID() (eventlogger.NodeID, error) {
+	id, err := uuid.GenerateUUID()
+
+	return eventlogger.NodeID(id), err
+}
+
+// String returns the string version of an EventType.
+func (t EventType) String() string {
+	return string(t)
+}
+
+// AsEventType returns the EventType in a format for eventlogger.
+func (t EventType) AsEventType() eventlogger.EventType {
+	return eventlogger.EventType(t.String())
+}
diff --git a/internal/observability/event/event_type_test.go b/internal/observability/event/event_type_test.go
new file mode 100644
index 000000000000..ce8e238dfec0
--- /dev/null
+++ b/internal/observability/event/event_type_test.go
@@ -0,0 +1,51 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestEventType_Validate exercises the Validate method for EventType.
+func TestEventType_Validate(t *testing.T) {
+	tests := map[string]struct {
+		Value         string
+		IsValid       bool
+		ExpectedError string
+	}{
+		"audit": {
+			Value:   "audit",
+			IsValid: true,
+		},
+		"empty": {
+			Value:         "",
+			IsValid:       false,
+			ExpectedError: "invalid event type \"\": invalid parameter",
+		},
+		"random": {
+			Value:         "random",
+			IsValid:       false,
+			ExpectedError: "invalid event type \"random\": invalid parameter",
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			eventType := EventType(tc.Value)
+			err := eventType.Validate()
+			switch {
+			case tc.IsValid:
+				require.NoError(t, err)
+			case !tc.IsValid:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedError)
+			}
+		})
+	}
+}
diff --git a/internal/observability/event/node_metrics_counter.go b/internal/observability/event/node_metrics_counter.go
new file mode 100644
index 000000000000..980906137634
--- /dev/null
+++ b/internal/observability/event/node_metrics_counter.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*MetricsCounter)(nil)
+
+// MetricsCounter offers a way for nodes to emit metrics which increment a labeled counter by 1.
+type MetricsCounter struct {
+	Name    string
+	Node    eventlogger.Node
+	labeler Labeler
+}
+
+// Labeler provides a way to inject the logic required to determine labels based
+// on the state of the eventlogger.Event being returned and the error resulting
+// from processing by the underlying eventlogger.Node.
+type Labeler interface {
+	Labels(*eventlogger.Event, error) []string
+}
+
+// NewMetricsCounter should be used to create the MetricsCounter.
+func NewMetricsCounter(name string, node eventlogger.Node, labeler Labeler) (*MetricsCounter, error) { + name = strings.TrimSpace(name) + if name == "" { + return nil, fmt.Errorf("name is required: %w", ErrInvalidParameter) + } + + if node == nil || reflect.ValueOf(node).IsNil() { + return nil, fmt.Errorf("node is required: %w", ErrInvalidParameter) + } + + if labeler == nil || reflect.ValueOf(labeler).IsNil() { + return nil, fmt.Errorf("labeler is required: %w", ErrInvalidParameter) + } + + return &MetricsCounter{ + Name: name, + Node: node, + labeler: labeler, + }, nil +} + +// Process will process the event using the underlying eventlogger.Node, and then +// use the configured Labeler to provide a label which is used to increment a metric by 1. +func (m MetricsCounter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + // NOTE: We don't provide an 'op' here, as we're just wrapping the underlying node. + var err error + + // Process the node first + e, err = m.Node.Process(ctx, e) + + // Provide the results to the Labeler. + metrics.IncrCounter(m.labeler.Labels(e, err), 1) + + return e, err +} + +// Reopen attempts to reopen the underlying eventlogger.Node. +func (m MetricsCounter) Reopen() error { + return m.Node.Reopen() +} + +// Type returns the type for the underlying eventlogger.Node. +func (m MetricsCounter) Type() eventlogger.NodeType { + return m.Node.Type() +} diff --git a/internal/observability/event/node_metrics_counter_test.go b/internal/observability/event/node_metrics_counter_test.go new file mode 100644 index 000000000000..ac1679723123 --- /dev/null +++ b/internal/observability/event/node_metrics_counter_test.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "context" + "testing" + + "github.com/hashicorp/eventlogger" + "github.com/stretchr/testify/require" +) + +var ( + _ eventlogger.Node = (*testEventLoggerNode)(nil) + _ Labeler = (*testMetricsCounter)(nil) +) + +// TestNewMetricsCounter ensures that NewMetricsCounter operates as intended and +// can validate the input parameters correctly, returning the right error message +// when required. +func TestNewMetricsCounter(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + name string + node eventlogger.Node + labeler Labeler + isErrorExpected bool + expectedErrorMessage string + }{ + "happy": { + name: "foo", + node: &testEventLoggerNode{}, + labeler: &testMetricsCounter{}, + isErrorExpected: false, + }, + "no-name": { + node: nil, + labeler: nil, + isErrorExpected: true, + expectedErrorMessage: "name is required: invalid parameter", + }, + "no-node": { + name: "foo", + node: nil, + isErrorExpected: true, + expectedErrorMessage: "node is required: invalid parameter", + }, + "no-labeler": { + name: "foo", + node: &testEventLoggerNode{}, + labeler: nil, + isErrorExpected: true, + expectedErrorMessage: "labeler is required: invalid parameter", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + m, err := NewMetricsCounter(tc.name, tc.node, tc.labeler) + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrorMessage) + default: + require.NoError(t, err) + require.NotNil(t, m) + } + }) + } +} + +// testEventLoggerNode is for testing and implements the eventlogger.Node interface. 
+type testEventLoggerNode struct{}
+
+func (t testEventLoggerNode) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	return nil, nil
+}
+
+func (t testEventLoggerNode) Reopen() error {
+	return nil
+}
+
+func (t testEventLoggerNode) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeSink
+}
+
+// testMetricsCounter is for testing and implements the event.Labeler interface.
+type testMetricsCounter struct{}
+
+func (m *testMetricsCounter) Labels(_ *eventlogger.Event, err error) []string {
+	return []string{""}
+}
diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go
new file mode 100644
index 000000000000..62fb4265954e
--- /dev/null
+++ b/internal/observability/event/options.go
@@ -0,0 +1,203 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-uuid"
+)
+
+// Option is how Options are passed as arguments.
+type Option func(*options) error
+
+// Options are used to represent configuration for an Event.
+type options struct {
+	withID          string
+	withNow         time.Time
+	withFacility    string
+	withTag         string
+	withSocketType  string
+	withMaxDuration time.Duration
+	withFileMode    *os.FileMode
+}
+
+// getDefaultOptions returns Options with their default values.
+func getDefaultOptions() options {
+	fileMode := os.FileMode(0o600)
+
+	return options{
+		withNow:         time.Now(),
+		withFacility:    "AUTH",
+		withTag:         "vault",
+		withSocketType:  "tcp",
+		withMaxDuration: 2 * time.Second,
+		withFileMode:    &fileMode,
+	}
+}
+
+// getOpts applies all the supplied Option and returns configured Options.
+// Each Option is applied in the order it appears in the argument list, so it is
+// possible to supply the same Option numerous times and the 'last write wins'.
+func getOpts(opt ...Option) (options, error) {
+	opts := getDefaultOptions()
+	for _, o := range opt {
+		if o == nil {
+			continue
+		}
+		if err := o(&opts); err != nil {
+			return options{}, err
+		}
+	}
+	return opts, nil
+}
+
+// ValidateOptions can be used to validate options before they are required.
+func ValidateOptions(opt ...Option) error {
+	_, err := getOpts(opt...)
+
+	return err
+}
+
+// NewID is a modified version of NewID; the modification was made to stop a
+// circular dependency with the errors package that is caused by importing
+// boundary/internal/db
+func NewID(prefix string) (string, error) {
+	if prefix == "" {
+		return "", fmt.Errorf("missing prefix: %w", ErrInvalidParameter)
+	}
+
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", fmt.Errorf("unable to generate ID: %w", err)
+	}
+
+	return fmt.Sprintf("%s_%s", prefix, id), nil
+}
+
+// WithID provides an optional ID.
+func WithID(id string) Option {
+	return func(o *options) error {
+		var err error
+
+		id := strings.TrimSpace(id)
+		switch {
+		case id == "":
+			err = fmt.Errorf("id cannot be empty: %w", ErrInvalidParameter)
+		default:
+			o.withID = id
+		}
+
+		return err
+	}
+}
+
+// WithNow provides an option to represent 'now'.
+func WithNow(now time.Time) Option {
+	return func(o *options) error {
+		var err error
+
+		switch {
+		case now.IsZero():
+			err = fmt.Errorf("cannot specify 'now' to be the zero time instant: %w", ErrInvalidParameter)
+		default:
+			o.withNow = now
+		}

+		return err
+	}
+}
+
+// WithFacility provides an Option to represent a 'facility' for a syslog sink.
+func WithFacility(facility string) Option { + return func(o *options) error { + facility = strings.TrimSpace(facility) + + if facility != "" { + o.withFacility = facility + } + + return nil + } +} + +// WithTag provides an Option to represent a 'tag' for a syslog sink. +func WithTag(tag string) Option { + return func(o *options) error { + tag = strings.TrimSpace(tag) + + if tag != "" { + o.withTag = tag + } + + return nil + } +} + +// WithSocketType provides an Option to represent the socket type for a socket sink. +func WithSocketType(socketType string) Option { + return func(o *options) error { + socketType = strings.TrimSpace(socketType) + + if socketType != "" { + o.withSocketType = socketType + } + + return nil + } +} + +// WithMaxDuration provides an Option to represent the max duration for writing to a socket. +func WithMaxDuration(duration string) Option { + return func(o *options) error { + duration = strings.TrimSpace(duration) + + if duration == "" { + return nil + } + + parsed, err := parseutil.ParseDurationSecond(duration) + if err != nil { + return fmt.Errorf("unable to parse max duration: %w: %w", ErrInvalidParameter, err) + } + + o.withMaxDuration = parsed + + return nil + } +} + +// WithFileMode provides an Option to represent a file mode for a file sink. +// Supplying an empty string or whitespace will prevent this Option from being +// applied, but it will not return an error in those circumstances. +func WithFileMode(mode string) Option { + return func(o *options) error { + // If supplied file mode is empty, just return early without setting anything. + // We can assume that this Option was called by something that didn't + // parse the incoming value, perhaps from a config map etc. + mode = strings.TrimSpace(mode) + if mode == "" { + return nil + } + + // By now we believe we have something that the caller really intended to + // be parsed into a file mode. + raw, err := strconv.ParseUint(mode, 8, 32) + + switch { + case err != nil: + return fmt.Errorf("unable to parse file mode: %w: %w", ErrInvalidParameter, err) + default: + m := os.FileMode(raw) + o.withFileMode = &m + } + + return nil + } +} diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go new file mode 100644 index 000000000000..a3e47a2c487c --- /dev/null +++ b/internal/observability/event/options_test.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestOptions_WithNow exercises WithNow option to ensure it performs as expected. 
+func TestOptions_WithNow(t *testing.T) { + tests := map[string]struct { + Value time.Time + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue time.Time + }{ + "default-time": { + Value: time.Time{}, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant: invalid parameter", + }, + "valid-time": { + Value: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + IsErrorExpected: false, + ExpectedValue: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + opts := &options{} + applyOption := WithNow(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withNow) + } + }) + } +} + +// TestOptions_WithID exercises WithID option to ensure it performs as expected. +func TestOptions_WithID(t *testing.T) { + tests := map[string]struct { + Value string + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedValue string + }{ + "empty": { + Value: "", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty: invalid parameter", + }, + "whitespace": { + Value: " ", + IsErrorExpected: true, + ExpectedErrorMessage: "id cannot be empty: invalid parameter", + }, + "valid": { + Value: "test", + IsErrorExpected: false, + ExpectedValue: "test", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithID(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withID) + } + }) + } +} + +// TestOptions_Default exercises getDefaultOptions to assert the default values. +func TestOptions_Default(t *testing.T) { + opts := getDefaultOptions() + require.NotNil(t, opts) + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) + require.Equal(t, "AUTH", opts.withFacility) + require.Equal(t, "vault", opts.withTag) + require.Equal(t, 2*time.Second, opts.withMaxDuration) +} + +// TestOptions_Opts exercises getOpts with various Option values. 
+func TestOptions_Opts(t *testing.T) { + tests := map[string]struct { + opts []Option + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedID string + IsNowExpected bool + ExpectedNow time.Time + }{ + "nil-options": { + opts: nil, + IsErrorExpected: false, + IsNowExpected: true, + }, + "empty-options": { + opts: []Option{}, + IsErrorExpected: false, + IsNowExpected: true, + }, + "with-multiple-valid-id": { + opts: []Option{ + WithID("qwerty"), + WithID("juan"), + }, + IsErrorExpected: false, + ExpectedID: "juan", + IsNowExpected: true, + }, + "with-multiple-valid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedNow: time.Date(2023, time.July, 4, 13, 3, 0, 0, time.Local), + IsNowExpected: false, + }, + "with-multiple-valid-then-invalid-now": { + opts: []Option{ + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + WithNow(time.Time{}), + }, + IsErrorExpected: true, + ExpectedErrorMessage: "cannot specify 'now' to be the zero time instant: invalid parameter", + }, + "with-multiple-valid-options": { + opts: []Option{ + WithID("qwerty"), + WithNow(time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local)), + }, + IsErrorExpected: false, + ExpectedID: "qwerty", + ExpectedNow: time.Date(2023, time.July, 4, 12, 3, 0, 0, time.Local), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts, err := getOpts(tc.opts...) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NotNil(t, opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedID, opts.withID) + switch { + case tc.IsNowExpected: + require.True(t, time.Now().After(opts.withNow)) + require.False(t, opts.withNow.IsZero()) + default: + require.Equal(t, tc.ExpectedNow, opts.withNow) + } + + } + }) + } +} + +// TestOptions_WithFacility exercises WithFacility Option to ensure it performs as expected. +func TestOptions_WithFacility(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithFacility(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withFacility) + }) + } +} + +// TestOptions_WithTag exercises WithTag Option to ensure it performs as expected. 
+func TestOptions_WithTag(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithTag(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withTag) + }) + } +} + +// TestOptions_WithSocketType exercises WithSocketType Option to ensure it performs as expected. +func TestOptions_WithSocketType(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue string + }{ + "empty": { + Value: "", + ExpectedValue: "", + }, + "whitespace": { + Value: " ", + ExpectedValue: "", + }, + "value": { + Value: "juan", + ExpectedValue: "juan", + }, + "spacey-value": { + Value: " juan ", + ExpectedValue: "juan", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithSocketType(tc.Value) + err := applyOption(opts) + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withSocketType) + }) + } +} + +// TestOptions_WithMaxDuration exercises WithMaxDuration Option to ensure it performs as expected. +func TestOptions_WithMaxDuration(t *testing.T) { + tests := map[string]struct { + Value string + ExpectedValue time.Duration + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "empty-gives-default": { + Value: "", + }, + "whitespace-give-default": { + Value: " ", + }, + "bad-value": { + Value: "juan", + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse max duration: invalid parameter: time: invalid duration \"juan\"", + }, + "bad-spacey-value": { + Value: " juan ", + IsErrorExpected: true, + ExpectedErrorMessage: "unable to parse max duration: invalid parameter: time: invalid duration \"juan\"", + }, + "duration-2s": { + Value: "2s", + ExpectedValue: 2 * time.Second, + }, + "duration-2m": { + Value: "2m", + ExpectedValue: 2 * time.Minute, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + opts := &options{} + applyOption := WithMaxDuration(tc.Value) + err := applyOption(opts) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.ExpectedValue, opts.withMaxDuration) + } + }) + } +} + +// TestOptions_WithFileMode exercises WithFileMode Option to ensure it performs as expected. 
+func TestOptions_WithFileMode(t *testing.T) {
+	tests := map[string]struct {
+		Value                string
+		IsErrorExpected      bool
+		ExpectedErrorMessage string
+		IsNilExpected        bool
+		ExpectedValue        os.FileMode
+	}{
+		"empty": {
+			Value:           "",
+			IsErrorExpected: false,
+			IsNilExpected:   true,
+		},
+		"whitespace": {
+			Value:           " ",
+			IsErrorExpected: false,
+			IsNilExpected:   true,
+		},
+		"nonsense": {
+			Value:                "juan",
+			IsErrorExpected:      true,
+			ExpectedErrorMessage: "unable to parse file mode: invalid parameter: strconv.ParseUint: parsing \"juan\": invalid syntax",
+		},
+		"zero": {
+			Value:           "0000",
+			IsErrorExpected: false,
+			ExpectedValue:   os.FileMode(0o000),
+		},
+		"valid": {
+			Value:           "0007",
+			IsErrorExpected: false,
+			ExpectedValue:   os.FileMode(0o007),
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			opts := &options{}
+			applyOption := WithFileMode(tc.Value)
+			err := applyOption(opts)
+			switch {
+			case tc.IsErrorExpected:
+				require.Error(t, err)
+				require.EqualError(t, err, tc.ExpectedErrorMessage)
+			default:
+				require.NoError(t, err)
+				switch {
+				case tc.IsNilExpected:
+					// Optional Option 'not supplied' (i.e. was whitespace/empty string)
+					require.Nil(t, opts.withFileMode)
+				default:
+					// Dereference the pointer, so we can examine the file mode.
+					require.Equal(t, tc.ExpectedValue, *opts.withFileMode)
+				}
+			}
+		})
+	}
+}
diff --git a/internal/observability/event/pipeline_reader.go b/internal/observability/event/pipeline_reader.go
new file mode 100644
index 000000000000..f35672f8efa6
--- /dev/null
+++ b/internal/observability/event/pipeline_reader.go
@@ -0,0 +1,24 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import "github.com/hashicorp/eventlogger"
+
+// PipelineReader surfaces information required for pipeline registration.
+type PipelineReader interface {
+	// EventType should return the event type to be used for pipeline registration.
+	EventType() eventlogger.EventType
+
+	// HasFiltering should determine if filter nodes are used by this pipeline.
+	HasFiltering() bool
+
+	// Name for the pipeline which should be used for the eventlogger.PipelineID.
+	Name() string
+
+	// Nodes should return the nodes which should be used by the framework to process events.
+	Nodes() map[eventlogger.NodeID]eventlogger.Node
+
+	// NodeIDs should return the IDs of the nodes, in the order they are required.
+	NodeIDs() []eventlogger.NodeID
+}
diff --git a/internal/observability/event/sink_file.go b/internal/observability/event/sink_file.go
new file mode 100644
index 000000000000..0f5e22e4c8de
--- /dev/null
+++ b/internal/observability/event/sink_file.go
@@ -0,0 +1,218 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/eventlogger"
+)
+
+// defaultFileMode is the default file permissions (read/write for the owner only).
+const (
+	defaultFileMode = 0o600
+	devnull         = "/dev/null"
+)
+
+var _ eventlogger.Node = (*FileSink)(nil)
+
+// FileSink is a sink node which handles writing events to a file.
+type FileSink struct {
+	file           *os.File
+	fileLock       sync.RWMutex
+	fileMode       os.FileMode
+	path           string
+	requiredFormat string
+}
+
+// NewFileSink should be used to create a new FileSink.
+// Accepted options: WithFileMode.
+func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) { + // Parse and check path + p := strings.TrimSpace(path) + if p == "" { + return nil, fmt.Errorf("path is required: %w", ErrInvalidParameter) + } + + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + mode := os.FileMode(defaultFileMode) + // If we got an optional file mode supplied and our path isn't a special keyword + // then we should use the supplied file mode, or maintain the existing file mode. + switch { + case path == devnull: + case opts.withFileMode == nil: + case *opts.withFileMode == 0: // Maintain the existing file's mode when set to "0000". + fileInfo, err := os.Stat(path) + if err != nil { + return nil, fmt.Errorf("unable to determine existing file mode: %w", err) + } + mode = fileInfo.Mode() + default: + mode = *opts.withFileMode + } + + sink := &FileSink{ + file: nil, + fileLock: sync.RWMutex{}, + fileMode: mode, + requiredFormat: format, + path: p, + } + + // Ensure that the file can be successfully opened for writing; + // otherwise it will be too late to catch later without problems + // (ref: https://github.com/hashicorp/vault/issues/550) + if err := sink.open(); err != nil { + return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", sink.path, err) + } + + return sink, nil +} + +// Process handles writing the event to the file sink. +func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + // '/dev/null' path means we just do nothing and pretend we're done. + if s.path == devnull { + return nil, nil + } + + formatted, found := e.Format(s.requiredFormat) + if !found { + return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) + } + + err := s.log(formatted) + if err != nil { + return nil, fmt.Errorf("error writing file for sink %q: %w", s.path, err) + } + + // return nil for the event to indicate the pipeline is complete. + return nil, nil +} + +// Reopen handles closing and reopening the file. +func (s *FileSink) Reopen() error { + // '/dev/null' path means we just do nothing and pretend we're done. + if s.path == devnull { + return nil + } + + s.fileLock.Lock() + defer s.fileLock.Unlock() + + if s.file == nil { + return s.open() + } + + err := s.file.Close() + // Set to nil here so that even if we error out, on the next access open() will be tried. + s.file = nil + if err != nil { + return fmt.Errorf("unable to close file for re-opening on sink %q: %w", s.path, err) + } + + return s.open() +} + +// Type describes the type of this node (sink). +func (s *FileSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} + +// open attempts to open a file at the sink's path, with the sink's fileMode permissions +// if one is not already open. +// It doesn't have any locking and relies on calling functions of FileSink to +// handle this (e.g. log and Reopen methods). 
+func (s *FileSink) open() error { + if s.file != nil { + return nil + } + + if err := os.MkdirAll(filepath.Dir(s.path), s.fileMode); err != nil { + return fmt.Errorf("unable to create file %q: %w", s.path, err) + } + + var err error + s.file, err = os.OpenFile(s.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, s.fileMode) + if err != nil { + return fmt.Errorf("unable to open file for sink %q: %w", s.path, err) + } + + // Change the file mode in case the log file already existed. + // We special case '/dev/null' since we can't chmod it, and bypass if the mode is zero. + switch s.path { + case devnull: + default: + if s.fileMode != 0 { + err = os.Chmod(s.path, s.fileMode) + if err != nil { + return fmt.Errorf("unable to change file permissions '%v' for sink %q: %w", s.fileMode, s.path, err) + } + } + } + + return nil +} + +// log writes the buffer to the file. +// It acquires a lock on the file to do this. +func (s *FileSink) log(data []byte) error { + s.fileLock.Lock() + defer s.fileLock.Unlock() + + reader := bytes.NewReader(data) + + if err := s.open(); err != nil { + return fmt.Errorf("unable to open file for sink %q: %w", s.path, err) + } + + if _, err := reader.WriteTo(s.file); err == nil { + return nil + } + + // Otherwise, opportunistically try to re-open the FD, once per call (1 retry attempt). + err := s.file.Close() + if err != nil { + return fmt.Errorf("unable to close file for sink %q: %w", s.path, err) + } + + s.file = nil + + if err := s.open(); err != nil { + return fmt.Errorf("unable to re-open file for sink %q: %w", s.path, err) + } + + _, err = reader.Seek(0, io.SeekStart) + if err != nil { + return fmt.Errorf("unable to seek to start of file for sink %q: %w", s.path, err) + } + + _, err = reader.WriteTo(s.file) + if err != nil { + return fmt.Errorf("unable to re-write to file for sink %q: %w", s.path, err) + } + + return nil +} diff --git a/internal/observability/event/sink_file_test.go b/internal/observability/event/sink_file_test.go new file mode 100644 index 000000000000..3c5a40802a39 --- /dev/null +++ b/internal/observability/event/sink_file_test.go @@ -0,0 +1,301 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/hashicorp/vault/helper/namespace" + + "github.com/hashicorp/eventlogger" + + "github.com/stretchr/testify/require" +) + +// TestFileSink_Type ensures that the node is a 'sink' type. +func TestFileSink_Type(t *testing.T) { + f, err := NewFileSink(filepath.Join(t.TempDir(), "vault.log"), "json") + require.NoError(t, err) + require.NotNil(t, f) + require.Equal(t, eventlogger.NodeTypeSink, f.Type()) +} + +// TestNewFileSink tests creation of an AuditFileSink. 
+func TestNewFileSink(t *testing.T) { + tests := map[string]struct { + ShouldUseAbsolutePath bool // Path should contain the filename if temp dir is true + Path string + Format string + Options []Option + IsErrorExpected bool + ExpectedErrorMessage string + // Expected values of AuditFileSink + ExpectedFileMode os.FileMode + ExpectedFormat string + ExpectedPath string + ExpectedPrefix string + }{ + "default-values": { + ShouldUseAbsolutePath: true, + IsErrorExpected: true, + ExpectedErrorMessage: "path is required: invalid parameter", + }, + "spacey-path": { + ShouldUseAbsolutePath: true, + Path: " ", + Format: "json", + IsErrorExpected: true, + ExpectedErrorMessage: "path is required: invalid parameter", + }, + "valid-path-and-format": { + Path: "vault.log", + Format: "json", + IsErrorExpected: false, + ExpectedFileMode: defaultFileMode, + ExpectedFormat: "json", + ExpectedPrefix: "", + }, + "file-mode-not-default-or-zero": { + Path: "vault.log", + Format: "json", + Options: []Option{WithFileMode("0007")}, + IsErrorExpected: false, + ExpectedFormat: "json", + ExpectedPrefix: "", + ExpectedFileMode: 0o007, + }, + "prefix": { + Path: "vault.log", + Format: "json", + Options: []Option{WithFileMode("0007")}, + IsErrorExpected: false, + ExpectedPrefix: "bleep", + ExpectedFormat: "json", + ExpectedFileMode: 0o007, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + // t.Parallel() + + // If we need a real directory as a path we can use a temp dir. + // but we should keep track of it for comparison in the new sink. + var tempDir string + tempPath := tc.Path + if !tc.ShouldUseAbsolutePath { + tempDir = t.TempDir() + tempPath = filepath.Join(tempDir, tempPath) + } + + sink, err := NewFileSink(tempPath, tc.Format, tc.Options...) + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, sink) + default: + require.NoError(t, err) + require.NotNil(t, sink) + + // Assert properties are correct. + require.Equal(t, tc.ExpectedFormat, sink.requiredFormat) + require.Equal(t, tc.ExpectedFileMode, sink.fileMode) + + switch { + case tc.ShouldUseAbsolutePath: + require.Equal(t, tc.ExpectedPath, sink.path) + default: + require.Equal(t, tempPath, sink.path) + } + } + }) + } +} + +// TestFileSink_Reopen tests that the sink reopens files as expected when requested to. +// stdout and discard paths are ignored. +// see: https://developer.hashicorp.com/vault/docs/audit/file#file_path +func TestFileSink_Reopen(t *testing.T) { + tests := map[string]struct { + Path string + ShouldUseAbsolutePath bool + ShouldCreateFile bool + ShouldIgnoreFileMode bool + Options []Option + IsErrorExpected bool + ExpectedErrorMessage string + ExpectedFileMode os.FileMode + }{ + // Should be ignored by Reopen + "devnull": { + Path: "/dev/null", + ShouldUseAbsolutePath: true, + ShouldIgnoreFileMode: true, + }, + "happy": { + Path: "vault.log", + ExpectedFileMode: os.FileMode(defaultFileMode), + }, + "filemode-existing": { + Path: "vault.log", + ShouldCreateFile: true, + Options: []Option{WithFileMode("0000")}, + ExpectedFileMode: os.FileMode(defaultFileMode), + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // If we need a real directory as a path we can use a temp dir. + // but we should keep track of it for comparison in the new sink. 
+ var tempDir string + tempPath := tc.Path + if !tc.ShouldUseAbsolutePath { + tempDir = t.TempDir() + tempPath = filepath.Join(tempDir, tc.Path) + } + + // If the file mode is 0 then we will need a pre-created file to stat. + // Only do this for paths that are not 'special keywords' + if tc.ShouldCreateFile && tc.Path != devnull { + f, err := os.OpenFile(tempPath, os.O_CREATE, defaultFileMode) + require.NoError(t, err) + defer func() { + err = os.Remove(f.Name()) + require.NoError(t, err) + }() + } + + sink, err := NewFileSink(tempPath, "json", tc.Options...) + require.NoError(t, err) + require.NotNil(t, sink) + + err = sink.Reopen() + + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + default: + require.NoError(t, err) + info, err := os.Stat(tempPath) + require.NoError(t, err) + require.NotNil(t, info) + if !tc.ShouldIgnoreFileMode { + require.Equal(t, tc.ExpectedFileMode, info.Mode()) + } + } + }) + } +} + +// TestFileSink_Process ensures that Process behaves as expected. +func TestFileSink_Process(t *testing.T) { + tests := map[string]struct { + ShouldUseAbsolutePath bool + Path string + ShouldCreateFile bool + Format string + ShouldIgnoreFormat bool + Data string + ShouldUseNilEvent bool + IsErrorExpected bool + ExpectedErrorMessage string + }{ + "devnull": { + ShouldUseAbsolutePath: true, + Path: devnull, + Format: "json", + Data: "foo", + IsErrorExpected: false, + }, + "no-formatted-data": { + ShouldCreateFile: true, + Path: "juan.log", + Format: "json", + Data: "foo", + ShouldIgnoreFormat: true, + IsErrorExpected: true, + ExpectedErrorMessage: "unable to retrieve event formatted as \"json\": invalid parameter", + }, + "nil": { + Path: "foo.log", + Format: "json", + Data: "foo", + ShouldUseNilEvent: true, + IsErrorExpected: true, + ExpectedErrorMessage: "event is nil: invalid parameter", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + // Temp dir for most testing unless we're trying to test an error + var tempDir string + tempPath := tc.Path + if !tc.ShouldUseAbsolutePath { + tempDir = t.TempDir() + tempPath = filepath.Join(tempDir, tc.Path) + } + + // Create a file if we will need it there before Process kicks off. + if tc.ShouldCreateFile && tc.Path != devnull { + f, err := os.OpenFile(tempPath, os.O_CREATE, defaultFileMode) + require.NoError(t, err) + defer func() { + err = os.Remove(f.Name()) + require.NoError(t, err) + }() + } + + // Set up a sink + sink, err := NewFileSink(tempPath, tc.Format) + require.NoError(t, err) + require.NotNil(t, sink) + + // Generate a fake event + ctx := namespace.RootContext(nil) + + event := &eventlogger.Event{ + Type: "audit", + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: struct{ ID string }{ID: "123"}, + } + + if !tc.ShouldIgnoreFormat { + event.FormattedAs(tc.Format, []byte(tc.Data)) + } + + if tc.ShouldUseNilEvent { + event = nil + } + + // The actual exercising of the sink. + event, err = sink.Process(ctx, event) + switch { + case tc.IsErrorExpected: + require.Error(t, err) + require.EqualError(t, err, tc.ExpectedErrorMessage) + require.Nil(t, event) + default: + require.NoError(t, err) + require.Nil(t, event) + } + }) + } +} diff --git a/internal/observability/event/sink_noop.go b/internal/observability/event/sink_noop.go new file mode 100644 index 000000000000..165fd700f5bf --- /dev/null +++ b/internal/observability/event/sink_noop.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. 
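Putting the new file sink together: the tests above exercise `NewFileSink` and `Process` directly, which is also how a caller would drive the node outside of an `eventlogger` pipeline. A minimal usage sketch with illustrative path and payload values:

```
// Sketch: constructing and driving a FileSink by hand (values illustrative).
sink, err := NewFileSink(filepath.Join(dir, "vault.log"), "json", WithFileMode("0640"))
if err != nil {
	t.Fatal(err)
}

e := &eventlogger.Event{
	Type:      AuditType.AsEventType(),
	CreatedAt: time.Now(),
	Formatted: make(map[string][]byte),
}
e.FormattedAs("json", []byte(`{"example":true}`))

// A nil returned event signals the pipeline is complete for this sink.
if _, err := sink.Process(context.Background(), e); err != nil {
	t.Fatal(err)
}
```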
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*NoopSink)(nil)
+
+// NoopSink is a sink node which ignores everything it receives.
+type NoopSink struct{}
+
+// NewNoopSink should be used to create a new NoopSink.
+func NewNoopSink() *NoopSink {
+	return &NoopSink{}
+}
+
+// Process is a no-op and always returns nil event and nil error.
+func (_ *NoopSink) Process(ctx context.Context, _ *eventlogger.Event) (*eventlogger.Event, error) {
+	// return nil for the event to indicate the pipeline is complete.
+	return nil, nil
+}
+
+// Reopen is a no-op and always returns nil.
+func (_ *NoopSink) Reopen() error {
+	return nil
+}
+
+// Type describes the type of this node (sink).
+func (_ *NoopSink) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeSink
+}
diff --git a/internal/observability/event/sink_socket.go b/internal/observability/event/sink_socket.go
new file mode 100644
index 000000000000..7d7502306086
--- /dev/null
+++ b/internal/observability/event/sink_socket.go
@@ -0,0 +1,194 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/eventlogger"
+	"github.com/hashicorp/go-multierror"
+)
+
+var _ eventlogger.Node = (*SocketSink)(nil)
+
+// SocketSink is a sink node which handles writing events to a socket.
+type SocketSink struct {
+	requiredFormat string
+	address        string
+	socketType     string
+	maxDuration    time.Duration
+	socketLock     sync.RWMutex
+	connection     net.Conn
+}
+
+// NewSocketSink should be used to create a new SocketSink.
+// Accepted options: WithMaxDuration and WithSocketType.
+func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, error) {
+	address = strings.TrimSpace(address)
+	if address == "" {
+		return nil, fmt.Errorf("address is required: %w", ErrInvalidParameter)
+	}
+
+	format = strings.TrimSpace(format)
+	if format == "" {
+		return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter)
+	}
+
+	opts, err := getOpts(opt...)
+	if err != nil {
+		return nil, err
+	}
+
+	sink := &SocketSink{
+		requiredFormat: format,
+		address:        address,
+		socketType:     opts.withSocketType,
+		maxDuration:    opts.withMaxDuration,
+		socketLock:     sync.RWMutex{},
+		connection:     nil,
+	}
+
+	return sink, nil
+}
+
+// Process handles writing the event to the socket.
+func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	s.socketLock.Lock()
+	defer s.socketLock.Unlock()
+
+	if e == nil {
+		return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
+	}
+
+	formatted, found := e.Format(s.requiredFormat)
+	if !found {
+		return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
+	}
+
+	// Try writing and return early if successful.
+	err := s.write(ctx, formatted)
+	if err == nil {
+		return nil, nil
+	}
+
+	// We will try to reconnect and retry a single write.
+	reconErr := s.reconnect(ctx)
+	switch {
+	case reconErr != nil:
+		// Add the reconnection error to the existing error.
+		err = multierror.Append(err, reconErr)
+	default:
+		err = s.write(ctx, formatted)
+	}
+
+	// Format the error nicely if we need to return one.
+	if err != nil {
+		err = fmt.Errorf("error writing to socket %q: %w", s.address, err)
+	}
+
+	// return nil for the event to indicate the pipeline is complete.
+	return nil, err
+}
+
+// Reopen handles reopening the connection for the socket sink.
+func (s *SocketSink) Reopen() error {
+	s.socketLock.Lock()
+	defer s.socketLock.Unlock()
+
+	err := s.reconnect(nil)
+	if err != nil {
+		return fmt.Errorf("error reconnecting %q: %w", s.address, err)
+	}
+
+	return nil
+}
+
+// Type describes the type of this node (sink).
+func (_ *SocketSink) Type() eventlogger.NodeType {
+	return eventlogger.NodeTypeSink
+}
+
+// connect attempts to establish a connection using the socketType and address.
+func (s *SocketSink) connect(ctx context.Context) error {
+	// If we're already connected, we should have disconnected first.
+	if s.connection != nil {
+		return nil
+	}
+
+	// Reopen calls reconnect with a nil context, and context.WithTimeout
+	// panics on a nil parent, so fall back to a background context here.
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	timeoutContext, cancel := context.WithTimeout(ctx, s.maxDuration)
+	defer cancel()
+
+	dialer := net.Dialer{}
+	conn, err := dialer.DialContext(timeoutContext, s.socketType, s.address)
+	if err != nil {
+		return fmt.Errorf("error connecting to %q address %q: %w", s.socketType, s.address, err)
+	}
+
+	s.connection = conn
+
+	return nil
+}
+
+// disconnect attempts to close and clear an existing connection.
+func (s *SocketSink) disconnect() error {
+	// If we're already disconnected, we can return early.
+	if s.connection == nil {
+		return nil
+	}
+
+	err := s.connection.Close()
+	if err != nil {
+		return fmt.Errorf("error closing connection to %q address %q: %w", s.socketType, s.address, err)
+	}
+	s.connection = nil
+
+	return nil
+}
+
+// reconnect attempts to disconnect and then connect to the configured socketType and address.
+func (s *SocketSink) reconnect(ctx context.Context) error {
+	err := s.disconnect()
+	if err != nil {
+		return err
+	}
+
+	err = s.connect(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// write attempts to write the specified data using the established connection.
+func (s *SocketSink) write(ctx context.Context, data []byte) error {
+	// Ensure we're connected.
+	err := s.connect(ctx)
+	if err != nil {
+		return err
+	}
+
+	err = s.connection.SetWriteDeadline(time.Now().Add(s.maxDuration))
+	if err != nil {
+		return fmt.Errorf("unable to set write deadline: %w", err)
+	}
+
+	_, err = s.connection.Write(data)
+	if err != nil {
+		return fmt.Errorf("unable to write to socket: %w", err)
+	}
+
+	return nil
+}
diff --git a/internal/observability/event/sink_socket_test.go b/internal/observability/event/sink_socket_test.go
new file mode 100644
index 000000000000..f0685e52e461
--- /dev/null
+++ b/internal/observability/event/sink_socket_test.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+func TestNewSocketSink(t *testing.T) {
+	t.Parallel()
+
+	tests := map[string]struct {
+		address        string
+		format         string
+		opts           []Option
+		want           *SocketSink
+		wantErr        bool
+		expectedErrMsg string
+	}{
+		"address-empty": {
+			address:        "",
+			wantErr:        true,
+			expectedErrMsg: "address is required: invalid parameter",
+		},
+		"address-whitespace": {
+			address:        " ",
+			wantErr:        true,
+			expectedErrMsg: "address is required: invalid parameter",
+		},
+		"format-empty": {
+			address:        "addr",
+			format:         "",
+			wantErr:        true,
+			expectedErrMsg: "format is required: invalid parameter",
+		},
+		"format-whitespace": {
+			address:        "addr",
+			format:         " ",
+			wantErr:        true,
+			expectedErrMsg: "format is required: invalid parameter",
+		},
+		"bad-max-duration": {
+			address:        "addr",
+			format:         "json",
+			opts:           []Option{WithMaxDuration("bar")},
+			wantErr:        true,
+			expectedErrMsg: "unable to parse max duration: invalid parameter: time: invalid duration \"bar\"",
+		},
+		"happy": {
+			address: "wss://foo",
+			format:  "json",
+			want: &SocketSink{
+				requiredFormat: "json",
+				address:        "wss://foo",
+				socketType:     "tcp",           // defaults to tcp
+				maxDuration:    2 * time.Second, // defaults to 2 secs
+			},
+		},
+	}
+
+	for name, tc := range tests {
+		name := name
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			got, err := NewSocketSink(tc.address, tc.format, tc.opts...)
+
+			if tc.wantErr {
+				require.Error(t, err)
+				require.EqualError(t, err, tc.expectedErrMsg)
+				require.Nil(t, got)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.want, got)
+			}
+		})
+	}
+}
diff --git a/internal/observability/event/sink_stdout.go b/internal/observability/event/sink_stdout.go
new file mode 100644
index 000000000000..1c0508f80da6
--- /dev/null
+++ b/internal/observability/event/sink_stdout.go
@@ -0,0 +1,70 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/eventlogger"
+)
+
+var _ eventlogger.Node = (*StdoutSink)(nil)
+
+// StdoutSink is a structure that implements the eventlogger.Node interface
+// as a Sink node that writes the events to the standard output stream.
+type StdoutSink struct {
+	requiredFormat string
+}
+
+// NewStdoutSinkNode creates a new StdoutSink that will persist the events
+// it processes using the specified expected format.
+func NewStdoutSinkNode(format string) (*StdoutSink, error) {
+	format = strings.TrimSpace(format)
+	if format == "" {
+		return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter)
+	}
+
+	return &StdoutSink{
+		requiredFormat: format,
+	}, nil
+}
+
+// Process persists the provided eventlogger.Event to the standard output stream.
+func (s *StdoutSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+
+	if e == nil {
+		return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
+	}
+
+	formatted, found := e.Format(s.requiredFormat)
+	if !found {
+		return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
+	}
+
+	_, err := os.Stdout.Write(formatted)
+	if err != nil {
+		return nil, fmt.Errorf("error writing to stdout: %w", err)
+	}
+
+	// Return nil, nil to indicate the pipeline is complete.
+	return nil, nil
+}
+
+// Reopen is a no-op for the StdoutSink type.
+func (s *StdoutSink) Reopen() error {
+	return nil
+}
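All of these sinks share the same contract: look the pre-formatted payload up on the event by format name, emit it, and return (nil, nil) to mark the pipeline complete. A minimal standalone sketch of that lookup, assuming only the hashicorp/eventlogger dependency (the names here are illustrative and not part of the change):

```
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/eventlogger"
)

func main() {
	event := &eventlogger.Event{
		Type:      "audit",
		CreatedAt: time.Now(),
		Formatted: make(map[string][]byte),
	}

	// A formatter node would normally populate this map; we fake it here.
	event.FormattedAs("json", []byte(`{"id":"123"}`))

	// Sinks look the payload up by the format they were configured with,
	// and error out when it is missing.
	if formatted, found := event.Format("json"); found {
		fmt.Printf("sink would write: %s\n", formatted)
	}
}
```

+// Type returns the eventlogger.NodeTypeSink constant.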
+func (s *StdoutSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} diff --git a/internal/observability/event/sink_syslog.go b/internal/observability/event/sink_syslog.go new file mode 100644 index 000000000000..6d6b6b6aee2f --- /dev/null +++ b/internal/observability/event/sink_syslog.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/eventlogger" + gsyslog "github.com/hashicorp/go-syslog" +) + +var _ eventlogger.Node = (*SyslogSink)(nil) + +// SyslogSink is a sink node which handles writing events to syslog. +type SyslogSink struct { + requiredFormat string + logger gsyslog.Syslogger +} + +// NewSyslogSink should be used to create a new SyslogSink. +// Accepted options: WithFacility and WithTag. +func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) { + format = strings.TrimSpace(format) + if format == "" { + return nil, fmt.Errorf("format is required: %w", ErrInvalidParameter) + } + + opts, err := getOpts(opt...) + if err != nil { + return nil, err + } + + logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, opts.withFacility, opts.withTag) + if err != nil { + return nil, fmt.Errorf("error creating syslogger: %w", err) + } + + return &SyslogSink{requiredFormat: format, logger: logger}, nil +} + +// Process handles writing the event to the syslog. +func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if e == nil { + return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) + } + + formatted, found := e.Format(s.requiredFormat) + if !found { + return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) + } + + _, err := s.logger.Write(formatted) + if err != nil { + return nil, fmt.Errorf("error writing to syslog: %w", err) + } + + // return nil for the event to indicate the pipeline is complete. + return nil, nil +} + +// Reopen is a no-op for a syslog sink. +func (_ *SyslogSink) Reopen() error { + return nil +} + +// Type describes the type of this node (sink). +func (_ *SyslogSink) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} diff --git a/internal/observability/event/sink_syslog_test.go b/internal/observability/event/sink_syslog_test.go new file mode 100644 index 000000000000..519ae5197c6d --- /dev/null +++ b/internal/observability/event/sink_syslog_test.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package event + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestNewSyslogSink ensures that we validate the input arguments and can create +// the SyslogSink if everything goes to plan. +func TestNewSyslogSink(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + format string + opts []Option + want *SyslogSink + wantErr bool + expectedErrMsg string + }{ + "format-empty": { + format: "", + wantErr: true, + expectedErrMsg: "format is required: invalid parameter", + }, + "format-whitespace": { + format: " ", + wantErr: true, + expectedErrMsg: "format is required: invalid parameter", + }, + "happy": { + format: "json", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + got, err := NewSyslogSink(tc.format, tc.opts...) 
+ + if tc.wantErr { + require.Error(t, err) + require.EqualError(t, err, tc.expectedErrMsg) + require.Nil(t, got) + } else { + require.NoError(t, err) + require.NotNil(t, got) + } + }) + } +} diff --git a/internalshared/configutil/config.go b/internalshared/configutil/config.go index dd63239c7c9b..7ca2c32c509d 100644 --- a/internalshared/configutil/config.go +++ b/internalshared/configutil/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( @@ -38,16 +41,20 @@ type SharedConfig struct { // LogFormat specifies the log format. Valid values are "standard" and // "json". The values are case-insenstive. If no log format is specified, // then standard format will be used. - LogFormat string `hcl:"log_format"` - LogLevel string `hcl:"log_level"` - LogFile string `hcl:"log_file"` - LogRotateBytes string `hcl:"log_rotate_bytes"` - LogRotateDuration string `hcl:"log_rotate_duration"` - LogRotateMaxFiles string `hcl:"log_rotate_max_files"` + LogFile string `hcl:"log_file"` + LogFormat string `hcl:"log_format"` + LogLevel string `hcl:"log_level"` + LogRotateBytes int `hcl:"log_rotate_bytes"` + LogRotateBytesRaw interface{} `hcl:"log_rotate_bytes"` + LogRotateDuration string `hcl:"log_rotate_duration"` + LogRotateMaxFiles int `hcl:"log_rotate_max_files"` + LogRotateMaxFilesRaw interface{} `hcl:"log_rotate_max_files"` PidFile string `hcl:"pid_file"` ClusterName string `hcl:"cluster_name"` + + AdministrativeNamespacePath string `hcl:"administrative_namespace_path"` } func ParseConfig(d string) (*SharedConfig, error) { @@ -94,7 +101,7 @@ func ParseConfig(d string) (*SharedConfig, error) { if o := list.Filter("seal"); len(o.Items) > 0 { result.found("seal", "Seal") - if err := parseKMS(&result.Seals, o, "seal", 3); err != nil { + if err := parseKMS(&result.Seals, o, "seal", 5); err != nil { return nil, fmt.Errorf("error parsing 'seal': %w", err) } } @@ -115,9 +122,17 @@ func ParseConfig(d string) (*SharedConfig, error) { if o := list.Filter("listener"); len(o.Items) > 0 { result.found("listener", "Listener") - if err := ParseListeners(&result, o); err != nil { + listeners, err := ParseListeners(o) + if err != nil { return nil, fmt.Errorf("error parsing 'listener': %w", err) } + // Update the shared config + result.Listeners = listeners + + // Track which types of listener were found. 
+ for _, l := range result.Listeners { + result.found(l.Type.String(), l.Type.String()) + } } if o := list.Filter("user_lockout"); len(o.Items) > 0 { @@ -162,16 +177,27 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { } result := map[string]interface{}{ - "disable_mlock": c.DisableMlock, - - "default_max_request_duration": c.DefaultMaxRequestDuration, - - "log_level": c.LogLevel, - "log_format": c.LogFormat, - - "pid_file": c.PidFile, + "default_max_request_duration": c.DefaultMaxRequestDuration, + "disable_mlock": c.DisableMlock, + "log_level": c.LogLevel, + "log_format": c.LogFormat, + "pid_file": c.PidFile, + "cluster_name": c.ClusterName, + "administrative_namespace_path": c.AdministrativeNamespacePath, + } - "cluster_name": c.ClusterName, + // Optional log related settings + if c.LogFile != "" { + result["log_file"] = c.LogFile + } + if c.LogRotateBytes != 0 { + result["log_rotate_bytes"] = c.LogRotateBytes + } + if c.LogRotateDuration != "" { + result["log_rotate_duration"] = c.LogRotateDuration + } + if c.LogRotateMaxFiles != 0 { + result["log_rotate_max_files"] = c.LogRotateMaxFiles } // Sanitize listeners @@ -210,7 +236,12 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { cleanSeal := map[string]interface{}{ "type": s.Type, "disabled": s.Disabled, + "name": s.Name, + } + if s.Priority > 0 { + cleanSeal["priority"] = s.Priority } + sanitizedSeals = append(sanitizedSeals, cleanSeal) } result["seals"] = sanitizedSeals @@ -248,6 +279,7 @@ func (c *SharedConfig) Sanitized() map[string]interface{} { "lease_metrics_epsilon": c.Telemetry.LeaseMetricsEpsilon, "num_lease_metrics_buckets": c.Telemetry.NumLeaseMetricsTimeBuckets, "add_lease_metrics_namespace_labels": c.Telemetry.LeaseMetricsNameSpaceLabels, + "add_mount_point_rollback_metrics": c.Telemetry.RollbackMetricsIncludeMountPoint, } result["telemetry"] = sanitizedTelemetry } diff --git a/internalshared/configutil/config_test.go b/internalshared/configutil/config_test.go new file mode 100644 index 000000000000..4362f92284e5 --- /dev/null +++ b/internalshared/configutil/config_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type mapValue[T any] struct { + Value T + IsFound bool +} + +type expectedLogFields struct { + File mapValue[string] + Format mapValue[string] + Level mapValue[string] + RotateBytes mapValue[int] + RotateDuration mapValue[string] + RotateMaxFiles mapValue[int] +} + +// TestSharedConfig_Sanitized_LogFields ensures that 'log related' shared config +// is sanitized as expected. 
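For context before the test below: Sanitized always emits log_level and log_format, but only includes log_file and the rotation keys when they are set, which is exactly the behaviour the table-driven test pins down. A hedged standalone sketch, assuming the github.com/hashicorp/vault/internalshared/configutil import path:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	hclConfig := `
log_level  = "warn"
log_format = "json"
log_file   = "vault.log"
`
	shared, err := configutil.ParseConfig(hclConfig)
	if err != nil {
		panic(err)
	}

	sanitized := shared.Sanitized()
	fmt.Println(sanitized["log_level"]) // warn
	fmt.Println(sanitized["log_file"])  // vault.log

	// log_rotate_bytes was never set, so the key is absent entirely.
	_, found := sanitized["log_rotate_bytes"]
	fmt.Println(found) // false
}
```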
+func TestSharedConfig_Sanitized_LogFields(t *testing.T) { + tests := map[string]struct { + Value *SharedConfig + IsNil bool + Expected expectedLogFields + }{ + "nil": { + Value: nil, + IsNil: true, + }, + "empty": { + Value: &SharedConfig{}, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: ""}, + Level: mapValue[string]{IsFound: true, Value: ""}, + }, + }, + "only-log-level-and-format": { + Value: &SharedConfig{ + LogFormat: "json", + LogLevel: "warn", + }, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + }, + }, + "valid-log-fields": { + Value: &SharedConfig{ + LogFile: "vault.log", + LogFormat: "json", + LogLevel: "warn", + LogRotateBytes: 1024, + LogRotateDuration: "30m", + LogRotateMaxFiles: -1, + }, + IsNil: false, + Expected: expectedLogFields{ + File: mapValue[string]{IsFound: true, Value: "vault.log"}, + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + RotateBytes: mapValue[int]{IsFound: true, Value: 1024}, + RotateDuration: mapValue[string]{IsFound: true, Value: "30m"}, + RotateMaxFiles: mapValue[int]{IsFound: true, Value: -1}, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + cfg := tc.Value.Sanitized() + switch { + case tc.IsNil: + require.Nil(t, cfg) + default: + require.NotNil(t, cfg) + + // Log file + val, found := cfg["log_file"] + switch { + case tc.Expected.File.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.File.Value, val) + default: + require.Nil(t, val) + } + + // Log format + val, found = cfg["log_format"] + switch { + case tc.Expected.Format.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Format.Value, val) + default: + require.Nil(t, val) + } + + // Log level + val, found = cfg["log_level"] + switch { + case tc.Expected.Level.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Level.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate bytes + val, found = cfg["log_rotate_bytes"] + switch { + case tc.Expected.RotateBytes.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateBytes.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate duration + val, found = cfg["log_rotate_duration"] + switch { + case tc.Expected.RotateDuration.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateDuration.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate max files + val, found = cfg["log_rotate_max_files"] + switch { + case tc.Expected.RotateMaxFiles.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateMaxFiles.Value, val) + default: + require.Nil(t, val) + } + } + }) + } +} diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go index 05cb061e7e9b..76e2b198a60e 100644 --- a/internalshared/configutil/config_util.go +++ b/internalshared/configutil/config_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + //go:build !enterprise package configutil diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go index 1e9f830901c4..84ad39fb6a24 100644 --- a/internalshared/configutil/encrypt_decrypt.go +++ b/internalshared/configutil/encrypt_decrypt.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go index b9257bb6c2b2..3b4609e4549a 100644 --- a/internalshared/configutil/encrypt_decrypt_test.go +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( @@ -43,7 +46,7 @@ telemetry { } first := true - locs := decryptRegex.FindAllIndex([]byte(out), -1) + locs := decryptRegex.FindAllStringIndex(out, -1) for _, match := range locs { matchBytes := []byte(out)[match[0]:match[1]] matchBytes = bytes.TrimSuffix(bytes.TrimPrefix(matchBytes, []byte("{{decrypt(")), []byte(")}}")) diff --git a/internalshared/configutil/entropymode_enumer.go b/internalshared/configutil/entropymode_enumer.go new file mode 100644 index 000000000000..6b804001c481 --- /dev/null +++ b/internalshared/configutil/entropymode_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer -type=EntropyMode -trimprefix=Entropy"; DO NOT EDIT. + +package configutil + +import ( + "fmt" +) + +const _EntropyModeName = "UnknownAugmentation" + +var _EntropyModeIndex = [...]uint8{0, 7, 19} + +func (i EntropyMode) String() string { + if i < 0 || i >= EntropyMode(len(_EntropyModeIndex)-1) { + return fmt.Sprintf("EntropyMode(%d)", i) + } + return _EntropyModeName[_EntropyModeIndex[i]:_EntropyModeIndex[i+1]] +} + +var _EntropyModeValues = []EntropyMode{0, 1} + +var _EntropyModeNameToValueMap = map[string]EntropyMode{ + _EntropyModeName[0:7]: 0, + _EntropyModeName[7:19]: 1, +} + +// EntropyModeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func EntropyModeString(s string) (EntropyMode, error) { + if val, ok := _EntropyModeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to EntropyMode values", s) +} + +// EntropyModeValues returns all values of the enum +func EntropyModeValues() []EntropyMode { + return _EntropyModeValues +} + +// IsAEntropyMode returns "true" if the value is listed in the enum definition. "false" otherwise +func (i EntropyMode) IsAEntropyMode() bool { + for _, v := range _EntropyModeValues { + if i == v { + return true + } + } + return false +} diff --git a/internalshared/configutil/env_var_util.go b/internalshared/configutil/env_var_util.go new file mode 100644 index 000000000000..4c2986d18408 --- /dev/null +++ b/internalshared/configutil/env_var_util.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2" + "github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2" +) + +var ( + AliCloudKMSEnvVars = map[string]string{ + "ALICLOUD_REGION": "region", + "ALICLOUD_DOMAIN": "domain", + "ALICLOUD_ACCESS_KEY": "access_key", + "ALICLOUD_SECRET_KEY": "secret_key", + alicloudkms.EnvVaultAliCloudKmsSealKeyId: "kms_key_id", + alicloudkms.EnvAliCloudKmsWrapperKeyId: "kms_key_id", + } + + AWSKMSEnvVars = map[string]string{ + "AWS_REGION": "region", + "AWS_DEFAULT_REGION": "region", + "AWS_ACCESS_KEY_ID": "access_key", + "AWS_SESSION_TOKEN": "session_token", + "AWS_SECRET_ACCESS_KEY": "secret_key", + awskms.EnvVaultAwsKmsSealKeyId: "kms_key_id", + awskms.EnvAwsKmsWrapperKeyId: "kms_key_id", + "AWS_KMS_ENDPOINT": "endpoint", + } + + AzureEnvVars = map[string]string{ + "AZURE_TENANT_ID": "tenant_id", + "AZURE_CLIENT_ID": "client_id", + "AZURE_CLIENT_SECRET": "client_secret", + "AZURE_ENVIRONMENT": "environment", + "AZURE_AD_RESOURCE": "resource", + azurekeyvault.EnvAzureKeyVaultWrapperKeyName: "key_name", + azurekeyvault.EnvVaultAzureKeyVaultKeyName: "key_name", + azurekeyvault.EnvAzureKeyVaultWrapperVaultName: "vault_name", + azurekeyvault.EnvVaultAzureKeyVaultVaultName: "vault_name", + } + + GCPCKMSEnvVars = map[string]string{ + gcpckms.EnvGcpCkmsWrapperCredsPath: "credentials", + "GOOGLE_APPLICATION_CREDENTIALS": "credentials", + gcpckms.EnvGcpCkmsWrapperProject: "project", + gcpckms.EnvGcpCkmsWrapperLocation: "region", + gcpckms.EnvVaultGcpCkmsSealCryptoKey: "crypto_key", + gcpckms.EnvGcpCkmsWrapperCryptoKey: "crypto_key", + gcpckms.EnvGcpCkmsWrapperKeyRing: "key_ring", + gcpckms.EnvVaultGcpCkmsSealKeyRing: "key_ring", + } + + OCIKMSEnvVars = map[string]string{ + ocikms.EnvOciKmsWrapperCryptoEndpoint: "crypto_endpoint", + ocikms.EnvVaultOciKmsSealCryptoEndpoint: "crypto_endpoint", + ocikms.EnvOciKmsWrapperKeyId: "key_id", + ocikms.EnvVaultOciKmsSealKeyId: "key_id", + ocikms.EnvOciKmsWrapperManagementEndpoint: "management_endpoint", + ocikms.EnvVaultOciKmsSealManagementEndpoint: "management_endpoint", + } + + TransitEnvVars = map[string]string{ + "VAULT_ADDR": "address", + "VAULT_TOKEN": "token", + "VAULT_NAMESPACE": "namespace", + "VAULT_CACERT": "tls_ca_cert", + "VAULT_CLIENT_CERT": "tls_client_cert", + "VAULT_CLIENT_KEY": "tls_client_key", + "VAULT_TLS_SERVER_NAME": "tls_server_name", + "VAULT_SKIP_VERIFY": "tls_skip_verify", + transit.EnvVaultTransitSealKeyName: "key_name", + transit.EnvTransitWrapperKeyName: "key_name", + transit.EnvTransitWrapperMountPath: "mount_path", + transit.EnvVaultTransitSealMountPath: "mount_path", + transit.EnvTransitWrapperDisableRenewal: "disable_renewal", + transit.EnvVaultTransitSealDisableRenewal: "disable_renewal", + } + + // TransitPrioritizeConfigValues are the variables where file config takes precedence over env vars in transit seals + TransitPrioritizeConfigValues = []string{ + "token", + "address", + } + + // TransitTLSConfigVars are the TLS config variables for transit seals + // if one of them is set in file config, transit seals use the file config for all TLS values and ignore env vars + // otherwise they use the env vars for TLS config + TransitTLSConfigVars = []string{ + 
"tls_ca_cert", + "tls_client_cert", + "tls_client_key", + "tls_server_name", + "tls_skip_verify", + } +) diff --git a/internalshared/configutil/hcp_link.go b/internalshared/configutil/hcp_link.go index a46c3bb1f553..0f130961d1af 100644 --- a/internalshared/configutil/hcp_link.go +++ b/internalshared/configutil/hcp_link.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/configutil/http_response_headers.go b/internalshared/configutil/http_response_headers.go index 2db3034e588b..cbc71bccfac3 100644 --- a/internalshared/configutil/http_response_headers.go +++ b/internalshared/configutil/http_response_headers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go index 614a6ec8e571..f0948118dd95 100644 --- a/internalshared/configutil/kms.go +++ b/internalshared/configutil/kms.go @@ -1,14 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( "context" "crypto/rand" + "errors" "fmt" "io" + "os" + "regexp" "strings" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" wrapping "github.com/hashicorp/go-kms-wrapping/v2" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" "github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2" @@ -21,24 +28,36 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/logical" ) var ( ConfigureWrapper = configureWrapper CreateSecureRandomReaderFunc = createSecureRandomReader + GetEnvConfigFunc = getEnvConfig ) -// Entropy contains Entropy configuration for the server +//go:generate enumer -type=EntropyMode -trimprefix=Entropy + +// EntropyMode contains Entropy configuration for the server type EntropyMode int const ( EntropyUnknown EntropyMode = iota EntropyAugmentation + + KmsRenameDisabledSuffix = "-disabled" ) type Entropy struct { - Mode EntropyMode + Mode EntropyMode + SealName string +} + +type EntropySourcerInfo struct { + Sourcer entropy.Sourcer + Name string } // KMS contains KMS configuration for the server @@ -52,6 +71,9 @@ type KMS struct { Disabled bool Config map[string]string + + Priority int `hcl:"priority"` + Name string `hcl:"name"` } func (k *KMS) GoString() string { @@ -60,7 +82,7 @@ func (k *KMS) GoString() string { func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int) error { if len(list.Items) > maxKMS { - return fmt.Errorf("only two or less %q blocks are permitted", blockName) + return fmt.Errorf("only %d or less %q blocks are permitted", maxKMS, blockName) } seals := make([]*KMS, 0, len(list.Items)) @@ -99,6 +121,36 @@ func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int delete(m, "disabled") } + var priority int + if v, ok := m["priority"]; ok { + priority, err = parseutil.SafeParseInt(v) + if err != nil { + return multierror.Prefix(fmt.Errorf("unable to parse 'priority' in kms type %q: %w", key, err), fmt.Sprintf("%s.%s", blockName, key)) + } + delete(m, "priority") + + if priority < 1 { + return multierror.Prefix(fmt.Errorf("invalid priority in kms type %q: %d", key, priority), fmt.Sprintf("%s.%s", blockName, key)) + } + } + + name := strings.ToLower(key) + // 
ensure that seals of the same type will have unique names for seal migration + if disabled { + name += KmsRenameDisabledSuffix + } + if v, ok := m["name"]; ok { + name, ok = v.(string) + if !ok { + return multierror.Prefix(fmt.Errorf("unable to parse 'name' in kms type %q: unexpected type %T", key, v), fmt.Sprintf("%s.%s", blockName, key)) + } + delete(m, "name") + + if !regexp.MustCompile("^[a-zA-Z0-9-_]+$").MatchString(name) { + return multierror.Prefix(errors.New("'name' field can only include alphanumeric characters, hyphens, and underscores"), fmt.Sprintf("%s.%s", blockName, key)) + } + } + strMap := make(map[string]string, len(m)) for k, v := range m { s, err := parseutil.ParseString(v) @@ -112,6 +164,8 @@ func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int Type: strings.ToLower(key), Purpose: purpose, Disabled: disabled, + Priority: priority, + Name: name, } if len(strMap) > 0 { seal.Config = strMap @@ -165,6 +219,19 @@ func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]strin var kmsInfo map[string]string var err error + envConfig := GetEnvConfigFunc(configKMS) + if len(envConfig) > 0 && configKMS.Config == nil { + configKMS.Config = make(map[string]string) + } + // transit is a special case, because some config values take precedence over env vars + if configKMS.Type == wrapping.WrapperTypeTransit.String() { + mergeTransitConfig(configKMS.Config, envConfig) + } else { + for name, val := range envConfig { + configKMS.Config[name] = val + } + } + switch wrapping.WrapperType(configKMS.Type) { case wrapping.WrapperTypeShamir: return nil, nil @@ -232,7 +299,7 @@ func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[st func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := alicloudkms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -252,7 +319,7 @@ func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, ma var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := awskms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, awskms.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -272,7 +339,7 @@ var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, m func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := azurekeyvault.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, azurekeyvault.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) 
if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -290,7 +357,7 @@ func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := gcpckms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -309,7 +376,7 @@ func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := ocikms.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config))...) if err != nil { return nil, nil, err } @@ -325,7 +392,17 @@ func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[ var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { wrapper := transit.NewWrapper() - wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + var prefix string + if p, ok := kms.Config["key_id_prefix"]; ok { + prefix = p + } else { + prefix = kms.Name + } + if !strings.HasSuffix(prefix, "/") { + prefix = prefix + "/" + } + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithDisallowEnvVars(true), wrapping.WithConfigMap(kms.Config), + transit.WithKeyIdPrefix(prefix))...) 
if err != nil { // If the error is any other than logical.KeyNotFoundError, return the error if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { @@ -344,6 +421,75 @@ var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrappe return wrapper, info, nil } -func createSecureRandomReader(conf *SharedConfig, wrapper wrapping.Wrapper) (io.Reader, error) { +func createSecureRandomReader(_ *SharedConfig, _ []*EntropySourcerInfo, _ hclog.Logger) (io.Reader, error) { return rand.Reader, nil } + +func getEnvConfig(kms *KMS) map[string]string { + envValues := make(map[string]string) + + var wrapperEnvVars map[string]string + switch wrapping.WrapperType(kms.Type) { + case wrapping.WrapperTypeAliCloudKms: + wrapperEnvVars = AliCloudKMSEnvVars + case wrapping.WrapperTypeAwsKms: + wrapperEnvVars = AWSKMSEnvVars + case wrapping.WrapperTypeAzureKeyVault: + wrapperEnvVars = AzureEnvVars + case wrapping.WrapperTypeGcpCkms: + wrapperEnvVars = GCPCKMSEnvVars + case wrapping.WrapperTypeOciKms: + wrapperEnvVars = OCIKMSEnvVars + case wrapping.WrapperTypeTransit: + wrapperEnvVars = TransitEnvVars + default: + return nil + } + + for envVar, configName := range wrapperEnvVars { + val := os.Getenv(envVar) + if val != "" { + envValues[configName] = val + } + } + + return envValues +} + +func mergeTransitConfig(config map[string]string, envConfig map[string]string) { + useFileTlsConfig := false + for _, varName := range TransitTLSConfigVars { + if _, ok := config[varName]; ok { + useFileTlsConfig = true + break + } + } + + if useFileTlsConfig { + for _, varName := range TransitTLSConfigVars { + delete(envConfig, varName) + } + } + + for varName, val := range envConfig { + // for some values, file config takes precedence + if strutil.StrListContains(TransitPrioritizeConfigValues, varName) && config[varName] != "" { + continue + } + + config[varName] = val + } +} + +func (k *KMS) Clone() *KMS { + ret := &KMS{ + UnusedKeys: k.UnusedKeys, + Type: k.Type, + Purpose: k.Purpose, + Config: k.Config, + Name: k.Name, + Disabled: k.Disabled, + Priority: k.Priority, + } + return ret +} diff --git a/internalshared/configutil/kms_test.go b/internalshared/configutil/kms_test.go new file mode 100644 index 000000000000..9eb19a3e3d7d --- /dev/null +++ b/internalshared/configutil/kms_test.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package configutil + +import ( + "os" + "reflect" + "testing" +) + +func Test_getEnvConfig(t *testing.T) { + tests := []struct { + name string + kms *KMS + envVars map[string]string + want map[string]string + }{ + { + "AliCloud wrapper", + &KMS{ + Type: "alicloudkms", + Priority: 1, + }, + map[string]string{"ALICLOUD_REGION": "test_region", "ALICLOUD_DOMAIN": "test_domain", "ALICLOUD_ACCESS_KEY": "test_access_key", "ALICLOUD_SECRET_KEY": "test_secret_key", "VAULT_ALICLOUDKMS_SEAL_KEY_ID": "test_key_id"}, + map[string]string{"region": "test_region", "domain": "test_domain", "access_key": "test_access_key", "secret_key": "test_secret_key", "kms_key_id": "test_key_id"}, + }, + { + "AWS KMS wrapper", + &KMS{ + Type: "awskms", + Priority: 1, + }, + map[string]string{"AWS_REGION": "test_region", "AWS_ACCESS_KEY_ID": "test_access_key", "AWS_SECRET_ACCESS_KEY": "test_secret_key", "VAULT_AWSKMS_SEAL_KEY_ID": "test_key_id"}, + map[string]string{"region": "test_region", "access_key": "test_access_key", "secret_key": "test_secret_key", "kms_key_id": "test_key_id"}, + }, + { + "Azure KeyVault wrapper", + &KMS{ + Type: "azurekeyvault", + Priority: 1, + }, + map[string]string{"AZURE_TENANT_ID": "test_tenant_id", "AZURE_CLIENT_ID": "test_client_id", "AZURE_CLIENT_SECRET": "test_client_secret", "AZURE_ENVIRONMENT": "test_environment", "VAULT_AZUREKEYVAULT_VAULT_NAME": "test_vault_name", "VAULT_AZUREKEYVAULT_KEY_NAME": "test_key_name"}, + map[string]string{"tenant_id": "test_tenant_id", "client_id": "test_client_id", "client_secret": "test_client_secret", "environment": "test_environment", "vault_name": "test_vault_name", "key_name": "test_key_name"}, + }, + { + "GCP CKMS wrapper", + &KMS{ + Type: "gcpckms", + Priority: 1, + }, + map[string]string{"GOOGLE_CREDENTIALS": "test_credentials", "GOOGLE_PROJECT": "test_project", "GOOGLE_REGION": "test_region", "VAULT_GCPCKMS_SEAL_KEY_RING": "test_key_ring", "VAULT_GCPCKMS_SEAL_CRYPTO_KEY": "test_crypto_key"}, + map[string]string{"credentials": "test_credentials", "project": "test_project", "region": "test_region", "key_ring": "test_key_ring", "crypto_key": "test_crypto_key"}, + }, + { + "OCI KMS wrapper", + &KMS{ + Type: "ocikms", + Priority: 1, + }, + map[string]string{"VAULT_OCIKMS_SEAL_KEY_ID": "test_key_id", "VAULT_OCIKMS_CRYPTO_ENDPOINT": "test_crypto_endpoint", "VAULT_OCIKMS_MANAGEMENT_ENDPOINT": "test_management_endpoint"}, + map[string]string{"key_id": "test_key_id", "crypto_endpoint": "test_crypto_endpoint", "management_endpoint": "test_management_endpoint"}, + }, + { + "Transit wrapper", + &KMS{ + Type: "transit", + Priority: 1, + }, + map[string]string{"VAULT_ADDR": "test_address", "VAULT_TOKEN": "test_token", "VAULT_TRANSIT_SEAL_KEY_NAME": "test_key_name", "VAULT_TRANSIT_SEAL_MOUNT_PATH": "test_mount_path"}, + map[string]string{"address": "test_address", "token": "test_token", "key_name": "test_key_name", "mount_path": "test_mount_path"}, + }, + { + "Environment vars not set", + &KMS{ + Type: "awskms", + Priority: 1, + }, + map[string]string{}, + map[string]string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + for envName, envVal := range tt.envVars { + if err := os.Setenv(envName, envVal); err != nil { + t.Errorf("error setting environment vars for test: %s", err) + } + } + + if got := GetEnvConfigFunc(tt.kms); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getEnvConfig() = %v, want %v", got, tt.want) + } + + for env := range tt.envVars { + if err := os.Unsetenv(env); err != 
nil { + t.Errorf("error unsetting environment vars for test: %s", err) + } + } + }) + } +} diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go index 2b5b634156b3..3831564549c7 100644 --- a/internalshared/configutil/lint.go +++ b/internalshared/configutil/lint.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/configutil/listener.go b/internalshared/configutil/listener.go index ea28dbf199eb..2c526eca9bcc 100644 --- a/internalshared/configutil/listener.go +++ b/internalshared/configutil/listener.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( @@ -16,8 +19,17 @@ import ( "github.com/hashicorp/go-sockaddr/template" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/namespace" +) + +const ( + TCP ListenerType = "tcp" + Unix ListenerType = "unix" ) +// ListenerType represents the supported types of listener. +type ListenerType string + type ListenerTelemetry struct { UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` UnauthenticatedMetricsAccess bool `hcl:"-"` @@ -41,7 +53,7 @@ type Listener struct { UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` RawConfig map[string]interface{} - Type string + Type ListenerType Purpose []string `hcl:"-"` PurposeRaw interface{} `hcl:"purpose"` Role string `hcl:"role"` @@ -97,6 +109,8 @@ type Listener struct { AgentAPI *AgentAPI `hcl:"agent_api"` + ProxyAPI *ProxyAPI `hcl:"proxy_api"` + Telemetry ListenerTelemetry `hcl:"telemetry"` Profiling ListenerProfiling `hcl:"profiling"` InFlightRequestLogging ListenerInFlightRequestLogging `hcl:"inflight_requests_logging"` @@ -113,6 +127,26 @@ type Listener struct { // Custom Http response headers CustomResponseHeaders map[string]map[string]string `hcl:"-"` CustomResponseHeadersRaw interface{} `hcl:"custom_response_headers"` + + // ChrootNamespace will prepend the specified namespace to requests + ChrootNamespaceRaw interface{} `hcl:"chroot_namespace"` + ChrootNamespace string `hcl:"-"` + + // Per-listener redaction configuration + RedactAddressesRaw any `hcl:"redact_addresses"` + RedactAddresses bool `hcl:"-"` + RedactClusterNameRaw any `hcl:"redact_cluster_name"` + RedactClusterName bool `hcl:"-"` + RedactVersionRaw any `hcl:"redact_version"` + RedactVersion bool `hcl:"-"` + + // DisableReplicationStatusEndpoint disables the unauthenticated replication status endpoints + DisableReplicationStatusEndpointsRaw interface{} `hcl:"disable_replication_status_endpoints"` + DisableReplicationStatusEndpoints bool `hcl:"-"` + + // DisableRequestLimiter allows per-listener disabling of the Request Limiter. + DisableRequestLimiterRaw any `hcl:"disable_request_limiter"` + DisableRequestLimiter bool `hcl:"-"` } // AgentAPI allows users to select which parts of the Agent API they want enabled. @@ -120,6 +154,11 @@ type AgentAPI struct { EnableQuit bool `hcl:"enable_quit"` } +// ProxyAPI allows users to select which parts of the Vault Proxy API they want enabled. +type ProxyAPI struct { + EnableQuit bool `hcl:"enable_quit"` +} + func (l *Listener) GoString() string { return fmt.Sprintf("*%#v", *l) } @@ -129,317 +168,543 @@ func (l *Listener) Validate(path string) []ConfigError { return append(results, ValidateUnusedFields(l.Profiling.UnusedKeys, path)...) 
 }
 
-func ParseListeners(result *SharedConfig, list *ast.ObjectList) error {
-	var err error
-	result.Listeners = make([]*Listener, 0, len(list.Items))
+// ParseSingleIPTemplate is used as a helper function to parse out a single IP
+// address from a config parameter.
+// If the input doesn't appear to contain the 'template' format,
+// it will return the specified input unchanged.
+func ParseSingleIPTemplate(ipTmpl string) (string, error) {
+	r := regexp.MustCompile("{{.*?}}")
+	if !r.MatchString(ipTmpl) {
+		return ipTmpl, nil
+	}
+
+	out, err := template.Parse(ipTmpl)
+	if err != nil {
+		return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err)
+	}
+
+	ips := strings.Split(out, " ")
+	switch len(ips) {
+	case 0:
+		return "", errors.New("no addresses found, please configure one")
+	case 1:
+		return strings.TrimSpace(ips[0]), nil
+	default:
+		return "", fmt.Errorf("multiple addresses found (%q), please configure one", out)
+	}
+}
+
+// ParseListeners attempts to parse the AST list of objects into listeners.
+func ParseListeners(list *ast.ObjectList) ([]*Listener, error) {
+	listeners := make([]*Listener, len(list.Items))
+
 	for i, item := range list.Items {
-		var l Listener
-		if err := hcl.DecodeObject(&l, item.Val); err != nil {
-			return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
+		l, err := parseListener(item)
+		if err != nil {
+			return nil, multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
 		}
-		if rendered, err := ParseSingleIPTemplate(l.Address); err != nil {
-			return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
-		} else {
-			l.Address = rendered
-		}
-		if rendered, err := ParseSingleIPTemplate(l.ClusterAddress); err != nil {
-			return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
-		} else {
-			l.ClusterAddress = rendered
+		listeners[i] = l
+	}
+
+	return listeners, nil
+}
+
+// parseListener attempts to parse the AST object into a listener.
+func parseListener(item *ast.ObjectItem) (*Listener, error) {
+	var l *Listener
+	var err error
+
+	// Decode the current item
+	if err = hcl.DecodeObject(&l, item.Val); err != nil {
+		return nil, err
+	}
+
+	// Parse and update address if required.
+	if l.Address, err = ParseSingleIPTemplate(l.Address); err != nil {
+		return nil, err
+	}
+
+	// Parse and update cluster address if required.
+	if l.ClusterAddress, err = ParseSingleIPTemplate(l.ClusterAddress); err != nil {
+		return nil, err
+	}
+
+	// Get the values for sanitizing
+	var m map[string]interface{}
+	if err := hcl.DecodeObject(&m, item.Val); err != nil {
+		return nil, err
+	}
+	l.RawConfig = m
+
+	// Parse type, but supply a fallback if type wasn't set.
+	var fallbackType string
+	if len(item.Keys) == 1 {
+		fallbackType = strings.ToLower(item.Keys[0].Token.Value().(string))
+	}
+
+	if err = l.parseType(fallbackType); err != nil {
+		return nil, err
+	}
+
+	// Parse out each set of settings for the listener.
+	for _, parser := range []func() error{
+		l.parseRequestSettings,
+		l.parseTLSSettings,
+		l.parseHTTPTimeoutSettings,
+		l.parseProxySettings,
+		l.parseForwardedForSettings,
+		l.parseTelemetrySettings,
+		l.parseProfilingSettings,
+		l.parseInFlightRequestSettings,
+		l.parseCORSSettings,
+		l.parseHTTPHeaderSettings,
+		l.parseChrootNamespaceSettings,
+		l.parseRedactionSettings,
+		l.parseDisableReplicationStatusEndpointSettings,
+		l.parseDisableRequestLimiter,
+	} {
+		err := parser()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return l, nil
+}
+
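A short standalone sketch of ParseSingleIPTemplate's two behaviours: plain input passes through untouched, while template input is rendered by go-sockaddr and must resolve to exactly one address. The template functions used here (GetAllInterfaces, include, limit, attr) come from go-sockaddr's template package; treat the exact pipeline as illustrative:

```
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	// No "{{ }}" present, so the input is returned unchanged.
	addr, err := configutil.ParseSingleIPTemplate("127.0.0.1:8200")
	fmt.Println(addr, err) // 127.0.0.1:8200 <nil>

	// Template input is rendered by go-sockaddr and must resolve to exactly
	// one address, otherwise an error is returned.
	addr, err = configutil.ParseSingleIPTemplate(
		`{{ GetAllInterfaces | include "flags" "loopback" | limit 1 | attr "address" }}`)
	fmt.Println(addr, err)
}
```

+// Normalize returns the lower case string version of a listener type.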
+func (t ListenerType) Normalize() ListenerType { + return ListenerType(strings.ToLower(string(t))) +} + +// String returns the string version of a listener type. +func (t ListenerType) String() string { + return string(t.Normalize()) +} - // Hacky way, for now, to get the values we want for sanitizing - var m map[string]interface{} - if err := hcl.DecodeObject(&m, item.Val); err != nil { - return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) +// parseAndClearBool parses a raw setting as a bool configuration parameter. If +// the raw value is successfully parsed, the parsedSetting argument is set to it +// and the rawSetting argument is cleared. Otherwise, the rawSetting argument is +// left unchanged and an error is returned. +func parseAndClearBool(rawSetting *interface{}, parsedSetting *bool) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseBool(*rawSetting) + if err != nil { + return err } - l.RawConfig = m - - // Base values - { - switch { - case l.Type != "": - case len(item.Keys) == 1: - l.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) - default: - return multierror.Prefix(errors.New("listener type must be specified"), fmt.Sprintf("listeners.%d:", i)) - } - - l.Type = strings.ToLower(l.Type) - switch l.Type { - case "tcp", "unix": - result.found(l.Type, l.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported listener type %q", l.Type), fmt.Sprintf("listeners.%d:", i)) - } - - if l.PurposeRaw != nil { - if l.Purpose, err = parseutil.ParseCommaStringSlice(l.PurposeRaw); err != nil { - return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in listener type %q: %w", l.Type, err), fmt.Sprintf("listeners.%d:", i)) - } - for i, v := range l.Purpose { - l.Purpose[i] = strings.ToLower(v) - } - - l.PurposeRaw = nil - } - - switch l.Role { - case "default", "metrics_only", "": - result.found(l.Type, l.Type) - default: - return multierror.Prefix(fmt.Errorf("unsupported listener role %q", l.Role), fmt.Sprintf("listeners.%d:", i)) - } + + *rawSetting = nil + } + + return nil +} + +// parseAndClearString parses a raw setting as a string configuration parameter. +// If the raw value is successfully parsed, the parsedSetting argument is set to +// it and the rawSetting argument is cleared. Otherwise, the rawSetting argument +// is left unchanged and an error is returned. 
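These parse-and-clear helpers delegate to go-secure-stdlib's parseutil, which is deliberately lenient about the dynamic types HCL hands back. A small standalone sketch of what parseAndClearBool will therefore accept (the output comments are assumptions based on parseutil's weak decoding):

```
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// HCL hands back loosely typed values; strings, numbers and booleans
	// are all accepted by parseutil.
	for _, raw := range []interface{}{"true", 1, false} {
		parsed, err := parseutil.ParseBool(raw)
		fmt.Println(parsed, err)
	}
	// Expected output:
	// true <nil>
	// true <nil>
	// false <nil>
}
```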
+func parseAndClearString(rawSetting *interface{}, parsedSetting *string) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseString(*rawSetting) + if err != nil { + return err } - // Request Parameters - { - if l.MaxRequestSizeRaw != nil { - if l.MaxRequestSize, err = parseutil.ParseInt(l.MaxRequestSizeRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing max_request_size: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.MaxRequestSizeRaw = nil - } - - if l.MaxRequestDurationRaw != nil { - if l.MaxRequestDuration, err = parseutil.ParseDurationSecond(l.MaxRequestDurationRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing max_request_duration: %w", err), fmt.Sprintf("listeners.%d", i)) - } - if l.MaxRequestDuration < 0 { - return multierror.Prefix(errors.New("max_request_duration cannot be negative"), fmt.Sprintf("listeners.%d", i)) - } - - l.MaxRequestDurationRaw = nil - } - - if l.RequireRequestHeaderRaw != nil { - if l.RequireRequestHeader, err = parseutil.ParseBool(l.RequireRequestHeaderRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for require_request_header: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.RequireRequestHeaderRaw = nil - } + *rawSetting = nil + } + + return nil +} + +// parseAndClearInt parses a raw setting as an integer configuration parameter. +// If the raw value is successfully parsed, the parsedSetting argument is set to +// it and the rawSetting argument is cleared. Otherwise, the rawSetting argument +// is left unchanged and an error is returned. +func parseAndClearInt(rawSetting *interface{}, parsedSetting *int64) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseInt(*rawSetting) + if err != nil { + return err } - // TLS Parameters - { - if l.TLSDisableRaw != nil { - if l.TLSDisable, err = parseutil.ParseBool(l.TLSDisableRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_disable: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSDisableRaw = nil - } - - if l.TLSCipherSuitesRaw != "" { - if l.TLSCipherSuites, err = tlsutil.ParseCiphers(l.TLSCipherSuitesRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_cipher_suites: %w", err), fmt.Sprintf("listeners.%d", i)) - } - } - - if l.TLSRequireAndVerifyClientCertRaw != nil { - if l.TLSRequireAndVerifyClientCert, err = parseutil.ParseBool(l.TLSRequireAndVerifyClientCertRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSRequireAndVerifyClientCertRaw = nil - } - - if l.TLSDisableClientCertsRaw != nil { - if l.TLSDisableClientCerts, err = parseutil.ParseBool(l.TLSDisableClientCertsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for tls_disable_client_certs: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.TLSDisableClientCertsRaw = nil - } + *rawSetting = nil + } + + return nil +} + +// parseAndClearDurationSecond parses a raw setting as a time duration +// configuration parameter. If the raw value is successfully parsed, the +// parsedSetting argument is set to it and the rawSetting argument is cleared. +// Otherwise, the rawSetting argument is left unchanged and an error is +// returned. 
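parseutil.ParseDurationSecond is the reason listener timeouts accept both bare integers and duration strings: integers are read as a number of seconds, while strings may carry explicit units. A minimal standalone sketch:

```
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// Bare integers are treated as a number of seconds...
	d, _ := parseutil.ParseDurationSecond(30)
	fmt.Println(d) // 30s

	// ...while strings may carry an explicit unit.
	d, _ = parseutil.ParseDurationSecond("30m")
	fmt.Println(d) // 30m0s
}
```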
+func parseAndClearDurationSecond(rawSetting *interface{}, parsedSetting *time.Duration) error { + var err error + + if *rawSetting != nil { + *parsedSetting, err = parseutil.ParseDurationSecond(*rawSetting) + if err != nil { + return err } - // HTTP timeouts - { - if l.HTTPReadTimeoutRaw != nil { - if l.HTTPReadTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_read_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + *rawSetting = nil + } - l.HTTPReadTimeoutRaw = nil - } + return nil +} - if l.HTTPReadHeaderTimeoutRaw != nil { - if l.HTTPReadHeaderTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadHeaderTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_read_header_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseDisableReplicationStatusEndpointSettings attempts to parse the raw +// disable_replication_status_endpoints setting. The receiving Listener's +// DisableReplicationStatusEndpoints field will be set with the successfully +// parsed value. +func (l *Listener) parseDisableReplicationStatusEndpointSettings() error { + if l.Type != TCP { + return nil + } - l.HTTPReadHeaderTimeoutRaw = nil - } + if err := parseAndClearBool(&l.DisableReplicationStatusEndpointsRaw, &l.DisableReplicationStatusEndpoints); err != nil { + return fmt.Errorf("invalid value for disable_replication_status_endpoints: %w", err) + } - if l.HTTPWriteTimeoutRaw != nil { - if l.HTTPWriteTimeout, err = parseutil.ParseDurationSecond(l.HTTPWriteTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_write_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} - l.HTTPWriteTimeoutRaw = nil - } +// parseDisableRequestLimiter attempts to parse the raw disable_request_limiter +// setting. The receiving Listener's DisableRequestLimiter field will be set +// with the successfully parsed value or return an error +func (l *Listener) parseDisableRequestLimiter() error { + if err := parseAndClearBool(&l.DisableRequestLimiterRaw, &l.DisableRequestLimiter); err != nil { + return fmt.Errorf("invalid value for disable_request_limiter: %w", err) + } - if l.HTTPIdleTimeoutRaw != nil { - if l.HTTPIdleTimeout, err = parseutil.ParseDurationSecond(l.HTTPIdleTimeoutRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing http_idle_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} - l.HTTPIdleTimeoutRaw = nil - } - } +// parseChrootNamespace attempts to parse the raw listener chroot namespace settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
+func (l *Listener) parseChrootNamespaceSettings() error { + var ( + err error + setting string + ) - // Proxy Protocol config - { - if l.ProxyProtocolAuthorizedAddrsRaw != nil { - if l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - switch l.ProxyProtocolBehavior { - case "allow_authorized", "deny_authorized": - if len(l.ProxyProtocolAuthorizedAddrs) == 0 { - return multierror.Prefix(errors.New("proxy_protocol_behavior set to allow or deny only authorized addresses but no proxy_protocol_authorized_addrs value"), fmt.Sprintf("listeners.%d", i)) - } - } - - l.ProxyProtocolAuthorizedAddrsRaw = nil - } - } + err = parseAndClearString(&l.ChrootNamespaceRaw, &setting) + if err != nil { + return fmt.Errorf("invalid value for chroot_namespace: %w", err) + } - // X-Forwarded-For config - { - if l.XForwardedForAuthorizedAddrsRaw != nil { - if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) - } + l.ChrootNamespace = namespace.Canonicalize(setting) - l.XForwardedForAuthorizedAddrsRaw = nil - } + return nil +} - if l.XForwardedForHopSkipsRaw != nil { - if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil { - return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseType attempts to sanitize and validate the type set on the listener. +// If the listener has no type set, the fallback value will be used. +// The state of the listener will be modified. +func (l *Listener) parseType(fallback string) error { + switch { + case l.Type != "": + case fallback != "": + default: + return errors.New("listener type must be specified") + } - if l.XForwardedForHopSkips < 0 { - return multierror.Prefix(fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips), fmt.Sprintf("listeners.%d", i)) - } + // Use type if available, otherwise fall back. + rawType := l.Type + if rawType == "" { + rawType = ListenerType(fallback) + } + + parsedType := rawType.Normalize() + + // Sanity check the values + switch parsedType { + case TCP, Unix: + default: + return fmt.Errorf("unsupported listener type %q", parsedType) + } - l.XForwardedForHopSkipsRaw = nil - } + l.Type = parsedType - if l.XForwardedForRejectNotAuthorizedRaw != nil { - if l.XForwardedForRejectNotAuthorized, err = parseutil.ParseBool(l.XForwardedForRejectNotAuthorizedRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} - l.XForwardedForRejectNotAuthorizedRaw = nil - } +// parseRequestSettings attempts to parse the raw listener request settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
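Note that max_request_duration is the one request setting that cannot use parseAndClearDurationSecond directly, because it needs a negativity check between parsing and assignment. A standalone sketch of that validation step:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

// checkMaxRequestDuration mirrors the extra validation in
// parseRequestSettings below: parse first, then reject negative durations.
func checkMaxRequestDuration(raw interface{}) error {
	d, err := parseutil.ParseDurationSecond(raw)
	if err != nil {
		return fmt.Errorf("error parsing max_request_duration: %w", err)
	}
	if d < 0 {
		return errors.New("max_request_duration cannot be negative")
	}
	return nil
}

func main() {
	fmt.Println(checkMaxRequestDuration("90s")) // <nil>
	fmt.Println(checkMaxRequestDuration("-5s")) // cannot be negative
}
```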
+func (l *Listener) parseRequestSettings() error { + if err := parseAndClearInt(&l.MaxRequestSizeRaw, &l.MaxRequestSize); err != nil { + return fmt.Errorf("error parsing max_request_size: %w", err) + } - if l.XForwardedForRejectNotPresentRaw != nil { - if l.XForwardedForRejectNotPresent, err = parseutil.ParseBool(l.XForwardedForRejectNotPresentRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err), fmt.Sprintf("listeners.%d", i)) - } + if l.MaxRequestDurationRaw != nil { + maxRequestDuration, err := parseutil.ParseDurationSecond(l.MaxRequestDurationRaw) + if err != nil { + return fmt.Errorf("error parsing max_request_duration: %w", err) + } - l.XForwardedForRejectNotPresentRaw = nil - } + if maxRequestDuration < 0 { + return errors.New("max_request_duration cannot be negative") } - // Telemetry - { - if l.Telemetry.UnauthenticatedMetricsAccessRaw != nil { - if l.Telemetry.UnauthenticatedMetricsAccess, err = parseutil.ParseBool(l.Telemetry.UnauthenticatedMetricsAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } + l.MaxRequestDuration = maxRequestDuration + l.MaxRequestDurationRaw = nil + } + + if err := parseAndClearBool(&l.RequireRequestHeaderRaw, &l.RequireRequestHeader); err != nil { + return fmt.Errorf("invalid value for require_request_header: %w", err) + } + + if err := parseAndClearBool(&l.DisableRequestLimiterRaw, &l.DisableRequestLimiter); err != nil { + return fmt.Errorf("invalid value for disable_request_limiter: %w", err) + } + + return nil +} - l.Telemetry.UnauthenticatedMetricsAccessRaw = nil - } +// parseTLSSettings attempts to parse the raw listener TLS settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseTLSSettings() error { + if err := parseAndClearBool(&l.TLSDisableRaw, &l.TLSDisable); err != nil { + return fmt.Errorf("invalid value for tls_disable: %w", err) + } + + if l.TLSCipherSuitesRaw != "" { + tlsCipherSuites, err := tlsutil.ParseCiphers(l.TLSCipherSuitesRaw) + if err != nil { + return fmt.Errorf("invalid value for tls_cipher_suites: %w", err) } + l.TLSCipherSuites = tlsCipherSuites + } + + if err := parseAndClearBool(&l.TLSRequireAndVerifyClientCertRaw, &l.TLSRequireAndVerifyClientCert); err != nil { + return fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err) + } + + if err := parseAndClearBool(&l.TLSDisableClientCertsRaw, &l.TLSDisableClientCerts); err != nil { + return fmt.Errorf("invalid value for tls_disable_client_certs: %w", err) + } + + // Clear raw values after successful parsing. + l.TLSCipherSuitesRaw = "" + + return nil +} + +// parseHTTPHeaderSettings attempts to parse the raw listener HTTP header settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseHTTPHeaderSettings() error { + // Custom response headers are only supported by TCP listeners. + // Clear raw data and return early if it was something else. 
+ if l.Type != TCP { + l.CustomResponseHeadersRaw = nil + return nil + } + + // if CustomResponseHeadersRaw is nil, we still need to set the default headers + customHeadersMap, err := ParseCustomResponseHeaders(l.CustomResponseHeadersRaw) + if err != nil { + return fmt.Errorf("failed to parse custom_response_headers: %w", err) + } + + l.CustomResponseHeaders = customHeadersMap + l.CustomResponseHeadersRaw = nil - // Profiling - { - if l.Profiling.UnauthenticatedPProfAccessRaw != nil { - if l.Profiling.UnauthenticatedPProfAccess, err = parseutil.ParseBool(l.Profiling.UnauthenticatedPProfAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for profiling.unauthenticated_pprof_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } + return nil +} + +// parseHTTPTimeoutSettings attempts to parse the raw listener HTTP timeout settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseHTTPTimeoutSettings() error { + if err := parseAndClearDurationSecond(&l.HTTPReadTimeoutRaw, &l.HTTPReadTimeout); err != nil { + return fmt.Errorf("error parsing http_read_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPReadHeaderTimeoutRaw, &l.HTTPReadHeaderTimeout); err != nil { + return fmt.Errorf("error parsing http_read_header_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPWriteTimeoutRaw, &l.HTTPWriteTimeout); err != nil { + return fmt.Errorf("error parsing http_write_timeout: %w", err) + } + + if err := parseAndClearDurationSecond(&l.HTTPIdleTimeoutRaw, &l.HTTPIdleTimeout); err != nil { + return fmt.Errorf("error parsing http_idle_timeout: %w", err) + } + + return nil +} + +// parseProxySettings attempts to parse the raw listener proxy settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseProxySettings() error { + var err error - l.Profiling.UnauthenticatedPProfAccessRaw = nil - } + if l.ProxyProtocolAuthorizedAddrsRaw != nil { + l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw) + if err != nil { + return fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err) } + } + + // Validation/sanity check on allowed settings for behavior. + switch l.ProxyProtocolBehavior { + case "allow_authorized", "deny_authorized", "use_always", "": + // Ignore these cases, they're all valid values. + // In the case of 'allow_authorized' and 'deny_authorized', we don't need + // to check how many addresses we have in ProxyProtocolAuthorizedAddrs + // as parseutil.ParseAddrs returns "one or more addresses" (or an error) + // so we'd have returned earlier. + default: + return fmt.Errorf("unsupported value supplied for proxy_protocol_behavior: %q", l.ProxyProtocolBehavior) + } + + // Clear raw values after successful parsing. + l.ProxyProtocolAuthorizedAddrsRaw = nil + + return nil +} - // InFlight Request logging - { - if l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw != nil { - if l.InFlightRequestLogging.UnauthenticatedInFlightAccess, err = parseutil.ParseBool(l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access: %w", err), fmt.Sprintf("listeners.%d", i)) - } +// parseForwardedForSettings attempts to parse the raw listener x-forwarded-for settings. 
+// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseForwardedForSettings() error { + var err error - l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw = "" - } + if l.XForwardedForAuthorizedAddrsRaw != nil { + if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil { + return fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err) } + } - // CORS - { - if l.CorsEnabledRaw != nil { - if l.CorsEnabled, err = parseutil.ParseBool(l.CorsEnabledRaw); err != nil { - return multierror.Prefix(fmt.Errorf("invalid value for cors_enabled: %w", err), fmt.Sprintf("listeners.%d", i)) - } - - l.CorsEnabledRaw = nil - } - - if strutil.StrListContains(l.CorsAllowedOrigins, "*") && len(l.CorsAllowedOrigins) > 1 { - return multierror.Prefix(errors.New("cors_allowed_origins must only contain a wildcard or only non-wildcard values"), fmt.Sprintf("listeners.%d", i)) - } - - if len(l.CorsAllowedHeadersRaw) > 0 { - for _, header := range l.CorsAllowedHeadersRaw { - l.CorsAllowedHeaders = append(l.CorsAllowedHeaders, textproto.CanonicalMIMEHeaderKey(header)) - } - } + if l.XForwardedForHopSkipsRaw != nil { + if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil { + return fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err) } - // HTTP Headers - { - // if CustomResponseHeadersRaw is nil, we still need to set the default headers - customHeadersMap, err := ParseCustomResponseHeaders(l.CustomResponseHeadersRaw) - if err != nil { - return multierror.Prefix(fmt.Errorf("failed to parse custom_response_headers: %w", err), fmt.Sprintf("listeners.%d", i)) - } - l.CustomResponseHeaders = customHeadersMap - l.CustomResponseHeadersRaw = nil + if l.XForwardedForHopSkips < 0 { + return fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips) } - result.Listeners = append(result.Listeners, &l) + l.XForwardedForHopSkipsRaw = nil } + if err := parseAndClearBool(&l.XForwardedForRejectNotAuthorizedRaw, &l.XForwardedForRejectNotAuthorized); err != nil { + return fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err) + } + + if err := parseAndClearBool(&l.XForwardedForRejectNotPresentRaw, &l.XForwardedForRejectNotPresent); err != nil { + return fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err) + } + + // Clear raw values after successful parsing. + l.XForwardedForAuthorizedAddrsRaw = nil + return nil } -// ParseSingleIPTemplate is used as a helper function to parse out a single IP -// address from a config parameter. -// If the input doesn't appear to contain the 'template' format, -// it will return the specified input unchanged. -func ParseSingleIPTemplate(ipTmpl string) (string, error) { - r := regexp.MustCompile("{{.*?}}") - if !r.MatchString(ipTmpl) { - return ipTmpl, nil +// parseTelemetrySettings attempts to parse the raw listener telemetry settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. 
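Both parseProxySettings and parseForwardedForSettings above lean on parseutil.ParseAddrs, which accepts (among other shapes) a comma-separated string and yields one or more parsed addresses or an error; that guarantee is why the proxy code does not re-check for an empty list. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// A comma-separated string parses into multiple addresses, matching the
	// "10.0.0.1,10.0.2.1" inputs used by the tests below.
	addrs, err := parseutil.ParseAddrs("10.0.0.1,10.0.2.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(addrs)) // 2

	// Garbage input is an error rather than an empty list.
	if _, err := parseutil.ParseAddrs("juan"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```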
+func (l *Listener) parseTelemetrySettings() error { + if err := parseAndClearBool(&l.Telemetry.UnauthenticatedMetricsAccessRaw, &l.Telemetry.UnauthenticatedMetricsAccess); err != nil { + return fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err) } - out, err := template.Parse(ipTmpl) - if err != nil { - return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err) + return nil +} + +// parseProfilingSettings attempts to parse the raw listener profiling settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseProfilingSettings() error { + if err := parseAndClearBool(&l.Profiling.UnauthenticatedPProfAccessRaw, &l.Profiling.UnauthenticatedPProfAccess); err != nil { + return fmt.Errorf("invalid value for profiling.unauthenticated_pprof_access: %w", err) } - ips := strings.Split(out, " ") - switch len(ips) { - case 0: - return "", errors.New("no addresses found, please configure one") - case 1: - return strings.TrimSpace(ips[0]), nil - default: - return "", fmt.Errorf("multiple addresses found (%q), please configure one", out) + return nil +} + +// parseInFlightRequestSettings attempts to parse the raw listener in-flight request logging settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseInFlightRequestSettings() error { + if err := parseAndClearBool(&l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw, &l.InFlightRequestLogging.UnauthenticatedInFlightAccess); err != nil { + return fmt.Errorf("invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access: %w", err) } + + return nil +} + +// parseCORSSettings attempts to parse the raw listener CORS settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseCORSSettings() error { + if err := parseAndClearBool(&l.CorsEnabledRaw, &l.CorsEnabled); err != nil { + return fmt.Errorf("invalid value for cors_enabled: %w", err) + } + + if strutil.StrListContains(l.CorsAllowedOrigins, "*") && len(l.CorsAllowedOrigins) > 1 { + return errors.New("cors_allowed_origins must only contain a wildcard or only non-wildcard values") + } + + if len(l.CorsAllowedHeadersRaw) > 0 { + for _, header := range l.CorsAllowedHeadersRaw { + l.CorsAllowedHeaders = append(l.CorsAllowedHeaders, textproto.CanonicalMIMEHeaderKey(header)) + } + } + + l.CorsAllowedHeadersRaw = nil + + return nil +} + +// parseRedactionSettings attempts to parse the raw listener redaction settings. +// The state of the listener will be modified, raw data will be cleared upon +// successful parsing. +func (l *Listener) parseRedactionSettings() error { + // Redaction is only supported on TCP listeners. + // Clear raw data and return early if it was something else.
+ if l.Type != TCP { + l.RedactAddressesRaw = nil + l.RedactClusterNameRaw = nil + l.RedactVersionRaw = nil + + return nil + } + + var err error + + if l.RedactAddressesRaw != nil { + if l.RedactAddresses, err = parseutil.ParseBool(l.RedactAddressesRaw); err != nil { + return fmt.Errorf("invalid value for redact_addresses: %w", err) + } + } + if l.RedactClusterNameRaw != nil { + if l.RedactClusterName, err = parseutil.ParseBool(l.RedactClusterNameRaw); err != nil { + return fmt.Errorf("invalid value for redact_cluster_name: %w", err) + } + } + if l.RedactVersionRaw != nil { + if l.RedactVersion, err = parseutil.ParseBool(l.RedactVersionRaw); err != nil { + return fmt.Errorf("invalid value for redact_version: %w", err) + } + } + + l.RedactAddressesRaw = nil + l.RedactClusterNameRaw = nil + l.RedactVersionRaw = nil + + return nil } diff --git a/internalshared/configutil/listener_test.go b/internalshared/configutil/listener_test.go index 803086e483e2..bfd922faa89b 100644 --- a/internalshared/configutil/listener_test.go +++ b/internalshared/configutil/listener_test.go @@ -1,49 +1,1380 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( - "fmt" + "crypto/tls" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestParseSingleIPTemplate(t *testing.T) { - type args struct { - ipTmpl string +// TestListener_ParseSingleIPTemplate exercises the ParseSingleIPTemplate function to +// ensure that we only attempt to parse templates when the input contains a +// template placeholder (see: go-sockaddr/template). +func TestListener_ParseSingleIPTemplate(t *testing.T) { + tests := map[string]struct { + arg string + want string + isErrorExpected bool + errorMessage string + }{ + "test https addr": { + arg: "https://vaultproject.io:8200", + want: "https://vaultproject.io:8200", + isErrorExpected: false, + }, + "test invalid template func": { + arg: "{{ FooBar }}", + want: "", + isErrorExpected: true, + errorMessage: "unable to parse address template", + }, + "test partial template": { + arg: "{{FooBar", + want: "{{FooBar", + isErrorExpected: false, + }, + } + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + got, err := ParseSingleIPTemplate(tc.arg) + + if tc.isErrorExpected { + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.want, got) + }) + } +} + +// TestListener_parseType exercises the listener receiver parseType. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
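The ParseSingleIPTemplate test above only needs a template detector because the real work happens in go-sockaddr's template package. A hedged sketch of what a template input resolves to (the template function shown is one of go-sockaddr's built-ins; the output is machine-dependent):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-sockaddr/template"
)

func main() {
	// Inputs without "{{ ... }}" are returned unchanged by
	// ParseSingleIPTemplate; inputs with a template are expanded like this.
	out, err := template.Parse("{{ GetPrivateIP }}")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // e.g. "10.0.0.5", depending on the host's interfaces
}
```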
+func TestListener_parseType(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + inputType string + inputFallback string + expectedValue string + isErrorExpected bool + errorMessage string + }{ + "empty-all": { + inputType: "", + inputFallback: "", + isErrorExpected: true, + errorMessage: "listener type must be specified", + }, + "bad-type": { + inputType: "foo", + isErrorExpected: true, + errorMessage: "unsupported listener type", + }, + "bad-fallback": { + inputType: "", + inputFallback: "foo", + isErrorExpected: true, + errorMessage: "unsupported listener type", + }, + "tcp-type-lower": { + inputType: "tcp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-type-upper": { + inputType: "TCP", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-type-mixed": { + inputType: "tCp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-lower": { + inputType: "", + inputFallback: "tcp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-upper": { + inputType: "", + inputFallback: "TCP", + expectedValue: "tcp", + isErrorExpected: false, + }, + "tcp-fallback-mixed": { + inputType: "", + inputFallback: "tCp", + expectedValue: "tcp", + isErrorExpected: false, + }, + "unix-type-lower": { + inputType: "unix", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-type-upper": { + inputType: "UNIX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-type-mixed": { + inputType: "uNiX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-lower": { + inputType: "", + inputFallback: "unix", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-upper": { + inputType: "", + inputFallback: "UNIX", + expectedValue: "unix", + isErrorExpected: false, + }, + "unix-fallback-mixed": { + inputType: "", + inputFallback: "uNiX", + expectedValue: "unix", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + l := &Listener{Type: ListenerType(tc.inputType)} + err := l.parseType(tc.inputFallback) + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + require.NoError(t, err) + require.Equal(t, tc.expectedValue, l.Type.String()) + } + }) + } +} + +// TestListener_parseRequestSettings exercises the listener receiver parseRequestSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
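A note on the `name := name` / `tc := tc` rebinding that appears in every table-driven test here: prior to Go 1.22, `range` reused a single loop variable across iterations, so a parallel subtest closing over `tc` could observe a later iteration's value. A minimal demonstration of the safe pattern:

```go
package example

import "testing"

func TestCapturePattern(t *testing.T) {
	tests := map[string]struct{ want string }{
		"a": {want: "a"},
		"b": {want: "b"},
	}
	for name, tc := range tests {
		name, tc := name, tc // copy per iteration; required before Go 1.22
		t.Run(name, func(t *testing.T) {
			t.Parallel() // subtest may run after the loop has advanced
			if tc.want != name {
				t.Fatalf("got %q, want %q", tc.want, name)
			}
		})
	}
}
```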
+func TestListener_parseRequestSettings(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + rawMaxRequestSize any + expectedMaxRequestSize int64 + rawMaxRequestDuration any + expectedDuration time.Duration + rawRequireRequestHeader any + expectedRequireRequestHeader bool + rawDisableRequestLimiter any + expectedDisableRequestLimiter bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "max-request-size-bad": { + rawMaxRequestSize: "juan", + isErrorExpected: true, + errorMessage: "error parsing max_request_size", + }, + "max-request-size-good": { + rawMaxRequestSize: "5", + expectedMaxRequestSize: 5, + isErrorExpected: false, + }, + "max-request-duration-bad": { + rawMaxRequestDuration: "juan", + isErrorExpected: true, + errorMessage: "error parsing max_request_duration", + }, + "max-request-duration-good": { + rawMaxRequestDuration: "30s", + expectedDuration: 30 * time.Second, + isErrorExpected: false, + }, + "require-request-header-bad": { + rawRequireRequestHeader: "juan", + expectedRequireRequestHeader: false, + isErrorExpected: true, + errorMessage: "invalid value for require_request_header", + }, + "require-request-header-good": { + rawRequireRequestHeader: "true", + expectedRequireRequestHeader: true, + isErrorExpected: false, + }, + "disable-request-limiter-bad": { + rawDisableRequestLimiter: "badvalue", + expectedDisableRequestLimiter: false, + isErrorExpected: true, + errorMessage: "invalid value for disable_request_limiter", + }, + "disable-request-limiter-good": { + rawDisableRequestLimiter: "true", + expectedDisableRequestLimiter: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + MaxRequestSizeRaw: tc.rawMaxRequestSize, + MaxRequestDurationRaw: tc.rawMaxRequestDuration, + RequireRequestHeaderRaw: tc.rawRequireRequestHeader, + DisableRequestLimiterRaw: tc.rawDisableRequestLimiter, + } + + err := l.parseRequestSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedMaxRequestSize, l.MaxRequestSize) + require.Equal(t, tc.expectedDuration, l.MaxRequestDuration) + require.Equal(t, tc.expectedRequireRequestHeader, l.RequireRequestHeader) + require.Equal(t, tc.expectedDisableRequestLimiter, l.DisableRequestLimiter) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.MaxRequestSizeRaw) + require.Nil(t, l.MaxRequestDurationRaw) + require.Nil(t, l.RequireRequestHeaderRaw) + require.Nil(t, l.DisableRequestLimiterRaw) + } + }) + } +} + +// TestListener_parseTLSSettings exercises the listener receiver parseTLSSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
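The TLS cases below hinge on tlsutil.ParseCiphers translating suite names into crypto/tls constants. A small sketch of that mapping:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/tlsutil"
)

func main() {
	suites, err := tlsutil.ParseCiphers("TLS_RSA_WITH_RC4_128_SHA")
	if err != nil {
		panic(err)
	}
	fmt.Println(suites[0] == tls.TLS_RSA_WITH_RC4_128_SHA) // true

	// Unknown names fail rather than being silently dropped.
	if _, err := tlsutil.ParseCiphers("juan"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```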
+func TestListener_parseTLSSettings(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + rawTLSDisable any + expectedTLSDisable bool + rawTLSCipherSuites string + expectedTLSCipherSuites []uint16 + rawTLSRequireAndVerifyClientCert any + expectedTLSRequireAndVerifyClientCert bool + rawTLSDisableClientCerts any + expectedTLSDisableClientCerts bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "tls-disable-bad": { + rawTLSDisable: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_disable", + }, + "tls-disable-good": { + rawTLSDisable: "true", + expectedTLSDisable: true, + isErrorExpected: false, + }, + "tls-cipher-suites-bad": { + rawTLSCipherSuites: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_cipher_suites", + }, + "tls-cipher-suites-good": { + rawTLSCipherSuites: "TLS_RSA_WITH_RC4_128_SHA", + expectedTLSCipherSuites: []uint16{tls.TLS_RSA_WITH_RC4_128_SHA}, + isErrorExpected: false, + }, + "tls-require-and-verify-client-cert-bad": { + rawTLSRequireAndVerifyClientCert: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_require_and_verify_client_cert", + }, + "tls-require-and-verify-client-cert-good": { + rawTLSRequireAndVerifyClientCert: "true", + expectedTLSRequireAndVerifyClientCert: true, + isErrorExpected: false, + }, + "tls-disable-client-certs-bad": { + rawTLSDisableClientCerts: "juan", + isErrorExpected: true, + errorMessage: "invalid value for tls_disable_client_certs", + }, + "tls-disable-client-certs-good": { + rawTLSDisableClientCerts: "true", + expectedTLSDisableClientCerts: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + TLSDisableRaw: tc.rawTLSDisable, + TLSCipherSuitesRaw: tc.rawTLSCipherSuites, + TLSRequireAndVerifyClientCertRaw: tc.rawTLSRequireAndVerifyClientCert, + TLSDisableClientCertsRaw: tc.rawTLSDisableClientCerts, + } + + err := l.parseTLSSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedTLSDisable, l.TLSDisable) + require.Equal(t, tc.expectedTLSCipherSuites, l.TLSCipherSuites) + require.Equal(t, tc.expectedTLSRequireAndVerifyClientCert, l.TLSRequireAndVerifyClientCert) + require.Equal(t, tc.expectedTLSDisableClientCerts, l.TLSDisableClientCerts) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.TLSDisableRaw) + require.Empty(t, l.TLSCipherSuitesRaw) + require.Nil(t, l.TLSRequireAndVerifyClientCertRaw) + require.Nil(t, l.TLSDisableClientCertsRaw) + } + }) + } +} + +// TestListener_parseHTTPTimeoutSettings exercises the listener receiver parseHTTPTimeoutSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
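The timeout cases below all flow through parseutil.ParseDurationSecond, which accepts Go duration strings and treats bare integers as seconds; note that "d" (days) is not a valid unit, as the TestParseAndClearDurationSecond cases near the end of this file confirm. For example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	for _, raw := range []interface{}{"30s", 87, "29h24m49s"} {
		d, err := parseutil.ParseDurationSecond(raw)
		fmt.Println(d, err) // 30s, then 1m27s, then 29h24m49s (no errors)
	}
	if _, err := parseutil.ParseDurationSecond("1d3s"); err != nil {
		fmt.Println("days rejected:", err)
	}
}
```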
+func TestListener_parseHTTPTimeoutSettings(t *testing.T) { + tests := map[string]struct { + rawHTTPReadTimeout any + expectedHTTPReadTimeout time.Duration + rawHTTPReadHeaderTimeout any + expectedHTTPReadHeaderTimeout time.Duration + rawHTTPWriteTimeout any + expectedHTTPWriteTimeout time.Duration + rawHTTPIdleTimeout any + expectedHTTPIdleTimeout time.Duration + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "read-timeout-bad": { + rawHTTPReadTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_read_timeout", + }, + "read-timeout-good": { + rawHTTPReadTimeout: "30s", + expectedHTTPReadTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "read-header-timeout-bad": { + rawHTTPReadHeaderTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_read_header_timeout", + }, + "read-header-timeout-good": { + rawHTTPReadHeaderTimeout: "30s", + expectedHTTPReadHeaderTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "write-timeout-bad": { + rawHTTPWriteTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_write_timeout", + }, + "write-timeout-good": { + rawHTTPWriteTimeout: "30s", + expectedHTTPWriteTimeout: 30 * time.Second, + isErrorExpected: false, + }, + "idle-timeout-bad": { + rawHTTPIdleTimeout: "juan", + isErrorExpected: true, + errorMessage: "error parsing http_idle_timeout", + }, + "idle-timeout-good": { + rawHTTPIdleTimeout: "30s", + expectedHTTPIdleTimeout: 30 * time.Second, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + HTTPReadTimeoutRaw: tc.rawHTTPReadTimeout, + HTTPReadHeaderTimeoutRaw: tc.rawHTTPReadHeaderTimeout, + HTTPWriteTimeoutRaw: tc.rawHTTPWriteTimeout, + HTTPIdleTimeoutRaw: tc.rawHTTPIdleTimeout, + } + + err := l.parseHTTPTimeoutSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedHTTPReadTimeout, l.HTTPReadTimeout) + require.Equal(t, tc.expectedHTTPReadHeaderTimeout, l.HTTPReadHeaderTimeout) + require.Equal(t, tc.expectedHTTPWriteTimeout, l.HTTPWriteTimeout) + require.Equal(t, tc.expectedHTTPIdleTimeout, l.HTTPIdleTimeout) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.HTTPReadTimeoutRaw) + require.Nil(t, l.HTTPReadHeaderTimeoutRaw) + require.Nil(t, l.HTTPWriteTimeoutRaw) + require.Nil(t, l.HTTPIdleTimeoutRaw) + } + }) + } +} + +// TestListener_parseProxySettings exercises the listener receiver parseProxySettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
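The behavior cases in the test below exercise the allow-list switch from parseProxySettings. Extracted on its own, the validation looks like this:

```go
package main

import "fmt"

// validateBehavior mirrors the switch in parseProxySettings: the empty
// string and the three known modes are accepted; anything else is an error.
func validateBehavior(b string) error {
	switch b {
	case "allow_authorized", "deny_authorized", "use_always", "":
		return nil
	default:
		return fmt.Errorf("unsupported value supplied for proxy_protocol_behavior: %q", b)
	}
}

func main() {
	fmt.Println(validateBehavior("use_always")) // <nil>
	fmt.Println(validateBehavior("juan"))       // unsupported value ...
}
```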
+func TestListener_parseProxySettings(t *testing.T) { + tests := map[string]struct { + rawProxyProtocolAuthorizedAddrs any + expectedNumAddrs int + proxyBehavior string + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad-addrs": { + rawProxyProtocolAuthorizedAddrs: "juan", + isErrorExpected: true, + errorMessage: "error parsing proxy_protocol_authorized_addrs", + }, + "good-addrs": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "", + isErrorExpected: false, + }, + "behavior-bad": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + proxyBehavior: "juan", + isErrorExpected: true, + errorMessage: "unsupported value supplied for proxy_protocol_behavior", + }, + "behavior-use-always": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "use_always", + isErrorExpected: false, + }, + "behavior-empty": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "", + isErrorExpected: false, + }, + "behavior-allow": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "allow_authorized", + isErrorExpected: false, + }, + "behavior-deny": { + rawProxyProtocolAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + proxyBehavior: "deny_authorized", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + ProxyProtocolAuthorizedAddrsRaw: tc.rawProxyProtocolAuthorizedAddrs, + ProxyProtocolBehavior: tc.proxyBehavior, + } + + err := l.parseProxySettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Len(t, l.ProxyProtocolAuthorizedAddrs, tc.expectedNumAddrs) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.ProxyProtocolAuthorizedAddrsRaw) + } + }) + } +} + +// TestListener_parseForwardedForSettings exercises the listener receiver parseForwardedForSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseForwardedForSettings(t *testing.T) { + tests := map[string]struct { + rawAuthorizedAddrs any + expectedNumAddrs int + rawHopSkips any + expectedHopSkips int64 + rawRejectNotAuthorized any + expectedRejectNotAuthorized bool + rawRejectNotPresent any + expectedRejectNotPresent bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "authorized-addrs-bad": { + rawAuthorizedAddrs: "juan", + isErrorExpected: true, + errorMessage: "error parsing x_forwarded_for_authorized_addrs", + }, + "authorized-addrs-good": { + rawAuthorizedAddrs: "10.0.0.1,10.0.2.1", + expectedNumAddrs: 2, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + XForwardedForAuthorizedAddrsRaw: tc.rawAuthorizedAddrs, + XForwardedForHopSkipsRaw: tc.rawHopSkips, + XForwardedForRejectNotAuthorizedRaw: tc.rawRejectNotAuthorized, + XForwardedForRejectNotPresentRaw: tc.rawRejectNotPresent, + } + + err := l.parseForwardedForSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + + require.Len(t, l.XForwardedForAuthorizedAddrs, tc.expectedNumAddrs) + require.Equal(t, tc.expectedHopSkips, l.XForwardedForHopSkips) + require.Equal(t, tc.expectedRejectNotAuthorized, l.XForwardedForRejectNotAuthorized) + require.Equal(t, tc.expectedRejectNotPresent, l.XForwardedForRejectNotPresent) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.XForwardedForAuthorizedAddrsRaw) + require.Nil(t, l.XForwardedForHopSkipsRaw) + require.Nil(t, l.XForwardedForRejectNotAuthorizedRaw) + require.Nil(t, l.XForwardedForRejectNotPresentRaw) + } + }) + } +} + +// TestListener_parseTelemetrySettings exercises the listener receiver parseTelemetrySettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseTelemetrySettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedMetricsAccess any + expectedUnauthenticatedMetricsAccess bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "unauth-bad": { + rawUnauthenticatedMetricsAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for telemetry.unauthenticated_metrics_access", + }, + "unauth-good": { + rawUnauthenticatedMetricsAccess: "true", + expectedUnauthenticatedMetricsAccess: true, + isErrorExpected: false, + }, } - tests := []struct { - name string - arg string - want string - wantErr assert.ErrorAssertionFunc + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Telemetry: ListenerTelemetry{ + UnauthenticatedMetricsAccessRaw: tc.rawUnauthenticatedMetricsAccess, + }, + } + + err := l.parseTelemetrySettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedMetricsAccess, l.Telemetry.UnauthenticatedMetricsAccess) + + // Ensure the state was modified for the raw values. 
+ require.Nil(t, l.Telemetry.UnauthenticatedMetricsAccessRaw) + } + }) + } +} + +// TestListener_parseProfilingSettings exercises the listener receiver parseProfilingSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseProfilingSettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedPProfAccess any + expectedUnauthenticatedPProfAccess bool + isErrorExpected bool + errorMessage string }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawUnauthenticatedPProfAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for profiling.unauthenticated_pprof_access", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Profiling: ListenerProfiling{ + UnauthenticatedPProfAccessRaw: tc.rawUnauthenticatedPProfAccess, + }, + } + + err := l.parseProfilingSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedPProfAccess, l.Profiling.UnauthenticatedPProfAccess) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.Profiling.UnauthenticatedPProfAccessRaw) + } + }) + } +} + +// TestListener_parseInFlightRequestSettings exercises the listener receiver parseInFlightRequestSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseInFlightRequestSettings(t *testing.T) { + tests := map[string]struct { + rawUnauthenticatedInFlightAccess any + expectedUnauthenticatedInFlightAccess bool + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawUnauthenticatedInFlightAccess: "juan", + isErrorExpected: true, + errorMessage: "invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access", + }, + "good": { + rawUnauthenticatedInFlightAccess: "true", + expectedUnauthenticatedInFlightAccess: true, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + InFlightRequestLogging: ListenerInFlightRequestLogging{ + UnauthenticatedInFlightAccessRaw: tc.rawUnauthenticatedInFlightAccess, + }, + } + + err := l.parseInFlightRequestSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedUnauthenticatedInFlightAccess, l.InFlightRequestLogging.UnauthenticatedInFlightAccess) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw) + } + }) + } +} + +// TestListener_parseCORSSettings exercises the listener receiver parseCORSSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
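The CORS header expectations below come straight from the standard library: parseCORSSettings canonicalizes each allowed header with net/textproto, so whatever casing the operator writes is normalized to HTTP's canonical form:

```go
package main

import (
	"fmt"
	"net/textproto"
)

func main() {
	for _, h := range []string{"foo", "x-custom-header", "BAR"} {
		fmt.Println(textproto.CanonicalMIMEHeaderKey(h))
	}
	// Output:
	// Foo
	// X-Custom-Header
	// Bar
}
```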
+func TestListener_parseCORSSettings(t *testing.T) { + tests := map[string]struct { + rawCorsEnabled any + rawCorsAllowedHeaders []string + corsAllowedOrigins []string + expectedCorsEnabled bool + expectedNumCorsAllowedHeaders int + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "cors-enabled-bad": { + rawCorsEnabled: "juan", + expectedCorsEnabled: false, + isErrorExpected: true, + errorMessage: "invalid value for cors_enabled", + }, + "cors-enabled-good": { + rawCorsEnabled: "true", + expectedCorsEnabled: true, + isErrorExpected: false, + }, + "cors-allowed-origins-single-wildcard": { + corsAllowedOrigins: []string{"*"}, + isErrorExpected: false, + }, + "cors-allowed-origins-multi-wildcard": { + corsAllowedOrigins: []string{"*", "hashicorp.com"}, + isErrorExpected: true, + errorMessage: "cors_allowed_origins must only contain a wildcard or only non-wildcard values", + }, + "cors-allowed-headers-anything": { + rawCorsAllowedHeaders: []string{"foo", "bar"}, + expectedNumCorsAllowedHeaders: 2, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + CorsEnabledRaw: tc.rawCorsEnabled, + CorsAllowedHeadersRaw: tc.rawCorsAllowedHeaders, + CorsAllowedOrigins: tc.corsAllowedOrigins, + } + + err := l.parseCORSSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedCorsEnabled, l.CorsEnabled) + require.Len(t, l.CorsAllowedHeaders, tc.expectedNumCorsAllowedHeaders) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.CorsEnabledRaw) + require.Nil(t, l.CorsAllowedHeadersRaw) + } + }) + } +} + +// TestListener_parseHTTPHeaderSettings exercises the listener receiver parseHTTPHeaderSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
+func TestListener_parseHTTPHeaderSettings(t *testing.T) { + tests := map[string]struct { + listenerType ListenerType + rawCustomResponseHeaders []map[string]any + expectedNumCustomResponseHeaders int + isErrorExpected bool + errorMessage string + }{ + "nil": { + listenerType: TCP, + isErrorExpected: false, + expectedNumCustomResponseHeaders: 1, // default: Strict-Transport-Security + }, + "custom-headers-bad": { + listenerType: TCP, + rawCustomResponseHeaders: []map[string]any{ + {"juan": false}, + }, + isErrorExpected: true, + errorMessage: "failed to parse custom_response_headers", + }, + "custom-headers-good": { + listenerType: TCP, + rawCustomResponseHeaders: []map[string]any{ + { + "2xx": []map[string]any{ + {"X-Custom-Header": []any{"Custom Header Value 1", "Custom Header Value 2"}}, + }, + }, + }, + expectedNumCustomResponseHeaders: 2, + isErrorExpected: false, + }, + "unix-no-headers": { + listenerType: Unix, + rawCustomResponseHeaders: []map[string]any{ + { + "2xx": []map[string]any{ + {"X-Custom-Header": []any{"Custom Header Value 1", "Custom Header Value 2"}}, + }, + }, + }, + expectedNumCustomResponseHeaders: 0, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Type: tc.listenerType, + CustomResponseHeadersRaw: tc.rawCustomResponseHeaders, + } + + err := l.parseHTTPHeaderSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Len(t, l.CustomResponseHeaders, tc.expectedNumCustomResponseHeaders) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.CustomResponseHeadersRaw) + } + }) + } +} + +// TestListener_parseChrootNamespaceSettings exercises the listener receiver parseChrootNamespaceSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. +func TestListener_parseChrootNamespaceSettings(t *testing.T) { + tests := map[string]struct { + rawChrootNamespace any + expectedChrootNamespace string + isErrorExpected bool + errorMessage string + }{ + "nil": { + isErrorExpected: false, + }, + "bad": { + rawChrootNamespace: &Listener{}, // Unsure how we'd ever see this really. + isErrorExpected: true, + errorMessage: "invalid value for chroot_namespace", + }, + "good": { + rawChrootNamespace: "juan", + expectedChrootNamespace: "juan/", + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + ChrootNamespaceRaw: tc.rawChrootNamespace, + } + + err := l.parseChrootNamespaceSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedChrootNamespace, l.ChrootNamespace) + + // Ensure the state was modified for the raw values. + require.Nil(t, l.ChrootNamespaceRaw) + } + }) + } +} + +// TestListener_parseRedactionSettings exercises the listener receiver parseRedactionSettings. +// We check various inputs to ensure we can parse the values as expected and +// assign the relevant value on the SharedConfig struct. 
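Both parseRedactionSettings (tested below) and parseAndClearBool funnel into parseutil.ParseBool, which is deliberately permissive about input types; the TestParseAndClearBool cases near the end of this file pin down the exact coercions:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// Strings, bools, integers and floats all coerce; non-zero numbers read
	// as true, and anything unparseable (like an address) is an error.
	for _, raw := range []interface{}{"True", 0, 2, 3.14, "0.0.0.0:8200"} {
		b, err := parseutil.ParseBool(raw)
		fmt.Println(raw, "=>", b, err)
	}
}
```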
+func TestListener_parseRedactionSettings(t *testing.T) { + tests := map[string]struct { + listenerType ListenerType + rawRedactAddresses any + expectedRedactAddresses bool + rawRedactClusterName any + expectedRedactClusterName bool + rawRedactVersion any + expectedRedactVersion bool + isErrorExpected bool + errorMessage string + }{ + "missing": { + listenerType: TCP, + isErrorExpected: false, + expectedRedactAddresses: false, + expectedRedactClusterName: false, + expectedRedactVersion: false, + }, + "redact-addresses-bad": { + listenerType: TCP, + rawRedactAddresses: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_addresses", + }, + "redact-addresses-good": { + listenerType: TCP, + rawRedactAddresses: "true", + expectedRedactAddresses: true, + isErrorExpected: false, + }, + "redact-cluster-name-bad": { + listenerType: TCP, + rawRedactClusterName: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_cluster_name", + }, + "redact-cluster-name-good": { + listenerType: TCP, + rawRedactClusterName: "true", + expectedRedactClusterName: true, + isErrorExpected: false, + }, + "redact-version-bad": { + listenerType: TCP, + rawRedactVersion: "juan", + isErrorExpected: true, + errorMessage: "invalid value for redact_version", + }, + "redact-version-good": { + listenerType: TCP, + rawRedactVersion: "true", + expectedRedactVersion: true, + isErrorExpected: false, + }, + "redact-unix-na": { + listenerType: Unix, + rawRedactAddresses: "true", + expectedRedactAddresses: false, + rawRedactClusterName: "true", + expectedRedactClusterName: false, + rawRedactVersion: "true", + expectedRedactVersion: false, + isErrorExpected: false, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + // Configure listener with raw values + l := &Listener{ + Type: tc.listenerType, + RedactAddressesRaw: tc.rawRedactAddresses, + RedactClusterNameRaw: tc.rawRedactClusterName, + RedactVersionRaw: tc.rawRedactVersion, + } + + err := l.parseRedactionSettings() + + switch { + case tc.isErrorExpected: + require.Error(t, err) + require.ErrorContains(t, err, tc.errorMessage) + default: + // Assert we got the relevant values. + require.NoError(t, err) + require.Equal(t, tc.expectedRedactAddresses, l.RedactAddresses) + require.Equal(t, tc.expectedRedactClusterName, l.RedactClusterName) + require.Equal(t, tc.expectedRedactVersion, l.RedactVersion) + + // Ensure the state was modified for the raw values. 
+ require.Nil(t, l.RedactAddressesRaw) + require.Nil(t, l.RedactClusterNameRaw) + require.Nil(t, l.RedactVersionRaw) + } + }) + } +} + +func TestParseAndClearBool(t *testing.T) { + testcases := []struct { + name string + raw interface{} + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed bool + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-true-as-string", + raw: "true", + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-string", + raw: "false", + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-bool", + raw: true, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-bool", + raw: false, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-string-mix-case", + raw: "True", + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, + { + name: "valid-false-as-integer", + raw: 0, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, + }, + { + name: "valid-true-as-integer", + raw: 2, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, + }, { - name: "test https addr", - arg: "https://vaultproject.io:8200", - want: "https://vaultproject.io:8200", - wantErr: assert.NoError, + name: "valid-true-as-float", + raw: 3.14, + rawAssertion: assert.Nil, + expectedParsed: true, + errorAssertion: assert.NoError, }, { - name: "test invalid template func", - arg: "{{FooBar}}", - want: "", - wantErr: assert.Error, + name: "valid-false-as-float", + raw: 0.0, + rawAssertion: assert.Nil, + expectedParsed: false, + errorAssertion: assert.NoError, }, { - name: "test partial template", - arg: "{{FooBar", - want: "{{FooBar", - wantErr: assert.NoError, + name: "invalid-as-string", + raw: "0.0.0.0:8200", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseSingleIPTemplate(tt.arg) - if !tt.wantErr(t, err, fmt.Sprintf("ParseSingleIPTemplate(%v)", tt.arg)) { - return - } - assert.Equalf(t, tt.want, got, "ParseSingleIPTemplate(%v)", tt.arg) - }) + for _, testcase := range testcases { + var parsed bool + err := parseAndClearBool(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearString(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed string + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-empty-string", + raw: "", + rawAssertion: assert.Nil, + expectedParsed: "", + errorAssertion: assert.NoError, + }, + { + name: "valid-some-string", + raw: "blah blah", + rawAssertion: assert.Nil, + expectedParsed: "blah blah", + errorAssertion: assert.NoError, + }, + { + name: "valid-as-integer", + raw: 8, + rawAssertion: assert.Nil, + expectedParsed: "8", + errorAssertion: assert.NoError, + }, + { + name: 
"valid-as-bool", + raw: true, + rawAssertion: assert.Nil, + expectedParsed: "1", + errorAssertion: assert.NoError, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + expectedParsed: "", + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + } + for _, testcase := range testcases { + var parsed string + err := parseAndClearString(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearInt(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed int64 + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-as-int", + raw: 200, + rawAssertion: assert.Nil, + expectedParsed: int64(200), + errorAssertion: assert.NoError, + }, + { + name: "valid-as-string", + raw: "53", + rawAssertion: assert.Nil, + expectedParsed: int64(53), + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-hex-string", + raw: "0xa", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, + }, + } + + for _, testcase := range testcases { + var parsed int64 + err := parseAndClearInt(&testcase.raw, &parsed) + + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed, testcase.name) + testcase.rawAssertion(t, testcase.raw, testcase.name) + } +} + +func TestParseAndClearDurationSecond(t *testing.T) { + testcases := []struct { + name string + raw any + rawAssertion func(assert.TestingT, any, ...any) bool + expectedParsed time.Duration + errorAssertion func(assert.TestingT, error, ...any) bool + }{ + { + name: "valid-as-string", + raw: "30s", + rawAssertion: assert.Nil, + expectedParsed: time.Duration(30 * time.Second), + errorAssertion: assert.NoError, + }, + { + name: "valid-as-string-more-complex", + raw: "29h24m49s", + rawAssertion: assert.Nil, + expectedParsed: time.Duration((29 * time.Hour) + (24 * time.Minute) + (49 * time.Second)), + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-string-using-days", + raw: "1d3s", + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + { + name: "valid-as-integer", + raw: 87, + rawAssertion: assert.Nil, + expectedParsed: time.Duration(87 * time.Second), + errorAssertion: assert.NoError, + }, + { + name: "not-set", + raw: nil, + rawAssertion: assert.Nil, + errorAssertion: assert.NoError, + }, + { + name: "invalid-as-struct", + raw: struct{}{}, + rawAssertion: assert.NotNil, + errorAssertion: assert.Error, + }, + } + + for _, testcase := range testcases { + var parsed time.Duration + + err := parseAndClearDurationSecond(&testcase.raw, &parsed) + testcase.errorAssertion(t, err, testcase.name) + assert.Equal(t, testcase.expectedParsed, parsed) + testcase.rawAssertion(t, testcase.raw, testcase.name) } } diff --git a/internalshared/configutil/merge.go b/internalshared/configutil/merge.go index 791bd41a7f29..5068be556c24 100644 --- a/internalshared/configutil/merge.go +++ b/internalshared/configutil/merge.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package configutil func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { @@ -69,13 +72,15 @@ func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { } result.LogRotateBytes = c.LogRotateBytes - if c2.LogRotateBytes != "" { + if c2.LogRotateBytesRaw != nil { result.LogRotateBytes = c2.LogRotateBytes + result.LogRotateBytesRaw = c2.LogRotateBytesRaw } result.LogRotateMaxFiles = c.LogRotateMaxFiles - if c2.LogRotateMaxFiles != "" { + if c2.LogRotateMaxFilesRaw != nil { result.LogRotateMaxFiles = c2.LogRotateMaxFiles + result.LogRotateMaxFilesRaw = c2.LogRotateMaxFilesRaw } result.LogRotateDuration = c.LogRotateDuration diff --git a/internalshared/configutil/telemetry.go b/internalshared/configutil/telemetry.go index 77620770db3e..964e03f0b9c8 100644 --- a/internalshared/configutil/telemetry.go +++ b/internalshared/configutil/telemetry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( @@ -15,11 +18,11 @@ import ( "github.com/armon/go-metrics/prometheus" stackdriver "github.com/google/go-metrics-stackdriver" stackdrivervault "github.com/google/go-metrics-stackdriver/vault" + "github.com/hashicorp/cli" "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/vault/helper/metricsutil" - "github.com/mitchellh/cli" "google.golang.org/api/option" ) @@ -159,6 +162,10 @@ type Telemetry struct { // PrefixFilter is a list of filter rules to apply for allowing // or blocking metrics by prefix. PrefixFilter []string `hcl:"prefix_filter"` + + // Whether or not telemetry should include the mount point in the rollback + // metrics + RollbackMetricsIncludeMountPoint bool `hcl:"add_mount_point_rollback_metrics"` } func (t *Telemetry) Validate(source string) []ConfigError { @@ -399,6 +406,7 @@ func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil. wrapper.TelemetryConsts.LeaseMetricsEpsilon = opts.Config.LeaseMetricsEpsilon wrapper.TelemetryConsts.LeaseMetricsNameSpaceLabels = opts.Config.LeaseMetricsNameSpaceLabels wrapper.TelemetryConsts.NumLeaseMetricsTimeBuckets = opts.Config.NumLeaseMetricsTimeBuckets + wrapper.TelemetryConsts.RollbackMetricsIncludeMountPoint = opts.Config.RollbackMetricsIncludeMountPoint // Parse the metric filters telemetryAllowedPrefixes, telemetryBlockedPrefixes, err := parsePrefixFilter(opts.Config.PrefixFilter) diff --git a/internalshared/configutil/telemetry_test.go b/internalshared/configutil/telemetry_test.go index dda74711dcb5..285278eeaeba 100644 --- a/internalshared/configutil/telemetry_test.go +++ b/internalshared/configutil/telemetry_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/configutil/userlockout.go b/internalshared/configutil/userlockout.go index ccf51b23b264..674a9fc8bb3f 100644 --- a/internalshared/configutil/userlockout.go +++ b/internalshared/configutil/userlockout.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( @@ -114,7 +117,7 @@ func ParseUserLockouts(result *SharedConfig, list *ast.ObjectList) error { // we set values for these fields with defaults // The issue with not being able to use non-raw entries is because of fields lockout threshold // and disable lockout. 
We cannot differentiate using non-raw entries if the user configured these fields - // with values (0 and false) or if the the user did not configure these values in config file at all. + // with values (0 and false) or if the user did not configure these values in config file at all. // The raw fields are set to nil after setting missing values in setNilValuesForRawUserLockoutFields function userLockoutsMap = setMissingUserLockoutValuesInMap(userLockoutsMap) for _, userLockoutValues := range userLockoutsMap { diff --git a/internalshared/configutil/userlockout_test.go b/internalshared/configutil/userlockout_test.go index d5ab42cbe86a..0bc5f0dce10e 100644 --- a/internalshared/configutil/userlockout_test.go +++ b/internalshared/configutil/userlockout_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package configutil import ( diff --git a/internalshared/listenerutil/bufconn.go b/internalshared/listenerutil/bufconn.go index d3d9d653c576..d471ee118fce 100644 --- a/internalshared/listenerutil/bufconn.go +++ b/internalshared/listenerutil/bufconn.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package listenerutil import ( diff --git a/internalshared/listenerutil/listener.go b/internalshared/listenerutil/listener.go index 6095713be5d2..1f8afe717650 100644 --- a/internalshared/listenerutil/listener.go +++ b/internalshared/listenerutil/listener.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package listenerutil import ( @@ -10,12 +13,12 @@ import ( osuser "os/user" "strconv" + "github.com/hashicorp/cli" "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/reloadutil" "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/vault/internalshared/configutil" "github.com/jefferai/isbadcipher" - "github.com/mitchellh/cli" ) type Listener struct { diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go index 3c2afa593aed..c315fd24326a 100644 --- a/internalshared/listenerutil/listener_test.go +++ b/internalshared/listenerutil/listener_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package listenerutil import ( diff --git a/limits/http_limiter.go b/limits/http_limiter.go new file mode 100644 index 000000000000..19b94e4acb05 --- /dev/null +++ b/limits/http_limiter.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package limits + +import ( + "context" + "errors" + "net/http" +) + +//lint:ignore ST1005 Vault is the product name +var ErrCapacity = errors.New("Vault server temporarily overloaded") + +const ( + WriteLimiter = "write" + SpecialPathLimiter = "special-path" +) + +// HTTPLimiter is a convenience struct that we use to wrap some logical request +// context and prevent dependence on Core. +type HTTPLimiter struct { + Method string + PathLimited bool + LookupFunc func(key string) *RequestLimiter +} + +// CtxKeyDisableRequestLimiter holds the HTTP Listener's disable config if set. +type CtxKeyDisableRequestLimiter struct{} + +func (c CtxKeyDisableRequestLimiter) String() string { + return "disable_request_limiter" +} + +// Acquire checks the HTTPLimiter metadata to determine if an HTTP request +// should be limited, or simply passed through as a no-op. 
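+//
+// A minimal caller sketch (hedged: the handler variables w and r, the
+// isSpecialPath flag, and the registry lookup reg.Lookup are illustrative
+// assumptions, not part of this package):
+//
+//	lim := &HTTPLimiter{
+//		Method:      r.Method,
+//		PathLimited: isSpecialPath,
+//		LookupFunc:  reg.Lookup,
+//	}
+//	listener, ok := lim.Acquire(r.Context())
+//	if !ok {
+//		http.Error(w, ErrCapacity.Error(), http.StatusServiceUnavailable)
+//		return
+//	}
+//	// ...serve the request, then report the outcome with exactly one of
+//	// listener.OnSuccess(), listener.OnDropped() or listener.OnIgnore().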
+func (h *HTTPLimiter) Acquire(ctx context.Context) (*RequestListener, bool) { + // If the limiter is disabled, return an empty wrapper so the limiter is a + // no-op and indicate that the request can proceed. + if disable := ctx.Value(CtxKeyDisableRequestLimiter{}); disable != nil && disable.(bool) { + return &RequestListener{}, true + } + + lim := &RequestLimiter{} + if h.PathLimited { + lim = h.LookupFunc(SpecialPathLimiter) + } else { + switch h.Method { + case http.MethodGet, http.MethodHead, http.MethodTrace, http.MethodOptions: + // We're only interested in the inverse, so do nothing here. + default: + lim = h.LookupFunc(WriteLimiter) + } + } + return lim.Acquire(ctx) +} diff --git a/limits/limiter.go b/limits/limiter.go new file mode 100644 index 000000000000..09c8bd452e1f --- /dev/null +++ b/limits/limiter.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +import ( + "context" +) + +type RequestLimiter struct{} + +// Acquire is a no-op on CE +func (l *RequestLimiter) Acquire(_ctx context.Context) (*RequestListener, bool) { + return &RequestListener{}, true +} + +// EstimatedLimit is effectively 0, since we're not limiting requests on CE. +func (l *RequestLimiter) EstimatedLimit() int { return 0 } diff --git a/limits/listener.go b/limits/listener.go new file mode 100644 index 000000000000..f3bffee8026b --- /dev/null +++ b/limits/listener.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +type RequestListener struct{} + +func (l *RequestListener) OnSuccess() {} + +func (l *RequestListener) OnDropped() {} + +func (l *RequestListener) OnIgnore() {} diff --git a/limits/registry.go b/limits/registry.go new file mode 100644 index 000000000000..a9deee29046a --- /dev/null +++ b/limits/registry.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package limits + +// LimiterRegistry holds the map of RequestLimiters mapped to keys. +type LimiterRegistry struct{} diff --git a/main.go b/main.go index bc8a8651f8f9..35d2f584ee96 100644 --- a/main.go +++ b/main.go @@ -1,17 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main // import "github.com/hashicorp/vault" import ( "os" "github.com/hashicorp/vault/command" - "github.com/hashicorp/vault/internal" ) -func init() { - // this is a good place to patch SHA-1 support back into x509 - internal.PatchSha1() -} - func main() { os.Exit(command.Run(os.Args[1:])) } diff --git a/main_test.go b/main_test.go index 4c4c79a2cb8e..78b0e69a6cb0 100644 --- a/main_test.go +++ b/main_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main // import "github.com/hashicorp/vault" // This file is intentionally empty to force early versions of Go diff --git a/physical/aerospike/aerospike.go b/physical/aerospike/aerospike.go index b323ccd344d4..4e8aeb782afb 100644 --- a/physical/aerospike/aerospike.go +++ b/physical/aerospike/aerospike.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package aerospike import ( diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go index 1a76656f29a8..e519b2da50c6 100644 --- a/physical/aerospike/aerospike_test.go +++ b/physical/aerospike/aerospike_test.go @@ -1,18 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package aerospike import ( "context" + "math/bits" + "runtime" + "strings" "testing" "time" aero "github.com/aerospike/aerospike-client-go/v5" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) func TestAerospikeBackend(t *testing.T) { + if bits.UintSize == 32 { + t.Skip("Aerospike storage is only supported on 64-bit architectures") + } cleanup, config := prepareAerospikeContainer(t) defer cleanup() @@ -40,6 +49,11 @@ type aerospikeConfig struct { } func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ ImageRepo: "docker.mirror.hashicorp.services/aerospike/aerospike-server", ContainerName: "aerospikedb", diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go index 40f3da6d5643..d82287e20268 100644 --- a/physical/alicloudoss/alicloudoss.go +++ b/physical/alicloudoss/alicloudoss.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package alicloudoss import ( diff --git a/physical/alicloudoss/alicloudoss_test.go b/physical/alicloudoss/alicloudoss_test.go index ad292da4f6b7..b7a94db3a818 100644 --- a/physical/alicloudoss/alicloudoss_test.go +++ b/physical/alicloudoss/alicloudoss_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package alicloudoss import ( diff --git a/physical/azure/azure.go b/physical/azure/azure.go index eb158a993d8a..251d2301c0f5 100644 --- a/physical/azure/azure.go +++ b/physical/azure/azure.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package azure import ( diff --git a/physical/azure/azure_test.go b/physical/azure/azure_test.go index 20392a21c688..97b2bbce8822 100644 --- a/physical/azure/azure_test.go +++ b/physical/azure/azure_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package azure import ( diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go index 84c2ab149db2..a7f22c468dc7 100644 --- a/physical/cassandra/cassandra.go +++ b/physical/cassandra/cassandra.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( @@ -101,6 +104,7 @@ func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Ba cluster := gocql.NewCluster(hosts...) cluster.Port = port cluster.Keyspace = keyspace + cluster.Consistency = consistency if retryCountStr, ok := conf["simple_retry_policy_retries"]; ok { retryCount, err := strconv.Atoi(retryCountStr) diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go index e9fe7bc059a5..3370c3947960 100644 --- a/physical/cassandra/cassandra_test.go +++ b/physical/cassandra/cassandra_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go index 385074d917cb..62ffacded1fd 100644 --- a/physical/cockroachdb/cockroachdb.go +++ b/physical/cockroachdb/cockroachdb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cockroachdb import ( @@ -11,7 +14,7 @@ import ( "unicode" metrics "github.com/armon/go-metrics" - "github.com/cockroachdb/cockroach-go/crdb" + "github.com/cockroachdb/cockroach-go/v2/crdb" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" diff --git a/physical/cockroachdb/cockroachdb_ha.go b/physical/cockroachdb/cockroachdb_ha.go index 1f22465d0810..03728d63c236 100644 --- a/physical/cockroachdb/cockroachdb_ha.go +++ b/physical/cockroachdb/cockroachdb_ha.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cockroachdb import ( diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go index 70abfda98293..05d342519c33 100644 --- a/physical/cockroachdb/cockroachdb_test.go +++ b/physical/cockroachdb/cockroachdb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cockroachdb import ( @@ -6,10 +9,12 @@ import ( "fmt" "net/url" "os" + "runtime" + "strings" "testing" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) @@ -23,6 +28,11 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareCockroachDBTestContainer(t *testing.T) (func(), *Config) { + // Skipping, as this image can't run on arm architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as CockroachDB 1.0 is not supported on ARM architectures") + } + if retURL := os.Getenv("CR_URL"); retURL != "" { s, err := docker.NewServiceURLParse(retURL) if err != nil { diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go index 390dc63f8dfc..c57c9eae1a59 100644 --- a/physical/cockroachdb/keywords.go +++ b/physical/cockroachdb/keywords.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cockroachdb // sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name diff --git a/physical/consul/consul.go b/physical/consul/consul.go index f30403468c6b..dec3717a0207 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -7,6 +10,8 @@ import ( "net/http" "strconv" "strings" + "sync" + "sync/atomic" "time" "github.com/armon/go-metrics" @@ -29,20 +34,28 @@ const ( // consistencyModeStrong is the configuration value used to tell // consul to use strong consistency. 
consistencyModeStrong = "strong" + + // nonExistentKey is used as part of a capabilities check against Consul + nonExistentKey = "F35C28E1-7035-40BB-B865-6BED9E3A1B28" ) // Verify ConsulBackend satisfies the correct interfaces var ( - _ physical.Backend = (*ConsulBackend)(nil) - _ physical.HABackend = (*ConsulBackend)(nil) - _ physical.Lock = (*ConsulLock)(nil) - _ physical.Transactional = (*ConsulBackend)(nil) + _ physical.Backend = (*ConsulBackend)(nil) + _ physical.FencingHABackend = (*ConsulBackend)(nil) + _ physical.Lock = (*ConsulLock)(nil) + _ physical.Transactional = (*ConsulBackend)(nil) + _ physical.TransactionalLimits = (*ConsulBackend)(nil) + + GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in consul backend") ) // ConsulBackend is a physical backend that stores data at specific // prefix within Consul. It is used for most production situations as // it allows Vault to run on multiple machines in a highly-available manner. +// failGetInTxn is only used in tests. type ConsulBackend struct { + logger log.Logger client *api.Client path string kv *api.KV @@ -51,6 +64,8 @@ type ConsulBackend struct { consistencyMode string sessionTTL string lockWaitTime time.Duration + failGetInTxn *uint32 + activeNodeLock atomic.Pointer[ConsulLock] } // NewConsulBackend constructs a Consul backend using the given API client @@ -141,15 +156,16 @@ func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backe // Set up the backend c := &ConsulBackend{ + logger: logger, path: path, client: client, kv: client.KV(), txn: client.Txn(), permitPool: physical.NewPermitPool(maxParInt), consistencyMode: consistencyMode, - - sessionTTL: sessionTTL, - lockWaitTime: lockWaitTime, + sessionTTL: sessionTTL, + lockWaitTime: lockWaitTime, + failGetInTxn: new(uint32), } return c, nil @@ -224,14 +240,89 @@ func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string return nil } +// ExpandedCapabilitiesAvailable tests to see if Consul has KVGetOrEmpty and 128 entries per transaction available +func (c *ConsulBackend) ExpandedCapabilitiesAvailable(ctx context.Context) bool { + available := false + + maxEntries := 128 + ops := make([]*api.TxnOp, maxEntries) + for i := 0; i < maxEntries; i++ { + ops[i] = &api.TxnOp{KV: &api.KVTxnOp{ + Key: c.path + nonExistentKey, + Verb: api.KVGetOrEmpty, + }} + } + + c.permitPool.Acquire() + defer c.permitPool.Release() + + queryOpts := &api.QueryOptions{} + queryOpts = queryOpts.WithContext(ctx) + + ok, resp, _, err := c.txn.Txn(ops, queryOpts) + if ok && len(resp.Errors) == 0 && err == nil { + available = true + } + + return available +} + +func (c *ConsulBackend) writeTxnOps(ctx context.Context, len int) ([]*api.TxnOp, string) { + if len < 1 { + len = 1 + } + ops := make([]*api.TxnOp, 0, len+1) + + // If we don't have a lock yet, return a transaction with no session check. We + // need to do this to allow writes during cluster initialization before there + // is an active node. + lock := c.activeNodeLock.Load() + if lock == nil { + return ops, "" + } + + lockKey, lockSession := lock.Info() + if lockKey == "" || lockSession == "" { + return ops, "" + } + + // If the context used to write has been marked as a special case write that + // happens outside of a lock then don't add the session check. + if physical.IsUnfencedWrite(ctx) { + return ops, "" + } + + // Insert the session check operation at index 0. This will allow us later to + // work out easily if a write failure is because of the session check. 
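+ //
+ // Illustrative layout of the fenced batch that ends up being sent to Consul
+ // (n is the caller's entry count):
+ //
+ //	ops[0]    = api.KVTxnOp{Verb: api.KVCheckSession, Key: lockKey, Session: lockSession}
+ //	ops[1..n] = the caller's KVSet / KVDelete / KVGetOrEmpty operations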
+ ops = append(ops, &api.TxnOp{ + KV: &api.KVTxnOp{ + Verb: api.KVCheckSession, + Key: lockKey, + Session: lockSession, + }, + }) + return ops, lockSession +} + // Transaction is used to run multiple entries via a transaction. func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + return c.txnInternal(ctx, txns, "transaction") +} + +func (c *ConsulBackend) txnInternal(ctx context.Context, txns []*physical.TxnEntry, apiOpName string) error { if len(txns) == 0 { return nil } - defer metrics.MeasureSince([]string{"consul", "transaction"}, time.Now()) + defer metrics.MeasureSince([]string{"consul", apiOpName}, time.Now()) + + failGetInTxn := atomic.LoadUint32(c.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } - ops := make([]*api.TxnOp, 0, len(txns)) + ops, sessionID := c.writeTxnOps(ctx, len(txns)) for _, t := range txns { o, err := c.makeApiTxn(t) if err != nil { @@ -257,14 +348,15 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt } return err } - if ok && len(resp.Errors) == 0 { - // Loop over results and cache them in a map. Note that we're only caching the first time we see a key, - // which _should_ correspond to a Get operation, since we expect those come first in our txns slice. + // Loop over results and cache them in a map. Note that we're only caching + // the first time we see a key, which _should_ correspond to a Get + // operation, since we expect those come first in our txns slice (though + // after check-session). for _, txnr := range resp.Results { if len(txnr.KV.Value) > 0 { - // We need to trim the Consul kv path (typically "vault/") from the key otherwise it won't - // match the transaction entries we have. + // We need to trim the Consul kv path (typically "vault/") from the key + // otherwise it won't match the transaction entries we have. key := strings.TrimPrefix(txnr.KV.Key, c.path) if _, found := kvMap[key]; !found { kvMap[key] = txnr.KV.Value @@ -276,6 +368,31 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt if len(resp.Errors) > 0 { for _, res := range resp.Errors { retErr = multierror.Append(retErr, errors.New(res.What)) + if res.OpIndex == 0 && sessionID != "" { + // We added a session check (sessionID not empty) so an error at OpIndex + // 0 means that we failed that session check. We don't attempt to string + // match because Consul can return at least three different errors here + // with no common string. In all cases though failing this check means + // we no longer hold the lock because it was released, modified or + // deleted. Rather than just continuing to try writing until the + // blocking query manages to notice we're no longer the lock holder + // (which can take 10s of seconds even in good network conditions in my + // testing) we can now Unlock directly here. Our ConsulLock now has a + // shortcut that will cause the lock to close the leaderCh immediately + // when we call without waiting for the blocking query to return (unlike + // Consul's current Lock implementation). But before we unlock, we + // should re-load the lock and ensure it's still the same instance we + // just tried to write with in case this goroutine is somehow really + // delayed and we actually acquired a whole new lock in the meantime! 
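+ // In short: only step down if the currently registered lock still carries
+ // the session this write was fenced with.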
+ lock := c.activeNodeLock.Load() + if lock != nil { + _, lockSessionID := lock.Info() + if sessionID == lockSessionID { + c.logger.Warn("session check failed on write, we lost active node lock, stepping down", "err", res.What) + lock.Unlock() + } + } + } } } @@ -301,8 +418,7 @@ func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { } switch txn.Operation { case physical.GetOperation: - // TODO: This is currently broken. Once Consul releases 1.14, this should be updated to use api.KVGetOrEmpty - op.Verb = api.KVGet + op.Verb = api.KVGetOrEmpty case physical.DeleteOperation: op.Verb = api.KVDelete case physical.PutOperation: @@ -315,29 +431,24 @@ func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { return &api.TxnOp{KV: op}, nil } +func (c *ConsulBackend) TransactionLimits() (int, int) { + // Note that even for modern Consul versions that support 128 entries per txn, + // we have an effective limit of 64 write operations because the other 64 are + // used for undo log read operations. We also reserve 1 for a check-session + // operation to prevent split brain so the most we allow WAL to put in a batch + // is 63. + return 63, 128 * 1024 +} + // Put is used to insert or update an entry func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error { - defer metrics.MeasureSince([]string{"consul", "put"}, time.Now()) - - c.permitPool.Acquire() - defer c.permitPool.Release() - - pair := &api.KVPair{ - Key: c.path + entry.Key, - Value: entry.Value, + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: entry, + }, } - - writeOpts := &api.WriteOptions{} - writeOpts = writeOpts.WithContext(ctx) - - _, err := c.kv.Put(pair, writeOpts) - if err != nil { - if strings.Contains(err.Error(), "Value exceeds") { - return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) - } - return err - } - return nil + return c.txnInternal(ctx, txns, "put") } // Get is used to fetch an entry @@ -370,16 +481,15 @@ func (c *ConsulBackend) Get(ctx context.Context, key string) (*physical.Entry, e // Delete is used to permanently delete an entry func (c *ConsulBackend) Delete(ctx context.Context, key string) error { - defer metrics.MeasureSince([]string{"consul", "delete"}, time.Now()) - - c.permitPool.Acquire() - defer c.permitPool.Release() - - writeOpts := &api.WriteOptions{} - writeOpts = writeOpts.WithContext(ctx) - - _, err := c.kv.Delete(c.path+key, writeOpts) - return err + txns := []*physical.TxnEntry{ + { + Operation: physical.DeleteOperation, + Entry: &physical.Entry{ + Key: key, + }, + }, + } + return c.txnInternal(ctx, txns, "delete") } // List is used to list all the keys under a given @@ -409,26 +519,24 @@ func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, erro return out, err } +func (c *ConsulBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(c.failGetInTxn, val) +} + // LockWith is used for mutual exclusion based on the given key. 
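+//
+// A usage sketch (hedged; the backend variable and the key and value literals
+// are illustrative):
+//
+//	lock, _ := backend.LockWith("core/lock", "node-a")
+//	leaderCh, err := lock.Lock(stopCh)
+//	// A nil leaderCh with a nil err means lockWaitTime elapsed without
+//	// acquiring the lock; a non-nil leaderCh is closed if leadership is lost.
+//	defer lock.Unlock()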
func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { - // Create the lock - opts := &api.LockOptions{ - Key: c.path + key, - Value: []byte(value), - SessionName: "Vault Lock", - MonitorRetries: 5, - SessionTTL: c.sessionTTL, - LockWaitTime: c.lockWaitTime, - } - lock, err := c.client.LockOpts(opts) - if err != nil { - return nil, fmt.Errorf("failed to create lock: %w", err) - } cl := &ConsulLock{ + logger: c.logger, client: c.client, key: c.path + key, - lock: lock, + value: value, consistencyMode: c.consistencyMode, + sessionTTL: c.sessionTTL, + lockWaitTime: c.lockWaitTime, } return cl, nil } @@ -453,20 +561,203 @@ func (c *ConsulBackend) DetectHostAddr() (string, error) { return addr, nil } -// ConsulLock is used to provide the Lock interface backed by Consul +// RegisterActiveNodeLock is called after the active node lock is obtained to allow +// us to fence future writes. +func (c *ConsulBackend) RegisterActiveNodeLock(l physical.Lock) error { + cl, ok := l.(*ConsulLock) + if !ok { + return fmt.Errorf("invalid Lock type") + } + c.activeNodeLock.Store(cl) + key, sessionID := cl.Info() + c.logger.Info("registered active node lock", "key", key, "sessionID", sessionID) + return nil +} + +// ConsulLock is used to provide the Lock interface backed by Consul. We work +// around some limitations of Consul's api.Lock noted in +// https://github.com/hashicorp/consul/issues/18271 by creating and managing the +// session ourselves, while using Consul's Lock to do the heavy lifting. type ConsulLock struct { + logger log.Logger client *api.Client key string - lock *api.Lock + value string consistencyMode string + sessionTTL string + lockWaitTime time.Duration + + mu sync.Mutex // protects session state + session *lockSession + // sessionID is a copy of the value from session.id. We use a separate field + // because `Info` needs to keep returning the same sessionID after Unlock has + // cleaned up the session state so that we continue to fence any writes still + // in flight after the lock is Unlocked. It's easier to reason about that as a + // separate field rather than keeping an already-terminated session object + // around. Once Lock is called again this will be replaced (while mu is + // locked) with the new session ID. Must hold mu to read or write this. + sessionID string +} + +type lockSession struct { + // id is immutable after the session is created so does not need mu held + id string + + // mu protects the lock and unlockCh to ensure they are only cleaned up once + mu sync.Mutex + lock *api.Lock + unlockCh chan struct{} +} + +func (s *lockSession) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + s.mu.Lock() + defer s.mu.Unlock() + + lockHeld := false + defer func() { + if !lockHeld { + s.cleanupLocked() + } + }() + + consulLeaderCh, err := s.lock.Lock(stopCh) + if err != nil { + return nil, err + } + if consulLeaderCh == nil { + // If both leaderCh and err are nil from Consul's Lock then it means we + // waited for the lockWait without grabbing it. + return nil, nil + } + // We got the Lock, monitor it! + lockHeld = true + leaderCh := make(chan struct{}) + go s.monitorLock(leaderCh, s.unlockCh, consulLeaderCh) + return leaderCh, nil +} + +// monitorLock waits for either unlockCh or consulLeaderCh to close and then +// closes leaderCh. It's designed to be run in a separate goroutine. Note that +// we pass unlockCh rather than accessing it via the member variable because it +// is mutated under the lock during Unlock so reading it from c could be racy.
+// We just need the chan created at the call site here so we pass it instead of +// locking and unlocking in here. +func (s *lockSession) monitorLock(leaderCh chan struct{}, unlockCh, consulLeaderCh <-chan struct{}) { + select { + case <-unlockCh: + case <-consulLeaderCh: + } + // We lost the lock. Close the leaderCh + close(leaderCh) + + // Whichever chan closed, cleanup to unwind all the state. If we were + // triggered by a cleanup call this will be a no-op, but if not it ensures all + // state is cleaned up correctly. + s.cleanup() +} + +func (s *lockSession) cleanup() { + s.mu.Lock() + defer s.mu.Unlock() + + s.cleanupLocked() +} + +func (s *lockSession) cleanupLocked() { + if s.lock != nil { + s.lock.Unlock() + s.lock = nil + } + if s.unlockCh != nil { + close(s.unlockCh) + s.unlockCh = nil + } + // Don't bother destroying sessions as they will be destroyed after TTL + // anyway. +} + +func (c *ConsulLock) createSession() (*lockSession, error) { + se := &api.SessionEntry{ + Name: "Vault Lock", + TTL: c.sessionTTL, + // We use Consul's default LockDelay of 15s by not specifying it + } + session, _, err := c.client.Session().Create(se, nil) + if err != nil { + return nil, err + } + + opts := &api.LockOptions{ + Key: c.key, + Value: []byte(c.value), + Session: session, + MonitorRetries: 5, + LockWaitTime: c.lockWaitTime, + SessionTTL: c.sessionTTL, + } + lock, err := c.client.LockOpts(opts) + if err != nil { + // Don't bother destroying sessions as they will be destroyed after TTL + // anyway. + return nil, fmt.Errorf("failed to create lock: %w", err) + } + + unlockCh := make(chan struct{}) + + s := &lockSession{ + id: session, + lock: lock, + unlockCh: unlockCh, + } + + // Start renewals of the session + go func() { + // Note we capture unlockCh here rather than s.unlockCh because s.unlockCh + // is mutated on cleanup which is racy since we don't hold a lock here. + // unlockCh will never be mutated though. + err := c.client.Session().RenewPeriodic(c.sessionTTL, session, nil, unlockCh) + if err != nil { + c.logger.Error("failed to renew consul session for more than the TTL, lock lost", "err", err) + } + // Release other resources for this session only (i.e. don't call c.Unlock, + // as the lock might now be held under a different session). + s.cleanup() + }() + return s, nil } func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - return c.lock.Lock(stopCh) + c.mu.Lock() + defer c.mu.Unlock() + + if c.session != nil { + return nil, fmt.Errorf("lock instance already locked") + } + + session, err := c.createSession() + if err != nil { + return nil, err + } + leaderCh, err := session.Lock(stopCh) + if leaderCh != nil && err == nil { + // We hold the lock, store the session + c.session = session + c.sessionID = session.id + } + return leaderCh, err } func (c *ConsulLock) Unlock() error { - return c.lock.Unlock() + c.mu.Lock() + defer c.mu.Unlock() + + if c.session != nil { + c.session.cleanup() + c.session = nil + // Don't clear c.sessionID since we rely on returning the same old ID after + // Unlock until the next Lock.
+ } + return nil } func (c *ConsulLock) Value() (bool, string, error) { @@ -486,7 +777,18 @@ func (c *ConsulLock) Value() (bool, string, error) { if pair == nil { return false, "", nil } + // Note that held is expected to mean "does _any_ node hold the lock" not + // "does this current instance hold the lock" so although we know what our own + // session ID is, we don't check it matches here only that there is _some_ + // session in Consul holding the lock right now. held := pair.Session != "" value := string(pair.Value) return held, value, nil } + +func (c *ConsulLock) Info() (key, sessionid string) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.key, c.sessionID +} diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index b2a06e612520..bf1d809afdde 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -16,6 +19,7 @@ import ( "github.com/hashicorp/vault/helper/testhelpers/consul" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" + "github.com/stretchr/testify/require" ) func TestConsul_newConsulBackend(t *testing.T) { @@ -155,6 +159,10 @@ func TestConsul_newConsulBackend(t *testing.T) { // if test.max_parallel != cap(c.permitPool) { // t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool)) // } + + maxEntries, maxBytes := be.(physical.TransactionalLimits).TransactionLimits() + require.Equal(t, 63, maxEntries) + require.Equal(t, 128*1024, maxBytes) } } @@ -251,11 +259,41 @@ func TestConsul_TooLarge(t *testing.T) { } } -func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *testing.T) { - // TODO: unskip this after Consul releases 1.14 and we update our API dep. It currently fails but should pass with Consul 1.14 - t.SkipNow() +func TestConsul_ExpandedCapabilitiesAvailable(t *testing.T) { + testCases := map[string]bool{ + "1.13.5": false, + "1.14.3": true, + } - cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) + for version, shouldBeAvailable := range testCases { + t.Run(version, func(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + logger := logging.NewVaultLogger(log.Debug) + backendConfig := map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": "vault/", + "max_parallel": "-1", + } + + be, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatal(err) + } + b := be.(*ConsulBackend) + + isAvailable := b.ExpandedCapabilitiesAvailable(context.Background()) + if isAvailable != shouldBeAvailable { + t.Errorf("%t != %t, version %s\n", isAvailable, shouldBeAvailable, version) + } + }) + } +} + +func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) defer cleanup() client, err := api.NewClient(config.APIConfig()) @@ -316,10 +354,7 @@ func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *test // TestConsul_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the // consul backend will populate values for any transactions that are Get operations. func TestConsul_TransactionalBackend_GetTransactions(t *testing.T) { - // TODO: unskip this after Consul releases 1.14 and we update our API dep. 
It currently fails but should pass with Consul 1.14 - t.SkipNow() - - cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) + cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) defer cleanup() client, err := api.NewClient(config.APIConfig()) @@ -412,7 +447,9 @@ func TestConsulHABackend(t *testing.T) { t.Fatalf("err: %v", err) } - randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) + // We used to use a timestamp here, but if you run multiple instances in + // parallel with one Consul they end up conflicting. + randPath := fmt.Sprintf("vault-%d/", rand.Int()) defer func() { client.KV().DeleteTree(randPath, nil) }() @@ -423,6 +460,10 @@ func TestConsulHABackend(t *testing.T) { "token": config.Token, "path": randPath, "max_parallel": "-1", + // We have to wait this out as part of the test, so shortening it a little from + // the default 15 seconds helps with test run times, especially when running + // this in a loop to detect flakes! + "lock_wait_time": "3s", } b, err := NewConsulBackend(backendConfig, logger) @@ -448,4 +489,44 @@ func TestConsulHABackend(t *testing.T) { if host == "" { t.Fatalf("bad addr: %v", host) } + + // Calling `Info` on a Lock that has been unlocked must still return the old + // sessionID (until it is locked again), otherwise we will fail to fence writes + // that are still in flight from before (e.g. queued WAL or Merkle flushes) as + // soon as the first one unlocks the session, allowing corruption again. + l, err := b.(physical.HABackend).LockWith("test-lock-session-info", "bar") + require.NoError(t, err) + + expectKey := randPath + "test-lock-session-info" + + cl := l.(*ConsulLock) + + stopCh := make(chan struct{}) + time.AfterFunc(5*time.Second, func() { + close(stopCh) + }) + leaderCh, err := cl.Lock(stopCh) + require.NoError(t, err) + require.NotNil(t, leaderCh) + + key, sid := cl.Info() + require.Equal(t, expectKey, key) + require.NotEmpty(t, sid) + + // Now Unlock the lock; Info must still return the old sessionID (it is only + // replaced on the next Lock) + err = cl.Unlock() + require.NoError(t, err) + key2, sid2 := cl.Info() + require.Equal(t, key, key2) + require.Equal(t, sid, sid2) + + // Lock it again; this should cause a new session to be created, so the SID + // should change. + leaderCh, err = cl.Lock(stopCh) + require.NoError(t, err) + require.NotNil(t, leaderCh) + + key3, sid3 := cl.Info() + require.Equal(t, key, key3) + require.NotEqual(t, sid, sid3) } diff --git a/physical/consul/helpers.go index 71c30b310068..2f6ac574b0b7 100644 --- a/physical/consul/helpers.go +++ b/physical/consul/helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( diff --git a/physical/couchdb/couchdb.go index 86fc139ed92d..0a1c379c6ee9 100644 --- a/physical/couchdb/couchdb.go +++ b/physical/couchdb/couchdb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package couchdb import ( diff --git a/physical/couchdb/couchdb_test.go index abf11b7c1aae..bc8f6b41dffa 100644 --- a/physical/couchdb/couchdb_test.go +++ b/physical/couchdb/couchdb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package couchdb import ( @@ -7,12 +10,13 @@ import ( "net/http" "net/url" "os" + "runtime" "strings" "testing" "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" ) @@ -75,6 +79,13 @@ func (c couchDB) URL() *url.URL { var _ docker.ServiceConfig = &couchDB{} func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) { + // ARM64 is only supported on CouchDB 2 and above. If we update + // our image and support to 2 and above, we can unskip these: + // https://hub.docker.com/r/arm64v8/couchdb/ + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as CouchDB 1.6 is not supported on ARM architectures") + } + // If environment variable is set, assume caller wants to target a real // DynamoDB. if os.Getenv("COUCHDB_ENDPOINT") != "" { diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go index 18c2bbf50a4d..f15db3959237 100644 --- a/physical/dynamodb/dynamodb.go +++ b/physical/dynamodb/dynamodb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package dynamodb import ( @@ -12,6 +15,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" log "github.com/hashicorp/go-hclog" @@ -86,7 +90,7 @@ type DynamoDBBackend struct { client *dynamodb.DynamoDB logger log.Logger haEnabled bool - permitPool *physical.PermitPool + permitPool *PermitPoolWithMetrics } // DynamoDBRecord is the representation of a vault entry in @@ -119,6 +123,12 @@ type DynamoDBLockRecord struct { Expires int64 } +type PermitPoolWithMetrics struct { + physical.PermitPool + pendingPermits int32 + poolSize int +} + // NewDynamoDBBackend constructs a DynamoDB backend. If the // configured DynamoDB table does not exist, it creates it. func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { @@ -245,7 +255,7 @@ func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Bac return &DynamoDBBackend{ table: table, client: client, - permitPool: physical.NewPermitPool(maxParInt), + permitPool: NewPermitPoolWithMetrics(maxParInt), haEnabled: haEnabledBool, logger: logger, }, nil @@ -519,7 +529,6 @@ func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest) output, err = d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{ RequestItems: batch, }) - if err != nil { break } @@ -849,7 +858,7 @@ func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, wr // recordPathForVaultKey transforms a vault key into // a value suitable for the `DynamoDBRecord`'s `Path` -// property. This path equals the the vault key without +// property. This path equals the vault key without // its last component. func recordPathForVaultKey(key string) string { if strings.Contains(key, "/") { @@ -860,7 +869,7 @@ func recordPathForVaultKey(key string) string { // recordKeyForVaultKey transforms a vault key into // a value suitable for the `DynamoDBRecord`'s `Key` -// property. This path equals the the vault key's +// property. This path equals the vault key's // last component. 
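+// For example (illustrative key): recordPathForVaultKey("foo/bar/baz") yields
+// "foo/bar", while recordKeyForVaultKey("foo/bar/baz") yields "baz".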
func recordKeyForVaultKey(key string) string { return pkgPath.Base(key) @@ -906,3 +915,39 @@ func isConditionCheckFailed(err error) bool { return false } + +// NewPermitPoolWithMetrics returns a new permit pool with the provided +// number of permits which emits metrics +func NewPermitPoolWithMetrics(permits int) *PermitPoolWithMetrics { + return &PermitPoolWithMetrics{ + PermitPool: *physical.NewPermitPool(permits), + pendingPermits: 0, + poolSize: permits, + } +} + +// Acquire returns when a permit has been acquired +func (c *PermitPoolWithMetrics) Acquire() { + atomic.AddInt32(&c.pendingPermits, 1) + c.emitPermitMetrics() + c.PermitPool.Acquire() + atomic.AddInt32(&c.pendingPermits, -1) + c.emitPermitMetrics() +} + +// Release returns a permit to the pool +func (c *PermitPoolWithMetrics) Release() { + c.PermitPool.Release() + c.emitPermitMetrics() +} + +// Get the number of requests in the permit pool +func (c *PermitPoolWithMetrics) CurrentPermits() int { + return c.PermitPool.CurrentPermits() +} + +func (c *PermitPoolWithMetrics) emitPermitMetrics() { + metrics.SetGauge([]string{"dynamodb", "permit_pool", "pending_permits"}, float32(c.pendingPermits)) + metrics.SetGauge([]string{"dynamodb", "permit_pool", "active_permits"}, float32(c.PermitPool.CurrentPermits())) + metrics.SetGauge([]string{"dynamodb", "permit_pool", "pool_size"}, float32(c.poolSize)) +} diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go index 1058a6e21026..70a45d01f01a 100644 --- a/physical/dynamodb/dynamodb_test.go +++ b/physical/dynamodb/dynamodb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package dynamodb import ( @@ -7,12 +10,14 @@ import ( "net/http" "net/url" "os" + "runtime" + "strings" "testing" "time" "github.com/go-test/deep" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" @@ -370,6 +375,11 @@ type Config struct { var _ docker.ServiceConfig = &Config{} func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + // If environment variable is set, assume caller wants to target a real // DynamoDB. if endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT"); endpoint != "" { diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go index 5bb8d4a31c1b..1d332dc9c43b 100644 --- a/physical/etcd/etcd.go +++ b/physical/etcd/etcd.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package etcd import ( diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go index 486d448febf7..3182f2d83c96 100644 --- a/physical/etcd/etcd3.go +++ b/physical/etcd/etcd3.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package etcd import ( @@ -11,7 +14,7 @@ import ( "sync" "time" - metrics "github.com/armon/go-metrics" + "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/strutil" @@ -119,6 +122,15 @@ func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backen cfg.MaxCallRecvMsgSize = int(val) } + if maxSend, ok := conf["max_send_size"]; ok { + // grpc converts this to uint32 internally, so parse as that to avoid passing invalid values + val, err := strconv.ParseUint(maxSend, 10, 32) + if err != nil { + return nil, fmt.Errorf("value of 'max_send_size' (%v) could not be understood: %w", maxSend, err) + } + cfg.MaxCallSendMsgSize = int(val) + } + etcd, err := clientv3.New(cfg) if err != nil { return nil, err @@ -235,7 +247,7 @@ func (c *EtcdBackend) List(ctx context.Context, prefix string) ([]string, error) ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) defer cancel() prefix = path.Join(c.path, prefix) + "/" - resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix()) + resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly()) if err != nil { return nil, err } diff --git a/physical/etcd/etcd3_test.go b/physical/etcd/etcd3_test.go index 71150a698c63..7af1ecd7163e 100644 --- a/physical/etcd/etcd3_test.go +++ b/physical/etcd/etcd3_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package etcd import ( @@ -23,7 +26,7 @@ func TestEtcd3Backend(t *testing.T) { "username": "root", "password": "insecure", - // Syncing adverticed client urls should be disabled since docker port mapping confuses the client. + // Syncing advertised client urls should be disabled since docker port mapping confuses the client. "sync": "false", } diff --git a/physical/foundationdb/fdb-go-install.sh b/physical/foundationdb/fdb-go-install.sh index 550d5cf4d14e..8b56b09b25c8 100755 --- a/physical/foundationdb/fdb-go-install.sh +++ b/physical/foundationdb/fdb-go-install.sh @@ -1,4 +1,7 @@ #!/bin/bash -eu +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # # fdb-go-install.sh # diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go index 56305b2fbf7d..b62e89da40aa 100644 --- a/physical/foundationdb/foundationdb.go +++ b/physical/foundationdb/foundationdb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build foundationdb package foundationdb @@ -448,7 +451,6 @@ func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error { return nil, nil }) - if err != nil { return fmt.Errorf("put failed for item %s: %w", entry.Key, err) } @@ -506,7 +508,6 @@ func (f *FDBBackend) Delete(ctx context.Context, key string) error { return nil, nil }) - if err != nil { return fmt.Errorf("delete failed for item %s: %w", key, err) } diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go index c6fe75d5ebfd..2ced4742c0f2 100644 --- a/physical/foundationdb/foundationdb_test.go +++ b/physical/foundationdb/foundationdb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + //go:build foundationdb package foundationdb diff --git a/physical/foundationdb/foundationdbstub.go b/physical/foundationdb/foundationdbstub.go index 4fc2734e50b1..d8669fb6464b 100644 --- a/physical/foundationdb/foundationdbstub.go +++ b/physical/foundationdb/foundationdbstub.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !foundationdb package foundationdb diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go index b5d1f6b9ff42..eea7a515ac8d 100644 --- a/physical/gcs/gcs.go +++ b/physical/gcs/gcs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package gcs import ( diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go index 3a8e45d98190..279b79ab1f08 100644 --- a/physical/gcs/gcs_ha.go +++ b/physical/gcs/gcs_ha.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package gcs import ( diff --git a/physical/gcs/gcs_ha_test.go b/physical/gcs/gcs_ha_test.go index 8e1b91e77793..ab6ca888a698 100644 --- a/physical/gcs/gcs_ha_test.go +++ b/physical/gcs/gcs_ha_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package gcs import ( diff --git a/physical/gcs/gcs_test.go b/physical/gcs/gcs_test.go index 4caab730faa7..6ee9ab432c81 100644 --- a/physical/gcs/gcs_test.go +++ b/physical/gcs/gcs_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package gcs import ( diff --git a/physical/manta/manta.go b/physical/manta/manta.go index 390683d3695f..5ab1c4e05791 100644 --- a/physical/manta/manta.go +++ b/physical/manta/manta.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package manta import ( diff --git a/physical/manta/manta_test.go b/physical/manta/manta_test.go index 8db52c53ab0e..11b024dd918c 100644 --- a/physical/manta/manta_test.go +++ b/physical/manta/manta_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package manta import ( diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index 65c85ae3e454..ef6d54e0f5a5 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mssql import ( "context" "database/sql" "fmt" + "regexp" "sort" "strconv" "strings" @@ -17,7 +21,10 @@ import ( ) // Verify MSSQLBackend satisfies the correct interfaces -var _ physical.Backend = (*MSSQLBackend)(nil) +var ( + _ physical.Backend = (*MSSQLBackend)(nil) + identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) +) type MSSQLBackend struct { dbTable string @@ -27,6 +34,13 @@ type MSSQLBackend struct { permitPool *physical.PermitPool } +func isInvalidIdentifier(name string) bool { + if !identifierRegex.MatchString(name) { + return true + } + return false +} + func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { username, ok := conf["username"] if !ok { @@ -68,11 +82,19 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen database = "Vault" } + if isInvalidIdentifier(database) { + return nil, fmt.Errorf("invalid database name") + } + table, ok := conf["table"] if !ok { table = "Vault" } + if isInvalidIdentifier(table) { + return nil, fmt.Errorf("invalid table name") + } + appname, ok := conf["appname"] if !ok { appname = "Vault" @@ -93,6 +115,10 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen schema = "dbo" } + if isInvalidIdentifier(schema) { + return nil, fmt.Errorf("invalid schema name") + } + connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel) if username != "" { connectionString += ";user id=" + username @@ -113,18 +139,17 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen db.SetMaxOpenConns(maxParInt) - if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil { + if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = ?) CREATE DATABASE "+database, database); err != nil { return nil, fmt.Errorf("failed to create mssql database: %w", err) } dbTable := database + "." + schema + "." + table - createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME='" + table + "' AND TABLE_SCHEMA='" + schema + - "') CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))" + createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME=? AND TABLE_SCHEMA=?) CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))" if schema != "dbo" { var num int - err = db.QueryRow("SELECT 1 FROM " + database + ".sys.schemas WHERE name = '" + schema + "'").Scan(&num) + err = db.QueryRow("SELECT 1 FROM "+database+".sys.schemas WHERE name = ?", schema).Scan(&num) switch { case err == sql.ErrNoRows: @@ -137,7 +162,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen } } - if _, err := db.Exec(createQuery); err != nil { + if _, err := db.Exec(createQuery, table, schema); err != nil { return nil, fmt.Errorf("failed to create mssql table: %w", err) } diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index f447b0355eb0..6b794f10bfb4 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -1,16 +1,59 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mssql import ( "os" "testing" + _ "github.com/denisenkom/go-mssqldb" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" - - _ "github.com/denisenkom/go-mssqldb" ) +// TestInvalidIdentifier checks validity of an identifier +func TestInvalidIdentifier(t *testing.T) { + testcases := map[string]bool{ + "name": true, + "_name": true, + "Name": true, + "#name": false, + "?Name": false, + "9name": false, + "@name": false, + "$name": false, + " name": false, + "n ame": false, + "n4444444": true, + "_4321098765": true, + "_##$$@@__": true, + "_123name#@": true, + "name!": false, + "name%": false, + "name^": false, + "name&": false, + "name*": false, + "name(": false, + "name)": false, + "nåame": true, + "åname": true, + "name'": false, + "nam`e": false, + "пример": true, + "_#Āā@#$_ĂĄąćĈĉĊċ": true, + "ÛÜÝÞßàáâ": true, + "豈更滑a23$#@": true, + } + + for i, expected := range testcases { + if !isInvalidIdentifier(i) != expected { + t.Fatalf("unexpected identifier %s: expected validity %v", i, expected) + } + } +} + func TestMSSQLBackend(t *testing.T) { server := os.Getenv("MSSQL_SERVER") if server == "" { diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go index 29bb3928ab81..40d9611a9d4f 100644 --- a/physical/mysql/mysql.go +++ b/physical/mysql/mysql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go index 86373e91629e..30d8372caa1f 100644 --- a/physical/mysql/mysql_test.go +++ b/physical/mysql/mysql_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( diff --git a/physical/oci/oci_ha_test.go b/physical/oci/oci_ha_test.go index 6dfaba9768c3..b7213b9ceb28 100644 --- a/physical/oci/oci_ha_test.go +++ b/physical/oci/oci_ha_test.go @@ -16,6 +16,11 @@ func TestOCIHABackend(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.SkipNow() } + + if !hasOCICredentials() { + t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") + } + bucketName, _ := uuid.GenerateUUID() configProvider := common.DefaultConfigProvider() objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) diff --git a/physical/oci/oci_test.go b/physical/oci/oci_test.go index 46edcb8c7247..e20b808fdc60 100644 --- a/physical/oci/oci_test.go +++ b/physical/oci/oci_test.go @@ -19,6 +19,11 @@ func TestOCIBackend(t *testing.T) { if os.Getenv("VAULT_ACC") == "" { t.SkipNow() } + + if !hasOCICredentials() { + t.Skip("Skipping because OCI credentials could not be resolved. 
See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") + } + bucketName, _ := uuid.GenerateUUID() configProvider := common.DefaultConfigProvider() objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) @@ -87,3 +92,14 @@ func getNamespaceName(objectStorageClient objectstorage.ObjectStorageClient, t * nameSpaceName := *response.Value return nameSpaceName } + +func hasOCICredentials() bool { + configProvider := common.DefaultConfigProvider() + + _, err := configProvider.KeyID() + if err != nil { + return false + } + + return true +} diff --git a/physical/postgresql/postgresql.go index ed4c883440ca..911dfa18f3e2 100644 --- a/physical/postgresql/postgresql.go +++ b/physical/postgresql/postgresql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package postgresql import ( diff --git a/physical/postgresql/postgresql_test.go index 15d1ab35076d..301fc15ec263 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package postgresql import ( @@ -156,7 +159,7 @@ func TestConnectionURL(t *testing.T) { for name, tt := range cases { t.Run(name, func(t *testing.T) { // This is necessary to avoid always testing the branch where the env is set. - // As long the the env is set --- even if the value is "" --- `ok` returns true. + // As long as the env is set --- even if the value is "" --- `ok` returns true. if tt.input.envar != "" { os.Setenv("VAULT_PG_CONNECTION_URL", tt.input.envar) defer os.Unsetenv("VAULT_PG_CONNECTION_URL") diff --git a/physical/raft/bolt_32bit_test.go index ccb1641ea299..4e6aaccacd3a 100644 --- a/physical/raft/bolt_32bit_test.go +++ b/physical/raft/bolt_32bit_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build 386 || arm package raft diff --git a/physical/raft/bolt_64bit_test.go index d88c01eed594..f6f1bbd2dee3 100644 --- a/physical/raft/bolt_64bit_test.go +++ b/physical/raft/bolt_64bit_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !386 && !arm package raft diff --git a/physical/raft/bolt_linux.go index 4ea13e2a3986..811c148a1050 100644 --- a/physical/raft/bolt_linux.go +++ b/physical/raft/bolt_linux.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( diff --git a/physical/raft/chunking_test.go index bdd950b566d2..ad4188f59552 100644 --- a/physical/raft/chunking_test.go +++ b/physical/raft/chunking_test.go @@ -1,16 +1,19 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1 + package raft import ( "bytes" "context" - fmt "fmt" + "fmt" "os" "testing" - proto "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" "github.com/hashicorp/go-raftchunking" raftchunkingtypes "github.com/hashicorp/go-raftchunking/types" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/vault/sdk/physical" @@ -26,7 +29,7 @@ func TestRaft_Chunking_Lifecycle(t *testing.T) { require := require.New(t) assert := assert.New(t) - b, dir := getRaft(t, true, false) + b, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) t.Log("applying configuration") @@ -111,7 +114,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) { require := require.New(t) assert := assert.New(t) - b, dir := getRaft(t, true, false) + b, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) t.Log("applying configuration") @@ -183,9 +186,7 @@ func TestFSM_Chunking_TermChange(t *testing.T) { } func TestRaft_Chunking_AppliedIndex(t *testing.T) { - t.Parallel() - - raft, dir := getRaft(t, true, false) + raft, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) // Lower the size for tests diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go index c2d9953ce3f4..19329b67d5f7 100644 --- a/physical/raft/fsm.go +++ b/physical/raft/fsm.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( "bytes" "context" + "encoding/binary" "encoding/hex" "errors" "fmt" @@ -17,15 +21,16 @@ import ( "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-raftchunking" "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/raft" + "github.com/hashicorp/raft-wal/verifier" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/sdk/plugin/pb" - bolt "go.etcd.io/bbolt" ) const ( @@ -33,6 +38,7 @@ const ( putOp restoreCallbackOp getOp + verifierCheckpointOp chunkingPrefix = "raftchunking/" databaseFilename = "vault.db" @@ -55,6 +61,12 @@ var ( _ raft.BatchingFSM = (*FSM)(nil) ) +var logVerifierMagicBytes [8]byte + +func init() { + binary.LittleEndian.PutUint64(logVerifierMagicBytes[:], verifier.ExtensionMagicPrefix) +} + type restoreCallback func(context.Context) error type FSMEntry struct { @@ -73,6 +85,69 @@ type FSMApplyResponse struct { EntrySlice []*FSMEntry } +type logVerificationChunkingShim struct { + chunker *raftchunking.ChunkingBatchingFSM +} + +// Apply implements raft.BatchingFSM. +func (s *logVerificationChunkingShim) Apply(l *raft.Log) interface{} { + return s.ApplyBatch([]*raft.Log{l})[0] +} + +// ApplyBatch implements raft.BatchingFSM +func (s *logVerificationChunkingShim) ApplyBatch(logs []*raft.Log) []interface{} { + // This is a hack because raftchunking doesn't play nicely with lower-level + // usage of Extensions field like we need for LogStore verification. + + // When we write a verifier log, we write a single byte that consists of the verifierCheckpointOp, + // and then we encode the verifier.ExtensionMagicPrefix into the raft log + // Extensions field. Both of those together should ensure that verifier + // raft logs can never be mistaken for chunked protobufs. 
See the docs on + // verifier.ExtensionMagicPrefix for the reasoning behind the specific value + // that was chosen, and how it ensures this property. + + // So here, we need to check for the exact conditions that we encoded when we wrote the + // verifier log out. If they match, we're going to insert a dummy raft log. We do this because 1) we + // don't want the chunking FSM to blow up on our verifier op that it won't understand and + // 2) we need to preserve the length of the incoming slice of raft logs because raft expects + // the length of the return value to match 1:1 to the length of the input operations. + newBatch := make([]*raft.Log, 0, len(logs)) + + for _, l := range logs { + if s.isVerifierLog(l) { + // Replace checkpoint with an empty op, but keep the index and term so + // downstream FSMs don't get confused about having a 0 index suddenly. + newBatch = append(newBatch, &raft.Log{ + Index: l.Index, + Term: l.Term, + AppendedAt: l.AppendedAt, + }) + } else { + newBatch = append(newBatch, l) + } + } + + return s.chunker.ApplyBatch(newBatch) +} + +// Snapshot implements raft.BatchingFSM +func (s *logVerificationChunkingShim) Snapshot() (raft.FSMSnapshot, error) { + return s.chunker.Snapshot() +} + +// Restore implements raft.BatchingFSM +func (s *logVerificationChunkingShim) Restore(snapshot io.ReadCloser) error { + return s.chunker.Restore(snapshot) +} + +func (s *logVerificationChunkingShim) RestoreState(state *raftchunking.State) error { + return s.chunker.RestoreState(state) +} + +func (s *logVerificationChunkingShim) isVerifierLog(l *raft.Log) bool { + return isRaftLogVerifyCheckpoint(l) +} + // FSM is Vault's primary state storage. It writes updates to a bolt db file // that lives on local disk. FSM implements raft.FSM and physical.Backend // interfaces. 
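
To make the shim's detect-and-replace step above concrete, here is a standalone Go sketch. It is illustrative only: `checkpointOp` and `magic` are placeholder values standing in for the real `verifierCheckpointOp` and `verifier.ExtensionMagicPrefix` constants, which live in the Vault and raft-wal packages.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/hashicorp/raft"
)

// Placeholder stand-ins for the real constants (illustration only).
const checkpointOp byte = 4

var magic [8]byte

func init() {
	// Placeholder magic value; the real one is verifier.ExtensionMagicPrefix.
	binary.LittleEndian.PutUint64(magic[:], 0x0123456789abcdef)
}

// isCheckpoint mirrors the decision described above: a single checkpoint byte
// with either no Extensions (leader) or the magic prefix (follower) is a
// verifier checkpoint; anything else is not.
func isCheckpoint(l *raft.Log) bool {
	if !bytes.Equal(l.Data, []byte{checkpointOp}) {
		return false
	}
	return len(l.Extensions) == 0 || bytes.HasPrefix(l.Extensions, magic[:])
}

func main() {
	in := []*raft.Log{
		{Index: 10, Term: 2, Data: []byte("ordinary entry")},
		{Index: 11, Term: 2, Data: []byte{checkpointOp}, Extensions: magic[:]},
	}

	// Replace checkpoints with empty no-ops, keeping Index and Term so
	// downstream FSMs never see a zero index, and keeping the output batch
	// 1:1 with the input, as raft requires.
	out := make([]*raft.Log, 0, len(in))
	for _, l := range in {
		if isCheckpoint(l) {
			out = append(out, &raft.Log{Index: l.Index, Term: l.Term, AppendedAt: l.AppendedAt})
		} else {
			out = append(out, l)
		}
	}

	fmt.Println(len(out) == len(in)) // true
}
```
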
@@ -100,7 +175,7 @@ type FSM struct { // retoreCb is called after we've restored a snapshot restoreCb restoreCallback - chunker *raftchunking.ChunkingBatchingFSM + chunker *logVerificationChunkingShim localID string desiredSuffrage string @@ -131,10 +206,12 @@ func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) { localID: localID, } - f.chunker = raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{ - f: f, - ctx: context.Background(), - }) + f.chunker = &logVerificationChunkingShim{ + chunker: raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{ + f: f, + ctx: context.Background(), + }), + } dbPath := filepath.Join(path, databaseFilename) f.l.Lock() @@ -605,11 +682,16 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { switch l.Type { case raft.LogCommand: command := &LogData{} - err := proto.Unmarshal(l.Data, command) - if err != nil { - f.logger.Error("error proto unmarshaling log data", "error", err) - panic("error proto unmarshaling log data") + + // explicitly check for zero length Data, which will be the case for verifier no-ops + if len(l.Data) > 0 { + err := proto.Unmarshal(l.Data, command) + if err != nil { + f.logger.Error("error proto unmarshaling log data", "error", err, "data", l.Data) + panic("error proto unmarshaling log data") + } } + commands = append(commands, command) case raft.LogConfiguration: configuration := raft.DecodeConfiguration(l.Data) @@ -656,6 +738,7 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { entrySlice := make([]*FSMEntry, 0) switch command := commandRaw.(type) { case *LogData: + // empty logs will have a zero length slice of Operations, so this loop will be a no-op for _, op := range command.Operations { var err error switch op.OpType { diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go index e80a6ce5573f..44557048bd2b 100644 --- a/physical/raft/fsm_test.go +++ b/physical/raft/fsm_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( diff --git a/physical/raft/io.go b/physical/raft/io.go index d3d3d4b4cb6b..98f96bc97012 100644 --- a/physical/raft/io.go +++ b/physical/raft/io.go @@ -45,6 +45,7 @@ type WriteCloser interface { type Reader interface { ReadMsg(msg proto.Message) error + GetLastReadSize() int } type ReadCloser interface { diff --git a/physical/raft/msgpack.go b/physical/raft/msgpack.go deleted file mode 100644 index 299dd8e0a98d..000000000000 --- a/physical/raft/msgpack.go +++ /dev/null @@ -1,10 +0,0 @@ -package raft - -// If we downgrade msgpack from v1.1.5 to v0.5.5, everything will still -// work, but any pre-existing raft clusters will break on upgrade. -// This file exists so that the Vault project has an explicit dependency -// on the library, which allows us to pin the version in go.mod. - -import ( - _ "github.com/hashicorp/go-msgpack/codec" -) diff --git a/physical/raft/raft.go b/physical/raft/raft.go index e3c720630d55..8958ba78065d 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -1,24 +1,30 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package raft import ( + "bytes" "context" "crypto/tls" "errors" "fmt" "io" - "io/ioutil" "math/rand" + "net/url" "os" "path/filepath" "strconv" "sync" + "sync/atomic" "time" "github.com/armon/go-metrics" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" "github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-secure-stdlib/tlsutil" "github.com/hashicorp/go-uuid" goversion "github.com/hashicorp/go-version" @@ -26,15 +32,16 @@ import ( autopilot "github.com/hashicorp/raft-autopilot" raftboltdb "github.com/hashicorp/raft-boltdb/v2" snapshot "github.com/hashicorp/raft-snapshot" + raftwal "github.com/hashicorp/raft-wal" + walmetrics "github.com/hashicorp/raft-wal/metrics" + "github.com/hashicorp/raft-wal/verifier" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault/cluster" - "github.com/hashicorp/vault/vault/seal" - "github.com/hashicorp/vault/version" - bolt "go.etcd.io/bbolt" + etcdbolt "go.etcd.io/bbolt" ) const ( @@ -47,16 +54,36 @@ const ( // EnvVaultRaftNonVoter is used to override the non_voter config option, telling Vault to join as a non-voter (i.e. read replica). EnvVaultRaftNonVoter = "VAULT_RAFT_RETRY_JOIN_AS_NON_VOTER" raftNonVoterConfigKey = "retry_join_as_non_voter" + + // EnvVaultRaftMaxBatchEntries is used to override the default maxBatchEntries + // limit. + EnvVaultRaftMaxBatchEntries = "VAULT_RAFT_MAX_BATCH_ENTRIES" + + // EnvVaultRaftMaxBatchSizeBytes is used to override the default maxBatchSize + // limit. + EnvVaultRaftMaxBatchSizeBytes = "VAULT_RAFT_MAX_BATCH_SIZE_BYTES" + + // defaultMaxBatchEntries is the default maxBatchEntries limit. This was + // derived from performance testing. It is effectively high enough never to be + // a real limit for realistic Vault operation sizes; the size limit below + // provides the practical bound, since the amount of data stored is more relevant + // than the specific number of operations. + defaultMaxBatchEntries = 4096 + + // defaultMaxBatchSize is the default maxBatchSize limit. This was derived + // from performance testing. + defaultMaxBatchSize = 128 * 1024 ) var getMmapFlags = func(string) int { return 0 } // Verify RaftBackend satisfies the correct interfaces var ( - _ physical.Backend = (*RaftBackend)(nil) - _ physical.Transactional = (*RaftBackend)(nil) - _ physical.HABackend = (*RaftBackend)(nil) - _ physical.Lock = (*RaftLock)(nil) + _ physical.Backend = (*RaftBackend)(nil) + _ physical.Transactional = (*RaftBackend)(nil) + _ physical.TransactionalLimits = (*RaftBackend)(nil) + _ physical.HABackend = (*RaftBackend)(nil) + _ physical.Lock = (*RaftLock)(nil) ) var ( @@ -64,12 +91,15 @@ var ( // This is used to reduce disk I/O for the recently committed entries. 
raftLogCacheSize = 512 - raftState = "raft/" - peersFileName = "peers.json" - - restoreOpDelayDuration = 5 * time.Second + raftState = "raft/" + raftWalDir = "wal/" + peersFileName = "peers.json" + restoreOpDelayDuration = 5 * time.Second + defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize) + defaultRaftLogVerificationInterval = 60 * time.Second + minimumRaftLogVerificationInterval = 10 * time.Second - defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize) + GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in raft backend") ) // RaftBackend implements the backend interfaces and uses the raft protocol to @@ -117,6 +147,18 @@ type RaftBackend struct { // startup. bootstrapConfig *raft.Configuration + // closers is a list of managed resources (such as the stores above or wrapper + // layers around them) that should have Close called on them when the backend + // is closed. We need to take care that each distinct object is closed only + // once, which might involve knowing how wrappers around stores work. For + // example, the raft-wal verifier wraps a LogStore and is an io.Closer, but it + // also closes the underlying LogStore, so if we add it here we shouldn't also + // add the actual LogStore or StableStore if it's the same underlying instance. + // We could use a map[io.Closer]bool to prevent double registrations, but that + // doesn't solve the problem of "knowing" whether or not calling Close on some + // wrapper also calls Close on its underlying store. + closers []io.Closer + // dataDir is the location on the local filesystem that raft and FSM data // will be stored. dataDir string @@ -136,6 +178,17 @@ type RaftBackend struct { // performance. maxEntrySize uint64 + // maxBatchEntries is the number of operation entries in each batch. It is set + // by default to a value we've tested to work well but may be overridden by + // the environment variable VAULT_RAFT_MAX_BATCH_ENTRIES. + maxBatchEntries int + + // maxBatchSize is the maximum combined key and value size of operation + // entries in each batch. It is set by default to a value we've tested to work + // well but may be overridden by the environment variable + // VAULT_RAFT_MAX_BATCH_SIZE_BYTES. + maxBatchSize int + // autopilot is the instance of raft-autopilot library implementation of the // autopilot features. This will be instantiated in both leader and followers. // However, only active node will have a "running" autopilot. @@ -181,6 +234,12 @@ type RaftBackend struct { nonVoter bool effectiveSDKVersion string + failGetInTxn *uint32 + + // raftLogVerifierEnabled and raftLogVerificationInterval control enabling the raft log verifier and how often + // it writes checkpoints. + raftLogVerifierEnabled bool + raftLogVerificationInterval time.Duration } // LeaderJoinInfo contains information required by a node to join itself as a @@ -213,7 +272,7 @@ type LeaderJoinInfo struct { // client authentication during TLS. LeaderClientKey string `json:"leader_client_key"` - // LeaderCACertFile is the path on disk to the the CA cert file of the + // LeaderCACertFile is the path on disk to the CA cert file of the // leader node. This should only be provided via Vault's configuration file. 
LeaderCACertFile string `json:"leader_ca_cert_file"` @@ -238,6 +297,25 @@ type LeaderJoinInfo struct { TLSConfig *tls.Config `json:"-"` } +type RaftBackendConfig struct { + Path string + NodeId string + ApplyDelay time.Duration + RaftWal bool + RaftLogVerifierEnabled bool + RaftLogVerificationInterval time.Duration + SnapshotDelay time.Duration + MaxEntrySize uint64 + MaxBatchEntries int + MaxBatchSize int + AutopilotReconcileInterval time.Duration + AutopilotUpdateInterval time.Duration + AutopilotUpgradeVersion string + AutopilotRedundancyZone string + RaftNonVoter bool + RetryJoin string +} + // JoinConfig returns a list of information about possible leader nodes that // this node can join as a follower func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) { @@ -306,215 +384,212 @@ func EnsurePath(path string, dir bool) error { return os.MkdirAll(path, 0o700) } -// NewRaftBackend constructs a RaftBackend using the given directory -func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { - path := os.Getenv(EnvVaultRaftPath) - if path == "" { - pathFromConfig, ok := conf["path"] - if !ok { - return nil, fmt.Errorf("'path' must be set") - } - path = pathFromConfig +func NewClusterAddrBridge() *ClusterAddrBridge { + return &ClusterAddrBridge{ + clusterAddressByNodeID: make(map[string]string), } +} - var localID string - { - // Determine the local node ID from the environment. - if raftNodeID := os.Getenv(EnvVaultRaftNodeID); raftNodeID != "" { - localID = raftNodeID - } +type ClusterAddrBridge struct { + l sync.RWMutex + clusterAddressByNodeID map[string]string +} - // If not set in the environment check the configuration file. - if len(localID) == 0 { - localID = conf["node_id"] - } +func (c *ClusterAddrBridge) UpdateClusterAddr(nodeId string, clusterAddr string) { + c.l.Lock() + defer c.l.Unlock() + cu, _ := url.Parse(clusterAddr) + c.clusterAddressByNodeID[nodeId] = cu.Host +} - // If not set in the config check the "node-id" file. - if len(localID) == 0 { - localIDRaw, err := ioutil.ReadFile(filepath.Join(path, "node-id")) - switch { - case err == nil: - if len(localIDRaw) > 0 { - localID = string(localIDRaw) - } - case os.IsNotExist(err): - default: - return nil, err - } +func (c *ClusterAddrBridge) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) { + c.l.RLock() + defer c.l.RUnlock() + if addr, ok := c.clusterAddressByNodeID[string(id)]; ok { + return raft.ServerAddress(addr), nil + } + return "", fmt.Errorf("could not find cluster addr for id=%s", id) +} + +func batchLimitsFromEnv(logger log.Logger) (int, int) { + maxBatchEntries := defaultMaxBatchEntries + if envVal := os.Getenv(EnvVaultRaftMaxBatchEntries); envVal != "" { + if i, err := strconv.Atoi(envVal); err == nil && i > 0 { + maxBatchEntries = i + } else { + logger.Warn("failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES as an integer > 0. Using default value.", + "env_val", envVal, "default_used", maxBatchEntries) } + } - // If all of the above fails generate a UUID and persist it to the - // "node-id" file. - if len(localID) == 0 { - id, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } + maxBatchSize := defaultMaxBatchSize + if envVal := os.Getenv(EnvVaultRaftMaxBatchSizeBytes); envVal != "" { + if i, err := strconv.Atoi(envVal); err == nil && i > 0 { + maxBatchSize = i + } else { + logger.Warn("failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES as an integer > 0. 
Using default value.", + "env_val", envVal, "default_used", maxBatchSize) + } + } - if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0o600); err != nil { - return nil, err - } + return maxBatchEntries, maxBatchSize +} - localID = id - } +// NewRaftBackend constructs a RaftBackend using the given directory +func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // parse the incoming map into a proper config struct + backendConfig, err := parseRaftBackendConfig(conf, logger) + if err != nil { + return nil, fmt.Errorf("error parsing config: %w", err) } // Create the FSM. - fsm, err := NewFSM(path, localID, logger.Named("fsm")) + fsm, err := NewFSM(backendConfig.Path, backendConfig.NodeId, logger.Named("fsm")) if err != nil { return nil, fmt.Errorf("failed to create fsm: %v", err) } - if delayRaw, ok := conf["apply_delay"]; ok { - delay, err := time.ParseDuration(delayRaw) - if err != nil { - return nil, fmt.Errorf("apply_delay does not parse as a duration: %w", err) - } + if backendConfig.ApplyDelay > 0 { fsm.applyCallback = func() { - time.Sleep(delay) + time.Sleep(backendConfig.ApplyDelay) } } // Build an all in-memory setup for dev mode, otherwise prepare a full // disk-based setup. - var log raft.LogStore - var stable raft.StableStore - var snap raft.SnapshotStore + var logStore raft.LogStore + var stableStore raft.StableStore + var snapStore raft.SnapshotStore + var closers []io.Closer var devMode bool if devMode { store := raft.NewInmemStore() - stable = store - log = store - snap = raft.NewInmemSnapshotStore() + stableStore = store + logStore = store + snapStore = raft.NewInmemSnapshotStore() } else { // Create the base raft path. - path := filepath.Join(path, raftState) - if err := EnsurePath(path, true); err != nil { + raftBasePath := filepath.Join(backendConfig.Path, raftState) + if err := EnsurePath(raftBasePath, true); err != nil { return nil, err } + dbPath := filepath.Join(raftBasePath, "raft.db") - // Create the backend raft store for logs and stable storage. - dbPath := filepath.Join(path, "raft.db") - opts := boltOptions(dbPath) - raftOptions := raftboltdb.Options{ - Path: dbPath, - BoltOptions: opts, - } - store, err := raftboltdb.New(raftOptions) + // If the existing raft db exists from a previous use of BoltDB, warn about this and continue to use BoltDB + raftDbExists, err := fileExists(dbPath) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to check if raft.db already exists: %w", err) } - stable = store - - // Wrap the store in a LogCache to improve performance. - cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) - if err != nil { - return nil, err + if backendConfig.RaftWal && raftDbExists { + logger.Warn("raft is configured to use raft-wal for storage but existing raft.db detected. raft-wal config will be ignored.") + backendConfig.RaftWal = false } - log = cacheStore - // Create the snapshot store. 
- snapshots, err := NewBoltSnapshotStore(path, logger.Named("snapshot"), fsm) - if err != nil { - return nil, err - } - snap = snapshots - } + if backendConfig.RaftWal { + raftWalPath := filepath.Join(raftBasePath, raftWalDir) + if err := EnsurePath(raftWalPath, true); err != nil { + return nil, err + } - if delayRaw, ok := conf["snapshot_delay"]; ok { - delay, err := time.ParseDuration(delayRaw) - if err != nil { - return nil, fmt.Errorf("snapshot_delay does not parse as a duration: %w", err) - } - snap = newSnapshotStoreDelay(snap, delay, logger) - } + mc := walmetrics.NewGoMetricsCollector([]string{"raft", "wal"}, nil, nil) + wal, err := raftwal.Open(raftWalPath, raftwal.WithMetricsCollector(mc)) + if err != nil { + return nil, fmt.Errorf("failed to open write-ahead-log: %w", err) + } + // We need to Close the store but don't register it in closers yet because + // if we are going to wrap it with a verifier we need to close through + // that instead. - maxEntrySize := defaultMaxEntrySize - if maxEntrySizeCfg := conf["max_entry_size"]; len(maxEntrySizeCfg) != 0 { - i, err := strconv.Atoi(maxEntrySizeCfg) - if err != nil { - return nil, fmt.Errorf("failed to parse 'max_entry_size': %w", err) - } + stableStore = wal + logStore = wal + } else { + // use the traditional BoltDB setup + opts := etcdboltOptions(dbPath) + raftOptions := raftboltdb.Options{ + Path: dbPath, + BoltOptions: opts, + MsgpackUseNewTimeFormat: true, + } - maxEntrySize = uint64(i) - } + store, err := raftboltdb.New(raftOptions) + if err != nil { + return nil, err + } + // We need to Close the store but don't register it in closers yet because + // if we are going to wrap it with a verifier we need to close through + // that instead. - var reconcileInterval time.Duration - if interval := conf["autopilot_reconcile_interval"]; interval != "" { - interval, err := time.ParseDuration(interval) - if err != nil { - return nil, fmt.Errorf("autopilot_reconcile_interval does not parse as a duration: %w", err) + stableStore = store + logStore = store } - reconcileInterval = interval - } - var updateInterval time.Duration - if interval := conf["autopilot_update_interval"]; interval != "" { - interval, err := time.ParseDuration(interval) + // Create the snapshot store. + snapshots, err := NewBoltSnapshotStore(raftBasePath, logger.Named("snapshot"), fsm) if err != nil { - return nil, fmt.Errorf("autopilot_update_interval does not parse as a duration: %w", err) + return nil, err } - updateInterval = interval + snapStore = snapshots } - effectiveReconcileInterval := autopilot.DefaultReconcileInterval - effectiveUpdateInterval := autopilot.DefaultUpdateInterval - - if reconcileInterval != 0 { - effectiveReconcileInterval = reconcileInterval - } - if updateInterval != 0 { - effectiveUpdateInterval = updateInterval + // Hook up the verifier if it's enabled + if backendConfig.RaftLogVerifierEnabled { + mc := walmetrics.NewGoMetricsCollector([]string{"raft", "logstore", "verifier"}, nil, nil) + reportFn := makeLogVerifyReportFn(logger.Named("raft.logstore.verifier")) + v := verifier.NewLogStore(logStore, isLogVerifyCheckpoint, reportFn, mc) + logStore = v } - if effectiveReconcileInterval < effectiveUpdateInterval { - return nil, fmt.Errorf("autopilot_reconcile_interval (%v) should be larger than autopilot_update_interval (%v)", effectiveReconcileInterval, effectiveUpdateInterval) + // Register the logStore as a closer whether or not it's wrapped in a verifier + // (which is a closer). 
We do this before the LogCache since that is _not_ an + // io.Closer. + if closer, ok := logStore.(io.Closer); ok { + closers = append(closers, closer) } + // Note that we DON'T register the stableStore as a closer because right now + // we always use the same underlying object as the logStore and we don't want + // to call close on it twice. If we ever support different stable store and + // log store then this logic will get even more complex! We don't register + // snapStore because none of our snapshot stores are io.Closers. - var upgradeVersion string - if uv, ok := conf["autopilot_upgrade_version"]; ok && uv != "" { - upgradeVersion = uv - _, err := goversion.NewVersion(upgradeVersion) - if err != nil { - return nil, fmt.Errorf("autopilot_upgrade_version does not parse as a semantic version: %w", err) - } - } + // Close the FSM + closers = append(closers, fsm) - var nonVoter bool - if v := os.Getenv(EnvVaultRaftNonVoter); v != "" { - // Consistent with handling of other raft boolean env vars - // VAULT_RAFT_AUTOPILOT_DISABLE and VAULT_RAFT_FREELIST_SYNC - nonVoter = true - } else if v, ok := conf[raftNonVoterConfigKey]; ok { - nonVoter, err = strconv.ParseBool(v) - if err != nil { - return nil, fmt.Errorf("failed to parse %s config value %q as a boolean: %w", raftNonVoterConfigKey, v, err) - } + // Wrap the store in a LogCache to improve performance. + cacheStore, err := raft.NewLogCache(raftLogCacheSize, logStore) + if err != nil { + return nil, err } + logStore = cacheStore - if nonVoter && conf["retry_join"] == "" { - return nil, fmt.Errorf("setting %s to true is only valid if at least one retry_join stanza is specified", raftNonVoterConfigKey) + if backendConfig.SnapshotDelay > 0 { + snapStore = newSnapshotStoreDelay(snapStore, backendConfig.SnapshotDelay, logger) } return &RaftBackend{ - logger: logger, - fsm: fsm, - raftInitCh: make(chan struct{}), - conf: conf, - logStore: log, - stableStore: stable, - snapStore: snap, - dataDir: path, - localID: localID, - permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), - maxEntrySize: maxEntrySize, - followerHeartbeatTicker: time.NewTicker(time.Second), - autopilotReconcileInterval: reconcileInterval, - autopilotUpdateInterval: updateInterval, - redundancyZone: conf["autopilot_redundancy_zone"], - nonVoter: nonVoter, - upgradeVersion: upgradeVersion, + logger: logger, + fsm: fsm, + raftInitCh: make(chan struct{}), + conf: conf, + logStore: logStore, + stableStore: stableStore, + snapStore: snapStore, + closers: closers, + dataDir: backendConfig.Path, + localID: backendConfig.NodeId, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + maxEntrySize: backendConfig.MaxEntrySize, + maxBatchEntries: backendConfig.MaxBatchEntries, + maxBatchSize: backendConfig.MaxBatchSize, + followerHeartbeatTicker: time.NewTicker(time.Second), + autopilotReconcileInterval: backendConfig.AutopilotReconcileInterval, + autopilotUpdateInterval: backendConfig.AutopilotUpdateInterval, + redundancyZone: backendConfig.AutopilotRedundancyZone, + nonVoter: backendConfig.RaftNonVoter, + upgradeVersion: backendConfig.AutopilotUpgradeVersion, + failGetInTxn: new(uint32), + raftLogVerifierEnabled: backendConfig.RaftLogVerifierEnabled, + raftLogVerificationInterval: backendConfig.RaftLogVerificationInterval, }, nil } @@ -555,15 +630,20 @@ func (b *RaftBackend) Close() error { b.l.Lock() defer b.l.Unlock() - if err := b.fsm.Close(); err != nil { - return err + for _, cl := range b.closers { + if err := cl.Close(); err != nil { + 
return err + } } + return nil +} - if err := b.stableStore.(*raftboltdb.BoltStore).Close(); err != nil { - return err +func (b *RaftBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 } - - return nil + atomic.StoreUint32(b.failGetInTxn, val) } func (b *RaftBackend) SetEffectiveSDKVersion(sdkVersion string) { @@ -586,7 +666,10 @@ func (b *RaftBackend) NonVoter() bool { return b.nonVoter } -func (b *RaftBackend) EffectiveVersion() string { +// UpgradeVersion returns the string that should be used by autopilot during automated upgrades. We return the +// specified upgradeVersion if it's present. If it's not, we fall back to effectiveSDKVersion, which is +// Vault's binary version (though that can be overridden for tests). +func (b *RaftBackend) UpgradeVersion() string { b.l.RLock() defer b.l.RUnlock() @@ -594,7 +677,21 @@ return b.upgradeVersion } - return version.GetVersion().Version + return b.effectiveSDKVersion +} + +func (b *RaftBackend) verificationInterval() time.Duration { + b.l.RLock() + defer b.l.RUnlock() + + return b.raftLogVerificationInterval +} + +func (b *RaftBackend) verifierEnabled() bool { + b.l.RLock() + defer b.l.RUnlock() + + return b.raftLogVerifierEnabled } // DisableUpgradeMigration returns the state of the DisableUpgradeMigration config flag and whether it was set or not @@ -609,13 +706,132 @@ func (b *RaftBackend) DisableUpgradeMigration() (bool, bool) { return b.autopilotConfig.DisableUpgradeMigration, true } +// StartRaftWalVerifier runs a raft log store verifier in the background, if configured to do so. +// This periodically writes out special raft logs to verify that the log store is not corrupting data. +// This is only safe to run on the raft leader. +func (b *RaftBackend) StartRaftWalVerifier(ctx context.Context) { + if !b.verifierEnabled() { + return + } + + go func() { + ticker := time.NewTicker(b.verificationInterval()) + defer ticker.Stop() + + logger := b.logger.Named("raft-wal-verifier") + + for { + select { + case <-ticker.C: + err := b.applyVerifierCheckpoint() + if err != nil { + logger.Error("error applying verification checkpoint", "error", err) + } + logger.Debug("sent verification checkpoint") + case <-ctx.Done(): + return + } + } + }() +} + +func (b *RaftBackend) applyVerifierCheckpoint() error { + data := make([]byte, 1) + data[0] = byte(verifierCheckpointOp) + + b.permitPool.Acquire() + b.l.RLock() + + var err error + applyFuture := b.raft.Apply(data, 0) + if e := applyFuture.Error(); e != nil { + err = e + } + + b.l.RUnlock() + b.permitPool.Release() + + return err +} + +// isLogVerifyCheckpoint is the verifier.IsCheckpointFn that reports whether a raft +// log entry is a verifier checkpoint. 
+func isLogVerifyCheckpoint(l *raft.Log) (bool, error) { + return isRaftLogVerifyCheckpoint(l), nil +} + +func makeLogVerifyReportFn(logger log.Logger) verifier.ReportFn { + return func(r verifier.VerificationReport) { + if r.SkippedRange != nil { + logger.Warn("verification skipped range, consider decreasing validation interval if this is frequent", + "rangeStart", int64(r.SkippedRange.Start), + "rangeEnd", int64(r.SkippedRange.End), + ) + } + + l2 := logger.With( + "rangeStart", int64(r.Range.Start), + "rangeEnd", int64(r.Range.End), + "leaderChecksum", fmt.Sprintf("%08x", r.ExpectedSum), + "elapsed", r.Elapsed, + ) + + if r.Err == nil { + l2.Info("verification checksum OK", + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + return + } + + if errors.Is(r.Err, verifier.ErrRangeMismatch) { + l2.Warn("verification checksum skipped as we don't have all logs in range") + return + } + + var csErr verifier.ErrChecksumMismatch + if errors.As(r.Err, &csErr) { + if r.WrittenSum > 0 && r.WrittenSum != r.ExpectedSum { + // The failure occurred before the follower wrote to the log so it + // must be corrupted in flight from the leader! + l2.Error("verification checksum FAILED: in-flight corruption", + "followerWriteChecksum", fmt.Sprintf("%08x", r.WrittenSum), + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + } else { + l2.Error("verification checksum FAILED: storage corruption", + "followerWriteChecksum", fmt.Sprintf("%08x", r.WrittenSum), + "readChecksum", fmt.Sprintf("%08x", r.ReadSum), + ) + } + return + } + + // Some other unknown error occurred + l2.Error(r.Err.Error()) + } +} + func (b *RaftBackend) CollectMetrics(sink *metricsutil.ClusterMetricSink) { + var stats map[string]string + var logStoreStats *etcdbolt.Stats + b.l.RLock() - logstoreStats := b.stableStore.(*raftboltdb.BoltStore).Stats() + if boltStore, ok := b.stableStore.(*raftboltdb.BoltStore); ok { + bss := boltStore.Stats() + logStoreStats = &bss + } + + if b.raft != nil { + stats = b.raft.Stats() + } + fsmStats := b.fsm.Stats() - stats := b.raft.Stats() b.l.RUnlock() - b.collectMetricsWithStats(logstoreStats, sink, "logstore") + + if logStoreStats != nil { + b.collectEtcdBoltMetricsWithStats(*logStoreStats, sink, "logstore") + } + b.collectMetricsWithStats(fsmStats, sink, "fsm") labels := []metrics.Label{ { @@ -623,10 +839,13 @@ func (b *RaftBackend) CollectMetrics(sink *metricsutil.ClusterMetricSink) { Value: b.localID, }, } - for _, key := range []string{"term", "commit_index", "applied_index", "fsm_pending"} { - n, err := strconv.ParseUint(stats[key], 10, 64) - if err == nil { - sink.SetGaugeWithLabels([]string{"raft_storage", "stats", key}, float32(n), labels) + + if stats != nil { + for _, key := range []string{"term", "commit_index", "applied_index", "fsm_pending"} { + n, err := strconv.ParseUint(stats[key], 10, 64) + if err == nil { + sink.SetGaugeWithLabels([]string{"raft_storage", "stats", key}, float32(n), labels) + } } } } @@ -640,18 +859,41 @@ func (b *RaftBackend) collectMetricsWithStats(stats bolt.Stats, sink *metricsuti sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "used_bytes"}, float32(stats.FreelistInuse), labels) sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "started_read_transactions"}, float32(stats.TxN), labels) sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "currently_open_read_transactions"}, float32(stats.OpenTxN), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, 
float32(txstats.PageCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.PageAlloc), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.CursorCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.NodeCount), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.NodeDeref), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.Rebalance), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.RebalanceTime.Milliseconds()), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.Split), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.Spill), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.SpillTime.Milliseconds()), labels) - sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.Write), labels) - sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.WriteTime.Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, float32(txstats.GetPageCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.GetPageAlloc()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.GetCursorCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.GetNodeCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.GetNodeDeref()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.GetRebalance()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.GetRebalanceTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.GetSplit()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.GetSpill()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.GetSpillTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.GetWrite()), labels) + sink.IncrCounterWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.GetWriteTime().Milliseconds()), labels) +} + +func (b *RaftBackend) collectEtcdBoltMetricsWithStats(stats etcdbolt.Stats, sink *metricsutil.ClusterMetricSink, database string) { + txstats := stats.TxStats + labels := []metricsutil.Label{{"database", database}} + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "free_pages"}, float32(stats.FreePageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "pending_pages"}, float32(stats.PendingPageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "allocated_bytes"}, float32(stats.FreeAlloc), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "used_bytes"}, float32(stats.FreelistInuse), 
labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "started_read_transactions"}, float32(stats.TxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "currently_open_read_transactions"}, float32(stats.OpenTxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, float32(txstats.GetPageCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.GetPageAlloc()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.GetCursorCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.GetNodeCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.GetNodeDeref()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.GetRebalance()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.GetRebalanceTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.GetSplit()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.GetSpill()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.GetSpillTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.GetWrite()), labels) + sink.IncrCounterWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.GetWriteTime().Milliseconds()), labels) } // RaftServer has information about a server in the Raft configuration @@ -798,7 +1040,7 @@ func (b *RaftBackend) applyConfigSettings(config *raft.Config) error { snapshotIntervalRaw, ok := b.conf["snapshot_interval"] if ok { var err error - snapshotInterval, err := time.ParseDuration(snapshotIntervalRaw) + snapshotInterval, err := parseutil.ParseDurationSecond(snapshotIntervalRaw) if err != nil { return err } @@ -927,11 +1169,12 @@ func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { return err } transConfig := &raft.NetworkTransportConfig{ - Stream: streamLayer, - MaxPool: 3, - Timeout: 10 * time.Second, - ServerAddressProvider: b.serverAddressProvider, - Logger: b.logger.Named("raft-net"), + Stream: streamLayer, + MaxPool: 3, + Timeout: 10 * time.Second, + ServerAddressProvider: b.serverAddressProvider, + Logger: b.logger.Named("raft-net"), + MsgpackUseNewTimeFormat: true, } transport := raft.NewNetworkTransportWithConfig(transConfig) @@ -1302,7 +1545,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e if b.raft == nil { return errors.New("raft storage is not initialized") } - b.logger.Trace("adding server to raft", "id", peerID) + b.logger.Trace("adding server to raft", "id", peerID, "addr", clusterAddr) future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0) return future.Error() } @@ -1311,7 +1554,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e return errors.New("raft storage autopilot is not initialized") } - b.logger.Trace("adding server to raft via autopilot", "id", peerID) + b.logger.Trace("adding server to raft via autopilot", "id", peerID, "addr", clusterAddr) return b.autopilot.AddServer(&autopilot.Server{ 
ID: raft.ServerID(peerID), Name: peerID, @@ -1353,17 +1596,17 @@ func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) { // SnapshotHTTP is a wrapper for Snapshot that sends the snapshot as an HTTP // response. -func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access *seal.Access) error { +func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, sealer snapshot.Sealer) error { out.Header().Add("Content-Disposition", "attachment") out.Header().Add("Content-Type", "application/gzip") - return b.Snapshot(out, access) + return b.Snapshot(out, sealer) } // Snapshot takes a raft snapshot, packages it into a archive file and writes it // to the provided writer. Seal access is used to encrypt the SHASUM file so we // can validate the snapshot was taken using the same root keys or not. -func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { +func (b *RaftBackend) Snapshot(out io.Writer, sealer snapshot.Sealer) error { b.l.RLock() defer b.l.RUnlock() @@ -1371,15 +1614,7 @@ func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { return errors.New("raft storage is sealed") } - // If we have access to the seal create a sealer object - var s snapshot.Sealer - if access != nil { - s = &sealer{ - access: access, - } - } - - return snapshot.Write(b.logger.Named("snapshot"), b.raft, s, out) + return snapshot.Write(b.logger.Named("snapshot"), b.raft, sealer, out) } // WriteSnapshotToTemp reads a snapshot archive off the provided reader, @@ -1387,7 +1622,7 @@ func (b *RaftBackend) Snapshot(out io.Writer, access *seal.Access) error { // access is used to decrypt the SHASUM file in the archive to ensure this // snapshot has the same root key as the running instance. If the provided // access is nil then it will skip that validation. -func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, sealer snapshot.Sealer) (*os.File, func(), raft.SnapshotMeta, error) { b.l.RLock() defer b.l.RUnlock() @@ -1396,15 +1631,7 @@ func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access *seal.Access) return nil, nil, metadata, errors.New("raft storage is sealed") } - // If we have access to the seal create a sealer object - var s snapshot.Sealer - if access != nil { - s = &sealer{ - access: access, - } - } - - snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, s) + snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, sealer) return snap, cleanup, metadata, err } @@ -1563,6 +1790,13 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry return err } + failGetInTxn := atomic.LoadUint32(b.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } + txnMap := make(map[string]*physical.TxnEntry) command := &LogData{ @@ -1611,6 +1845,10 @@ func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry return err } +func (b *RaftBackend) TransactionLimits() (int, int) { + return b.maxBatchEntries, b.maxBatchSize +} + // applyLog will take a given log command and apply it to the raft log. applyLog // doesn't return until the log has been applied to a quorum of servers and is // persisted to the local FSM. Caller should hold the backend's read lock. 
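
TransactionLimits above simply exposes the two batch limits. How a caller might combine them is sketched below; this is a hypothetical consumer-side helper, assuming only the `physical.TxnEntry` type from the SDK. Vault's real batch splitting lives in its storage layer and is not shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/physical"
)

// splitBatches is an invented helper (not a Vault API): it greedily packs
// transaction entries into batches that respect both the entry-count and
// byte-size limits returned by TransactionLimits.
func splitBatches(txns []*physical.TxnEntry, maxEntries, maxBytes int) [][]*physical.TxnEntry {
	var batches [][]*physical.TxnEntry
	var cur []*physical.TxnEntry
	curBytes := 0

	for _, t := range txns {
		size := len(t.Entry.Key) + len(t.Entry.Value)
		// Start a new batch when adding this entry would exceed either limit.
		if len(cur) > 0 && (len(cur)+1 > maxEntries || curBytes+size > maxBytes) {
			batches = append(batches, cur)
			cur, curBytes = nil, 0
		}
		cur = append(cur, t)
		curBytes += size
	}
	if len(cur) > 0 {
		batches = append(batches, cur)
	}
	return batches
}

func main() {
	txns := []*physical.TxnEntry{
		{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "a", Value: make([]byte, 100*1024)}},
		{Operation: physical.PutOperation, Entry: &physical.Entry{Key: "b", Value: make([]byte, 100*1024)}},
	}

	// 4096 entries and 128 KiB are the defaults named in this diff.
	fmt.Println(len(splitBatches(txns, 4096, 128*1024))) // 2: the byte budget splits them
}
```
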
@@ -1870,38 +2108,186 @@ func (l *RaftLock) Value() (bool, string, error) { return true, value, nil } -// sealer implements the snapshot.Sealer interface and is used in the snapshot -// process for encrypting/decrypting the SHASUM file in snapshot archives. -type sealer struct { - access *seal.Access +func fileExists(name string) (bool, error) { + _, err := os.Stat(name) + if err == nil { + // File exists! + return true, nil + } + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + // We hit some other error trying to stat the file which leaves us in an + // unknown state so we can't proceed. + return false, err } -// Seal encrypts the data with using the seal access object. -func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) { - if s.access == nil { - return nil, errors.New("no seal access available") +func parseRaftBackendConfig(conf map[string]string, logger log.Logger) (*RaftBackendConfig, error) { + c := &RaftBackendConfig{} + + c.Path = conf["path"] + envPath := os.Getenv(EnvVaultRaftPath) + if envPath != "" { + c.Path = envPath } - eblob, err := s.access.Encrypt(ctx, pt, nil) - if err != nil { - return nil, err + + if c.Path == "" { + return nil, fmt.Errorf("'path' must be set") } - return proto.Marshal(eblob) -} + c.NodeId = conf["node_id"] + envNodeId := os.Getenv(EnvVaultRaftNodeID) + if envNodeId != "" { + c.NodeId = envNodeId + } -// Open decrypts the data using the seal access object. -func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) { - if s.access == nil { - return nil, errors.New("no seal access available") + if c.NodeId == "" { + localIDRaw, err := os.ReadFile(filepath.Join(c.Path, "node-id")) + if err == nil && len(localIDRaw) > 0 { + c.NodeId = string(localIDRaw) + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } } - var eblob wrapping.BlobInfo - err := proto.Unmarshal(ct, &eblob) - if err != nil { - return nil, err + if c.NodeId == "" { + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + if err = os.WriteFile(filepath.Join(c.Path, "node-id"), []byte(id), 0o600); err != nil { + return nil, err + } + + c.NodeId = id + } + + if delayRaw, ok := conf["apply_delay"]; ok { + delay, err := parseutil.ParseDurationSecond(delayRaw) + if err != nil { + return nil, fmt.Errorf("apply_delay does not parse as a duration: %w", err) + } + + c.ApplyDelay = delay + } + + if walRaw, ok := conf["raft_wal"]; ok { + useRaftWal, err := strconv.ParseBool(walRaw) + if err != nil { + return nil, fmt.Errorf("raft_wal does not parse as a boolean: %w", err) + } + + c.RaftWal = useRaftWal + } + + if rlveRaw, ok := conf["raft_log_verifier_enabled"]; ok { + rlve, err := strconv.ParseBool(rlveRaw) + if err != nil { + return nil, fmt.Errorf("raft_log_verifier_enabled does not parse as a boolean: %w", err) + } + c.RaftLogVerifierEnabled = rlve + + c.RaftLogVerificationInterval = defaultRaftLogVerificationInterval + if rlviRaw, ok := conf["raft_log_verification_interval"]; ok { + rlvi, err := parseutil.ParseDurationSecond(rlviRaw) + if err != nil { + return nil, fmt.Errorf("raft_log_verification_interval does not parse as a duration: %w", err) + } + + // Enforce a reasonable minimum on the interval, so e.g. 
people don't use 0s or 1s + if rlvi >= minimumRaftLogVerificationInterval { + c.RaftLogVerificationInterval = rlvi + } else { + logger.Warn("raft_log_verification_interval is less than the minimum allowed, using default instead", + "given", rlveRaw, + "minimum", minimumRaftLogVerificationInterval, + "default", defaultRaftLogVerificationInterval) + } + } + } + + if delayRaw, ok := conf["snapshot_delay"]; ok { + delay, err := parseutil.ParseDurationSecond(delayRaw) + if err != nil { + return nil, fmt.Errorf("snapshot_delay does not parse as a duration: %w", err) + } + c.SnapshotDelay = delay + } + + c.MaxEntrySize = defaultMaxEntrySize + if maxEntrySizeCfg := conf["max_entry_size"]; len(maxEntrySizeCfg) != 0 { + i, err := strconv.Atoi(maxEntrySizeCfg) + if err != nil { + return nil, fmt.Errorf("failed to parse 'max_entry_size': %w", err) + } + + c.MaxEntrySize = uint64(i) } - return s.access.Decrypt(ctx, &eblob, nil) + c.MaxBatchEntries, c.MaxBatchSize = batchLimitsFromEnv(logger) + + if interval := conf["autopilot_reconcile_interval"]; interval != "" { + interval, err := parseutil.ParseDurationSecond(interval) + if err != nil { + return nil, fmt.Errorf("autopilot_reconcile_interval does not parse as a duration: %w", err) + } + c.AutopilotReconcileInterval = interval + } + + if interval := conf["autopilot_update_interval"]; interval != "" { + interval, err := parseutil.ParseDurationSecond(interval) + if err != nil { + return nil, fmt.Errorf("autopilot_update_interval does not parse as a duration: %w", err) + } + c.AutopilotUpdateInterval = interval + } + + effectiveReconcileInterval := autopilot.DefaultReconcileInterval + effectiveUpdateInterval := autopilot.DefaultUpdateInterval + + if c.AutopilotReconcileInterval != 0 { + effectiveReconcileInterval = c.AutopilotReconcileInterval + } + if c.AutopilotUpdateInterval != 0 { + effectiveUpdateInterval = c.AutopilotUpdateInterval + } + + if effectiveReconcileInterval < effectiveUpdateInterval { + return nil, fmt.Errorf("autopilot_reconcile_interval (%v) should be larger than autopilot_update_interval (%v)", effectiveReconcileInterval, effectiveUpdateInterval) + } + + if uv, ok := conf["autopilot_upgrade_version"]; ok && uv != "" { + _, err := goversion.NewVersion(uv) + if err != nil { + return nil, fmt.Errorf("autopilot_upgrade_version does not parse as a semantic version: %w", err) + } + + c.AutopilotUpgradeVersion = uv + } + + c.RaftNonVoter = false + if v := os.Getenv(EnvVaultRaftNonVoter); v != "" { + // Consistent with handling of other raft boolean env vars + // VAULT_RAFT_AUTOPILOT_DISABLE and VAULT_RAFT_FREELIST_SYNC + c.RaftNonVoter = true + } else if v, ok := conf[raftNonVoterConfigKey]; ok { + nonVoter, err := strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("failed to parse %s config value %q as a boolean: %w", raftNonVoterConfigKey, v, err) + } + + c.RaftNonVoter = nonVoter + } + + if c.RaftNonVoter && conf["retry_join"] == "" { + return nil, fmt.Errorf("setting %s to true is only valid if at least one retry_join stanza is specified", raftNonVoterConfigKey) + } + + c.AutopilotRedundancyZone = conf["autopilot_redundancy_zone"] + + return c, nil } // boltOptions returns a bolt.Options struct, suitable for passing to @@ -1941,3 +2327,62 @@ func boltOptions(path string) *bolt.Options { return o } + +func etcdboltOptions(path string) *etcdbolt.Options { + o := &etcdbolt.Options{ + Timeout: 1 * time.Second, + FreelistType: etcdbolt.FreelistMapType, + NoFreelistSync: true, + MmapFlags: getMmapFlags(path), + } + + if 
os.Getenv("VAULT_RAFT_FREELIST_TYPE") == "array" { + o.FreelistType = etcdbolt.FreelistArrayType + } + + if os.Getenv("VAULT_RAFT_FREELIST_SYNC") != "" { + o.NoFreelistSync = false + } + + // By default, we want to set InitialMmapSize to 100GB, but only on 64bit platforms. + // Otherwise, we set it to whatever the value of VAULT_RAFT_INITIAL_MMAP_SIZE + // is, assuming it can be parsed as an int. Bolt itself sets this to 0 by default, + // so if users are wanting to turn this off, they can also set it to 0. Setting it + // to a negative value is the same as not setting it at all. + if os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE") == "" { + o.InitialMmapSize = initialMmapSize + } else { + imms, err := strconv.Atoi(os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE")) + + // If there's an error here, it means they passed something that's not convertible to + // a number. Rather than fail startup, just ignore it. + if err == nil && imms > 0 { + o.InitialMmapSize = imms + } + } + + return o +} + +func isRaftLogVerifyCheckpoint(l *raft.Log) bool { + if !bytes.Equal(l.Data, []byte{byte(verifierCheckpointOp)}) { + return false + } + + // Single byte log with that byte value can only be a checkpoint or + // the last byte of a chunked message. If it's chunked it will have + // chunking metadata. + if len(l.Extensions) == 0 { + // No metadata, must be a checkpoint on the leader with no + // verifier metadata yet. + return true + } + + if bytes.HasPrefix(l.Extensions, logVerifierMagicBytes[:]) { + // Has verifier metadata so must be a replicated checkpoint on a follower + return true + } + + // Must be the last chunk of a chunked object that has chunking meta + return false +} diff --git a/physical/raft/raft_autopilot.go b/physical/raft/raft_autopilot.go index 5596bbf4255f..1acd85cefc91 100644 --- a/physical/raft/raft_autopilot.go +++ b/physical/raft/raft_autopilot.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( @@ -212,13 +215,15 @@ func NewFollowerStates() *FollowerStates { } } -// Update the peer information in the follower states. Note that this function runs on the active node. -func (s *FollowerStates) Update(req *EchoRequestUpdate) { +// Update the peer information in the follower states. Note that this function +// runs on the active node. Returns true if a new entry was added, as opposed +// to modifying one already present. +func (s *FollowerStates) Update(req *EchoRequestUpdate) bool { s.l.Lock() defer s.l.Unlock() - state, ok := s.followers[req.NodeID] - if !ok { + state, present := s.followers[req.NodeID] + if !present { state = &FollowerState{ IsDead: atomic.NewBool(false), } @@ -233,6 +238,8 @@ func (s *FollowerStates) Update(req *EchoRequestUpdate) { state.Version = req.SDKVersion state.UpgradeVersion = req.UpgradeVersion state.RedundancyZone = req.RedundancyZone + + return !present } // Clear wipes all the information regarding peers in the follower states. 
@@ -289,7 +296,7 @@ type Delegate struct { emptyVersionLogs map[raft.ServerID]struct{} } -func newDelegate(b *RaftBackend) *Delegate { +func NewDelegate(b *RaftBackend) *Delegate { return &Delegate{ RaftBackend: b, inflightRemovals: make(map[raft.ServerID]bool), @@ -382,6 +389,7 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { return nil } + apServerStates := d.autopilot.GetState().Servers servers := future.Configuration().Servers serverIDs := make([]string, 0, len(servers)) for _, server := range servers { @@ -400,7 +408,8 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { } // If version isn't found in the state, fake it using the version from the leader so that autopilot - // doesn't demote the node to a non-voter, just because of a missed heartbeat. + // doesn't demote the node to a non-voter, just because of a missed heartbeat. Note that this should + // be the SDK version, not the upgrade version. currentServerID := raft.ServerID(id) followerVersion := state.Version leaderVersion := d.effectiveSDKVersion @@ -425,6 +434,19 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { Ext: d.autopilotServerExt(state), } + // Since KnownServers is a delegate called by autopilot, check whether autopilot + // already has this server's state in the correct format and use it if so. If it + // doesn't (which should only happen when this server isn't a voter), fall back to + // what we've done elsewhere in code: check the desired suffrage and manually set + // NodeType based on whether it's a voter. If neither happens, NodeType is left + // unset, which autopilot effectively treats as a non-voter; a genuine voter + // should never end up in that state. + if apServerState, found := apServerStates[raft.ServerID(id)]; found && apServerState.Server.NodeType != "" { + server.NodeType = apServerState.Server.NodeType + } else if state.DesiredSuffrage == "voter" { + server.NodeType = autopilot.NodeVoter + } + switch state.IsDead.Load() { case true: d.logger.Debug("informing autopilot that the node left", "id", id) @@ -442,8 +464,9 @@ func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { Name: d.localID, RaftVersion: raft.ProtocolVersionMax, NodeStatus: autopilot.NodeAlive, + NodeType: autopilot.NodeVoter, // The leader must be a voter Meta: d.meta(&FollowerState{ - UpgradeVersion: d.EffectiveVersion(), + UpgradeVersion: d.UpgradeVersion(), RedundancyZone: d.RedundancyZone(), }), Version: d.effectiveSDKVersion, @@ -551,6 +574,13 @@ func (b *RaftBackend) startFollowerHeartbeatTracker() { } for range tickerCh { b.l.RLock() + if b.raft == nil { + // We could be racing with teardown, which will stop the ticker + // but that doesn't guarantee that we won't reach this line with a nil + // b.raft. 
+ b.l.RUnlock() + return + } b.followerStates.l.RLock() myAppliedIndex := b.raft.AppliedIndex() for peerID, state := range b.followerStates.followers { @@ -677,7 +707,7 @@ func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) { str := string(raw) if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' { // quoted string - dur, err = time.ParseDuration(str[1 : len(str)-1]) + dur, err = parseutil.ParseDurationSecond(str[1 : len(str)-1]) if err != nil { return err } @@ -810,7 +840,7 @@ func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *Autopil if b.autopilotUpdateInterval != 0 { options = append(options, autopilot.WithUpdateInterval(b.autopilotUpdateInterval)) } - b.autopilot = autopilot.New(b.raft, newDelegate(b), options...) + b.autopilot = autopilot.New(b.raft, NewDelegate(b), options...) b.followerStates = followerStates b.followerHeartbeatTicker = time.NewTicker(1 * time.Second) diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go index 50171fd68c3d..3c4aa368b3ef 100644 --- a/physical/raft/raft_test.go +++ b/physical/raft/raft_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( @@ -8,7 +11,6 @@ import ( "encoding/hex" "fmt" "io" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -18,80 +20,62 @@ import ( "github.com/go-test/deep" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-uuid" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/physical" - bolt "go.etcd.io/bbolt" + "github.com/stretchr/testify/require" ) -func getRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } - t.Logf("raft dir: %s", raftDir) - - return getRaftWithDir(t, bootstrap, noStoreState, raftDir) -} - -func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir string) (*RaftBackend, string) { - id, err := uuid.GenerateUUID() - if err != nil { - t.Fatal(err) - } - - logger := hclog.New(&hclog.LoggerOptions{ - Name: fmt.Sprintf("raft-%s", id), - Level: hclog.Trace, - }) - logger.Info("raft dir", "dir", raftDir) +func testBothRaftBackends(t *testing.T, f func(raftWALValue string)) { + t.Helper() - conf := map[string]string{ - "path": raftDir, - "trailing_logs": "100", - "node_id": id, + testCases := []struct { + name string + useWAL string + }{ + { + name: "use wal", + useWAL: "true", + }, + { + name: "use boltdb", + useWAL: "false", + }, } - if noStoreState { - conf["doNotStoreLatestState"] = "" + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // we can't use t.Parallel() here because some raft tests manipulate package level variables + f(tc.useWAL) + }) } +} - backendRaw, err := NewRaftBackend(conf, logger) - if err != nil { - t.Fatal(err) +func testBothRaftBackendsBenchmark(b *testing.B, f func(raftWALValue string)) { + testCases := []struct { + name string + useWAL string + }{ + { + name: "use wal", + useWAL: "true", + }, + { + name: "use boltdb", + useWAL: "false", + }, } - backend := backendRaw.(*RaftBackend) - if bootstrap { - err = backend.Bootstrap([]Peer{ - { - ID: backend.NodeID(), - Address: backend.NodeID(), - }, + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + f(tc.useWAL) 
}) - if err != nil { - t.Fatal(err) - } - - err = backend.SetupCluster(context.Background(), SetupOpts{}) - if err != nil { - t.Fatal(err) - } - - for { - if backend.raft.AppliedIndex() >= 2 { - break - } - } - } - - backend.DisableAutopilot() - - return backend, raftDir } func connectPeers(nodes ...*RaftBackend) { @@ -207,7 +191,6 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { return nil }) - if err != nil { t.Fatal(err) } @@ -220,26 +203,221 @@ func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { } func TestRaft_Backend(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseBackend(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseBackend(t, b) + }) } -func TestRaft_ParseAutopilotUpgradeVersion(t *testing.T) { - raftDir, err := ioutil.TempDir("", "vault-raft-") +// TestRaft_SwitchFromBoltDBToRaftWal is testing that we don't use raft-wal, even if configured to do so, +// if there is an existing raft.db file on disk (meaning BoltDB was previously in use). +func TestRaft_SwitchFromBoltDBToRaftWal(t *testing.T) { + tmpDir := t.TempDir() + + // configured to use raft-wal + conf := map[string]string{ + "path": tmpDir, + "trailing_logs": "100", + "raft_wal": "true", + } + + // raftBaseDir will end up looking like $tmpDir/raft + raftBaseDir := filepath.Join(tmpDir, raftState) + err := os.MkdirAll(raftBaseDir, 0o777) + if err != nil { + t.Fatal(err) + } + + // create a bogus $tmpDir/raft/raft.db file + db, err := bolt.Open(filepath.Join(raftBaseDir, "raft.db"), 0o777, nil) + if err != nil { + t.Fatal(err) + } + err = db.Close() + if err != nil { + t.Fatal(err) + } + + _, err = NewRaftBackend(conf, hclog.NewNullLogger()) + if err != nil { + t.Fatal(err) + } + + // Check to see if $tmpDir/raft/raft-wal exists. It should not, because we only create that if raft-wal is in use. + // And since raft.db already existed, we should've skipped all the raft-wal setup code. + raftWalExists, err := fileExists(filepath.Join(raftBaseDir, raftWalDir)) if err != nil { t.Fatal(err) } - defer os.RemoveAll(raftDir) + if raftWalExists { + t.Fatal("expected raft-wal dir to not exist, but it does") + } +} + +// TestRaft_VerifierEnabled is not checking to ensure that the verifier works correctly - the verifier has +// its own unit tests for that. What we're checking for here is that we've plumbed everything through correctly, +// i.e. we can stand up a raft cluster with the verifier enabled, do a bunch of raft things, let the verifier +// do its thing, and nothing blows up. 
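+// As a minimal sketch of what "enabled" means here (mirroring the config keys exercised just below): +// +// conf := map[string]string{ +// "raft_wal": "true", +// "raft_log_verifier_enabled": "true", +// } +// b, _ := GetRaftWithConfig(t, true, true, conf)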
+func TestRaft_VerifierEnabled(t *testing.T) { + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + "raft_log_verifier_enabled": "true", + } + + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseBackend(t, b) + + err := b.applyVerifierCheckpoint() + if err != nil { + t.Fatal(err) + } + physical.ExerciseBackend(t, b) + }) +} + +// TestRaft_ParseRaftWalBackend ensures that the raft_wal config option parses correctly and returns an error if not +func TestRaft_ParseRaftWalBackend(t *testing.T) { + raftDir := t.TempDir() + conf := map[string]string{ + "path": raftDir, + "node_id": "abc123", + "raft_wal": "notabooleanlol", + } + + _, err := NewRaftBackend(conf, hclog.NewNullLogger()) + if err == nil { + t.Fatal("expected an error but got none") + } + + if !strings.Contains(err.Error(), "does not parse as a boolean") { + t.Fatal("expected an error about parsing config keys but got none") + } +} + +// TestRaft_ParseRaftWalVerifierEnabled checks to make sure we error correctly if raft_log_verifier_enabled is not a boolean +func TestRaft_ParseRaftWalVerifierEnabled(t *testing.T) { + raftDir := t.TempDir() + conf := map[string]string{ + "path": raftDir, + "node_id": "abc123", + "raft_wal": "true", + "raft_log_verifier_enabled": "notabooleanlol", + } + + _, err := NewRaftBackend(conf, hclog.NewNullLogger()) + if err == nil { + t.Fatal("expected an error but got none") + } + + if !strings.Contains(err.Error(), "does not parse as a boolean") { + t.Fatal("expected an error about parsing config keys but got none") + } +} + +// TestRaft_ParseRaftWalVerifierInterval checks to make sure we handle various intervals correctly and have a default +func TestRaft_ParseRaftWalVerifierInterval(t *testing.T) { + testCases := []struct { + name string + givenInterval string + expectedInterval string + shouldError bool + }{ + { + "zero", + "0s", + defaultRaftLogVerificationInterval.String(), + false, + }, + { + "one", + "1s", + defaultRaftLogVerificationInterval.String(), + false, + }, + { + "nothing", + "", + defaultRaftLogVerificationInterval.String(), + false, + }, + { + "default", + "60s", + defaultRaftLogVerificationInterval.String(), + false, + }, + { + "more than the default", + "75s", + "75s", + false, + }, + { + "obviously wrong", + "notadurationlol", + "", + true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + raftDir := t.TempDir() + conf := map[string]string{ + "path": raftDir, + "node_id": "abc123", + "raft_wal": "true", + "raft_log_verifier_enabled": "true", + "raft_log_verification_interval": tc.givenInterval, + } + + rbRaw, err := NewRaftBackend(conf, hclog.NewNullLogger()) + if tc.shouldError { + if err == nil { + t.Fatal("expected an error but got none") + } + + // return early, since we got the error we wanted + return + } + if !tc.shouldError && err != nil { + t.Fatal(err) + } + + rb := rbRaw.(*RaftBackend) + + parsedExpectedInterval, err := parseutil.ParseDurationSecond(tc.expectedInterval) + if err != nil { + t.Fatal(err) + } + + if parsedExpectedInterval != rb.verificationInterval() { + t.Fatal("expected intervals to match but they didn't") + } + }) + } +} + +// TestRaft_ParseAutopilotUpgradeVersion tests that autopilot_upgrade_version parses correctly and returns an error if not +func TestRaft_ParseAutopilotUpgradeVersion(t *testing.T) { + raftDir := t.TempDir() conf := map[string]string{ "path": raftDir, "node_id": "abc123", "autopilot_upgrade_version": "hahano", 
} - _, err = NewRaftBackend(conf, hclog.NewNullLogger()) + _, err := NewRaftBackend(conf, hclog.NewNullLogger()) if err == nil { t.Fatal("expected an error but got none") } @@ -278,12 +456,7 @@ func TestRaft_ParseNonVoter(t *testing.T) { if tc.envValue != nil { t.Setenv(EnvVaultRaftNonVoter, *tc.envValue) } - raftDir, err := ioutil.TempDir("", "vault-raft-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(raftDir) - + raftDir := t.TempDir() conf := map[string]string{ "path": raftDir, "node_id": "abc123", @@ -316,509 +489,706 @@ func TestRaft_ParseNonVoter(t *testing.T) { } func TestRaft_Backend_LargeKey(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() - key, err := base62.Random(bolt.MaxKeySize + 1) - if err != nil { - t.Fatal(err) - } - entry := &physical.Entry{Key: key, Value: []byte(key)} + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - err = b.Put(context.Background(), entry) - if err == nil { - t.Fatal("expected error for put entry") - } + b, _ := GetRaftWithConfig(t, true, true, conf) + key, err := base62.Random(bolt.MaxKeySize + 1) + if err != nil { + t.Fatal(err) + } + entry := &physical.Entry{Key: key, Value: []byte(key)} - if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) - } + err = b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected error for put entry") + } - out, err := b.Get(context.Background(), entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) + } + + out, err := b.Get(context.Background(), entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_Backend_LargeValue(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) - entry := &physical.Entry{Key: "foo", Value: value} + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - err := b.Put(context.Background(), entry) - if err == nil { - t.Fatal("expected error for put entry") - } + b, _ := GetRaftWithConfig(t, true, true, conf) + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) + entry := &physical.Entry{Key: "foo", Value: value} - if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + err := b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected error for put entry") + } - out, err := b.Get(context.Background(), entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) + } + + out, err := b.Get(context.Background(), entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to 
be nil after a failed put") + } + }) } // TestRaft_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the // raft backend will populate values for any transactions that are Get operations. func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - ctx := context.Background() - txns := make([]*physical.TxnEntry, 0) + b, _ := GetRaftWithConfig(t, true, true, conf) + ctx := context.Background() + txns := make([]*physical.TxnEntry, 0) - // Add some seed values to our FSM, and prepare our slice of transactions at the same time - for i := 0; i < 5; i++ { - key := fmt.Sprintf("foo/%d", i) - err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) - if err != nil { - t.Fatal(err) + // Add some seed values to our FSM, and prepare our slice of transactions at the same time + for i := 0; i < 5; i++ { + key := fmt.Sprintf("foo/%d", i) + err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) + if err != nil { + t.Fatal(err) + } + + txns = append(txns, &physical.TxnEntry{ + Operation: physical.GetOperation, + Entry: &physical.Entry{ + Key: key, + }, + }) } - txns = append(txns, &physical.TxnEntry{ - Operation: physical.GetOperation, - Entry: &physical.Entry{ - Key: key, - }, - }) - } + // Add some additional transactions, so we have a mix of operations + for i := 0; i < 10; i++ { + txnEntry := &physical.TxnEntry{ + Entry: &physical.Entry{ + Key: fmt.Sprintf("lol-%d", i), + }, + } - // Add some additional transactions, so we have a mix of operations - for i := 0; i < 10; i++ { - txnEntry := &physical.TxnEntry{ - Entry: &physical.Entry{ - Key: fmt.Sprintf("lol-%d", i), - }, - } + if i%2 == 0 { + txnEntry.Operation = physical.PutOperation + txnEntry.Entry.Value = []byte("lol") + } else { + txnEntry.Operation = physical.DeleteOperation + } - if i%2 == 0 { - txnEntry.Operation = physical.PutOperation - txnEntry.Entry.Value = []byte("lol") - } else { - txnEntry.Operation = physical.DeleteOperation + txns = append(txns, txnEntry) } - txns = append(txns, txnEntry) - } - - err := b.Transaction(ctx, txns) - if err != nil { - t.Fatal(err) - } + err := b.Transaction(ctx, txns) + if err != nil { + t.Fatal(err) + } - // Check that our Get operations were populated with their values - for i, txn := range txns { - if txn.Operation == physical.GetOperation { - val := []byte(fmt.Sprintf("value-%d", i)) - if !bytes.Equal(val, txn.Entry.Value) { - t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) + // Check that our Get operations were populated with their values + for i, txn := range txns { + if txn.Operation == physical.GetOperation { + val := []byte(fmt.Sprintf("value-%d", i)) + if !bytes.Equal(val, txn.Entry.Value) { + t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) + } } } - } + }) } func TestRaft_TransactionalBackend_LargeKey(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) + b, _ := GetRaftWithConfig(t, true, true, conf) + value 
:= make([]byte, defaultMaxEntrySize+1) + rand.Read(value) - key, err := base62.Random(bolt.MaxKeySize + 1) - if err != nil { - t.Fatal(err) - } - txns := []*physical.TxnEntry{ - { - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: key, - Value: []byte(key), + key, err := base62.Random(bolt.MaxKeySize + 1) + if err != nil { + t.Fatal(err) + } + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: key, + Value: []byte(key), + }, }, - }, - } + } - err = b.Transaction(context.Background(), txns) - if err == nil { - t.Fatal("expected error for transactions") - } + err = b.Transaction(context.Background(), txns) + if err == nil { + t.Fatal("expected error for transactions") + } - if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) + } - out, err := b.Get(context.Background(), txns[0].Entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + out, err := b.Get(context.Background(), txns[0].Entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_TransactionalBackend_LargeValue(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - value := make([]byte, defaultMaxEntrySize+1) - rand.Read(value) + b, _ := GetRaftWithConfig(t, true, true, conf) + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) - txns := []*physical.TxnEntry{ - { - Operation: physical.PutOperation, - Entry: &physical.Entry{ - Key: "foo", - Value: value, + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: "foo", + Value: value, + }, }, - }, - } + } - err := b.Transaction(context.Background(), txns) - if err == nil { - t.Fatal("expected error for transactions") - } + err := b.Transaction(context.Background(), txns) + if err == nil { + t.Fatal("expected error for transactions") + } - if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { - t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) - } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err) + } - out, err := b.Get(context.Background(), txns[0].Entry.Key) - if err != nil { - t.Fatalf("unexpected error after failed put: %v", err) - } - if out != nil { - t.Fatal("expected response entry to be nil after a failed put") - } + out, err := b.Get(context.Background(), txns[0].Entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } + }) } func TestRaft_Backend_ListPrefix(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseBackend_ListPrefix(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) +
physical.ExerciseBackend_ListPrefix(t, b) + }) } func TestRaft_TransactionalBackend(t *testing.T) { - b, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - physical.ExerciseTransactionalBackend(t, b) + b, _ := GetRaftWithConfig(t, true, true, conf) + physical.ExerciseTransactionalBackend(t, b) + }) } func TestRaft_HABackend(t *testing.T) { t.Skip() - raft, dir := getRaft(t, true, true) - defer os.RemoveAll(dir) - raft2, dir2 := getRaft(t, false, true) - defer os.RemoveAll(dir2) + raft1, _ := GetRaft(t, true, true) + raft2, _ := GetRaft(t, false, true) // Add raft2 to the cluster - addPeer(t, raft, raft2) - - physical.ExerciseHABackend(t, raft, raft2) + addPeer(t, raft1, raft2) + physical.ExerciseHABackend(t, raft1, raft2) } func TestRaft_Backend_ThreeNode(t *testing.T) { - raft1, dir := getRaft(t, true, true) - raft2, dir2 := getRaft(t, false, true) - raft3, dir3 := getRaft(t, false, true) - defer os.RemoveAll(dir) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add raft2 to the cluster - addPeer(t, raft1, raft2) + raft1, _ := GetRaftWithConfig(t, true, true, conf) + raft2, _ := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) - // Add raft3 to the cluster - addPeer(t, raft1, raft3) + // Add raft3 to the cluster + addPeer(t, raft1, raft3) - physical.ExerciseBackend(t, raft1) + physical.ExerciseBackend(t, raft1) - time.Sleep(10 * time.Second) - // Make sure all stores are the same - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft3.fsm) + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) + }) } func TestRaft_GetOfflineConfig(t *testing.T) { - // Create 3 raft nodes - raft1, dir1 := getRaft(t, true, true) - raft2, dir2 := getRaft(t, false, true) - raft3, dir3 := getRaft(t, false, true) - defer os.RemoveAll(dir1) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) - - // Add them all to the cluster - addPeer(t, raft1, raft2) - addPeer(t, raft1, raft3) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + config := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add some data into the FSM - physical.ExerciseBackend(t, raft1) + // Create 3 raft nodes + raft1, _ := GetRaftWithConfig(t, true, true, config) + raft2, _ := GetRaftWithConfig(t, false, true, config) + raft3, _ := GetRaftWithConfig(t, false, true, config) - time.Sleep(10 * time.Second) + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) - // Spin down the raft cluster and check that GetConfigurationOffline - // returns 3 voters - raft3.TeardownCluster(nil) - raft2.TeardownCluster(nil) - raft1.TeardownCluster(nil) + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) - conf, err := raft1.GetConfigurationOffline() - if err != nil { - t.Fatal(err) - } - if len(conf.Servers) != 3 { - t.Fatalf("three raft nodes existed but we only see %d", len(conf.Servers)) - } - for _, s := range conf.Servers { - if s.Voter != true { - t.Fatalf("one of the nodes is not a voter") + time.Sleep(10 * time.Second) + + // Spin down 
the raft cluster and check that GetConfigurationOffline + // returns 3 voters + err := raft3.TeardownCluster(nil) + if err != nil { + t.Fatal(err) } - } + err = raft2.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft1.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + + conf, err := raft1.GetConfigurationOffline() + if err != nil { + t.Fatal(err) + } + if len(conf.Servers) != 3 { + t.Fatalf("three raft nodes existed but we only see %d", len(conf.Servers)) + } + for _, s := range conf.Servers { + if s.Voter != true { + t.Fatalf("one of the nodes is not a voter") + } + } + }) } func TestRaft_Recovery(t *testing.T) { - // Create 4 raft nodes - raft1, dir1 := getRaft(t, true, true) - raft2, dir2 := getRaft(t, false, true) - raft3, dir3 := getRaft(t, false, true) - raft4, dir4 := getRaft(t, false, true) - defer os.RemoveAll(dir1) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) - defer os.RemoveAll(dir4) - - // Add them all to the cluster - addPeer(t, raft1, raft2) - addPeer(t, raft1, raft3) - addPeer(t, raft1, raft4) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add some data into the FSM - physical.ExerciseBackend(t, raft1) + // Create 4 raft nodes + raft1, dir1 := GetRaftWithConfig(t, true, true, conf) + raft2, dir2 := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) + raft4, dir4 := GetRaftWithConfig(t, false, true, conf) - time.Sleep(10 * time.Second) + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) + addPeer(t, raft1, raft4) - // Bring down all nodes - raft1.TeardownCluster(nil) - raft2.TeardownCluster(nil) - raft3.TeardownCluster(nil) - raft4.TeardownCluster(nil) + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) - // Prepare peers.json - type RecoveryPeer struct { - ID string `json:"id"` - Address string `json:"address"` - NonVoter bool `json:"non_voter"` - } + time.Sleep(10 * time.Second) - // Leave out node 1 during recovery - peersList := make([]*RecoveryPeer, 0, 3) - peersList = append(peersList, &RecoveryPeer{ - ID: raft1.NodeID(), - Address: raft1.NodeID(), - NonVoter: false, - }) - peersList = append(peersList, &RecoveryPeer{ - ID: raft2.NodeID(), - Address: raft2.NodeID(), - NonVoter: false, - }) - peersList = append(peersList, &RecoveryPeer{ - ID: raft4.NodeID(), - Address: raft4.NodeID(), - NonVoter: false, - }) + // Bring down all nodes + err := raft1.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft2.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft3.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } + err = raft4.TeardownCluster(nil) + if err != nil { + t.Fatal(err) + } - peersJSONBytes, err := jsonutil.EncodeJSON(peersList) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } - err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) - if err != nil { - t.Fatal(err) - } + // Prepare peers.json + type RecoveryPeer struct { + ID string `json:"id"` + Address string `json:"address"` + NonVoter bool `json:"non_voter"` + } - // Bring up the nodes again - 
raft1.SetupCluster(context.Background(), SetupOpts{}) - raft2.SetupCluster(context.Background(), SetupOpts{}) - raft4.SetupCluster(context.Background(), SetupOpts{}) + // Leave out node 1 during recovery + peersList := make([]*RecoveryPeer, 0, 3) + peersList = append(peersList, &RecoveryPeer{ + ID: raft1.NodeID(), + Address: raft1.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft2.NodeID(), + Address: raft2.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft4.NodeID(), + Address: raft4.NodeID(), + NonVoter: false, + }) - peers, err := raft1.Peers(context.Background()) - if err != nil { - t.Fatal(err) - } - if len(peers) != 3 { - t.Fatalf("failed to recover the cluster") - } + peersJSONBytes, err := jsonutil.EncodeJSON(peersList) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = os.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } - time.Sleep(10 * time.Second) + // Bring up the nodes again + err = raft1.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + err = raft2.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + err = raft4.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + + peers, err := raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 3 { + t.Fatalf("failed to recover the cluster") + } + + time.Sleep(10 * time.Second) - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft4.fsm) + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft4.fsm) + }) } func TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { - raft1, dir := getRaft(t, true, true) - raft2, dir2 := getRaft(t, false, true) - raft3, dir3 := getRaft(t, false, true) - defer os.RemoveAll(dir) - defer os.RemoveAll(dir2) - defer os.RemoveAll(dir3) + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - // Add raft2 to the cluster - addPeer(t, raft1, raft2) + raft1, _ := GetRaftWithConfig(t, true, true, conf) + raft2, _ := GetRaftWithConfig(t, false, true, conf) + raft3, _ := GetRaftWithConfig(t, false, true, conf) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) - // Add raft3 to the cluster - addPeer(t, raft1, raft3) + // Add raft3 to the cluster + addPeer(t, raft1, raft3) - physical.ExerciseTransactionalBackend(t, raft1) + physical.ExerciseTransactionalBackend(t, raft1) - time.Sleep(10 * time.Second) - // Make sure all stores are the same - compareFSMs(t, raft1.fsm, raft2.fsm) - compareFSMs(t, raft1.fsm, raft3.fsm) + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) + }) } -func TestRaft_Backend_Performance(t *testing.T) { - b, dir := getRaft(t, true, false) - defer os.RemoveAll(dir) +// TestRaft_TransactionalLimitsEnvOverride ensures the ENV var overrides for +// transaction size limits are plumbed through as expected. 
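+// For example (values illustrative): running with VAULT_RAFT_MAX_BATCH_ENTRIES=123 and +// VAULT_RAFT_MAX_BATCH_SIZE_BYTES=456 should make TransactionLimits() report 123 entries and +// 456 bytes, while unparsable or non-positive values fall back to the defaults and emit a log line.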
+func TestRaft_TransactionalLimitsEnvOverride(t *testing.T) { + tc := []struct { + name string + envEntries string + envSize string + wantEntries int + wantSize int + wantLog string + }{ + { + name: "defaults", + wantEntries: defaultMaxBatchEntries, + wantSize: defaultMaxBatchSize, + }, + { + name: "valid env", + envEntries: "123", + envSize: "456", + wantEntries: 123, + wantSize: 456, + }, + { + name: "invalid entries", + envEntries: "not-a-number", + envSize: "100", + wantEntries: defaultMaxBatchEntries, + wantSize: 100, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES", + }, + { + name: "invalid size", + envEntries: "100", + envSize: "asdasdsasd", + wantEntries: 100, + wantSize: defaultMaxBatchSize, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES", + }, + { + name: "zero entries", + envEntries: "0", + envSize: "100", + wantEntries: defaultMaxBatchEntries, + wantSize: 100, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_ENTRIES as an integer > 0", + }, + { + name: "zero size", + envEntries: "100", + envSize: "0", + wantEntries: 100, + wantSize: defaultMaxBatchSize, + wantLog: "failed to parse VAULT_RAFT_MAX_BATCH_SIZE_BYTES as an integer > 0", + }, + } - defaultConfig := raft.DefaultConfig() + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + // Set the env vars within this test + if tt.envEntries != "" { + t.Setenv(EnvVaultRaftMaxBatchEntries, tt.envEntries) + } + if tt.envSize != "" { + t.Setenv(EnvVaultRaftMaxBatchSizeBytes, tt.envSize) + } - localConfig := raft.DefaultConfig() - b.applyConfigSettings(localConfig) + var logBuf bytes.Buffer + raft1, dir := GetRaftWithLogOutput(t, false, true, &logBuf) + defer os.RemoveAll(dir) - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } + e, s := raft1.TransactionLimits() - b.conf = map[string]string{ - "path": dir, - "performance_multiplier": "5", + require.Equal(t, tt.wantEntries, e) + require.Equal(t, tt.wantSize, s) + if tt.wantLog != "" { + require.Contains(t, logBuf.String(), tt.wantLog) + } + }) + } +} - localConfig = raft.DefaultConfig() - b.applyConfigSettings(localConfig) +func TestRaft_Backend_Performance(t *testing.T) { + t.Parallel() + testBothRaftBackends(t, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { - t.Fatalf("bad config: %v", localConfig) - } + b, dir := GetRaftWithConfig(t, true, true, conf) - b.conf = map[string]string{ - "path": dir, - "performance_multiplier": "1", + defaultConfig := raft.DefaultConfig() + localConfig := raft.DefaultConfig() + err := b.applyConfigSettings(localConfig) + if err != nil { + t.Fatal(err) + } - localConfig = raft.DefaultConfig() - b.applyConfigSettings(localConfig) + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v",
localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } - if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { - t.Fatalf("bad config: %v", localConfig) - } - if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { - t.Fatalf("bad config: %v", localConfig) - } -} + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "5", + } -func BenchmarkDB_Puts(b *testing.B) { - raft, dir := getRaft(b, true, false) - defer os.RemoveAll(dir) - raft2, dir2 := getRaft(b, true, false) - defer os.RemoveAll(dir2) + localConfig = raft.DefaultConfig() + err = b.applyConfigSettings(localConfig) + if err != nil { + t.Fatal(err) + } + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } - bench := func(b *testing.B, s physical.Backend, dataSize int) { - data, err := uuid.GenerateRandomBytes(dataSize) + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "1", + } + + localConfig = raft.DefaultConfig() + err = b.applyConfigSettings(localConfig) if err != nil { - b.Fatal(err) + t.Fatal(err) } - ctx := context.Background() - pe := &physical.Entry{ - Value: data, + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { + t.Fatalf("bad config: %v", localConfig) } - testName := b.Name() + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { + t.Fatalf("bad config: %v", localConfig) + } + }) +} - b.ResetTimer() - for i := 0; i < b.N; i++ { - pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - err := s.Put(ctx, pe) +func BenchmarkDB_Puts(b *testing.B) { + testBothRaftBackendsBenchmark(b, func(useRaftWal string) { + conf := map[string]string{ + "trailing_logs": "100", + "raft_wal": useRaftWal, + } + + raft1, _ := GetRaftWithConfig(b, true, false, conf) + raft2, _ := GetRaftWithConfig(b, true, false, conf) + + bench := func(b *testing.B, s physical.Backend, dataSize int) { + data, err := uuid.GenerateRandomBytes(dataSize) if err != nil { b.Fatal(err) } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err := s.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } } - } - b.Run("256b", func(b *testing.B) { bench(b, raft, 256) }) - b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) + b.Run("256b", func(b *testing.B) { bench(b, raft1, 256) }) + b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) + }) } func BenchmarkDB_Snapshot(b *testing.B) { - raft, dir := getRaft(b, true, false) - defer os.RemoveAll(dir) - - data, err := uuid.GenerateRandomBytes(256 * 1024) - if err != nil { - b.Fatal(err) - } - - ctx := context.Background() - pe := &physical.Entry{ - Value: data, - } - testName := b.Name() + testBothRaftBackendsBenchmark(b, func(useRaftWal string) { + conf := map[string]string{ + 
"trailing_logs": "100", + "raft_wal": useRaftWal, + } - for i := 0; i < 100; i++ { - pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - err = raft.Put(ctx, pe) + raft1, _ := GetRaftWithConfig(b, true, false, conf) + data, err := uuid.GenerateRandomBytes(256 * 1024) if err != nil { b.Fatal(err) } - } - bench := func(b *testing.B, s *FSM) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + for i := 0; i < 100; i++ { pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) - s.writeTo(ctx, discardCloser{Writer: ioutil.Discard}, discardCloser{Writer: ioutil.Discard}) + err = raft1.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } + + bench := func(b *testing.B, s *FSM) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + s.writeTo(ctx, discardCloser{Writer: io.Discard}, discardCloser{Writer: io.Discard}) + } } - } - b.Run("256kb", func(b *testing.B) { bench(b, raft.fsm) }) + b.Run("256kb", func(b *testing.B) { bench(b, raft1.fsm) }) + }) } type discardCloser struct { diff --git a/physical/raft/raft_util.go b/physical/raft/raft_util.go index 34570fba678f..722f8e2fc8d0 100644 --- a/physical/raft/raft_util.go +++ b/physical/raft/raft_util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !enterprise package raft diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go index cebcdb0a4a82..b584af7665ed 100644 --- a/physical/raft/snapshot.go +++ b/physical/raft/snapshot.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( @@ -15,13 +18,12 @@ import ( "time" "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/plugin/pb" "github.com/rboyer/safeio" - bolt "go.etcd.io/bbolt" "go.uber.org/atomic" - - "github.com/hashicorp/raft" ) const ( diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go index 53757c5683d7..d85af4da5f73 100644 --- a/physical/raft/snapshot_test.go +++ b/physical/raft/snapshot_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package raft import ( @@ -15,7 +18,7 @@ import ( "testing" "time" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" @@ -55,7 +58,7 @@ func addPeer(t *testing.T, leader, follower *RaftBackend) { } func TestRaft_Snapshot_Loading(t *testing.T) { - raft, dir := getRaft(t, true, false) + raft, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) // Write some data @@ -139,7 +142,7 @@ func TestRaft_Snapshot_Loading(t *testing.T) { } func TestRaft_Snapshot_Index(t *testing.T) { - raft, dir := getRaft(t, true, false) + raft, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) err := raft.Put(context.Background(), &physical.Entry{ @@ -226,9 +229,9 @@ func TestRaft_Snapshot_Index(t *testing.T) { } func TestRaft_Snapshot_Peers(t *testing.T) { - raft1, dir := getRaft(t, true, false) - raft2, dir2 := getRaft(t, false, false) - raft3, dir3 := getRaft(t, false, false) + raft1, dir := GetRaft(t, true, false) + raft2, dir2 := GetRaft(t, false, false) + raft3, dir3 := GetRaft(t, false, false) defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -309,9 +312,9 @@ func ensureCommitApplied(t *testing.T, leaderCommitIdx uint64, backend *RaftBack } func TestRaft_Snapshot_Restart(t *testing.T) { - raft1, dir := getRaft(t, true, false) + raft1, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) - raft2, dir2 := getRaft(t, false, false) + raft2, dir2 := GetRaft(t, false, false) defer os.RemoveAll(dir2) // Write some data @@ -373,9 +376,9 @@ func TestRaft_Snapshot_Restart(t *testing.T) { /* func TestRaft_Snapshot_ErrorRecovery(t *testing.T) { - raft1, dir := getRaft(t, true, false) - raft2, dir2 := getRaft(t, false, false) - raft3, dir3 := getRaft(t, false, false) + raft1, dir := GetRaft(t, true, false) + raft2, dir2 := GetRaft(t, false, false) + raft3, dir3 := GetRaft(t, false, false) defer os.RemoveAll(dir) defer os.RemoveAll(dir2) defer os.RemoveAll(dir3) @@ -455,9 +458,9 @@ func TestRaft_Snapshot_ErrorRecovery(t *testing.T) { }*/ func TestRaft_Snapshot_Take_Restore(t *testing.T) { - raft1, dir := getRaft(t, true, false) + raft1, dir := GetRaft(t, true, false) defer os.RemoveAll(dir) - raft2, dir2 := getRaft(t, false, false) + raft2, dir2 := GetRaft(t, false, false) defer os.RemoveAll(dir2) addPeer(t, raft1, raft2) diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go index ed154f8bcdaf..861f4f60488c 100644 --- a/physical/raft/streamlayer.go +++ b/physical/raft/streamlayer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( diff --git a/physical/raft/streamlayer_test.go b/physical/raft/streamlayer_test.go index 51a26f832266..bc35eb66ffe4 100644 --- a/physical/raft/streamlayer_test.go +++ b/physical/raft/streamlayer_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package raft import ( diff --git a/physical/raft/testing.go b/physical/raft/testing.go new file mode 100644 index 000000000000..aae0ba1bdbde --- /dev/null +++ b/physical/raft/testing.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package raft + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" +) + +func GetRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { + return getRaftInternal(t, bootstrap, defaultRaftConfig(t, bootstrap, noStoreState), nil, nil) +} + +func GetRaftWithConfig(t testing.TB, bootstrap bool, noStoreState bool, conf map[string]string) (*RaftBackend, string) { + defaultConf := defaultRaftConfig(t, bootstrap, noStoreState) + conf["path"] = defaultConf["path"] + conf["doNotStoreLatestState"] = defaultConf["doNotStoreLatestState"] + return getRaftInternal(t, bootstrap, conf, nil, nil) +} + +func GetRaftWithLogOutput(t testing.TB, bootstrap bool, noStoreState bool, logOutput io.Writer) (*RaftBackend, string) { + return getRaftInternal(t, bootstrap, defaultRaftConfig(t, bootstrap, noStoreState), logOutput, nil) +} + +func defaultRaftConfig(t testing.TB, bootstrap bool, noStoreState bool) map[string]string { + raftDir := t.TempDir() + t.Logf("raft dir: %s", raftDir) + + conf := map[string]string{ + "path": raftDir, + "trailing_logs": "100", + } + + if noStoreState { + conf["doNotStoreLatestState"] = "" + } + + return conf +} + +func getRaftInternal(t testing.TB, bootstrap bool, conf map[string]string, logOutput io.Writer, initFn func(b *RaftBackend)) (*RaftBackend, string) { + id, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: fmt.Sprintf("raft-%s", id), + Level: hclog.Trace, + Output: logOutput, + }) + + conf["node_id"] = id + + backendRaw, err := NewRaftBackend(conf, logger) + if err != nil { + t.Fatal(err) + } + backend := backendRaw.(*RaftBackend) + if initFn != nil { + initFn(backend) + } + + if bootstrap { + err = backend.Bootstrap([]Peer{ + { + ID: backend.NodeID(), + Address: backend.NodeID(), + }, + }) + if err != nil { + t.Fatal(err) + } + + err = backend.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + + for { + if backend.raft.AppliedIndex() >= 2 { + break + } + } + + } + + backend.DisableAutopilot() + return backend, conf["path"] +} diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go index 8ca994a312a6..cd3230ddfd4e 100644 --- a/physical/raft/types.pb.go +++ b/physical/raft/types.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: physical/raft/types.proto package raft diff --git a/physical/raft/types.proto b/physical/raft/types.proto index 0b1d189ef6e5..6e87157051aa 100644 --- a/physical/raft/types.proto +++ b/physical/raft/types.proto @@ -1,43 +1,46 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 -option go_package = "github.com/hashicorp/vault/physical/raft"; +syntax = "proto3"; package raft; +option go_package = "github.com/hashicorp/vault/physical/raft"; + message LogOperation { - // OpType is the Operation type - uint32 op_type = 1; + // OpType is the Operation type + uint32 op_type = 1; - // Flags is an opaque value, currently unused. Reserved. - uint64 flags = 2; + // Flags is an opaque value, currently unused. Reserved. 
+ uint64 flags = 2; - // Key that is being affected - string key = 3; + // Key that is being affected + string key = 3; - // Value is optional, corresponds to the key - bytes value = 4; + // Value is optional, corresponds to the key + bytes value = 4; } message LogData { - repeated LogOperation operations = 1; + repeated LogOperation operations = 1; } message IndexValue { - uint64 term = 1; - uint64 index = 2; + uint64 term = 1; + uint64 index = 2; } message Server { - int32 suffrage = 1; - string id = 2; - string address = 3; + int32 suffrage = 1; + string id = 2; + string address = 3; } message ConfigurationValue { - uint64 index = 1; - repeated Server servers = 2; + uint64 index = 1; + repeated Server servers = 2; } -message LocalNodeConfigValue{ +message LocalNodeConfigValue { string desired_suffrage = 1; } diff --git a/physical/raft/varint.go b/physical/raft/varint.go index b3b9bfaaebd0..87f59eaa700d 100644 --- a/physical/raft/varint.go +++ b/physical/raft/varint.go @@ -79,14 +79,19 @@ func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { if c, ok := r.(io.Closer); ok { closer = c } - return &varintReader{bufio.NewReader(r), nil, maxSize, closer} + return &varintReader{bufio.NewReader(r), nil, maxSize, closer, 0} } type varintReader struct { - r *bufio.Reader - buf []byte - maxSize int - closer io.Closer + r *bufio.Reader + buf []byte + maxSize int + closer io.Closer + lastReadSize int +} + +func (this *varintReader) GetLastReadSize() int { + return this.lastReadSize } func (this *varintReader) ReadMsg(msg proto.Message) error { @@ -102,9 +107,11 @@ func (this *varintReader) ReadMsg(msg proto.Message) error { this.buf = make([]byte, length) } buf := this.buf[:length] - if _, err := io.ReadFull(this.r, buf); err != nil { + size, err := io.ReadFull(this.r, buf) + if err != nil { return err } + this.lastReadSize = size return proto.Unmarshal(buf, msg) } diff --git a/physical/raft/vars_32bit.go b/physical/raft/vars_32bit.go index c9662e796c56..1b43384726d6 100644 --- a/physical/raft/vars_32bit.go +++ b/physical/raft/vars_32bit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build 386 || arm || windows package raft diff --git a/physical/raft/vars_64bit.go b/physical/raft/vars_64bit.go index 40efb4c08910..4c728e23630d 100644 --- a/physical/raft/vars_64bit.go +++ b/physical/raft/vars_64bit.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build !386 && !arm && !windows package raft diff --git a/physical/s3/s3.go b/physical/s3/s3.go index e9e1fd33789a..da82acccd3ca 100644 --- a/physical/s3/s3.go +++ b/physical/s3/s3.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package s3 import ( diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go index 794f557e0f39..68b23f129841 100644 --- a/physical/s3/s3_test.go +++ b/physical/s3/s3_test.go @@ -1,12 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package s3 import ( + "context" "fmt" "math/rand" "os" "testing" "time" + "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -29,6 +34,10 @@ func DoS3BackendTest(t *testing.T, kmsKeyId string) { t.Skip() } + if !hasAWSCredentials() { + t.Skip("Skipping because AWS credentials could not be resolved. 
See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.") + } + logger := logging.NewVaultLogger(log.Debug) credsConfig := &awsutil.CredentialsConfig{Logger: logger} @@ -109,3 +118,20 @@ func DoS3BackendTest(t *testing.T, kmsKeyId string) { physical.ExerciseBackend(t, b) physical.ExerciseBackend_ListPrefix(t, b) } + +func hasAWSCredentials() bool { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return false + } + + creds, err := cfg.Credentials.Retrieve(ctx) + if err != nil { + return false + } + + return creds.HasKeys() +} diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go index 723b788199f7..4151d93ba1b8 100644 --- a/physical/spanner/spanner.go +++ b/physical/spanner/spanner.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package spanner import ( diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go index 7aa4f8986dbd..377704d571be 100644 --- a/physical/spanner/spanner_ha.go +++ b/physical/spanner/spanner_ha.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package spanner import ( diff --git a/physical/spanner/spanner_ha_test.go b/physical/spanner/spanner_ha_test.go index 49a818b393cc..c6afbd001bc5 100644 --- a/physical/spanner/spanner_ha_test.go +++ b/physical/spanner/spanner_ha_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package spanner import ( diff --git a/physical/spanner/spanner_test.go b/physical/spanner/spanner_test.go index d484dd316cb5..fc761f1e3235 100644 --- a/physical/spanner/spanner_test.go +++ b/physical/spanner/spanner_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package spanner import ( diff --git a/physical/swift/swift.go b/physical/swift/swift.go index 2155d44c8aba..b19b58bda8ca 100644 --- a/physical/swift/swift.go +++ b/physical/swift/swift.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package swift import ( diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go index 0e569c5a9fae..a17b15515ecb 100644 --- a/physical/swift/swift_test.go +++ b/physical/swift/swift_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package swift import ( diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go index 26c09fb165c7..32a560472b25 100644 --- a/physical/zookeeper/zookeeper.go +++ b/physical/zookeeper/zookeeper.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package zookeeper import ( @@ -13,13 +16,12 @@ import ( "sync" "time" + metrics "github.com/armon/go-metrics" + "github.com/go-zookeeper/zk" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/physical" - - metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-secure-stdlib/tlsutil" - "github.com/samuel/go-zookeeper/zk" + "github.com/hashicorp/vault/sdk/physical" ) const ( @@ -642,8 +644,9 @@ func (i *ZooKeeperHALock) Unlock() error { return } + timer := time.NewTimer(time.Second) select { - case <-time.After(time.Second): + case <-timer.C: attempts := attempts + 1 if attempts >= 10 { i.logger.Error("release lock max attempts reached. Lock may not be released", "error", err) @@ -651,6 +654,7 @@ func (i *ZooKeeperHALock) Unlock() error { } continue case <-i.stopCh: + timer.Stop() return } } diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go index baaa41fdbf53..7c9fe70180fa 100644 --- a/physical/zookeeper/zookeeper_test.go +++ b/physical/zookeeper/zookeeper_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package zookeeper import ( @@ -6,11 +9,10 @@ import ( "testing" "time" + "github.com/go-zookeeper/zk" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" - - "github.com/samuel/go-zookeeper/zk" ) func TestZooKeeperBackend(t *testing.T) { @@ -27,7 +29,6 @@ func TestZooKeeperBackend(t *testing.T) { randPath := fmt.Sprintf("/vault-%d", time.Now().Unix()) acl := zk.WorldACL(zk.PermAll) _, err = client.Create(randPath, []byte("hi"), int32(0), acl) - if err != nil { t.Fatalf("err: %v", err) } @@ -71,7 +72,6 @@ func TestZooKeeperHABackend(t *testing.T) { randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix()) acl := zk.WorldACL(zk.PermAll) _, err = client.Create(randPath, []byte("hi"), int32(0), acl) - if err != nil { t.Fatalf("err: %v", err) } diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go index 4ee0903642e0..6f9f7af954f4 100644 --- a/plugins/database/cassandra/cassandra-database-plugin/main.go +++ b/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go index de549261fd60..01a4c406cbcf 100644 --- a/plugins/database/cassandra/cassandra.go +++ b/plugins/database/cassandra/cassandra.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/plugins/database/cassandra/cassandra_test.go b/plugins/database/cassandra/cassandra_test.go index ec8b42290d30..9162c467a2a8 100644 --- a/plugins/database/cassandra/cassandra_test.go +++ b/plugins/database/cassandra/cassandra_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/plugins/database/cassandra/connection_producer.go b/plugins/database/cassandra/connection_producer.go index 72f7bb878e16..78f8311fbeee 100644 --- a/plugins/database/cassandra/connection_producer.go +++ b/plugins/database/cassandra/connection_producer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/plugins/database/cassandra/connection_producer_test.go b/plugins/database/cassandra/connection_producer_test.go index 3f99c1d65a00..306b444f4568 100644 --- a/plugins/database/cassandra/connection_producer_test.go +++ b/plugins/database/cassandra/connection_producer_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml index 71fdead51f23..a55afc69309d 100644 --- a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml +++ b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # Cassandra storage config YAML # NOTE: diff --git a/plugins/database/cassandra/tls.go b/plugins/database/cassandra/tls.go index cc64d3c3b5f4..e8ad907235c0 100644 --- a/plugins/database/cassandra/tls.go +++ b/plugins/database/cassandra/tls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package cassandra import ( diff --git a/plugins/database/hana/hana-database-plugin/main.go b/plugins/database/hana/hana-database-plugin/main.go index 2057c36c08d4..8e4311eeb327 100644 --- a/plugins/database/hana/hana-database-plugin/main.go +++ b/plugins/database/hana/hana-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go index bca437c369a6..314505c15f1e 100644 --- a/plugins/database/hana/hana.go +++ b/plugins/database/hana/hana.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package hana import ( diff --git a/plugins/database/hana/hana_test.go b/plugins/database/hana/hana_test.go index 67c108883489..894d3a4da164 100644 --- a/plugins/database/hana/hana_test.go +++ b/plugins/database/hana/hana_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package hana import ( diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go index a9a6964ea210..24d03951e01e 100644 --- a/plugins/database/influxdb/connection_producer.go +++ b/plugins/database/influxdb/connection_producer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package influxdb import ( diff --git a/plugins/database/influxdb/influxdb-database-plugin/main.go b/plugins/database/influxdb/influxdb-database-plugin/main.go index c8f6c5fa1e3f..41ed199556cc 100644 --- a/plugins/database/influxdb/influxdb-database-plugin/main.go +++ b/plugins/database/influxdb/influxdb-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/influxdb/influxdb.go b/plugins/database/influxdb/influxdb.go index 4a8225e52b3b..4df308997639 100644 --- a/plugins/database/influxdb/influxdb.go +++ b/plugins/database/influxdb/influxdb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package influxdb import ( diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go index 4ecdac51bcbd..37401c4bc1bb 100644 --- a/plugins/database/influxdb/influxdb_test.go +++ b/plugins/database/influxdb/influxdb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package influxdb import ( @@ -6,14 +9,15 @@ import ( "net/url" "os" "reflect" + "runtime" "strconv" "strings" "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" - dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/helper/docker" influx "github.com/influxdata/influxdb1-client/v2" "github.com/stretchr/testify/require" ) @@ -48,6 +52,11 @@ func (c *Config) connectionParams() map[string]interface{} { } func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { + // Skipping on ARM, as this image can't run on ARM architecture + if strings.Contains(runtime.GOARCH, "arm") { + t.Skip("Skipping, as this image is not supported on ARM architectures") + } + c := &Config{ Username: "influx-root", Password: "influx-root", @@ -58,8 +67,9 @@ func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { } runner, err := docker.NewServiceRunner(docker.RunOptions{ - ImageRepo: "influxdb", - ImageTag: "1.8-alpine", + ImageRepo: "docker.mirror.hashicorp.services/influxdb", + ContainerName: "influxdb", + ImageTag: "1.8-alpine", Env: []string{ "INFLUXDB_DB=vault", "INFLUXDB_ADMIN_USER=" + c.Username, diff --git a/plugins/database/mongodb/cert_helpers_test.go b/plugins/database/mongodb/cert_helpers_test.go index deb04ab9c4e4..3a8f3afcb84f 100644 --- a/plugins/database/mongodb/cert_helpers_test.go +++ b/plugins/database/mongodb/cert_helpers_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go index 348fb6bd4d43..ca6244aa494a 100644 --- a/plugins/database/mongodb/connection_producer.go +++ b/plugins/database/mongodb/connection_producer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go index 529e4d22fb09..e29332b6092e 100644 --- a/plugins/database/mongodb/connection_producer_test.go +++ b/plugins/database/mongodb/connection_producer_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( diff --git a/plugins/database/mongodb/mongodb-database-plugin/main.go b/plugins/database/mongodb/mongodb-database-plugin/main.go index 30dd5fdd7cff..ab0ff80c3bc9 100644 --- a/plugins/database/mongodb/mongodb-database-plugin/main.go +++ b/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go index 6cb511b89f8e..7d285b24c5d4 100644 --- a/plugins/database/mongodb/mongodb.go +++ b/plugins/database/mongodb/mongodb.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( @@ -173,7 +176,7 @@ func (m *MongoDB) changeUserPassword(ctx context.Context, username, password str } database := cs.Database - if username == m.Username || database == "" { + if database == "" { database = "admin" } diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go index dcfda3bc0576..43a3e8d3c242 100644 --- a/plugins/database/mongodb/mongodb_test.go +++ b/plugins/database/mongodb/mongodb_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import ( @@ -5,6 +8,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "net/http" "reflect" "strings" "sync" @@ -23,7 +27,10 @@ import ( "go.mongodb.org/mongo-driver/mongo/readpref" ) -const mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` +const ( + mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` + mongoTestDBAdminRole = `{ "db": "test", "roles": [ { "role": "readWrite" } ] }` +) func TestMongoDB_Initialize(t *testing.T) { cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") @@ -115,6 +122,23 @@ func TestNewUser_usernameTemplate(t *testing.T) { expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$", }, + "admin in test database username template": { + usernameTemplate: "", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "testrolenamewithmanycharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoTestDBAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, } for name, test := range tests { @@ -122,6 +146,10 @@ func TestNewUser_usernameTemplate(t *testing.T) { cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") defer cleanup() + if name == "admin in test database username template" { + connURL = connURL + "/test?authSource=test" + } + db := new() defer dbtesting.AssertClose(t, db) @@ -286,6 +314,39 @@ func TestMongoDB_UpdateUser_Password(t *testing.T) { assertCredsExist(t, dbUser, newPassword, connURL) } +func TestMongoDB_RotateRoot_NonAdminDB(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + connURL = connURL + "/test?authSource=test" + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + dbUser := "testmongouser" + startingPassword := "password" + createDBUser(t, connURL, "test", dbUser, startingPassword) + + newPassword := "myreallysecurecredentials" + + updateReq := dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + }, + } + dbtesting.AssertUpdateUser(t, db, updateReq) + + assertCredsExist(t, dbUser, newPassword, connURL) +} + func TestGetTLSAuth(t *testing.T) { ca := certhelpers.NewCert(t, certhelpers.CommonName("certificate authority"), @@ -382,6 +443,8 @@ func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.Cer } var cmpClientOptionsOpts = cmp.Options{ + cmpopts.IgnoreTypes(http.Transport{}), + cmp.AllowUnexported(options.ClientOptions{}), cmp.AllowUnexported(tls.Config{}), diff --git a/plugins/database/mongodb/util.go 
b/plugins/database/mongodb/util.go index a12828f503b8..ebeebb3f433d 100644 --- a/plugins/database/mongodb/util.go +++ b/plugins/database/mongodb/util.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mongodb import "go.mongodb.org/mongo-driver/mongo/writeconcern" diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go index 37a81a660012..1ed6f5d0e3ce 100644 --- a/plugins/database/mssql/mssql-database-plugin/main.go +++ b/plugins/database/mssql/mssql-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go index 7915732b4d00..33162dc407a9 100644 --- a/plugins/database/mssql/mssql.go +++ b/plugins/database/mssql/mssql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mssql import ( diff --git a/plugins/database/mssql/mssql_test.go b/plugins/database/mssql/mssql_test.go index 2292490d88a7..6e866fea9505 100644 --- a/plugins/database/mssql/mssql_test.go +++ b/plugins/database/mssql/mssql_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mssql import ( @@ -189,13 +192,13 @@ func TestUpdateUser_password(t *testing.T) { expectedPassword string } - dbUser := "vaultuser" + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() initPassword := "p4$sw0rd" tests := map[string]testCase{ "missing password": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "", Statements: dbplugin.Statements{}, @@ -206,7 +209,6 @@ func TestUpdateUser_password(t *testing.T) { }, "empty rotation statements": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{}, @@ -217,7 +219,6 @@ func TestUpdateUser_password(t *testing.T) { }, "username rotation": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{ @@ -232,7 +233,6 @@ func TestUpdateUser_password(t *testing.T) { }, "bad statements": { req: dbplugin.UpdateUserRequest{ - Username: dbUser, Password: &dbplugin.ChangePassword{ NewPassword: "N90gkKLy8$angf", Statements: dbplugin.Statements{ @@ -247,11 +247,9 @@ func TestUpdateUser_password(t *testing.T) { }, } + i := 0 for name, test := range tests { t.Run(name, func(t *testing.T) { - cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) - defer cleanup() - initReq := dbplugin.InitializeRequest{ Config: map[string]interface{}{ "connection_url": connURL, @@ -263,6 +261,9 @@ func TestUpdateUser_password(t *testing.T) { dbtesting.AssertInitializeCircleCiTest(t, db, initReq) defer dbtesting.AssertClose(t, db) + dbUser := fmt.Sprintf("vaultuser%d", i) + test.req.Username = dbUser + i++ err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLLogin) if err != nil { t.Fatalf("Failed to create user: %s", err) diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go index f143e85fbf43..0fb027bb29d1 100644 --- a/plugins/database/mysql/connection_producer.go +++ b/plugins/database/mysql/connection_producer.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( @@ -10,6 +13,7 @@ import ( "sync" "time" + cloudmysql "cloud.google.com/go/cloudsqlconn/mysql/mysql" "github.com/go-sql-driver/mysql" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-uuid" @@ -18,22 +22,34 @@ import ( "github.com/mitchellh/mapstructure" ) +const ( + cloudSQLMySQL = "cloudsql-mysql" + driverMySQL = "mysql" +) + // mySQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases type mySQLConnectionProducer struct { ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"` MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"` MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"` MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` - - Username string `json:"username" mapstructure:"username" structs:"username"` - Password string `json:"password" mapstructure:"password" structs:"password"` + Username string `json:"username" mapstructure:"username" structs:"username"` + Password string `json:"password" mapstructure:"password" structs:"password"` + AuthType string `json:"auth_type" mapstructure:"auth_type" structs:"auth_type"` + ServiceAccountJSON string `json:"service_account_json" mapstructure:"service_account_json" structs:"service_account_json"` TLSCertificateKeyData []byte `json:"tls_certificate_key" mapstructure:"tls_certificate_key" structs:"-"` TLSCAData []byte `json:"tls_ca" mapstructure:"tls_ca" structs:"-"` + TLSServerName string `json:"tls_server_name" mapstructure:"tls_server_name" structs:"tls_server_name"` + TLSSkipVerify bool `json:"tls_skip_verify" mapstructure:"tls_skip_verify" structs:"tls_skip_verify"` // tlsConfigName is a globally unique name that references the TLS config for this instance in the mysql driver tlsConfigName string + // cloudDriverName is a globally unique name that references the cloud dialer config for this instance of the driver + cloudDriverName string + cloudDialerCleanup func() error + RawConfig map[string]interface{} maxConnectionLifetime time.Duration Initialized bool @@ -106,17 +122,43 @@ func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]inte mysql.RegisterTLSConfig(c.tlsConfigName, tlsConfig) } + // validate auth_type if provided + authType := c.AuthType + if authType != "" { + if ok := connutil.ValidateAuthType(authType); !ok { + return nil, fmt.Errorf("invalid auth_type %s provided", authType) + } + } + + if c.AuthType == connutil.AuthTypeGCPIAM { + c.cloudDriverName, err = uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("unable to generate UUID for IAM configuration: %w", err) + } + + // for _most_ sql databases, the driver itself contains no state. In the case of google's cloudsql drivers, + // however, the driver might store a credentials file, in which case the state stored by the driver is in + // fact critical to the proper function of the connection. So it needs to be registered here inside the + // ConnectionProducer init. 
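+			// registerDriverMySQL (defined at the bottom of this file) wraps cloudmysql.RegisterDriver after
+			// translating the service account JSON into dialer options via connutil.GetCloudSQLAuthOptions;
+			// the cleanup func it returns closes the Cloud SQL dialer and is called from Connection() and Close().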
+ dialerCleanup, err := registerDriverMySQL(c.cloudDriverName, c.ServiceAccountJSON) + if err != nil { + return nil, err + } + + c.cloudDialerCleanup = dialerCleanup + } + // Set initialized to true at this point since all fields are set, // and the connection can be established at a later time. c.Initialized = true if verifyConnection { - if _, err := c.Connection(ctx); err != nil { - return nil, fmt.Errorf("error verifying connection: %w", err) + if _, err = c.Connection(ctx); err != nil { + return nil, fmt.Errorf("error verifying - connection: %w", err) } if err := c.db.PingContext(ctx); err != nil { - return nil, fmt.Errorf("error verifying connection: %w", err) + return nil, fmt.Errorf("error verifying - ping: %w", err) } } @@ -136,6 +178,20 @@ func (c *mySQLConnectionProducer) Connection(ctx context.Context) (interface{}, // If the ping was unsuccessful, close it and ignore errors as we'll be // reestablishing anyways c.db.Close() + + // if IAM authentication was enabled + // ensure open dialer is also closed + if c.AuthType == connutil.AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } + + } + + driverName := driverMySQL + if c.cloudDriverName != "" { + driverName = c.cloudDriverName } connURL, err := c.addTLStoDSN() @@ -143,7 +199,12 @@ func (c *mySQLConnectionProducer) Connection(ctx context.Context) (interface{}, return nil, err } - c.db, err = sql.Open("mysql", connURL) + cloudURL, err := c.rewriteProtocolForGCP(connURL) + if err != nil { + return nil, err + } + + c.db, err = sql.Open(driverName, cloudURL) if err != nil { return nil, err } @@ -170,6 +231,13 @@ func (c *mySQLConnectionProducer) Close() error { defer c.Unlock() if c.db != nil { + // if auth_type is IAM, ensure cleanup + // of cloudSQL resources + if c.AuthType == connutil.AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } c.db.Close() } @@ -204,8 +272,10 @@ func (c *mySQLConnectionProducer) getTLSAuth() (tlsConfig *tls.Config, err error } tlsConfig = &tls.Config{ - RootCAs: rootCertPool, - Certificates: clientCert, + RootCAs: rootCertPool, + Certificates: clientCert, + ServerName: c.TLSServerName, + InsecureSkipVerify: c.TLSSkipVerify, } return tlsConfig, nil @@ -222,6 +292,40 @@ func (c *mySQLConnectionProducer) addTLStoDSN() (connURL string, err error) { } connURL = config.FormatDSN() - return connURL, nil } + +// rewriteProtocolForGCP rewrites the protocol in the DSN to contain the protocol name associated +// with the dialer and therefore driver associated with the provided cloudsqlconn.DialerOpts. +// As a safety/sanity check, it will only do this for protocol "cloudsql-mysql", the name GCP uses in its documentation. 
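+// The rewrite is purely textual: mysql.ParseDSN parses the DSN, the Net field is swapped from the
+// placeholder to the per-instance driver name, and FormatDSN reassembles the string.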
+// +// For example, it will rewrite the dsn "user@cloudsql-mysql(zone:region:instance)/ to +// "user@the-uuid-generated(zone:region:instance)/ +func (c *mySQLConnectionProducer) rewriteProtocolForGCP(inDSN string) (string, error) { + if c.cloudDriverName == "" { + // unchanged if not cloud + return inDSN, nil + } + + config, err := mysql.ParseDSN(inDSN) + if err != nil { + return "", fmt.Errorf("unable to parse connectionURL: %s", err) + } + + if config.Net != cloudSQLMySQL { + return "", fmt.Errorf("didn't update net name because it wasn't what we expected as a placeholder: %s", config.Net) + } + + config.Net = c.cloudDriverName + + return config.FormatDSN(), nil +} + +func registerDriverMySQL(driverName, credentials string) (cleanup func() error, err error) { + opts, err := connutil.GetCloudSQLAuthOptions(credentials) + if err != nil { + return nil, err + } + + return cloudmysql.RegisterDriver(driverName, opts...) +} diff --git a/plugins/database/mysql/connection_producer_test.go b/plugins/database/mysql/connection_producer_test.go index eacf18fabe8c..ae6014906f02 100644 --- a/plugins/database/mysql/connection_producer_test.go +++ b/plugins/database/mysql/connection_producer_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( diff --git a/plugins/database/mysql/mysql-database-plugin/main.go b/plugins/database/mysql/mysql-database-plugin/main.go index 6b1505aff194..2735e082b866 100644 --- a/plugins/database/mysql/mysql-database-plugin/main.go +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/plugins/database/mysql/mysql-legacy-database-plugin/main.go index ea6b9839a77e..6818cccbf0ce 100644 --- a/plugins/database/mysql/mysql-legacy-database-plugin/main.go +++ b/plugins/database/mysql/mysql-legacy-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go index db47c71dd310..c938d2be9a70 100644 --- a/plugins/database/mysql/mysql.go +++ b/plugins/database/mysql/mysql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go index 3c7eab5af357..4507b91a2a20 100644 --- a/plugins/database/mysql/mysql_test.go +++ b/plugins/database/mysql/mysql_test.go @@ -1,21 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package mysql import ( "context" "database/sql" "fmt" + "os" "strings" "testing" "time" stdmysql "github.com/go-sql-driver/mysql" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/stretchr/testify/require" + mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" - dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/credsutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" - "github.com/stretchr/testify/require" ) var _ dbplugin.Database = (*MySQL)(nil) @@ -41,6 +47,79 @@ func TestMySQL_Initialize(t *testing.T) { } } +// TestMySQL_Initialize_CloudGCP validates the proper initialization of a MySQL backend pointing +// to a GCP CloudSQL MySQL instance. This expects some external setup (exact TBD) +func TestMySQL_Initialize_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) + } + + credStr := dbtesting.GetGCPTestCredentials(t) + + tests := map[string]struct { + req dbplugin.InitializeRequest + wantErr bool + expectedError string + }{ + "empty auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "", + }, + }, + }, + "invalid auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "invalid", + }, + }, + wantErr: true, + expectedError: "invalid auth_type", + }, + "JSON credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + }, + }, + } + + for n, tc := range tests { + t.Run(n, func(t *testing.T) { + db := newMySQL(DefaultUserNameTemplate) + defer dbtesting.AssertClose(t, db) + _, err := db.Initialize(context.Background(), tc.req) + + if tc.wantErr { + if err == nil { + t.Fatalf("expected error but received nil") + } + + if !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("expected error %s, got %s", tc.expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("expected no error, received %s", err) + } + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + } + }) + } +} + func testInitialize(t *testing.T, rootPassword string) { cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, rootPassword) defer cleanup() diff --git a/plugins/database/postgresql/passwordauthentication.go b/plugins/database/postgresql/passwordauthentication.go new file mode 100644 index 000000000000..a20214dae4d1 --- /dev/null +++ b/plugins/database/postgresql/passwordauthentication.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package postgresql + +import "fmt" + +// passwordAuthentication determines whether to send passwords in plaintext (password) or hashed (scram-sha-256). +type passwordAuthentication string + +var ( + // passwordAuthenticationPassword is the default. If set, passwords will be sent to PostgreSQL in plain text. 
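+	// passwordAuthenticationSCRAMSHA256, by contrast, causes Vault to pre-hash passwords with scram.Hash,
+	// so only a SCRAM-SHA-256 verifier, never the cleartext, appears in the statements sent to PostgreSQL.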
+ passwordAuthenticationPassword passwordAuthentication = "password" + passwordAuthenticationSCRAMSHA256 passwordAuthentication = "scram-sha-256" +) + +var passwordAuthentications = map[passwordAuthentication]struct{}{ + passwordAuthenticationSCRAMSHA256: {}, + passwordAuthenticationPassword: {}, +} + +func parsePasswordAuthentication(s string) (passwordAuthentication, error) { + if _, ok := passwordAuthentications[passwordAuthentication(s)]; !ok { + return "", fmt.Errorf("'%s' is not a valid password authentication type", s) + } + + return passwordAuthentication(s), nil +} diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go index 75b5fd9babb2..3efc801e9ed4 100644 --- a/plugins/database/postgresql/postgresql-database-plugin/main.go +++ b/plugins/database/postgresql/postgresql-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go index c76558350586..0bdd91641240 100644 --- a/plugins/database/postgresql/postgresql.go +++ b/plugins/database/postgresql/postgresql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package postgresql import ( @@ -9,6 +12,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/plugins/database/postgresql/scram" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" "github.com/hashicorp/vault/sdk/database/helper/connutil" "github.com/hashicorp/vault/sdk/database/helper/dbutil" @@ -65,7 +69,8 @@ func new() *PostgreSQL { connProducer.Type = postgreSQLTypeName db := &PostgreSQL{ - SQLConnectionProducer: connProducer, + SQLConnectionProducer: connProducer, + passwordAuthentication: passwordAuthenticationPassword, } return db @@ -74,7 +79,8 @@ func new() *PostgreSQL { type PostgreSQL struct { *connutil.SQLConnectionProducer - usernameProducer template.StringTemplate + usernameProducer template.StringTemplate + passwordAuthentication passwordAuthentication } func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { @@ -102,6 +108,20 @@ func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequ return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) } + passwordAuthenticationRaw, err := strutil.GetString(req.Config, "password_authentication") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve password_authentication: %w", err) + } + + if passwordAuthenticationRaw != "" { + pwAuthentication, err := parsePasswordAuthentication(passwordAuthenticationRaw) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + p.passwordAuthentication = pwAuthentication + } + resp := dbplugin.InitializeResponse{ Config: newConf, } @@ -185,6 +205,15 @@ func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, ch "username": username, "password": password, } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(password) + if err != nil { + return fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return fmt.Errorf("failed to execute query: %w", err) } @@ 
-269,15 +298,24 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( } defer tx.Rollback() + m := map[string]string{ + "name": username, + "username": username, + "password": req.Password, + "expiration": expirationStr, + } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(req.Password) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + for _, stmt := range req.Statements.Commands { if containsMultilineStatement(stmt) { // Execute it as-is. - m := map[string]string{ - "name": username, - "username": username, - "password": req.Password, - "expiration": expirationStr, - } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -290,12 +328,6 @@ func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) ( continue } - m := map[string]string{ - "name": username, - "username": username, - "password": req.Password, - "expiration": expirationStr, - } if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) } @@ -338,6 +370,17 @@ func (p *PostgreSQL) customDeleteUser(ctx context.Context, username string, revo }() for _, stmt := range revocationStmts { + if containsMultilineStatement(stmt) { + // Execute it as-is. + m := map[string]string{ + "name": username, + "username": username, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { + return err + } + continue + } for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { query = strings.TrimSpace(query) if len(query) == 0 { diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index 86e93822889b..6421568d2a4e 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package postgresql import ( @@ -9,13 +12,17 @@ import ( "testing" "time" - "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/hashicorp/vault/helper/testhelpers/postgresql" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/docker" "github.com/hashicorp/vault/sdk/helper/template" - "github.com/stretchr/testify/require" ) func getPostgreSQL(t *testing.T, options map[string]interface{}) (*PostgreSQL, func()) { @@ -90,6 +97,184 @@ func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) { } } +// Ensures we can successfully initialize and connect to a CloudSQL database +// Requires the following: +// - GOOGLE_APPLICATION_CREDENTIALS either JSON or path to file +// - CONNECTION_URL to a valid Postgres instance on Google CloudSQL +func TestPostgreSQL_Initialize_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) + } + + credStr := dbtesting.GetGCPTestCredentials(t) + + type testCase struct { + req dbplugin.InitializeRequest + wantErr bool + expectedError string + } + + tests := map[string]testCase{ + "empty auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "", + }, + }, + }, + "invalid auth type": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": "invalid", + }, + }, + wantErr: true, + expectedError: "invalid auth_type", + }, + "default credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + }, + VerifyConnection: true, + }, + }, + "JSON credentials": { + req: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db := new() + defer dbtesting.AssertClose(t, db) + + _, err := dbtesting.VerifyInitialize(t, db, test.req) + + if test.wantErr { + if err == nil { + t.Fatalf("expected error but received nil") + } + + if !strings.Contains(err.Error(), test.expectedError) { + t.Fatalf("expected error %s, got %s", test.expectedError, err.Error()) + } + } else { + if err != nil { + t.Fatalf("expected no error, received %s", err) + } + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + } + }) + } +} + +// TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "password", and that +// an error is returned if an invalid "password_authentication" is provided.
+func TestPostgreSQL_PasswordAuthentication(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + assert.NoError(t, err) + db := new() + + ctx := context.Background() + + t.Run("invalid-password-authentication", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": "invalid-password-authentication", + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _, err := db.Initialize(ctx, req) + assert.EqualError(t, err, "'invalid-password-authentication' is not a valid password authentication type") + }) + + t.Run("default-is-password", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _ = dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, passwordAuthenticationPassword, db.passwordAuthentication) + }) +} + +// TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256. +// When sending a hashed password, the raw password should still successfully authenticate the user. +func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": string(passwordAuthenticationSCRAMSHA256), + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + resp := dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, string(passwordAuthenticationSCRAMSHA256), resp.Config["password_authentication"]) + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + + ctx := context.Background() + newUserRequest := dbplugin.NewUserRequest{ + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResponse, err := db.NewUser(ctx, newUserRequest) + + assertCredsExist(t, db.ConnectionURL, newUserResponse.Username, newUserRequest.Password) +} + func TestPostgreSQL_NewUser(t *testing.T) { type testCase struct { req dbplugin.NewUserRequest @@ -588,6 +773,19 @@ func TestDeleteUser(t *testing.T) { // Wait for a short time before checking because postgres takes a moment to finish deleting the user credsAssertion: assertCredsExistAfter(100 * time.Millisecond), }, + "multiline": { + revokeStmts: []string{` + DO $$ BEGIN + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{username}}"; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{username}}"; + REVOKE USAGE ON SCHEMA public FROM "{{username}}"; + DROP ROLE IF EXISTS "{{username}}"; + END $$; + `}, + expectErr: false, + // Wait for a short time before checking because postgres takes a moment to finish deleting the user + credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), + }, + } + // Shared test container for speed - there should not be any overlap between the tests @@ -992,6
+1190,86 @@ func TestNewUser_CustomUsername(t *testing.T) { } } +func TestNewUser_CloudGCP(t *testing.T) { + envConnURL := "CONNECTION_URL" + connURL := os.Getenv(envConnURL) + if connURL == "" { + t.Skipf("env var %s not set, skipping test", envConnURL) + } + + credStr := dbtesting.GetGCPTestCredentials(t) + + type testCase struct { + usernameTemplate string + newUserData dbplugin.UsernameMetadata + expectedRegex string + } + + tests := map[string]testCase{ + "default template": { + usernameTemplate: "", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^v-displayn-longrole-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "unique template": { + usernameTemplate: "foo-bar", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^foo-bar$", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + "auth_type": connutil.AuthTypeGCPIAM, + "service_account_json": credStr, + }, + VerifyConnection: true, + } + + db := new() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + _, err := db.Initialize(ctx, initReq) + require.NoError(t, err) + + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: test.newUserData, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "myReally-S3curePassword", + Expiration: time.Now().Add(1 * time.Hour), + } + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + newUserResp, err := db.NewUser(ctx, newUserReq) + require.NoError(t, err) + + require.Regexp(t, test.expectedRegex, newUserResp.Username) + }) + } +} + // This is a long-running integration test which tests the functionality of Postgres's multi-host // connection strings. It uses two Postgres containers preconfigured with Replication Manager // provided by Bitnami. This test currently does not run in CI and must be run manually. This is diff --git a/plugins/database/postgresql/scram/LICENSE b/plugins/database/postgresql/scram/LICENSE new file mode 100644 index 000000000000..cc36995f299f --- /dev/null +++ b/plugins/database/postgresql/scram/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Taishi Kasuga + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/plugins/database/postgresql/scram/scram.go b/plugins/database/postgresql/scram/scram.go new file mode 100644 index 000000000000..f5c6923cef66 --- /dev/null +++ b/plugins/database/postgresql/scram/scram.go @@ -0,0 +1,86 @@ +package scram + +// +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/interfaces/libpq/fe-auth.c#L1167-L1285 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/interfaces/libpq/fe-auth-scram.c#L868-L905 +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/port/pg_strong_random.c#L66-L96 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L160-L274 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L27-L85 + +// Implementation from https://github.com/supercaracal/scram-sha-256/blob/d3c05cd927770a11c6e12de3e3a99c3446a1f78d/main.go +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" + + "golang.org/x/crypto/pbkdf2" +) + +const ( + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L36-L41 + saltSize = 16 + + // @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/include/common/sha2.h#L22 + digestLen = 32 + + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L43-L47 + iterationCnt = 4096 +) + +var ( + clientRawKey = []byte("Client Key") + serverRawKey = []byte("Server Key") +) + +func genSalt(size int) ([]byte, error) { + salt := make([]byte, size) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return nil, err + } + return salt, nil +} + +func encodeB64(src []byte) (dst []byte) { + dst = make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(dst, src) + return +} + +func getHMACSum(key, msg []byte) []byte { + h := hmac.New(sha256.New, key) + _, _ = h.Write(msg) + return h.Sum(nil) +} + +func getSHA256Sum(key []byte) []byte { + h := sha256.New() + _, _ = h.Write(key) + return h.Sum(nil) +} + +func hashPassword(rawPassword, salt []byte, iter, keyLen int) string { + digestKey := pbkdf2.Key(rawPassword, salt, iter, keyLen, sha256.New) + clientKey := getHMACSum(digestKey, clientRawKey) + storedKey := getSHA256Sum(clientKey) + serverKey := getHMACSum(digestKey, serverRawKey) + + return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s", + iter, + string(encodeB64(salt)), + string(encodeB64(storedKey)), + string(encodeB64(serverKey)), + ) +} + +func Hash(password string) (string, error) { + salt, err := genSalt(saltSize) + if err != nil { + return "", err + } + + hashedPassword := hashPassword([]byte(password), salt, iterationCnt, digestLen) + return hashedPassword, nil +} diff --git a/plugins/database/postgresql/scram/scram_test.go b/plugins/database/postgresql/scram/scram_test.go new file mode 100644 index 000000000000..d2933ebbca40 --- /dev/null +++ b/plugins/database/postgresql/scram/scram_test.go @@ -0,0 +1,27 @@ +package scram + +import ( + "strings" + 
"testing" + + "github.com/stretchr/testify/assert" +) + +// TestScram tests the Hash method. The hashed password string should have a SCRAM-SHA-256 prefix. +func TestScram(t *testing.T) { + tcs := map[string]struct { + Password string + }{ + "empty-password": {Password: ""}, + "simple-password": {Password: "password"}, + } + + for name, tc := range tcs { + t.Run(name, func(t *testing.T) { + got, err := Hash(tc.Password) + assert.NoError(t, err) + assert.True(t, strings.HasPrefix(got, "SCRAM-SHA-256$4096:")) + assert.Len(t, got, 133) + }) + } +} diff --git a/plugins/database/redshift/redshift-database-plugin/main.go b/plugins/database/redshift/redshift-database-plugin/main.go index 8d2f796eeab3..010ddb036276 100644 --- a/plugins/database/redshift/redshift-database-plugin/main.go +++ b/plugins/database/redshift/redshift-database-plugin/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main import ( diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go index ce39569d4858..1e658edbcf58 100644 --- a/plugins/database/redshift/redshift.go +++ b/plugins/database/redshift/redshift.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package redshift import ( diff --git a/plugins/database/redshift/redshift_test.go b/plugins/database/redshift/redshift_test.go index 24992183e4b2..d68ae4565f78 100644 --- a/plugins/database/redshift/redshift_test.go +++ b/plugins/database/redshift/redshift_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package redshift import ( @@ -158,7 +161,7 @@ func TestRedshift_NewUser(t *testing.T) { } usernameRegex := regexp.MustCompile("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$") - if !usernameRegex.Match([]byte(username)) { + if !usernameRegex.MatchString(username) { t.Fatalf("Expected username %q to match regex %q", username, usernameRegex.String()) } } diff --git a/plugins/event/event_subscription_plugin.go b/plugins/event/event_subscription_plugin.go new file mode 100644 index 000000000000..430986632038 --- /dev/null +++ b/plugins/event/event_subscription_plugin.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package event + +import ( + "context" + "time" + + "github.com/hashicorp/vault/sdk/helper/backoff" +) + +type Factory func(context.Context) (SubscriptionPlugin, error) + +// SubscriptionPlugin is the interface implemented by plugins that can subscribe to and receive events. +type SubscriptionPlugin interface { + // Subscribe is used to set up a new connection. + Subscribe(context.Context, *SubscribeRequest) error + // Send is used to send events to a connection. + Send(context.Context, *SendRequest) error + // Unsubscribe is used to teardown a connection. + Unsubscribe(context.Context, *UnsubscribeRequest) error + // PluginMetadata returns the name and version for the particular event subscription plugin. + // The name is usually set as a constant the backend, e.g., "sqs" for the + // AWS SQS backend. + PluginMetadata() *PluginMetadata + // Close closes all connections. 
+ Close(ctx context.Context) error +} + +type Request struct { + Subscribe *SubscribeRequest + Unsubscribe *UnsubscribeRequest + Event *SendRequest +} + +type SubscribeRequest struct { + SubscriptionID string + Config map[string]interface{} + VerifyConnection bool +} + +type UnsubscribeRequest struct { + SubscriptionID string +} + +type SendRequest struct { + SubscriptionID string + EventJSON string +} + +type PluginMetadata struct { + Name string + Version string +} + +// SubscribeConfigDefaults defines configuration map keys for common default options. +// Embed this in your own config struct to pick up these default options. +type SubscribeConfigDefaults struct { + Retries *int `mapstructure:"retries"` + RetryMinBackoff *time.Duration `mapstructure:"retry_min_backoff"` + RetryMaxBackoff *time.Duration `mapstructure:"retry_max_backoff"` +} + +// default values for common configuration keys +const ( + DefaultRetries = 3 + DefaultRetryMinBackoff = 100 * time.Millisecond + DefaultRetryMaxBackoff = 5 * time.Second +) + +func (c *SubscribeConfigDefaults) GetRetries() int { + if c.Retries == nil { + return DefaultRetries + } + return *c.Retries +} + +func (c *SubscribeConfigDefaults) GetRetryMinBackoff() time.Duration { + if c.RetryMinBackoff == nil { + return DefaultRetryMinBackoff + } + return *c.RetryMinBackoff +} + +func (c *SubscribeConfigDefaults) GetRetryMaxBackoff() time.Duration { + if c.RetryMaxBackoff == nil { + return DefaultRetryMaxBackoff + } + return *c.RetryMaxBackoff +} + +func (c *SubscribeConfigDefaults) NewRetryBackoff() *backoff.Backoff { + return backoff.NewBackoff(c.GetRetries(), c.GetRetryMinBackoff(), c.GetRetryMaxBackoff()) +} diff --git a/plugins/event/sqs/sqs.go b/plugins/event/sqs/sqs.go new file mode 100644 index 000000000000..e5537caa3e7a --- /dev/null +++ b/plugins/event/sqs/sqs.go @@ -0,0 +1,239 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package sqs + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/sqs" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/plugins/event" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" + "github.com/mitchellh/mapstructure" +) + +var ( + _ event.Factory = New + _ event.SubscriptionPlugin = (*sqsBackend)(nil) +) + +const pluginName = "sqs" + +// ErrQueueRequired is returned if the required queue parameters are not present. +var ErrQueueRequired = errors.New("queue_name or queue_url must be specified") + +// New returns a new instance of the SQS plugin backend. 
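+// It matches the event.Factory signature; the returned backend starts with an empty connection map
+// and creates one SQS client per subscription ID when Subscribe is called.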
+func New(_ context.Context) (event.SubscriptionPlugin, error) { + return &sqsBackend{ + connections: map[string]*sqsConnection{}, + }, nil +} + +type sqsBackend struct { + connections map[string]*sqsConnection + clientLock sync.RWMutex +} + +type sqsConnection struct { + client *sqs.SQS + config *sqsConfig + queueURL string +} + +type sqsConfig struct { + event.SubscribeConfigDefaults + CreateQueue bool `mapstructure:"create_queue"` + AccessKeyID string `mapstructure:"access_key_id"` + SecretAccessKey string `mapstructure:"secret_access_key"` + Region string `mapstructure:"region"` + QueueName string `mapstructure:"queue_name"` + QueueURL string `mapstructure:"queue_url"` +} + +func newClient(sconfig *sqsConfig) (*sqs.SQS, error) { + var options []awsutil.Option + if sconfig.AccessKeyID != "" && sconfig.SecretAccessKey != "" { + options = append(options, awsutil.WithAccessKey(sconfig.AccessKeyID)) + options = append(options, awsutil.WithSecretKey(sconfig.SecretAccessKey)) + } + if sconfig.Region != "" { + options = append(options, awsutil.WithRegion(sconfig.Region)) + } + options = append(options, awsutil.WithEnvironmentCredentials(true)) + options = append(options, awsutil.WithSharedCredentials(true)) + credConfig, err := awsutil.NewCredentialsConfig(options...) + if err != nil { + return nil, err + } + session, err := credConfig.GetSession() + if err != nil { + return nil, err + } + return sqs.New(session), nil +} + +func (s *sqsBackend) Subscribe(_ context.Context, request *event.SubscribeRequest) error { + var sconfig sqsConfig + err := mapstructure.Decode(request.Config, &sconfig) + if err != nil { + return err + } + if sconfig.QueueName == "" && sconfig.QueueURL == "" { + return ErrQueueRequired + } + client, err := newClient(&sconfig) + if err != nil { + return err + } + var queueURL string + if sconfig.CreateQueue && sconfig.QueueName != "" { + resp, err := client.CreateQueue(&sqs.CreateQueueInput{ + QueueName: &sconfig.QueueName, + }) + var aerr awserr.Error + if errors.As(err, &aerr) { + if aerr.Code() == sqs.ErrCodeQueueNameExists { + // that's okay + err = nil + } + } + if err != nil { + return err + } + if resp == nil || resp.QueueUrl == nil { + return fmt.Errorf("invalid response from AWS: missing queue URL") + } + queueURL = *resp.QueueUrl + } else if sconfig.QueueURL != "" { + queueURL = sconfig.QueueURL + } else { + resp, err := client.GetQueueUrl(&sqs.GetQueueUrlInput{ + QueueName: &sconfig.QueueName, + }) + if err != nil { + return err + } + if resp == nil || resp.QueueUrl == nil { + return fmt.Errorf("invalid response from AWS: missing queue URL") + } + queueURL = *resp.QueueUrl + } + + conn := &sqsConnection{ + client: client, + config: &sconfig, + queueURL: queueURL, + } + s.clientLock.Lock() + defer s.clientLock.Unlock() + if _, ok := s.connections[request.SubscriptionID]; ok { + s.killConnectionWithLock(request.SubscriptionID) + } + s.connections[request.SubscriptionID] = conn + return nil +} + +func (s *sqsBackend) killConnection(subscriptionID string) { + s.clientLock.Lock() + defer s.clientLock.Unlock() + s.killConnectionWithLock(subscriptionID) +} + +func (s *sqsBackend) killConnectionWithLock(subscriptionID string) { + delete(s.connections, subscriptionID) +} + +func (s *sqsBackend) getConn(subscriptionID string) (*sqsConnection, error) { + s.clientLock.RLock() + defer s.clientLock.RUnlock() + conn, ok := s.connections[subscriptionID] + if !ok { + return nil, fmt.Errorf("invalid subscription_id") + } + return conn, nil +} + +func (s *sqsBackend) Send(_ 
context.Context, send *event.SendRequest) error { + return s.sendSubscriptionEventInternal(send.SubscriptionID, send.EventJSON, false) +} + +func (s *sqsBackend) refreshClient(subscriptionID string) error { + conn, err := s.getConn(subscriptionID) + if err != nil { + return err + } + client, err := newClient(conn.config) + if err != nil { + return err + } + s.clientLock.Lock() + defer s.clientLock.Unlock() + conn.client = client + // probably not necessary, but just in case + s.connections[subscriptionID] = conn + return nil +} + +func (s *sqsBackend) sendSubscriptionEventInternal(subscriptionID string, eventJson string, isRetry bool) error { + conn, err := s.getConn(subscriptionID) + if err != nil { + return err + } + backoff := conn.config.NewRetryBackoff() + err = backoff.Retry(func() error { + _, err = conn.client.SendMessage(&sqs.SendMessageInput{ + MessageBody: &eventJson, + QueueUrl: &conn.queueURL, + }) + return err + }) + if err != nil && !isRetry { + // refresh client and try again, once + err2 := s.refreshClient(subscriptionID) + if err2 != nil { + return errors.Join(err, err2) + } + return s.sendSubscriptionEventInternal(subscriptionID, eventJson, true) + } else if err != nil && isRetry { + s.killConnection(subscriptionID) + return err + } + return nil +} + +func (s *sqsBackend) Unsubscribe(_ context.Context, request *event.UnsubscribeRequest) error { + s.killConnection(request.SubscriptionID) + return nil +} + +func (s *sqsBackend) PluginMetadata() *event.PluginMetadata { + return &event.PluginMetadata{ + Name: pluginName, + Version: version.GetVersion().Version, + } +} + +func (s *sqsBackend) PluginVersion() logical.PluginVersion { + return logical.PluginVersion{ + Version: version.GetVersion().Version, + } +} + +func (s *sqsBackend) Close(_ context.Context) error { + s.clientLock.Lock() + defer s.clientLock.Unlock() + var subscriptions []string + for k := range s.connections { + subscriptions = append(subscriptions, k) + } + for _, subscription := range subscriptions { + s.killConnectionWithLock(subscription) + } + return nil +} diff --git a/plugins/event/sqs/sqs_test.go b/plugins/event/sqs/sqs_test.go new file mode 100644 index 000000000000..35d91b22f147 --- /dev/null +++ b/plugins/event/sqs/sqs_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package sqs + +import ( + "context" + "os" + "testing" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/plugins/event" + "github.com/stretchr/testify/assert" +) + +func getTestClient(t *testing.T) *sqs.Client { + awsConfig, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(os.Getenv("AWS_REGION"))) + if err != nil { + t.Fatal(err) + } + return sqs.NewFromConfig(awsConfig) +} + +func createQueue(t *testing.T, client *sqs.Client, queueName string) string { + resp, err := client.CreateQueue(context.Background(), &sqs.CreateQueueInput{ + QueueName: &queueName, + }) + if err != nil { + t.Fatal(err) + } + return *resp.QueueUrl +} + +func deleteQueue(t *testing.T, client *sqs.Client, queueURL string) { + _, err := client.DeleteQueue(context.Background(), &sqs.DeleteQueueInput{ + QueueUrl: &queueURL, + }) + if err != nil { + t.Fatal(err) + } +} + +func receiveMessage(t *testing.T, client *sqs.Client, queueURL string) string { + resp, err := client.ReceiveMessage(context.Background(), &sqs.ReceiveMessageInput{ + QueueUrl: &queueURL, + WaitTimeSeconds: 5, + }) + if err != nil { + t.Fatal(err) + } + assert.Len(t, resp.Messages, 1) + msg := resp.Messages[0] + _, err = client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: &queueURL, + ReceiptHandle: msg.ReceiptHandle, + }) + if err != nil { + t.Fatal(err) + } + return *msg.Body +} + +// TestSQS_SendOneMessage tests that the plugin's basic flow of subscribe/send/unsubscribe will send a message to SQS. +func TestSQS_SendOneMessage(t *testing.T) { + region := os.Getenv("AWS_REGION") + if region == "" { + t.Skip("Must set AWS_REGION") + } + sqsClient := getTestClient(t) + temp, err := uuid.GenerateUUID() + assert.Nil(t, err) + tempQueueName := "event-sqs-test-" + temp + tempQueueURL := createQueue(t, sqsClient, tempQueueName) + t.Cleanup(func() { + deleteQueue(t, sqsClient, tempQueueURL) + }) + + backend, _ := New(nil) + subID, err := uuid.GenerateUUID() + assert.Nil(t, err) + + err = backend.Subscribe(nil, &event.SubscribeRequest{ + SubscriptionID: subID, + Config: map[string]interface{}{ + "queue_name": tempQueueName, + "region": os.Getenv("AWS_REGION"), + "create_queue": true, + }, + VerifyConnection: false, + }) + assert.Nil(t, err) + + // create another subscription with the same queue to make sure we are okay with using an existing queue + err = backend.Subscribe(nil, &event.SubscribeRequest{ + SubscriptionID: subID + "2", + Config: map[string]interface{}{ + "queue_name": tempQueueName, + "region": os.Getenv("AWS_REGION"), + "create_queue": true, + }, + VerifyConnection: false, + }) + assert.Nil(t, err) + + err = backend.Send(nil, &event.SendRequest{ + SubscriptionID: subID, + EventJSON: "{}", + }) + assert.Nil(t, err) + + msg := receiveMessage(t, sqsClient, tempQueueURL) + assert.Equal(t, "{}", msg) + + err = backend.Unsubscribe(nil, &event.UnsubscribeRequest{SubscriptionID: subID}) + assert.Nil(t, err) +} diff --git a/scan.hcl b/scan.hcl index 2c3c63121192..6fb499794c7f 100644 --- a/scan.hcl +++ b/scan.hcl @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + repository { go_modules = true osv = true diff --git a/scripts/assetcheck.sh b/scripts/assetcheck.sh index 7100f84d9fae..158f5bc4aa7f 100755 --- a/scripts/assetcheck.sh +++ b/scripts/assetcheck.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1 + if [[ ! -e http/web_ui/index.html ]] then diff --git a/scripts/build.sh b/scripts/build.sh index 1856389cc6cf..ae247e636bf8 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # # This script builds the application from source for multiple platforms. set -e @@ -47,11 +50,13 @@ ${GO_CMD} build \ # Move all the compiled things to the $GOPATH/bin OLDIFS=$IFS -IFS=: MAIN_GOPATH=($GOPATH) +IFS=: FIRST=($GOPATH) BIN_PATH=${GOBIN:-${FIRST}/bin} IFS=$OLDIFS -rm -f ${MAIN_GOPATH}/bin/vault -cp bin/vault ${MAIN_GOPATH}/bin/ +# Ensure the go bin folder exists +mkdir -p ${BIN_PATH} +rm -f ${BIN_PATH}/vault +cp bin/vault ${BIN_PATH} # Done! echo diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh index 585f89786cf7..856a4391e3d3 100755 --- a/scripts/ci-helper.sh +++ b/scripts/ci-helper.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # The ci-helper is used to determine build metadata, build Vault binaries, # package those binaries into artifacts, and execute tests with those artifacts. @@ -8,86 +11,6 @@ set -euo pipefail # We don't want to get stuck in some kind of interactive pager export GIT_PAGER=cat -# Get the full version information -function version() { - local version - local prerelease - local metadata - - version=$(version_base) - prerelease=$(version_pre) - metadata=$(version_metadata) - - if [ -n "$metadata" ] && [ -n "$prerelease" ]; then - echo "$version-$prerelease+$metadata" - elif [ -n "$metadata" ]; then - echo "$version+$metadata" - elif [ -n "$prerelease" ]; then - echo "$version-$prerelease" - else - echo "$version" - fi -} - -# Get the base version -function version_base() { - : "${VAULT_VERSION:=""}" - - if [ -n "$VAULT_VERSION" ]; then - echo "$VAULT_VERSION" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version major -function version_major() { - version_base | cut -d '.' -f 1 -} - -# Get the version minor -function version_minor() { - version_base | cut -d '.' -f 2 -} - -# Get the version patch -function version_patch() { - version_base | cut -d '.' 
-f 3 -} - -# Get the version pre-release -function version_pre() { - : "${VAULT_PRERELEASE:=""}" - - if [ -n "$VAULT_PRERELEASE" ]; then - echo "$VAULT_PRERELEASE" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version metadata, which is commonly the edition -function version_metadata() { - : "${VAULT_METADATA:=""}" - - if [[ (-n "$VAULT_METADATA") && ("$VAULT_METADATA" != "oss") ]]; then - echo "$VAULT_METADATA" - return - fi - - : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" - awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" -} - -# Get the version formatted for Debian and RHEL packages -function version_package() { - version | awk '{ gsub("-","~",$1); print $1 }' -} - # Get the build date from the latest commit since it can be used across all # builds function build_date() { @@ -106,18 +29,38 @@ function repo() { basename -s .git "$(git config --get remote.origin.url)" } -# Determine the root directory of the repository -function repo_root() { - git rev-parse --show-toplevel -} - # Determine the artifact basename based on metadata function artifact_basename() { : "${PKG_NAME:="vault"}" : "${GOOS:=$(go env GOOS)}" : "${GOARCH:=$(go env GOARCH)}" + : "${VERSION_METADATA:="ce"}" + + : "${VERSION:=""}" + if [ -z "$VERSION" ]; then + echo "You must specify the VERSION variable for this command" >&2 + exit 1 + fi - echo "${PKG_NAME}_$(version)_${GOOS}_${GOARCH}" + local version + version="$VERSION" + if [ "$VERSION_METADATA" != "ce" ]; then + version="${VERSION}+${VERSION_METADATA}" + fi + + echo "${PKG_NAME}_${version}_${GOOS}_${GOARCH}" +} + +# Bundle the dist directory into a zip +function bundle() { + : "${BUNDLE_PATH:=$(repo_root)/vault.zip}" + echo "--> Bundling dist/* to $BUNDLE_PATH..." + zip -r -j "$BUNDLE_PATH" dist/ +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel } # Build the UI @@ -129,50 +72,43 @@ function build_ui() { mkdir -p http/web_ui popd pushd "$repo_root/ui" - yarn install --ignore-optional + yarn install npm rebuild node-sass - yarn --verbose run build + yarn run build popd } # Build Vault function build() { - local version local revision - local prerelease local build_date local ldflags local msg # Get or set our basic build metadata - version=$(version_base) revision=$(build_revision) - metadata=$(version_metadata) - prerelease=$(version_pre) - build_date=$(build_date) + build_date=$(build_date) + : "${BIN_PATH:="dist/"}" # If not run by actions-go-build (enos local), set this explicitly : "${GO_TAGS:=""}" - : "${KEEP_SYMBOLS:=""}" + : "${REMOVE_SYMBOLS:=""}" + + (unset GOOS; unset GOARCH; go generate ./...) # Build our ldflags - msg="--> Building Vault v$version, revision $revision, built $build_date" + msg="--> Building Vault revision $revision, built $build_date..."
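+ # Note: when REMOVE_SYMBOLS is set, the "-s -w" ldflags below strip the symbol table and DWARF debug info to shrink the binary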
- # Strip the symbol and dwarf information by default - if [ -n "$KEEP_SYMBOLS" ]; then - ldflags="" - else + # Keep the symbol and dwarf information by default + if [ -n "$REMOVE_SYMBOLS" ]; then ldflags="-s -w " + else + ldflags="" fi - ldflags="${ldflags}-X github.com/hashicorp/vault/version.Version=$version -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" + ldflags="${ldflags} -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" - if [ -n "$prerelease" ]; then - msg="${msg}, prerelease ${prerelease}" - ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionPrerelease=$prerelease" - fi - - if [ -n "$metadata" ]; then - msg="${msg}, metadata ${metadata}" - ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$metadata" + if [[ ${VERSION_METADATA+x} ]]; then + msg="${msg}, metadata ${VERSION_METADATA}" + ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$VERSION_METADATA" fi # Build vault @@ -181,20 +117,14 @@ function build() { mkdir -p dist mkdir -p out set -x + go env go build -v -tags "$GO_TAGS" -ldflags "$ldflags" -o dist/ set +x popd } -# Bundle the dist directory into a zip -function bundle() { - : "${BUNDLE_PATH:=$(repo_root)/vault.zip}" - echo "--> Bundling dist/* to $BUNDLE_PATH" - zip -r -j "$BUNDLE_PATH" dist/ -} - -# Prepare legal requirements for packaging -function prepare_legal() { +# ENT: Prepare legal requirements for packaging +function prepare_ent_legal() { : "${PKG_NAME:="vault"}" pushd "$(repo_root)" @@ -207,47 +137,25 @@ function prepare_legal() { popd } -# Determine the matrix group number that we'll select for execution. If the -# MATRIX_TEST_GROUP environment variable has set then it will always return -# that value. If has not been set, we will randomly select a number between 1 -# and the value of MATRIX_MAX_TEST_GROUPS. -function matrix_group_id() { - : "${MATRIX_TEST_GROUP:=""}" - if [ -n "$MATRIX_TEST_GROUP" ]; then - echo "$MATRIX_TEST_GROUP" - return - fi +# CE: Prepare legal requirements for packaging +function prepare_ce_legal() { + : "${PKG_NAME:="vault"}" - : "${MATRIX_MAX_TEST_GROUPS:=1}" - awk -v min=1 -v max=$MATRIX_MAX_TEST_GROUPS 'BEGIN{srand(); print int(min+rand()*(max-min+1))}' -} + pushd "$(repo_root)" -# Filter matrix file reads in the contents of MATRIX_FILE and filters out -# scenarios that are not in the current test group and/or those that have not -# met minimux or maximum version requirements. -function matrix_filter_file() { - : "${MATRIX_FILE:=""}" - if [ -z "$MATRIX_FILE" ]; then - echo "You must specify the MATRIX_FILE variable for this command" >&2 - exit 1 - fi + mkdir -p dist + cp LICENSE dist/LICENSE.txt + + mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME" + cp LICENSE ".release/linux/package/usr/share/doc/$PKG_NAME/LICENSE.txt" - : "${MATRIX_TEST_GROUP:=$(matrix_group_id)}" - - local path - local matrix - path=$(readlink -f $MATRIX_FILE) - matrix=$(cat "$path" | jq ".include | - map(. | - select( - ((.min_minor_version == null) or (.min_minor_version <= $(version_minor))) and - ((.max_minor_version == null) or (.max_minor_version >= $(version_minor))) and - ((.test_group == null) or (.test_group == $MATRIX_TEST_GROUP)) - ) - )" - ) - - echo "{\"include\":$matrix}" | jq -c . + popd +} + +# Package version converts a vault version string into a compatible representation for system +# packages. 
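+# For example, assuming VAULT_VERSION="1.15.0-rc1", the awk substitution below
+# rewrites each "-" to "~", yielding "1.15.0~rc1", which deb and rpm version
+# comparison treat as a pre-release of 1.15.0.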
+function version_package() { + awk '{ gsub("-","~",$1); print $1 }' <<< "$VAULT_VERSION" } # Run the CI Helper @@ -268,42 +176,18 @@ function main() { date) build_date ;; - prepare-legal) - prepare_legal - ;; - matrix-filter-file) - matrix_filter_file + prepare-ent-legal) + prepare_ent_legal ;; - matrix-group-id) - matrix_group_id + prepare-ce-legal) + prepare_ce_legal ;; revision) build_revision ;; - version) - version - ;; - version-base) - version_base - ;; - version-pre) - version_pre - ;; - version-major) - version_major - ;; - version-meta) - version_metadata - ;; - version-minor) - version_minor - ;; version-package) version_package ;; - version-patch) - version_patch - ;; *) echo "unknown sub-command" >&2 exit 1 diff --git a/scripts/copywrite-exceptions.sh b/scripts/copywrite-exceptions.sh new file mode 100755 index 000000000000..0e55acb400d1 --- /dev/null +++ b/scripts/copywrite-exceptions.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +# Used as a stopgap for the copywrite bot in MPL-licensed subdirs: it detects BUSL-licensed +# headers and deletes them, then runs the copywrite bot, which uses the local subdir config +# to inject the correct headers. + +find . -type f -name '*.go' | while read -r line; do + if grep "SPDX-License-Identifier: BUSL-1.1" "$line"; then + sed -i '/SPDX-License-Identifier: BUSL-1.1/d' "$line" + sed -i '/Copyright (c) HashiCorp, Inc./d' "$line" + fi +done + +copywrite headers --plan diff --git a/scripts/coverage.sh b/scripts/coverage.sh index ad80496d1578..90ade0e2d32c 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -1,4 +1,7 @@ #!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # Generate test coverage statistics for Go packages. # # Works around the fact that `go test -coverprofile` currently does not work diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 504399c3ff38..030ef9c379fd 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + FROM debian:buster RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ @@ -12,7 +15,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/deprecations-checker.sh b/scripts/deprecations-checker.sh new file mode 100755 index 000000000000..b63ab905d776 --- /dev/null +++ b/scripts/deprecations-checker.sh @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This script is sourced into the shell running in a GitHub Actions workflow. + +# Usage: +# To check deprecations locally using the script, follow these steps: +# From the repository root or within a package folder, execute deprecations-checker.sh +# Optionally: to only show deprecations in changed files between the current branch and +# a specific branch, pass the other branch name as an argument to the script. +# +# For example: +# ./scripts/deprecations-checker.sh (or) make deprecations +# ./scripts/deprecations-checker.sh main (or) make ci-deprecations +# +# If no branch name is specified, the command will show all usage of deprecations in the code. +# +# GitHub Actions runs this against the PR's base ref branch.
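+#
+# As a minimal illustration (names here are hypothetical), the check reports any use of an
+# identifier whose doc comment begins with "Deprecated:", e.g.:
+#
+#   // Deprecated: use NewGreeting instead.
+#   func OldGreeting() string { return "hello" }
+#
+#   _ = OldGreeting() // this call site is what the check flags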
+ +# Staticcheck uses static analysis to find bugs and performance issues, offers simplifications, +# and enforces style rules. +# Here, it is used to check if a deprecated function, variable, constant, or field is used. + +# Run staticcheck +set -e +echo "==> Performing deprecations check: running staticcheck..." + + +# If no compare branch name is specified, output all deprecations +# Else only output the deprecations from the changes added +if [ -z "$1" ] + then + staticcheck -checks="SA1019" -tags="$BUILD_TAGS" + else + # GitHub Actions will use this to find only changes with respect to the PR's base ref branch + # The revgrep CLI tool will return an exit status of 1 if any issues match, else it will return 0 + staticcheck -checks="SA1019" -tags="$BUILD_TAGS" 2>&1 | revgrep origin/"$1" +fi diff --git a/scripts/deps_upgrade.py b/scripts/deps_upgrade.py index 9531696cee56..88607e348770 100644 --- a/scripts/deps_upgrade.py +++ b/scripts/deps_upgrade.py @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + import os import sys diff --git a/scripts/dist.sh b/scripts/dist.sh index e9891b059e1c..0431bcc61286 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e # Get the version from the command line diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile index 6208badf4bc8..c975e445b497 100644 --- a/scripts/docker/Dockerfile +++ b/scripts/docker/Dockerfile @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # Multi-stage builder to avoid polluting users environment with wrong # architecture binaries. ARG VERSION diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui index b13f0fe1fbd4..f67d04029b7a 100644 --- a/scripts/docker/Dockerfile.ui +++ b/scripts/docker/Dockerfile.ui @@ -19,7 +19,7 @@ RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ libltdl-dev \ libltdl7 -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list diff --git a/scripts/docker/docker-entrypoint.sh b/scripts/docker/docker-entrypoint.sh index 3b72da25b7f4..a3b581697c35 100755 --- a/scripts/docker/docker-entrypoint.sh +++ b/scripts/docker/docker-entrypoint.sh @@ -1,4 +1,7 @@ #!/usr/bin/dumb-init /bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e # Note above that we run dumb-init as PID 1 in order to reap zombie processes diff --git a/scripts/gen_openapi.sh b/scripts/gen_openapi.sh index 0119a3198bb6..ef2acca0a0b3 100755 --- a/scripts/gen_openapi.sh +++ b/scripts/gen_openapi.sh @@ -1,4 +1,7 @@ #!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e @@ -21,108 +24,90 @@ then fi vault server -dev -dev-root-token-id=root & -sleep 2 VAULT_PID=$! +# Allow time for Vault to start its HTTP listener +sleep 1 + +defer_stop_vault() { + echo "Stopping Vault..." + kill $VAULT_PID + # Allow time for Vault to print final logging and exit, + # before this script ends, and the shell prints its next prompt + sleep 1 +} + +trap defer_stop_vault INT TERM EXIT + export VAULT_ADDR=http://127.0.0.1:8200 -echo "Mounting all builtin plugins..." +echo "Unmounting the default kv-v2 secrets engine ..."
+ +# Unmount the default kv-v2 engine so that we can remount it at 'kv-v2/' later. +# The mount path will be reflected in the resultant OpenAPI document. +vault secrets disable "secret/" + +echo "Mounting all builtin plugins ..." # Enable auth plugins -codeLinesStarted=false - -while read -r line; do - if [[ $line == *"credentialBackends:"* ]] ; then - codeLinesStarted=true - elif [[ $line == *"databasePlugins:"* ]] ; then - break - elif [ $codeLinesStarted = true ] && [[ $line == *"consts.Deprecated"* || $line == *"consts.PendingRemoval"* ]] ; then - auth_plugin_previous="" - elif [ $codeLinesStarted = true ] && [[ $line =~ ^\s*\"(.*)\"\:.*$ ]] ; then - auth_plugin_current=${BASH_REMATCH[1]} - - if [[ -n "${auth_plugin_previous}" ]] ; then - echo "enabling auth plugin: ${auth_plugin_previous}" - vault auth enable "${auth_plugin_previous}" - fi - - auth_plugin_previous="${auth_plugin_current}" - fi -done <../../vault/helper/builtinplugins/registry.go - -if [[ -n "${auth_plugin_previous}" ]] ; then - echo "enabling auth plugin: ${auth_plugin_previous}" - vault auth enable "${auth_plugin_previous}" -fi +vault auth enable "alicloud" +vault auth enable "approle" +vault auth enable "aws" +vault auth enable "azure" +vault auth enable "centrify" +vault auth enable "cert" +vault auth enable "cf" +vault auth enable "gcp" +vault auth enable "github" +vault auth enable "jwt" +vault auth enable "kerberos" +vault auth enable "kubernetes" +vault auth enable "ldap" +vault auth enable "oci" +vault auth enable "okta" +vault auth enable "radius" +vault auth enable "userpass" # Enable secrets plugins -codeLinesStarted=false - -while read -r line; do - if [[ $line == *"logicalBackends:"* ]] ; then - codeLinesStarted=true - elif [[ $line == *"addExternalPlugins("* ]] ; then - break - elif [ $codeLinesStarted = true ] && [[ $line == *"consts.Deprecated"* || $line == *"consts.PendingRemoval"* ]] ; then - secrets_plugin_previous="" - elif [ $codeLinesStarted = true ] && [[ $line =~ ^\s*\"(.*)\"\:.*$ ]] ; then - secrets_plugin_current=${BASH_REMATCH[1]} - - if [[ -n "${secrets_plugin_previous}" ]] ; then - echo "enabling secrets plugin: ${secrets_plugin_previous}" - vault secrets enable "${secrets_plugin_previous}" - fi - - secrets_plugin_previous="${secrets_plugin_current}" - fi -done <../../vault/helper/builtinplugins/registry.go - -if [[ -n "${secrets_plugin_previous}" ]] ; then - echo "enabling secrets plugin: ${secrets_plugin_previous}" - vault secrets enable "${secrets_plugin_previous}" -fi +vault secrets enable "alicloud" +vault secrets enable "aws" +vault secrets enable "azure" +vault secrets enable "consul" +vault secrets enable "database" +vault secrets enable "gcp" +vault secrets enable "gcpkms" +vault secrets enable "kubernetes" +vault secrets enable -path="kv-v1/" -version=1 "kv" +vault secrets enable -path="kv-v2/" -version=2 "kv" +vault secrets enable "ldap" +vault secrets enable "mongodbatlas" +vault secrets enable "nomad" +vault secrets enable "pki" +vault secrets enable "rabbitmq" +vault secrets enable "ssh" +vault secrets enable "terraform" +vault secrets enable "totp" +vault secrets enable "transit" # Enable enterprise features -entRegFile=../../vault/helper/builtinplugins/registry_util_ent.go -if [ -f $entRegFile ] && [[ -n "${VAULT_LICENSE}" ]]; then - vault write sys/license text="${VAULT_LICENSE}" - - codeLinesStarted=false - - while read -r line; do - if [[ $line == *"ExternalPluginsEnt:"* ]] ; then - codeLinesStarted=true - elif [[ $line == *"addExtPluginsEntImpl("* ]] ; then - break -
elif [ $codeLinesStarted = true ] && [[ $line == *"consts.Deprecated"* || $line == *"consts.PendingRemoval"* ]] ; then - secrets_plugin_previous="" - elif [ $codeLinesStarted = true ] && [[ $line =~ ^\s*\"(.*)\"\:.*$ ]] ; then - ent_plugin_current=${BASH_REMATCH[1]} - - if [[ -n "${ent_plugin_previous}" ]] ; then - echo "enabling enterprise plugin: ${ent_plugin_previous}" - vault secrets enable "${ent_plugin_previous}" - fi - - ent_plugin_previous="${ent_plugin_current}" - fi - done <$entRegFile - - if [[ -n "${ent_plugin_previous}" ]] ; then - echo "enabling enterprise plugin: ${ent_plugin_previous}" - vault secrets enable "${ent_plugin_previous}" - fi +if [[ -n "${VAULT_LICENSE:-}" ]]; then + vault secrets enable "keymgmt" + vault secrets enable "kmip" + vault secrets enable "transform" + vault auth enable "saml" fi # Output OpenAPI, optionally formatted if [ "$1" == "-p" ]; then - curl -H "X-Vault-Token: root" "http://127.0.0.1:8200/v1/sys/internal/specs/openapi" | jq > openapi.json + curl --header 'X-Vault-Token: root' \ + --data '{"generic_mount_paths": true}' \ + 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' | jq > openapi.json else - curl -H "X-Vault-Token: root" "http://127.0.0.1:8200/v1/sys/internal/specs/openapi" > openapi.json + curl --header 'X-Vault-Token: root' \ + --data '{"generic_mount_paths": true}' \ + 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' > openapi.json fi -kill $VAULT_PID -sleep 1 - echo echo "openapi.json generated" +echo diff --git a/scripts/go-helper.sh b/scripts/go-helper.sh new file mode 100755 index 000000000000..27fc0151cb57 --- /dev/null +++ b/scripts/go-helper.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +set -euo pipefail + +# Perform Go formatting checks with gofumpt. +check_fmt() { + echo "==> Checking code formatting..." + + declare -a malformed=() + IFS=" " read -r -a files <<< "$(tr '\n' ' ' <<< "$@")" + if [ -n "${files+set}" ] && [[ "${files[0]}" != "" ]]; then + echo "--> Checking changed files..." + for file in "${files[@]}"; do + if [ ! -f "$file" ]; then + echo "--> $file no longer exists ⚠" + continue + fi + + if echo "$file" | grep -v pb.go | grep -v vendor > /dev/null; then + local output + if ! output=$(gofumpt -l "$file") || [ "$output" != "" ]; then + echo "--> ${file} ✖" + malformed+=("$file") + continue + fi + fi + + echo "--> ${file} ✔" + done + else + echo "--> Checking all files..." + IFS=" " read -r -a malformed <<< "$(find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -l)" + fi + + if [ "${#malformed[@]}" -ne 0 ] && [ -n "${malformed[0]}" ] ; then + echo "--> The following files need to be reformatted with gofumpt" + printf '%s\n' "${malformed[@]}" + echo "Run \`make fmt\` to reformat code." + for file in "${malformed[@]}"; do + gofumpt -w "$file" + echo "$(git diff --no-color "$file")" + done + exit 1 + fi +} + +# Check that the Go toolchain meets minimum version requirements. +check_version() { + GO_CMD=${GO_CMD:-go} + + GO_VERSION_MIN=$1 + echo "==> Checking that build is using go version >= $1..." + + if $GO_CMD version | grep -q devel; then + GO_VERSION="devel" + else + GO_VERSION=$($GO_CMD version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') + + IFS="." read -r -a GO_VERSION_ARR <<< "$GO_VERSION" + IFS="."
read -r -a GO_VERSION_REQ <<< "$GO_VERSION_MIN" + + if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} || + ( ${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} && + ( ${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} || + ( ${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]} ))) + ]]; then + echo "Vault requires go $GO_VERSION_MIN to build; found $GO_VERSION." + exit 1 + fi + fi + + echo "--> Using go version $GO_VERSION..." +} + +# Download all the modules for all go.mod files defined in the project. +mod_download() { + while IFS= read -r -d '' mod; do + echo "==> Downloading Go modules for $mod to $(go env GOMODCACHE)..." + pushd "$(dirname "$mod")" > /dev/null || (echo "failed to push into module dir" && exit 1) + GOOS=linux GOARCH=amd64 go mod download -x + popd > /dev/null || (echo "failed to pop out of module dir" && exit 1) + done < <(find . -type f -name go.mod -print0) +} + +# Tidy all the go.mod files defined in the project. +mod_tidy() { + while IFS= read -r -d '' mod; do + echo "==> Tidying $mod..." + pushd "$(dirname "$mod")" > /dev/null || (echo "failed to push into module dir" && exit 1) + GOOS=linux GOARCH=amd64 go mod tidy + popd > /dev/null || (echo "failed to pop out of module dir" && exit 1) + done < <(find . -type f -name go.mod -print0) +} + +main() { + case $1 in + mod-download) + mod_download + ;; + mod-tidy) + mod_tidy + ;; + check-fmt) + check_fmt "${@:2}" + ;; + check-version) + check_version "$2" + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh deleted file mode 100755 index 574f4d7167c9..000000000000 --- a/scripts/gofmtcheck.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -echo "==> Checking that code complies with gofmt requirements..." - -gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`) -if [[ -n ${gofmt_files} ]]; then - echo 'gofmt needs running on the following files:' - echo "${gofmt_files}" - echo "You can use the command: \`make fmt\` to reformat code." - exit 1 -fi diff --git a/scripts/goversioncheck.sh b/scripts/goversioncheck.sh deleted file mode 100755 index 6f55260099f2..000000000000 --- a/scripts/goversioncheck.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -GO_CMD=${GO_CMD:-go} - -GO_VERSION_MIN=$1 -echo "==> Checking that build is using go version >= $1..." - -if $GO_CMD version | grep -q devel; -then - GO_VERSION="devel" -else - GO_VERSION=$($GO_CMD version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') - - IFS="." read -r -a GO_VERSION_ARR <<< "$GO_VERSION" - IFS="." read -r -a GO_VERSION_REQ <<< "$GO_VERSION_MIN" - - if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} || - ( ${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} && - ( ${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} || - ( ${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]} ))) - ]]; then - echo "Vault requires go $GO_VERSION_MIN to build; found $GO_VERSION." - exit 1 - fi -fi - -echo "==> Using go version $GO_VERSION..." diff --git a/scripts/protocversioncheck.sh b/scripts/protocversioncheck.sh deleted file mode 100755 index 4b081674806b..000000000000 --- a/scripts/protocversioncheck.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -PROTOC_CMD=${PROTOC_CMD:-protoc} -PROTOC_VERSION_EXACT="$1" -echo "==> Checking that protoc is at version $1..."
- -PROTOC_VERSION=$($PROTOC_CMD --version | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+') - -if [ "$PROTOC_VERSION" == "$PROTOC_VERSION_EXACT" ]; then - echo "Using protoc version $PROTOC_VERSION" -else - echo "protoc should be at $PROTOC_VERSION_EXACT; found $PROTOC_VERSION." - echo "If your version is higher than the version this script is looking for, updating the Makefile with the newer version." - exit 1 -fi diff --git a/scripts/semgrep_plugin_repos.sh b/scripts/semgrep_plugin_repos.sh index 41f6dfd7a158..1f70763feacb 100755 --- a/scripts/semgrep_plugin_repos.sh +++ b/scripts/semgrep_plugin_repos.sh @@ -1,4 +1,7 @@ #!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e set -x diff --git a/scripts/testciphers.sh b/scripts/testciphers.sh index 324d6bce7e02..89c1e9304334 100755 --- a/scripts/testciphers.sh +++ b/scripts/testciphers.sh @@ -1,4 +1,7 @@ #!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # Adapted from https://superuser.com/a/224263 diff --git a/scripts/update_deps.sh b/scripts/update_deps.sh deleted file mode 100755 index 35f0fecdf1b5..000000000000 --- a/scripts/update_deps.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh - -set -e - -TOOL=vault - -## Make a temp dir -tempdir=$(mktemp -d update-${TOOL}-deps.XXXXXX) - -## Set paths -export GOPATH="$(pwd)/${tempdir}" -export PATH="${GOPATH}/bin:${PATH}" -cd $tempdir - -## Get Vault -mkdir -p src/github.com/hashicorp -cd src/github.com/hashicorp -echo "Fetching ${TOOL}..." -git clone https://github.com/hashicorp/${TOOL} -cd ${TOOL} - -## Clean out earlier vendoring -rm -rf Godeps vendor - -## Get govendor -go get github.com/kardianos/govendor - -## Init -govendor init - -## Fetch deps -echo "Fetching deps, will take some time..." -govendor fetch -v +missing - -# Clean up after the logrus mess -govendor remove -v github.com/Sirupsen/logrus -cd vendor -find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/' - -# Need the v2 branch for Azure -govendor fetch -v github.com/coreos/go-oidc@v2 - -# Need the v3 branch for dockertest -govendor fetch -v github.com/ory/dockertest@v3 - -# Current influx master is alpha, pin to v1.7.3 -govendor fetch github.com/influxdata/influxdb/client/v2@v1.7.4 -govendor fetch github.com/influxdata/influxdb/models@v1.7.4 -govendor fetch github.com/influxdata/influxdb/pkg/escape@v1.7.4 - -# Current circonus needs v3 -grep circonus-gometrics vendor.json | cut -d '"' -f 4 | while read -r i; do govendor fetch $i@v2; done - -# API breakage -govendor fetch github.com/satori/go.uuid@f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 - -echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n" diff --git a/scripts/update_plugin_modules.sh b/scripts/update_plugin_modules.sh index ae87fd8d6a5b..7d88f04ade7d 100755 --- a/scripts/update_plugin_modules.sh +++ b/scripts/update_plugin_modules.sh @@ -1,4 +1,7 @@ #!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + set -e diff --git a/sdk/.copywrite.hcl b/sdk/.copywrite.hcl new file mode 100644 index 000000000000..c4b09f33640c --- /dev/null +++ b/sdk/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/sdk/LICENSE b/sdk/LICENSE new file mode 100644 index 000000000000..f4f97ee5853a --- /dev/null +++ b/sdk/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
"Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/sdk/database/dbplugin/client.go b/sdk/database/dbplugin/client.go index c30c86d0c910..265b46b6108a 100644 --- a/sdk/database/dbplugin/client.go +++ b/sdk/database/dbplugin/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go index ae0dbd723dfc..978d35e25f3f 100644 --- a/sdk/database/dbplugin/database.pb.go +++ b/sdk/database/dbplugin/database.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/database/dbplugin/database.proto package dbplugin @@ -21,7 +24,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. type InitializeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -367,19 +370,19 @@ type Statements struct { // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. CreationStatements string `protobuf:"bytes,1,opt,name=creation_statements,json=creationStatements,proto3" json:"creation_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RevocationStatements string `protobuf:"bytes,2,opt,name=revocation_statements,json=revocationStatements,proto3" json:"revocation_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RollbackStatements string `protobuf:"bytes,3,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` // DEPRECATED, will be removed in 0.12 // - // Deprecated: Do not use. 
+ // Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. RenewStatements string `protobuf:"bytes,4,opt,name=renew_statements,json=renewStatements,proto3" json:"renew_statements,omitempty"` Creation []string `protobuf:"bytes,5,rep,name=creation,proto3" json:"creation,omitempty"` Revocation []string `protobuf:"bytes,6,rep,name=revocation,proto3" json:"revocation,omitempty"` @@ -420,7 +423,7 @@ func (*Statements) Descriptor() ([]byte, []int) { return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{6} } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetCreationStatements() string { if x != nil { return x.CreationStatements @@ -428,7 +431,7 @@ func (x *Statements) GetCreationStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetRevocationStatements() string { if x != nil { return x.RevocationStatements @@ -436,7 +439,7 @@ func (x *Statements) GetRevocationStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetRollbackStatements() string { if x != nil { return x.RollbackStatements @@ -444,7 +447,7 @@ func (x *Statements) GetRollbackStatements() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in sdk/database/dbplugin/database.proto. func (x *Statements) GetRenewStatements() string { if x != nil { return x.RenewStatements diff --git a/sdk/database/dbplugin/database.proto b/sdk/database/dbplugin/database.proto index d8c208099b36..e32f34d221f4 100644 --- a/sdk/database/dbplugin/database.proto +++ b/sdk/database/dbplugin/database.proto @@ -1,116 +1,119 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 -option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; +syntax = "proto3"; package dbplugin; import "google/protobuf/timestamp.proto"; +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; + message InitializeRequest { - option deprecated = true; - bytes config = 1; - bool verify_connection = 2; + option deprecated = true; + bytes config = 1; + bool verify_connection = 2; } message InitRequest { - bytes config = 1; - bool verify_connection = 2; + bytes config = 1; + bool verify_connection = 2; } message CreateUserRequest { - Statements statements = 1; - UsernameConfig username_config = 2; - google.protobuf.Timestamp expiration = 3; + Statements statements = 1; + UsernameConfig username_config = 2; + google.protobuf.Timestamp expiration = 3; } message RenewUserRequest { - Statements statements = 1; - string username = 2; - google.protobuf.Timestamp expiration = 3; + Statements statements = 1; + string username = 2; + google.protobuf.Timestamp expiration = 3; } message RevokeUserRequest { - Statements statements = 1; - string username = 2; + Statements statements = 1; + string username = 2; } message RotateRootCredentialsRequest { - repeated string statements = 1; + repeated string statements = 1; } message Statements { - // DEPRECATED, will be removed in 0.12 - string creation_statements = 1 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string revocation_statements = 2 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string rollback_statements = 3 [deprecated=true]; - // DEPRECATED, will be removed in 0.12 - string renew_statements = 4 [deprecated=true]; - - repeated string creation = 5; - repeated string revocation = 6; - repeated string rollback = 7; - repeated string renewal = 8; - repeated string rotation = 9; + // DEPRECATED, will be removed in 0.12 + string creation_statements = 1 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string revocation_statements = 2 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string rollback_statements = 3 [deprecated = true]; + // DEPRECATED, will be removed in 0.12 + string renew_statements = 4 [deprecated = true]; + + repeated string creation = 5; + repeated string revocation = 6; + repeated string rollback = 7; + repeated string renewal = 8; + repeated string rotation = 9; } message UsernameConfig { - string DisplayName = 1; - string RoleName = 2; + string DisplayName = 1; + string RoleName = 2; } message InitResponse { - bytes config = 1; + bytes config = 1; } message CreateUserResponse { - string username = 1; - string password = 2; + string username = 1; + string password = 2; } message TypeResponse { - string type = 1; + string type = 1; } message RotateRootCredentialsResponse { - bytes config = 1; + bytes config = 1; } message Empty {} message GenerateCredentialsResponse { - string password = 1; + string password = 1; } -message StaticUserConfig{ - string username = 1; - string password = 2; - bool create = 3; +message StaticUserConfig { + string username = 1; + string password = 2; + bool create = 3; } message SetCredentialsRequest { - Statements statements = 1; - StaticUserConfig static_user_config = 2; + Statements statements = 1; + StaticUserConfig static_user_config = 2; } message SetCredentialsResponse { - string username = 1; - string password = 2; + string username = 1; + string password = 2; } service Database { - rpc Type(Empty) returns (TypeResponse); - rpc CreateUser(CreateUserRequest) returns 
(CreateUserResponse); - rpc RenewUser(RenewUserRequest) returns (Empty); - rpc RevokeUser(RevokeUserRequest) returns (Empty); - rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse); - rpc Init(InitRequest) returns (InitResponse); - rpc Close(Empty) returns (Empty); - rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); - rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); - - rpc Initialize(InitializeRequest) returns (Empty) { - option deprecated = true; - }; + rpc Type(Empty) returns (TypeResponse); + rpc CreateUser(CreateUserRequest) returns (CreateUserResponse); + rpc RenewUser(RenewUserRequest) returns (Empty); + rpc RevokeUser(RevokeUserRequest) returns (Empty); + rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse); + rpc Init(InitRequest) returns (InitResponse); + rpc Close(Empty) returns (Empty); + rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); + rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); + + rpc Initialize(InitializeRequest) returns (Empty) { + option deprecated = true; + } } diff --git a/sdk/database/dbplugin/database_grpc.pb.go b/sdk/database/dbplugin/database_grpc.pb.go index 0e34e00a3cc4..f62de0236ba8 100644 --- a/sdk/database/dbplugin/database_grpc.pb.go +++ b/sdk/database/dbplugin/database_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sdk/database/dbplugin/database.proto package dbplugin @@ -14,6 +21,19 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Database_Type_FullMethodName = "/dbplugin.Database/Type" + Database_CreateUser_FullMethodName = "/dbplugin.Database/CreateUser" + Database_RenewUser_FullMethodName = "/dbplugin.Database/RenewUser" + Database_RevokeUser_FullMethodName = "/dbplugin.Database/RevokeUser" + Database_RotateRootCredentials_FullMethodName = "/dbplugin.Database/RotateRootCredentials" + Database_Init_FullMethodName = "/dbplugin.Database/Init" + Database_Close_FullMethodName = "/dbplugin.Database/Close" + Database_SetCredentials_FullMethodName = "/dbplugin.Database/SetCredentials" + Database_GenerateCredentials_FullMethodName = "/dbplugin.Database/GenerateCredentials" + Database_Initialize_FullMethodName = "/dbplugin.Database/Initialize" +) + // DatabaseClient is the client API for Database service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -41,7 +61,7 @@ func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { out := new(TypeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Type_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -50,7 +70,7 @@ func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallO func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) { out := new(CreateUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/CreateUser", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Database_CreateUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -59,7 +79,7 @@ func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RenewUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RenewUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -68,7 +88,7 @@ func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, op func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RevokeUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RevokeUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -77,7 +97,7 @@ func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) { out := new(RotateRootCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/RotateRootCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_RotateRootCredentials_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -86,7 +106,7 @@ func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRo func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) { out := new(InitResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Init", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Init_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -95,7 +115,7 @@ func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Close_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -104,7 +124,7 @@ func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.Call func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) { out := new(SetCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/SetCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_SetCredentials_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -113,7 +133,7 @@ func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsR func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) { out := new(GenerateCredentialsResponse) - err := c.cc.Invoke(ctx, "/dbplugin.Database/GenerateCredentials", in, out, opts...) + err := c.cc.Invoke(ctx, Database_GenerateCredentials_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -123,7 +143,7 @@ func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opt // Deprecated: Do not use. 
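+// (Initialize is the deprecated RPC; per the service definition above, its non-deprecated replacement is Init.)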
func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.Database/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Initialize_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -205,7 +225,7 @@ func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Type", + FullMethod: Database_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Type(ctx, req.(*Empty)) @@ -223,7 +243,7 @@ func _Database_CreateUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/CreateUser", + FullMethod: Database_CreateUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).CreateUser(ctx, req.(*CreateUserRequest)) @@ -241,7 +261,7 @@ func _Database_RenewUser_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RenewUser", + FullMethod: Database_RenewUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RenewUser(ctx, req.(*RenewUserRequest)) @@ -259,7 +279,7 @@ func _Database_RevokeUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RevokeUser", + FullMethod: Database_RevokeUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RevokeUser(ctx, req.(*RevokeUserRequest)) @@ -277,7 +297,7 @@ func _Database_RotateRootCredentials_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/RotateRootCredentials", + FullMethod: Database_RotateRootCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).RotateRootCredentials(ctx, req.(*RotateRootCredentialsRequest)) @@ -295,7 +315,7 @@ func _Database_Init_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Init", + FullMethod: Database_Init_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Init(ctx, req.(*InitRequest)) @@ -313,7 +333,7 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Close", + FullMethod: Database_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Close(ctx, req.(*Empty)) @@ -331,7 +351,7 @@ func _Database_SetCredentials_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/SetCredentials", + FullMethod: Database_SetCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).SetCredentials(ctx, req.(*SetCredentialsRequest)) @@ -349,7 +369,7 @@ func _Database_GenerateCredentials_Handler(srv interface{}, ctx context.Context, } 
info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/GenerateCredentials", + FullMethod: Database_GenerateCredentials_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).GenerateCredentials(ctx, req.(*Empty)) @@ -367,7 +387,7 @@ func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.Database/Initialize", + FullMethod: Database_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go index 29c806113844..d7cabafefe29 100644 --- a/sdk/database/dbplugin/databasemiddleware.go +++ b/sdk/database/dbplugin/databasemiddleware.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go index fbae626df397..3740ef59c3b8 100644 --- a/sdk/database/dbplugin/grpc_transport.go +++ b/sdk/database/dbplugin/grpc_transport.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go index 29f2f1f898b8..0b01454123c8 100644 --- a/sdk/database/dbplugin/plugin.go +++ b/sdk/database/dbplugin/plugin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/server.go b/sdk/database/dbplugin/server.go index 4949384baf56..bf96a3bba4d9 100644 --- a/sdk/database/dbplugin/server.go +++ b/sdk/database/dbplugin/server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/conversions_test.go b/sdk/database/dbplugin/v5/conversions_test.go index 6207f0f39f7f..5e65c3467068 100644 --- a/sdk/database/dbplugin/v5/conversions_test.go +++ b/sdk/database/dbplugin/v5/conversions_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( @@ -62,6 +65,7 @@ func TestConversionsHaveAllFields(t *testing.T) { CredentialType: CredentialTypeRSAPrivateKey, PublicKey: []byte("-----BEGIN PUBLIC KEY-----"), Password: "password", + Subject: "subject", Expiration: time.Now(), } diff --git a/sdk/database/dbplugin/v5/credentialtype_enumer.go b/sdk/database/dbplugin/v5/credentialtype_enumer.go new file mode 100644 index 000000000000..d61011b718ee --- /dev/null +++ b/sdk/database/dbplugin/v5/credentialtype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=CredentialType -trimprefix=CredentialType -transform=snake"; DO NOT EDIT. 
+ +package dbplugin + +import ( + "fmt" +) + +const _CredentialTypeName = "passwordrsa_private_keyclient_certificate" + +var _CredentialTypeIndex = [...]uint8{0, 8, 23, 41} + +func (i CredentialType) String() string { + if i < 0 || i >= CredentialType(len(_CredentialTypeIndex)-1) { + return fmt.Sprintf("CredentialType(%d)", i) + } + return _CredentialTypeName[_CredentialTypeIndex[i]:_CredentialTypeIndex[i+1]] +} + +var _CredentialTypeValues = []CredentialType{0, 1, 2} + +var _CredentialTypeNameToValueMap = map[string]CredentialType{ + _CredentialTypeName[0:8]: 0, + _CredentialTypeName[8:23]: 1, + _CredentialTypeName[23:41]: 2, +} + +// CredentialTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func CredentialTypeString(s string) (CredentialType, error) { + if val, ok := _CredentialTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to CredentialType values", s) +} + +// CredentialTypeValues returns all values of the enum +func CredentialTypeValues() []CredentialType { + return _CredentialTypeValues +} + +// IsACredentialType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i CredentialType) IsACredentialType() bool { + for _, v := range _CredentialTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/database/dbplugin/v5/database.go b/sdk/database/dbplugin/v5/database.go index b73bd6858dca..ddbcb6c81bbc 100644 --- a/sdk/database/dbplugin/v5/database.go +++ b/sdk/database/dbplugin/v5/database.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( @@ -120,6 +123,10 @@ type NewUserRequest struct { // The value is set when the credential type is CredentialTypeRSAPrivateKey. PublicKey []byte + // Subject is the distinguished name for the client certificate credential. + // Value is set when the credential type is CredentialTypeClientCertificate. + Subject string + // Expiration of the user. Not all database plugins will support this. Expiration time.Time } @@ -137,25 +144,17 @@ type NewUserResponse struct { Username string } +//go:generate enumer -type=CredentialType -trimprefix=CredentialType -transform=snake + // CredentialType is a type of database credential. type CredentialType int const ( CredentialTypePassword CredentialType = iota CredentialTypeRSAPrivateKey + CredentialTypeClientCertificate ) -func (k CredentialType) String() string { - switch k { - case CredentialTypePassword: - return "password" - case CredentialTypeRSAPrivateKey: - return "rsa_private_key" - default: - return "unknown" - } -} - // /////////////////////////////////////////////////////// // UpdateUser() // /////////////////////////////////////////////////////// diff --git a/sdk/database/dbplugin/v5/grpc_client.go b/sdk/database/dbplugin/v5/grpc_client.go index cfddfcd578ef..9b0b984f42af 100644 --- a/sdk/database/dbplugin/v5/grpc_client.go +++ b/sdk/database/dbplugin/v5/grpc_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
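The enumer-generated file above replaces the hand-written String() switch that the database.go hunk deletes, and additionally gives callers a parser and a validity check. A minimal sketch of the round-trip, assuming the v5 dbplugin package is imported (all names come from the generated code above):

```go
package main

import (
	"fmt"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)

func main() {
	// String() is now generated; fmt picks it up via the Stringer interface.
	fmt.Println(dbplugin.CredentialTypeClientCertificate) // client_certificate

	// CredentialTypeString parses a snake_case name back to the enum value.
	ct, err := dbplugin.CredentialTypeString("rsa_private_key")
	if err != nil {
		panic(err)
	}
	fmt.Println(ct == dbplugin.CredentialTypeRSAPrivateKey) // true

	// Out-of-range values are detectable instead of silently printing "unknown".
	fmt.Println(dbplugin.CredentialType(42).IsACredentialType()) // false
}
```

The practical win over the deleted switch is that a newly added enum value, such as CredentialTypeClientCertificate here, can no longer be forgotten in String().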
+// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( @@ -101,6 +104,10 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { if len(req.PublicKey) == 0 { return nil, fmt.Errorf("missing public key credential") } + case CredentialTypeClientCertificate: + if req.Subject == "" { + return nil, fmt.Errorf("missing certificate subject") + } default: return nil, fmt.Errorf("unknown credential type") } @@ -118,6 +125,7 @@ func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { CredentialType: int32(req.CredentialType), Password: req.Password, PublicKey: req.PublicKey, + Subject: req.Subject, Expiration: expiration, Statements: &proto.Statements{ Commands: req.Statements.Commands, diff --git a/sdk/database/dbplugin/v5/grpc_client_test.go b/sdk/database/dbplugin/v5/grpc_client_test.go index b187d736d80e..05ecb960e60a 100644 --- a/sdk/database/dbplugin/v5/grpc_client_test.go +++ b/sdk/database/dbplugin/v5/grpc_client_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/grpc_database_plugin.go b/sdk/database/dbplugin/v5/grpc_database_plugin.go index 441030df93e0..b428d4ce06ef 100644 --- a/sdk/database/dbplugin/v5/grpc_database_plugin.go +++ b/sdk/database/dbplugin/v5/grpc_database_plugin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/grpc_server.go b/sdk/database/dbplugin/v5/grpc_server.go index ce3be1efb7c6..7e1bc3fa1fc7 100644 --- a/sdk/database/dbplugin/v5/grpc_server.go +++ b/sdk/database/dbplugin/v5/grpc_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( @@ -149,6 +152,7 @@ func (g *gRPCServer) NewUser(ctx context.Context, req *proto.NewUserRequest) (*p CredentialType: CredentialType(req.GetCredentialType()), Password: req.GetPassword(), PublicKey: req.GetPublicKey(), + Subject: req.GetSubject(), Expiration: expiration, Statements: getStatementsFromProto(req.GetStatements()), RollbackStatements: getStatementsFromProto(req.GetRollbackStatements()), diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go index 7399bf55789b..53d44c7c2a65 100644 --- a/sdk/database/dbplugin/v5/grpc_server_test.go +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/marshalling.go b/sdk/database/dbplugin/v5/marshalling.go index e14a21e58335..2b3e8cb346ac 100644 --- a/sdk/database/dbplugin/v5/marshalling.go +++ b/sdk/database/dbplugin/v5/marshalling.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/middleware.go b/sdk/database/dbplugin/v5/middleware.go index 240d64e6915e..2091e672084a 100644 --- a/sdk/database/dbplugin/v5/middleware.go +++ b/sdk/database/dbplugin/v5/middleware.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
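For plugin consumers, the client-side validation added in newUserReqToProto means a CredentialTypeClientCertificate request must carry a non-empty Subject before it ever reaches the wire. A hedged sketch of the minimum request shape (field values are illustrative):

```go
import (
	"time"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
)

// newClientCertRequest builds the smallest request that passes the new
// client-side check; an empty Subject would fail with
// "missing certificate subject" before the RPC is sent.
func newClientCertRequest() dbplugin.NewUserRequest {
	return dbplugin.NewUserRequest{
		CredentialType: dbplugin.CredentialTypeClientCertificate,
		// The distinguished name the plugin should issue the client
		// certificate for (illustrative value).
		Subject:    "CN=readonly,O=example",
		Expiration: time.Now().Add(time.Hour),
	}
}
```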
+// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/middleware_test.go b/sdk/database/dbplugin/v5/middleware_test.go index 5dd97cdb9e5a..a2a76336fb7d 100644 --- a/sdk/database/dbplugin/v5/middleware_test.go +++ b/sdk/database/dbplugin/v5/middleware_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/plugin_client.go b/sdk/database/dbplugin/v5/plugin_client.go index caea00a8fdaf..b4085ead6cb9 100644 --- a/sdk/database/dbplugin/v5/plugin_client.go +++ b/sdk/database/dbplugin/v5/plugin_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/plugin_client_test.go b/sdk/database/dbplugin/v5/plugin_client_test.go index 903cec65dcbd..fb6852d1a4b3 100644 --- a/sdk/database/dbplugin/v5/plugin_client_test.go +++ b/sdk/database/dbplugin/v5/plugin_client_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( @@ -153,3 +156,7 @@ func (m *mockRunnerUtil) MlockEnabled() bool { args := m.Called() return args.Bool(0) } + +func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) { + return "clusterid", nil +} diff --git a/sdk/database/dbplugin/v5/plugin_factory.go b/sdk/database/dbplugin/v5/plugin_factory.go index 649f0f3fc944..4b158c319e26 100644 --- a/sdk/database/dbplugin/v5/plugin_factory.go +++ b/sdk/database/dbplugin/v5/plugin_factory.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/plugin_server.go b/sdk/database/dbplugin/v5/plugin_server.go index 090894ae5521..216219df1d7f 100644 --- a/sdk/database/dbplugin/v5/plugin_server.go +++ b/sdk/database/dbplugin/v5/plugin_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbplugin import ( diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go index 3789a51c1460..fd76db7cef85 100644 --- a/sdk/database/dbplugin/v5/proto/database.pb.go +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -139,6 +142,7 @@ type NewUserRequest struct { RollbackStatements *Statements `protobuf:"bytes,5,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` CredentialType int32 `protobuf:"varint,6,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` PublicKey []byte `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + Subject string `protobuf:"bytes,8,opt,name=subject,proto3" json:"subject,omitempty"` } func (x *NewUserRequest) Reset() { @@ -222,6 +226,13 @@ func (x *NewUserRequest) GetPublicKey() []byte { return nil } +func (x *NewUserRequest) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + type UsernameConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -866,7 +877,7 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0xf9, 0x02, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, + 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x93, 0x03, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, @@ -890,100 +901,102 @@ var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x4b, 0x65, 0x79, 0x22, 0x50, 0x0a, 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, + 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x50, 0x0a, + 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, + 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, + 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 
0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, + 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, - 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, - 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, - 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, - 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 
0x5f, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, - 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, - 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, - 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, - 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, - 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, - 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, - 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, - 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, - 
0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, - 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, - 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, - 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, + 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 
0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, + 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, + 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, + 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, + 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, + 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/sdk/database/dbplugin/v5/proto/database.proto b/sdk/database/dbplugin/v5/proto/database.proto index b4959f709e1f..2b0ebde48574 100644 --- a/sdk/database/dbplugin/v5/proto/database.proto +++ b/sdk/database/dbplugin/v5/proto/database.proto @@ -1,69 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package dbplugin.v5; -option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"; - import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"; + ///////////////// // Initialize() ///////////////// message InitializeRequest { - google.protobuf.Struct config_data = 1; - bool verify_connection = 2; + google.protobuf.Struct config_data = 1; + bool verify_connection = 2; } message InitializeResponse { - google.protobuf.Struct config_data = 1; + google.protobuf.Struct config_data = 1; } ///////////////// // NewUser() ///////////////// message NewUserRequest { - UsernameConfig username_config = 1; - string password = 2; - google.protobuf.Timestamp expiration = 3; - Statements statements = 4; - Statements rollback_statements = 5; - int32 credential_type = 6; - bytes public_key = 7; + UsernameConfig username_config = 1; + string password = 2; + google.protobuf.Timestamp expiration = 3; + Statements statements = 4; + Statements rollback_statements = 5; + int32 credential_type = 6; + bytes public_key = 7; + string subject = 8; } message UsernameConfig { - string display_name = 1; - string role_name = 2; + string display_name = 1; + string role_name = 2; } message NewUserResponse { - string username = 1; + string username = 1; } ///////////////// // UpdateUser() ///////////////// message UpdateUserRequest { - string username = 1; - ChangePassword password = 2; - ChangeExpiration expiration = 3; - ChangePublicKey public_key = 4; - int32 credential_type = 5; + string username = 1; + ChangePassword password = 2; + ChangeExpiration expiration = 3; + ChangePublicKey public_key = 4; + int32 credential_type = 5; } message ChangePassword { - string new_password = 1; - Statements statements = 2; + string new_password = 1; + Statements statements = 2; } message ChangePublicKey { - bytes new_public_key = 1; - Statements statements = 2; + bytes new_public_key = 1; + Statements statements = 2; } message ChangeExpiration { - google.protobuf.Timestamp new_expiration = 1; - Statements statements = 2; + google.protobuf.Timestamp new_expiration = 1; + Statements statements = 2; } message UpdateUserResponse {} @@ -72,8 +76,8 @@ message UpdateUserResponse {} // DeleteUser() ///////////////// message DeleteUserRequest { - string username = 1; - Statements statements = 2; + string username = 1; + Statements statements = 2; } message DeleteUserResponse {} @@ -82,23 +86,23 @@ message DeleteUserResponse {} // Type() ///////////////// message TypeResponse { - string Type = 1; + string Type = 1; } ///////////////// // General purpose ///////////////// message Statements { - repeated string Commands = 1; + repeated string Commands = 1; } message Empty {} service Database { - rpc Initialize(InitializeRequest) returns (InitializeResponse); - rpc NewUser(NewUserRequest) returns (NewUserResponse); - rpc UpdateUser(UpdateUserRequest) returns (UpdateUserResponse); - rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse); - rpc Type(Empty) returns (TypeResponse); - rpc Close(Empty) returns (Empty); -} \ No newline at end of file + rpc Initialize(InitializeRequest) returns (InitializeResponse); + rpc NewUser(NewUserRequest) returns (NewUserResponse); + rpc UpdateUser(UpdateUserRequest) returns (UpdateUserResponse); + rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse); + rpc Type(Empty) returns (TypeResponse); + rpc Close(Empty) returns 
(Empty); +} diff --git a/sdk/database/dbplugin/v5/proto/database_grpc.pb.go b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go index 8a549fef92f0..49610c3b0462 100644 --- a/sdk/database/dbplugin/v5/proto/database_grpc.pb.go +++ b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sdk/database/dbplugin/v5/proto/database.proto package proto @@ -14,6 +21,15 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Database_Initialize_FullMethodName = "/dbplugin.v5.Database/Initialize" + Database_NewUser_FullMethodName = "/dbplugin.v5.Database/NewUser" + Database_UpdateUser_FullMethodName = "/dbplugin.v5.Database/UpdateUser" + Database_DeleteUser_FullMethodName = "/dbplugin.v5.Database/DeleteUser" + Database_Type_FullMethodName = "/dbplugin.v5.Database/Type" + Database_Close_FullMethodName = "/dbplugin.v5.Database/Close" +) + // DatabaseClient is the client API for Database service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -36,7 +52,7 @@ func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) { out := new(InitializeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Initialize_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -45,7 +61,7 @@ func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, func (c *databaseClient) NewUser(ctx context.Context, in *NewUserRequest, opts ...grpc.CallOption) (*NewUserResponse, error) { out := new(NewUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/NewUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_NewUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -54,7 +70,7 @@ func (c *databaseClient) NewUser(ctx context.Context, in *NewUserRequest, opts . func (c *databaseClient) UpdateUser(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*UpdateUserResponse, error) { out := new(UpdateUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/UpdateUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_UpdateUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -63,7 +79,7 @@ func (c *databaseClient) UpdateUser(ctx context.Context, in *UpdateUserRequest, func (c *databaseClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) { out := new(DeleteUserResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/DeleteUser", in, out, opts...) + err := c.cc.Invoke(ctx, Database_DeleteUser_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -72,7 +88,7 @@ func (c *databaseClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { out := new(TypeResponse) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Type_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -81,7 +97,7 @@ func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallO func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Close", in, out, opts...) + err := c.cc.Invoke(ctx, Database_Close_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -146,7 +162,7 @@ func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Initialize", + FullMethod: Database_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) @@ -164,7 +180,7 @@ func _Database_NewUser_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/NewUser", + FullMethod: Database_NewUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).NewUser(ctx, req.(*NewUserRequest)) @@ -182,7 +198,7 @@ func _Database_UpdateUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/UpdateUser", + FullMethod: Database_UpdateUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).UpdateUser(ctx, req.(*UpdateUserRequest)) @@ -200,7 +216,7 @@ func _Database_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/DeleteUser", + FullMethod: Database_DeleteUser_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).DeleteUser(ctx, req.(*DeleteUserRequest)) @@ -218,7 +234,7 @@ func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Type", + FullMethod: Database_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Type(ctx, req.(*Empty)) @@ -236,7 +252,7 @@ func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dbplugin.v5.Database/Close", + FullMethod: Database_Close_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DatabaseServer).Close(ctx, req.(*Empty)) diff --git a/sdk/database/dbplugin/v5/testing/test_helpers.go b/sdk/database/dbplugin/v5/testing/test_helpers.go index 55a402c7fe3c..4ecebe5de08e 100644 --- a/sdk/database/dbplugin/v5/testing/test_helpers.go +++ b/sdk/database/dbplugin/v5/testing/test_helpers.go @@ -1,11 +1,16 @@ +// Copyright (c) HashiCorp, Inc. 
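As in the v4 file earlier, the main effect of this generated-code refresh is the exported *_FullMethodName constants. They give middleware something stable to match on; a minimal sketch of a unary server interceptor using the v5 constants (the logging function is an assumed placeholder):

```go
import (
	"context"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"
)

// quietInterceptor logs every dbplugin RPC except the high-frequency
// housekeeping calls, comparing FullMethod against the generated constants
// rather than hand-typed "/dbplugin.v5.Database/..." literals.
func quietInterceptor(logf func(format string, args ...interface{})) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		switch info.FullMethod {
		case proto.Database_Type_FullMethodName, proto.Database_Close_FullMethodName:
			// skip logging for these
		default:
			logf("dbplugin call: %s", info.FullMethod)
		}
		return handler(ctx, req)
	}
}
```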
+// SPDX-License-Identifier: MPL-2.0 + package dbtesting import ( "context" + "io/ioutil" "os" "testing" "time" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/database/dbplugin/v5" ) @@ -19,7 +24,7 @@ func getRequestTimeout(t *testing.T) time.Duration { return 10 * time.Second } - dur, err := time.ParseDuration(rawDur) + dur, err := parseutil.ParseDurationSecond(rawDur) if err != nil { t.Fatalf("Failed to parse custom request timeout %q: %s", rawDur, err) } @@ -35,7 +40,7 @@ func AssertInitializeCircleCiTest(t *testing.T, db dbplugin.Database, req dbplug var err error for i := 1; i <= maxAttempts; i++ { - resp, err = verifyInitialize(t, db, req) + resp, err = VerifyInitialize(t, db, req) if err != nil { t.Errorf("Failed AssertInitialize attempt: %d with error:\n%+v\n", i, err) time.Sleep(1 * time.Second) @@ -53,14 +58,14 @@ func AssertInitializeCircleCiTest(t *testing.T, db dbplugin.Database, req dbplug func AssertInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) dbplugin.InitializeResponse { t.Helper() - resp, err := verifyInitialize(t, db, req) + resp, err := VerifyInitialize(t, db, req) if err != nil { t.Fatalf("Failed to initialize: %s", err) } return resp } -func verifyInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { +func VerifyInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t)) defer cancel() @@ -115,3 +120,31 @@ func AssertClose(t *testing.T, db dbplugin.Database) { t.Fatalf("Failed to close database: %s", err) } } + +// GetGCPTestCredentials reads the credentials from the +// GOOGLE_APPLICATIONS_CREDENTIALS environment variable. +// The credentials are read from a file if the value is a valid file path; +// otherwise the value itself is returned as JSON. +func GetGCPTestCredentials(t *testing.T) string { + t.Helper() + envCredentials := "GOOGLE_APPLICATIONS_CREDENTIALS" + + var credsStr string + credsEnv := os.Getenv(envCredentials) + if credsEnv == "" { + t.Skipf("env var %s not set, skipping test", envCredentials) + } + + // Attempt to read as file path; if invalid, assume given JSON value directly + if _, err := os.Stat(credsEnv); err == nil { + credsBytes, err := ioutil.ReadFile(credsEnv) + if err != nil { + t.Fatalf("unable to read credentials file %s: %v", credsEnv, err) + } + credsStr = string(credsBytes) + } else { + credsStr = credsEnv + } + + return credsStr +} diff --git a/sdk/database/helper/connutil/cloudsql.go b/sdk/database/helper/connutil/cloudsql.go new file mode 100644 index 000000000000..5330c1cc22a7 --- /dev/null +++ b/sdk/database/helper/connutil/cloudsql.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc.
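A short usage sketch for the new GetGCPTestCredentials helper: a hypothetical plugin acceptance test resolves the credentials once and hands them to Initialize under the service_account_json key introduced in connutil below (the wrapper function and the remaining config keys are illustrative):

```go
import (
	"testing"

	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
)

// initializeWithIAM exercises a plugin instance against a real Cloud SQL
// database; db is the plugin under test.
func initializeWithIAM(t *testing.T, db dbplugin.Database) {
	// Skips the test when GOOGLE_APPLICATIONS_CREDENTIALS is unset; returns
	// file contents when the value is a path, the raw JSON otherwise.
	creds := dbtesting.GetGCPTestCredentials(t)

	dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{
		Config: map[string]interface{}{
			// connection_url and friends elided; see the connutil changes.
			"auth_type":            "gcp_iam",
			"service_account_json": creds,
		},
		VerifyConnection: true,
	})
}
```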
+// SPDX-License-Identifier: MPL-2.0 + +package connutil + +import ( + "fmt" + + "cloud.google.com/go/cloudsqlconn" + "cloud.google.com/go/cloudsqlconn/postgres/pgxv4" +) + +var configurableAuthTypes = []string{ + AuthTypeGCPIAM, +} + +func (c *SQLConnectionProducer) getCloudSQLDriverType() (string, error) { + var driverType string + // using switch case for future extensibility + switch c.Type { + case dbTypePostgres: + driverType = cloudSQLPostgres + default: + return "", fmt.Errorf("unsupported DB type for cloud IAM: %s", c.Type) + } + + return driverType, nil +} + +func (c *SQLConnectionProducer) registerDrivers(driverName string, credentials string) (func() error, error) { + typ, err := c.getCloudSQLDriverType() + if err != nil { + return nil, err + } + + opts, err := GetCloudSQLAuthOptions(credentials) + if err != nil { + return nil, err + } + + // using switch case for future extensibility + switch typ { + case cloudSQLPostgres: + return pgxv4.RegisterDriver(driverName, opts...) + } + + return nil, fmt.Errorf("unrecognized cloudsql type encountered: %s", typ) +} + +// GetCloudSQLAuthOptions takes a JSON credentials string and returns a set of +// GCP Cloud SQL options - always WithIAMAuthN, plus WithCredentialsJSON when +// explicit credentials were provided. +func GetCloudSQLAuthOptions(credentials string) ([]cloudsqlconn.Option, error) { + opts := []cloudsqlconn.Option{cloudsqlconn.WithIAMAuthN()} + + if credentials != "" { + opts = append(opts, cloudsqlconn.WithCredentialsJSON([]byte(credentials))) + } + + return opts, nil +} + +func ValidateAuthType(authType string) bool { + var valid bool + for _, typ := range configurableAuthTypes { + if authType == typ { + valid = true + break + } + } + + return valid +} diff --git a/sdk/database/helper/connutil/connutil.go b/sdk/database/helper/connutil/connutil.go index 1749b275a260..50582aa8196a 100644 --- a/sdk/database/helper/connutil/connutil.go +++ b/sdk/database/helper/connutil/connutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package connutil import ( diff --git a/sdk/database/helper/connutil/sql.go b/sdk/database/helper/connutil/sql.go index 6256ff1a4cf0..ca3cd489aa7d 100644 --- a/sdk/database/helper/connutil/sql.go +++ b/sdk/database/helper/connutil/sql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
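The cloudsql.go helpers above are thin wrappers over the upstream connector. Seen in isolation, registration amounts to two calls; a hedged sketch with a placeholder driver name and DSN:

```go
import (
	"database/sql"

	"cloud.google.com/go/cloudsqlconn/postgres/pgxv4"

	"github.com/hashicorp/vault/sdk/database/helper/connutil"
)

// openCloudSQLPostgres mirrors what registerDrivers plus Connection() do:
// build the IAM options, register a database/sql driver backed by the Cloud
// SQL dialer, then open through that driver. The caller must run cleanup
// when done to tear the dialer down again.
func openCloudSQLPostgres(credsJSON, dsn string) (*sql.DB, func() error, error) {
	// Always enables WithIAMAuthN; adds WithCredentialsJSON only when
	// explicit credentials are supplied (otherwise ADC applies).
	opts, err := connutil.GetCloudSQLAuthOptions(credsJSON)
	if err != nil {
		return nil, nil, err
	}

	cleanup, err := pgxv4.RegisterDriver("cloudsql-postgres-example", opts...)
	if err != nil {
		return nil, nil, err
	}

	db, err := sql.Open("cloudsql-postgres-example", dsn)
	if err != nil {
		_ = cleanup()
		return nil, nil, err
	}
	return db, cleanup, nil
}
```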
+// SPDX-License-Identifier: MPL-2.0 + package connutil import ( @@ -11,11 +14,19 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/database/dbplugin" "github.com/hashicorp/vault/sdk/database/helper/dbutil" "github.com/mitchellh/mapstructure" ) +const ( + AuthTypeGCPIAM = "gcp_iam" + + dbTypePostgres = "pgx" + cloudSQLPostgres = "cloudsql-postgres" +) + var _ ConnectionProducer = &SQLConnectionProducer{} // SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases @@ -26,8 +37,15 @@ type SQLConnectionProducer struct { MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` Username string `json:"username" mapstructure:"username" structs:"username"` Password string `json:"password" mapstructure:"password" structs:"password"` + AuthType string `json:"auth_type" mapstructure:"auth_type" structs:"auth_type"` + ServiceAccountJSON string `json:"service_account_json" mapstructure:"service_account_json" structs:"service_account_json"` DisableEscaping bool `json:"disable_escaping" mapstructure:"disable_escaping" structs:"disable_escaping"` + // cloud options here - cloudDriverName is globally unique, but only needs to be retained for the lifetime + // of driver registration, not across plugin restarts. + cloudDriverName string + cloudDialerCleanup func() error + Type string RawConfig map[string]interface{} maxConnectionLifetime time.Duration @@ -104,6 +122,32 @@ func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interf return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err) } + // validate auth_type if provided + authType := c.AuthType + if authType != "" { + if ok := ValidateAuthType(authType); !ok { + return nil, fmt.Errorf("invalid auth_type %s provided", authType) + } + } + + if authType == AuthTypeGCPIAM { + c.cloudDriverName, err = uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("unable to generate UUID for IAM configuration: %w", err) + } + + // For _most_ SQL databases, the driver itself contains no state. In the case of Google's Cloud SQL drivers, + // however, the driver might store a credentials file, in which case the state stored by the driver is in + // fact critical to the proper functioning of the connection. So the driver needs to be registered here inside the + // ConnectionProducer's Init. + dialerCleanup, err := c.registerDrivers(c.cloudDriverName, c.ServiceAccountJSON) + if err != nil { + return nil, err + } + + c.cloudDialerCleanup = dialerCleanup + } + // Set initialized to true at this point since all fields are set, // and the connection can be established at a later time.
c.Initialized = true @@ -134,12 +178,24 @@ func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, er // If the ping was unsuccessful, close it and ignore errors as we'll be // reestablishing anyways c.db.Close() + + // if IAM authentication is enabled + // ensure open dialer is also closed + if c.AuthType == AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } } - // For mssql backend, switch to sqlserver instead - dbType := c.Type - if c.Type == "mssql" { - dbType = "sqlserver" + // default non-IAM behavior + driverName := c.Type + + if c.AuthType == AuthTypeGCPIAM { + driverName = c.cloudDriverName + } else if c.Type == "mssql" { + // For mssql backend, switch to sqlserver instead + driverName = "sqlserver" } // Otherwise, attempt to make connection @@ -161,7 +217,7 @@ func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, er } var err error - c.db, err = sql.Open(dbType, conn) + c.db, err = sql.Open(driverName, conn) if err != nil { return nil, err } @@ -189,6 +245,13 @@ func (c *SQLConnectionProducer) Close() error { if c.db != nil { c.db.Close() + + // cleanup IAM dialer if it exists + if c.AuthType == AuthTypeGCPIAM { + if c.cloudDialerCleanup != nil { + c.cloudDialerCleanup() + } + } } c.db = nil diff --git a/sdk/database/helper/connutil/sql_test.go b/sdk/database/helper/connutil/sql_test.go index 2ca11b758986..9f29d4ae2c5a 100644 --- a/sdk/database/helper/connutil/sql_test.go +++ b/sdk/database/helper/connutil/sql_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package connutil import ( diff --git a/sdk/database/helper/credsutil/caseop_enumer.go b/sdk/database/helper/credsutil/caseop_enumer.go new file mode 100644 index 000000000000..3a96c63222e9 --- /dev/null +++ b/sdk/database/helper/credsutil/caseop_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=CaseOp -transform=snake"; DO NOT EDIT. + +package credsutil + +import ( + "fmt" +) + +const _CaseOpName = "keep_caseuppercaselowercase" + +var _CaseOpIndex = [...]uint8{0, 9, 18, 27} + +func (i CaseOp) String() string { + if i < 0 || i >= CaseOp(len(_CaseOpIndex)-1) { + return fmt.Sprintf("CaseOp(%d)", i) + } + return _CaseOpName[_CaseOpIndex[i]:_CaseOpIndex[i+1]] +} + +var _CaseOpValues = []CaseOp{0, 1, 2} + +var _CaseOpNameToValueMap = map[string]CaseOp{ + _CaseOpName[0:9]: 0, + _CaseOpName[9:18]: 1, + _CaseOpName[18:27]: 2, +} + +// CaseOpString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func CaseOpString(s string) (CaseOp, error) { + if val, ok := _CaseOpNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to CaseOp values", s) +} + +// CaseOpValues returns all values of the enum +func CaseOpValues() []CaseOp { + return _CaseOpValues +} + +// IsACaseOp returns "true" if the value is listed in the enum definition. "false" otherwise +func (i CaseOp) IsACaseOp() bool { + for _, v := range _CaseOpValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/database/helper/credsutil/credsutil.go b/sdk/database/helper/credsutil/credsutil.go index 064552d1fa9f..503999c868b3 100644 --- a/sdk/database/helper/credsutil/credsutil.go +++ b/sdk/database/helper/credsutil/credsutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
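Taken together, the Init/Connection/Close changes mean IAM support is driven entirely by two new configuration keys, with driver naming, registration, and dialer cleanup handled internally. A minimal sketch of configuring a producer for IAM auth (connection URL and env var are illustrative; verification is skipped so the sketch needs no live database):

```go
import (
	"context"
	"log"
	"os"

	"github.com/hashicorp/vault/sdk/database/helper/connutil"
)

func newIAMProducer(ctx context.Context) *connutil.SQLConnectionProducer {
	// "pgx" is the only Type currently mapped to a Cloud SQL driver.
	p := &connutil.SQLConnectionProducer{Type: "pgx"}
	conf := map[string]interface{}{
		// Cloud SQL connector convention: host=<project>:<region>:<instance>.
		"connection_url": "host=my-project:us-east1:my-instance user=vault dbname=postgres",
		"auth_type":      connutil.AuthTypeGCPIAM,
		// Optional inline service-account JSON; empty means the connector
		// falls back to Application Default Credentials.
		"service_account_json": os.Getenv("SERVICE_ACCOUNT_JSON"),
	}
	if _, err := p.Init(ctx, conf, false); err != nil {
		log.Fatalf("init failed: %v", err)
	}
	return p
}
```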
+// SPDX-License-Identifier: MPL-2.0 + package credsutil import ( diff --git a/sdk/database/helper/credsutil/credsutil_test.go b/sdk/database/helper/credsutil/credsutil_test.go index e094719d0797..77e1a2862f3c 100644 --- a/sdk/database/helper/credsutil/credsutil_test.go +++ b/sdk/database/helper/credsutil/credsutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package credsutil import ( diff --git a/sdk/database/helper/credsutil/sql.go b/sdk/database/helper/credsutil/sql.go index 39fb467a79bf..2c27adf37cc1 100644 --- a/sdk/database/helper/credsutil/sql.go +++ b/sdk/database/helper/credsutil/sql.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package credsutil import ( diff --git a/sdk/database/helper/credsutil/usernames.go b/sdk/database/helper/credsutil/usernames.go index c1e3ccb5298e..4ea4491c4f1d 100644 --- a/sdk/database/helper/credsutil/usernames.go +++ b/sdk/database/helper/credsutil/usernames.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package credsutil import ( @@ -6,6 +9,7 @@ import ( "time" ) +//go:generate enumer -type=CaseOp -transform=snake type CaseOp int const ( diff --git a/sdk/database/helper/credsutil/usernames_test.go b/sdk/database/helper/credsutil/usernames_test.go index b1e79ce26d6e..a3e883491fc2 100644 --- a/sdk/database/helper/credsutil/usernames_test.go +++ b/sdk/database/helper/credsutil/usernames_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package credsutil import ( diff --git a/sdk/database/helper/dbutil/dbutil.go b/sdk/database/helper/dbutil/dbutil.go index 19198bcfdddd..efc7e01e13f0 100644 --- a/sdk/database/helper/dbutil/dbutil.go +++ b/sdk/database/helper/dbutil/dbutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbutil import ( diff --git a/sdk/database/helper/dbutil/dbutil_test.go b/sdk/database/helper/dbutil/dbutil_test.go index 64ca9924d390..797712b4d902 100644 --- a/sdk/database/helper/dbutil/dbutil_test.go +++ b/sdk/database/helper/dbutil/dbutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbutil import ( diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go index 489509721759..6765b7e2d4a2 100644 --- a/sdk/framework/backend.go +++ b/sdk/framework/backend.go @@ -1,9 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( "context" "crypto/rand" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -56,6 +60,11 @@ type Backend struct { // InitializeFunc is the callback, which if set, will be invoked via // Initialize() just after a plugin has been mounted. + // + // Note that storage writes should only occur on the active instance within a + // primary cluster or local mount on a performance secondary. If your InitializeFunc + // writes to storage, you can use the backend's WriteSafeReplicationState() method + // to prevent it from attempting to write on a Vault instance with read-only storage. InitializeFunc InitializeFunc // PeriodicFunc is the callback, which if set, will be invoked when the @@ -66,6 +75,11 @@ type Backend struct { // entries in backend's storage, while the backend is still being used. // (Note the difference between this action and `Clean`, which is // invoked just before the backend is unmounted). 
+ // + // Note that storage writes should only occur on the active instance within a + // primary cluster or local mount on a performance secondary. If your PeriodicFunc + // writes to storage, you can use the backend's WriteSafeReplicationState() method + // to prevent it from attempting to write on a Vault instance with read-only storage. PeriodicFunc periodicFunc // WALRollback is called when a WAL entry (see wal.go) has to be rolled @@ -97,6 +111,7 @@ type Backend struct { logger log.Logger system logical.SystemView + events logical.EventSender once sync.Once pathsRe []*regexp.Regexp } @@ -128,6 +143,10 @@ type InitializeFunc func(context.Context, *logical.InitializationRequest) error // the input as defined by request handler prior to JSON marshaling type PatchPreprocessorFunc func(map[string]interface{}) (map[string]interface{}, error) +// ErrNoEvents is returned when attempting to send an event, but when the event +// sender was not passed in during `backend.Setup()`. +var ErrNoEvents = errors.New("no event sender configured") + // Initialize is the logical.Backend implementation. func (b *Backend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { if b.InitializeFunc != nil { @@ -202,7 +221,7 @@ func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*log // If the path is empty and it is a help operation, handle that. if req.Path == "" && req.Operation == logical.HelpOperation { - return b.handleRootHelp(req) + return b.handleRootHelp(ctx, req) } // Find the matching route @@ -398,6 +417,7 @@ func (b *Backend) InvalidateKey(ctx context.Context, key string) { func (b *Backend) Setup(ctx context.Context, config *logical.BackendConfig) error { b.logger = config.Logger b.system = config.System + b.events = config.EventsSender return nil } @@ -456,12 +476,38 @@ func (b *Backend) Secret(k string) *Secret { return nil } +// WriteSafeReplicationState returns true if this backend instance is capable of writing +// to storage without receiving an ErrReadOnly error. The active instance in a primary +// cluster or a local mount on a performance secondary is capable of writing to storage. +func (b *Backend) WriteSafeReplicationState() bool { + replicationState := b.System().ReplicationState() + return (b.System().LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) && + !replicationState.HasState(consts.ReplicationDRSecondary) && + !replicationState.HasState(consts.ReplicationPerformanceStandby) +} + +// init runs as a sync.Once function from any plugin entry point which needs to route requests by paths. +// It may panic if a coding error in the plugin is detected. +// For builtin plugins, this is unit tested in helper/builtinplugins/builtinplugins_test.go. +// For other plugins, any unit test that attempts to perform any request to the plugin will exercise these checks. 
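A usage sketch of the replication-safety guidance above (hypothetical backend and storage key):

package example

import (
	"context"
	"time"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// newBackend wires a PeriodicFunc that checks WriteSafeReplicationState
// before writing, so instances with read-only storage skip the write
// instead of failing with ErrReadOnly.
func newBackend() *framework.Backend {
	var b framework.Backend
	b.PeriodicFunc = func(ctx context.Context, req *logical.Request) error {
		if !b.WriteSafeReplicationState() {
			return nil // not the active primary instance or a local mount
		}
		entry, err := logical.StorageEntryJSON("tidy/last-run", time.Now().UTC())
		if err != nil {
			return err
		}
		return req.Storage.Put(ctx, entry)
	}
	return &b
}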
func (b *Backend) init() { b.pathsRe = make([]*regexp.Regexp, len(b.Paths)) for i, p := range b.Paths { + // Detect the coding error of failing to initialise Pattern if len(p.Pattern) == 0 { panic(fmt.Sprintf("Routing pattern cannot be blank")) } + + // Detect the coding error of attempting to define a CreateOperation without defining an ExistenceCheck + if p.ExistenceCheck == nil { + if _, ok := p.Operations[logical.CreateOperation]; ok { + panic(fmt.Sprintf("Pattern %v defines a CreateOperation but no ExistenceCheck", p.Pattern)) + } + if _, ok := p.Callbacks[logical.CreateOperation]; ok { + panic(fmt.Sprintf("Pattern %v defines a CreateOperation but no ExistenceCheck", p.Pattern)) + } + } + // Automatically anchor the pattern if p.Pattern[0] != '^' { p.Pattern = "^" + p.Pattern @@ -469,6 +515,8 @@ func (b *Backend) init() { if p.Pattern[len(p.Pattern)-1] != '$' { p.Pattern = p.Pattern + "$" } + + // Detect the coding error of an invalid Pattern b.pathsRe[i] = regexp.MustCompile(p.Pattern) } } @@ -501,7 +549,7 @@ func (b *Backend) route(path string) (*Path, map[string]string) { return nil, nil } -func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error) { +func (b *Backend) handleRootHelp(ctx context.Context, req *logical.Request) (*logical.Response, error) { // Build a mapping of the paths and get the paths alphabetized to // make the output prettier. pathsMap := make(map[string]*Path) @@ -549,6 +597,10 @@ func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error vaultVersion = env.VaultVersion } + redactVersion, _, _, _ := logical.CtxRedactionSettingsValue(ctx) + if redactVersion { + vaultVersion = "" + } doc := NewOASDocument(vaultVersion) if err := documentPaths(b, requestResponsePrefix, doc); err != nil { b.Logger().Warn("error generating OpenAPI", "error", err) @@ -682,6 +734,15 @@ func (b *Backend) handleWALRollback(ctx context.Context, req *logical.Request) ( return logical.ErrorResponse(merr.Error()), nil } +// SendEvent is used to send events through the underlying EventSender. +// It returns ErrNoEvents if the events system has not been configured or enabled. +func (b *Backend) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { + if b.events == nil { + return ErrNoEvents + } + return b.events.SendEvent(ctx, eventType, event) +} + // FieldSchema is a basic schema to describe the format of a path field. type FieldSchema struct { Type FieldType @@ -693,16 +754,35 @@ type FieldSchema struct { Required bool Deprecated bool - // Query indicates this field will be sent as a query parameter: + // Query indicates this field will be expected as a query parameter as part + // of ReadOperation, ListOperation or DeleteOperation requests: // // /v1/foo/bar?some_param=some_value // - // It doesn't affect handling of the value, but may be used for documentation. + // The field will still be expected as a request body parameter for + // CreateOperation or UpdateOperation requests! + // + // To put that another way, you should set Query for any non-path parameter + // you want to use in a read/list/delete operation. While setting the Query + // field to `true` is not required in such cases (Vault will expose the + // query parameters to you via req.Data regardless), it is highly + // recommended to do so in order to improve the quality of the generated + // OpenAPI documentation (as well as any code generation based on it), which + // will otherwise incorrectly omit the parameter. 
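For example, a hypothetical path definition following the Query guidance above marks its non-path read parameter accordingly:

package example

import "github.com/hashicorp/vault/sdk/framework"

// rolePath shows a non-path parameter, "detailed", that a read handler
// consumes; marking it Query surfaces it in the generated OpenAPI as a
// query parameter, while it remains a body parameter for POST requests.
func rolePath() *framework.Path {
	return &framework.Path{
		Pattern: "roles/" + framework.GenericNameRegex("name"),
		Fields: map[string]*framework.FieldSchema{
			"name": {
				Type:        framework.TypeString,
				Description: "Name of the role",
			},
			"detailed": {
				Type:        framework.TypeBool,
				Description: "If true, include extra detail in the read response",
				Query:       true,
			},
		},
	}
}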
+ // + // The reason for this design is historical: back at the start of 2018, + // query parameters were not mapped to fields at all, and it was implicit + // that all non-path fields were exclusively for the use of create/update + // operations. Since then, support for query parameters has gradually been + // extended to read, delete and list operations - and now this declarative + // metadata is needed, so that the OpenAPI generator can know which + // parameters are actually referred to, from within the code of + // read/delete/list operation handler functions. Query bool // AllowedValues is an optional list of permitted values for this field. // This constraint is not (yet) enforced by the framework, but the list is - // output as part of OpenAPI generation and may effect documentation and + // output as part of OpenAPI generation and may affect documentation and // dynamic UI generation. AllowedValues []interface{} @@ -738,6 +818,8 @@ func (t FieldType) Zero() interface{} { return "" case TypeInt: return 0 + case TypeInt64: + return int64(0) case TypeBool: return false case TypeMap: diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go index 9a2b5941457a..0b7a2054373d 100644 --- a/sdk/framework/backend_test.go +++ b/sdk/framework/backend_test.go @@ -1,7 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( "context" + "fmt" "net/http" "reflect" "strings" @@ -811,3 +815,23 @@ func TestInitializeBackend(t *testing.T) { t.Fatal("backend should be open") } } + +// TestFieldTypeMethods tries to ensure our switch-case statements for the +// FieldType "enum" are complete. +func TestFieldTypeMethods(t *testing.T) { + unknownFormat := convertType(TypeInvalid).format + + for i := TypeInvalid + 1; i < typeInvalidMax; i++ { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if i.String() == TypeInvalid.String() { + t.Errorf("unknown type string for %d", i) + } + + if convertType(i).format == unknownFormat { + t.Errorf("unknown schema for %d", i) + } + + _ = i.Zero() + }) + } +} diff --git a/sdk/framework/field_data.go b/sdk/framework/field_data.go index 99e3fb7ab888..e5f69acdb81a 100644 --- a/sdk/framework/field_data.go +++ b/sdk/framework/field_data.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( @@ -25,7 +28,7 @@ type FieldData struct { Schema map[string]*FieldSchema } -// Validate cycles through raw data and validate conversions in +// Validate cycles through raw data and validates conversions in // the schema, so we don't get an error/panic later when // trying to get data out. Data not in the schema is not // an error at this point, so we don't worry about it. @@ -53,6 +56,40 @@ func (d *FieldData) Validate() error { return nil } +// ValidateStrict cycles through raw data and validates conversions in the +// schema. In addition to the checks done by Validate, this function ensures +// that the raw data has all of the schema's required fields and does not +// have any fields outside of the schema. It will return a non-nil error if: +// +// 1. a conversion (parsing of the field's value) fails +// 2. a raw field does not exist in the schema (unless the schema is nil) +// 3. a required schema field is missing from the raw data +// +// This function is currently used for validating response schemas in tests. 
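A minimal usage sketch of ValidateStrict with a hypothetical schema, exercising both extra checks:

package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/framework"
)

// validateStrictSketch fails ValidateStrict twice over: "ttl" is not in the
// schema, and the required "name" field is absent from the raw data.
func validateStrictSketch() {
	data := &framework.FieldData{
		Raw: map[string]interface{}{"ttl": "5m"},
		Schema: map[string]*framework.FieldSchema{
			"name": {Type: framework.TypeString, Required: true},
		},
	}
	if err := data.ValidateStrict(); err != nil {
		fmt.Println(err) // first reported problem: the unknown "ttl" field
	}
}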
+func (d *FieldData) ValidateStrict() error { + // the schema is nil, nothing to validate + if d.Schema == nil { + return nil + } + + for field := range d.Raw { + if _, _, err := d.GetOkErr(field); err != nil { + return fmt.Errorf("field %q: %w", field, err) + } + } + + for field, schema := range d.Schema { + if !schema.Required { + continue + } + if _, ok := d.Raw[field]; !ok { + return fmt.Errorf("missing required field %q", field) + } + } + + return nil +} + // Get gets the value for the given field. If the key is an invalid field, // FieldData will panic. If you want a safer version of this method, use // GetOk. If the field k is not set, the default value (if set) will be diff --git a/sdk/framework/field_data_test.go b/sdk/framework/field_data_test.go index d7cbd976157a..078c6fbcd5b8 100644 --- a/sdk/framework/field_data_test.go +++ b/sdk/framework/field_data_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( @@ -1157,8 +1160,118 @@ func TestFieldDataGetFirst(t *testing.T) { t.Fatal("should have gotten buzz for fizz") } - result, ok = data.GetFirst("cats") + _, ok = data.GetFirst("cats") if ok { t.Fatal("shouldn't have gotten anything for cats") } } + +func TestValidateStrict(t *testing.T) { + cases := map[string]struct { + Schema map[string]*FieldSchema + Raw map[string]interface{} + ExpectError bool + }{ + "string type, string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + false, + }, + + "string type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": 42, + }, + false, + }, + + "string type, unset value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{}, + false, + }, + + "string type, unset required value": { + map[string]*FieldSchema{ + "foo": { + Type: TypeString, + Required: true, + }, + }, + map[string]interface{}{}, + true, + }, + + "value not in schema": { + map[string]*FieldSchema{ + "foo": { + Type: TypeString, + Required: true, + }, + }, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + true, + }, + + "value not in schema, empty schema": { + map[string]*FieldSchema{}, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + true, + }, + + "value not in schema, nil schema": { + nil, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + false, + }, + + "type time, invalid value": { + map[string]*FieldSchema{ + "foo": {Type: TypeTime}, + }, + map[string]interface{}{ + "foo": "2021-13-11T09:08:07+02:00", + }, + true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + err := data.ValidateStrict() + + if err == nil && tc.ExpectError == true { + t.Fatalf("expected an error, got nil") + } + if err != nil && tc.ExpectError == false { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/sdk/framework/field_type.go b/sdk/framework/field_type.go index ef7f08191e1a..ee07b6afe866 100644 --- a/sdk/framework/field_type.go +++ b/sdk/framework/field_type.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework // FieldType is the enum of types that a field can be. @@ -58,11 +61,15 @@ const ( // TypeFloat parses both float32 and float64 values TypeFloat - // TypeTime represents absolute time. 
It accepts an RFC3999-formatted + // TypeTime represents absolute time. It accepts an RFC3339-formatted // string (with or without fractional seconds), or an epoch timestamp // formatted as a string or a number. The resulting time.Time // is converted to UTC. TypeTime + + // DO NOT USE. Any new values must be inserted before this value. + // Used to write tests that ensure type methods handle all possible values. + typeInvalidMax ) func (t FieldType) String() string { @@ -75,6 +82,8 @@ func (t FieldType) String() string { return "name string" case TypeInt: return "int" + case TypeInt64: + return "int64" case TypeBool: return "bool" case TypeMap: diff --git a/sdk/framework/filter.go b/sdk/framework/filter.go index faaccba2a864..b9b99799b916 100644 --- a/sdk/framework/filter.go +++ b/sdk/framework/filter.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/identity.go b/sdk/framework/identity.go index ebb2aa4dcc6c..157f3c193cf4 100644 --- a/sdk/framework/identity.go +++ b/sdk/framework/identity.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/identity_test.go b/sdk/framework/identity_test.go index cb71eefdf9d8..1667fb960636 100644 --- a/sdk/framework/identity_test.go +++ b/sdk/framework/identity_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/lease.go b/sdk/framework/lease.go index 4d0240fbe7fd..24824ca52a24 100644 --- a/sdk/framework/lease.go +++ b/sdk/framework/lease.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/lease_test.go b/sdk/framework/lease_test.go index e145c2a82b78..5d1f9f091bbc 100644 --- a/sdk/framework/lease_test.go +++ b/sdk/framework/lease_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index b09d43a3446b..82e7f5fb6441 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -1,9 +1,11 @@ package framework import ( + "errors" "fmt" "reflect" "regexp" + "regexp/syntax" "sort" "strconv" "strings" @@ -105,13 +107,12 @@ type OASLicense struct { } type OASPathItem struct { - Description string `json:"description,omitempty"` - Parameters []OASParameter `json:"parameters,omitempty"` - Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` - Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` - CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` - DisplayNavigation bool `json:"x-vault-displayNavigation,omitempty" mapstructure:"x-vault-displayNavigation"` - DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"` + Description string `json:"description,omitempty"` + Parameters []OASParameter `json:"parameters,omitempty"` + Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"` + Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"` + CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"` + DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"` Get *OASOperation `json:"get,omitempty"` Post *OASOperation `json:"post,omitempty"` @@ -163,6 +164,8 @@ type OASSchema struct { Description string `json:"description,omitempty"` Properties map[string]*OASSchema `json:"properties,omitempty"` + AdditionalProperties interface{} `json:"additionalProperties,omitempty"` + // Required is a list of keys in Properties that are required to be present. This is a different // approach than OASParameter (unfortunately), but is how JSONSchema handles 'required'. Required []string `json:"required,omitempty"` @@ -194,30 +197,42 @@ var OASStdRespNoContent = &OASResponse{ Description: "empty body", } -// Regex for handling optional and named parameters in paths, and string cleanup. -// Predefined here to avoid substantial recompilation. +var OASStdRespListOK = &OASResponse{ + Description: "OK", + Content: OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: &OASSchema{ + Ref: "#/components/schemas/StandardListResponse", + }, + }, + }, +} -// Capture optional path elements in ungreedy (?U) fashion -// Both "(leases/)?renew" and "(/(?P<name>.+))?" formats are detected -var optRe = regexp.MustCompile(`(?U)\([^(]*\)\?|\(/\(\?P<[^(]*\)\)\?`) +var OASStdSchemaStandardListResponse = &OASSchema{ + Type: "object", + Properties: map[string]*OASSchema{ + "keys": { + Type: "array", + Items: &OASSchema{ + Type: "string", + }, + }, + }, +} + +// Regex for handling fields in paths, and string cleanup. +// Predefined here to avoid substantial recompilation. var ( - altFieldsGroupRe = regexp.MustCompile(`\(\?P<\w+>\w+(\|\w+)+\)`) // Match named groups that limit options, e.g. "(?P<foo>a|b|c)" - altFieldsRe = regexp.MustCompile(`\w+(\|\w+)+`) // Match an options set, e.g. "a|b|c" - altRe = regexp.MustCompile(`\((.*)\|(.*)\)`) // Capture alternation elements, e.g. "(raw/?$|raw/(?P<path>.+))" - altRootsRe = regexp.MustCompile(`^\(([\w\-_]+(?:\|[\w\-_]+)+)\)(/.*)$`) // Pattern starting with alts, e.g.
"(root1|root2)/(?Pregex)" - cleanCharsRe = regexp.MustCompile("[()^$?]") // Set of regex characters that will be stripped during cleaning - cleanSuffixRe = regexp.MustCompile(`/\?\$?$`) // Path suffix patterns that will be stripped during cleaning - nonWordRe = regexp.MustCompile(`[^a-zA-Z0-9]+`) // Match a sequence of non-word characters - pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", - reqdRe = regexp.MustCompile(`\(?\?P<(\w+)>[^)]*\)?`) // Capture required parameters, e.g. "(?Pregex)" - wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning + nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters + pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", + wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning ) // documentPaths parses all paths in a framework.Backend into OpenAPI paths. func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocument) error { for _, p := range backend.Paths { - if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, backend.BackendType, doc); err != nil { + if err := documentPath(p, backend, requestResponsePrefix, doc); err != nil { return err } } @@ -226,19 +241,34 @@ func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocum } // documentPath parses a framework.Path into one or more OpenAPI paths. -func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, backendType logical.BackendType, doc *OASDocument) error { +func documentPath(p *Path, backend *Backend, requestResponsePrefix string, doc *OASDocument) error { var sudoPaths []string var unauthPaths []string - if specialPaths != nil { - sudoPaths = specialPaths.Root - unauthPaths = specialPaths.Unauthenticated + if backend.PathsSpecial != nil { + sudoPaths = backend.PathsSpecial.Root + unauthPaths = backend.PathsSpecial.Unauthenticated } // Convert optional parameters into distinct patterns to be processed independently. - paths := expandPattern(p.Pattern) + forceUnpublished := false + paths, captures, err := expandPattern(p.Pattern) + if err != nil { + if errors.Is(err, errUnsupportableRegexpOperationForOpenAPI) { + // Pattern cannot be transformed into sensible OpenAPI paths. In this case, we override the later + // processing to use the regexp, as is, as the path, and behave as if Unpublished was set on every + // operation (meaning the operations will not be represented in the OpenAPI document). + // + // This allows a human reading the OpenAPI document to notice that, yes, a path handler does exist, + // even though it was not able to contribute actual OpenAPI operations. + forceUnpublished = true + paths = []string{p.Pattern} + } else { + return err + } + } - for _, path := range paths { + for pathIndex, path := range paths { // Construct a top level PathItem which will be populated as the path is processed. pi := OASPathItem{ Description: cleanString(p.HelpSynopsis), @@ -246,7 +276,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st pi.Sudo = specialPathMatch(path, sudoPaths) pi.Unauthenticated = specialPathMatch(path, unauthPaths) - pi.DisplayAttrs = p.DisplayAttrs + pi.DisplayAttrs = withoutOperationHints(p.DisplayAttrs) // If the newer style Operations map isn't defined, create one from the legacy fields. 
operations := p.Operations @@ -263,54 +293,22 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Process path and header parameters, which are common to all operations. // Body fields will be added to individual operations. - pathFields, bodyFields := splitFields(p.Fields, path) - - defaultMountPath := requestResponsePrefix - if requestResponsePrefix == "kv" { - defaultMountPath = "secret" - } - - if defaultMountPath != "system" && defaultMountPath != "identity" { - p := OASParameter{ - Name: fmt.Sprintf("%s_mount_path", defaultMountPath), - Description: "Path where the backend was mounted; the endpoint path will be offset by the mount path", - In: "path", - Schema: &OASSchema{ - Type: "string", - Default: defaultMountPath, - }, - Required: false, - } - - pi.Parameters = append(pi.Parameters, p) - } + pathFields, queryFields, bodyFields := splitFields(p.Fields, path, captures) for name, field := range pathFields { - location := "path" - required := true - - if field == nil { - continue - } - - if field.Query { - location = "query" - required = false - } - t := convertType(field.Type) p := OASParameter{ Name: name, Description: cleanString(field.Description), - In: location, + In: "path", Schema: &OASSchema{ Type: t.baseType, Pattern: t.pattern, Enum: field.AllowedValues, Default: field.Default, - DisplayAttrs: field.DisplayAttrs, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), }, - Required: required, + Required: true, Deprecated: field.Deprecated, } pi.Parameters = append(pi.Parameters, p) @@ -318,14 +316,15 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st // Sort parameters for a stable output sort.Slice(pi.Parameters, func(i, j int) bool { - return strings.ToLower(pi.Parameters[i].Name) < strings.ToLower(pi.Parameters[j].Name) + return pi.Parameters[i].Name < pi.Parameters[j].Name }) // Process each supported operation by building up an Operation object // with descriptions, properties and examples from the framework.Path data. + var listOperation *OASOperation for opType, opHandler := range operations { props := opHandler.Properties() - if props.Unpublished { + if props.Unpublished || forceUnpublished { continue } @@ -338,19 +337,28 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } } - // If both List and Read are defined, only process Read. - if opType == logical.ListOperation && operations[logical.ReadOperation] != nil { - continue - } - op := NewOASOperation() + operationID := constructOperationID( + path, + pathIndex, + p.DisplayAttrs, + opType, + props.DisplayAttrs, + requestResponsePrefix, + ) + op.Summary = props.Summary op.Description = props.Description op.Deprecated = props.Deprecated + op.OperationID = operationID - // Add any fields not present in the path as body parameters for POST. 
- if opType == logical.CreateOperation || opType == logical.UpdateOperation { + switch opType { + // For the operation types which map to POST/PUT methods, and so allow for request body parameters, + // prepare the request body definition + case logical.CreateOperation: + fallthrough + case logical.UpdateOperation: s := &OASSchema{ Type: "object", Properties: make(map[string]*OASSchema), @@ -364,38 +372,41 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st continue } - openapiField := convertType(field.Type) - if field.Required { - s.Required = append(s.Required, name) - } + addFieldToOASSchema(s, name, field) + } - p := OASSchema{ - Type: openapiField.baseType, - Description: cleanString(field.Description), - Format: openapiField.format, - Pattern: openapiField.pattern, - Enum: field.AllowedValues, - Default: field.Default, - Deprecated: field.Deprecated, - DisplayAttrs: field.DisplayAttrs, - } - if openapiField.baseType == "array" { - p.Items = &OASSchema{ - Type: openapiField.items, - } - } - s.Properties[name] = &p + // Contrary to what one might guess, fields marked with "Query: true" are only query fields when the + // request method is one which does not allow for a request body - they are still body fields when + // dealing with a POST/PUT request. + for name, field := range queryFields { + addFieldToOASSchema(s, name, field) } + // Make the ordering deterministic, so that the generated OpenAPI spec document, observed over several + // versions, doesn't contain spurious non-semantic changes. + sort.Strings(s.Required) + // If examples were given, use the first one as the sample // of this schema. if len(props.Examples) > 0 { s.Example = props.Examples[0].Data } + // TakesArbitraryInput is a case like writing to: + // - sys/wrapping/wrap + // - kv-v1/{path} + // - cubbyhole/{path} + // where the entire request body is an arbitrary JSON object used directly as input. + if p.TakesArbitraryInput { + // Whilst the default value of additionalProperties is true according to the JSON Schema standard, + // making this explicit helps communicate this to humans, and also tools such as + // https://openapi-generator.tech/ which treat it as defaulting to false. + s.AdditionalProperties = true + } + // Set the final request body. Only JSON request data is supported. - if len(s.Properties) > 0 || s.Example != nil { - requestName := constructRequestResponseName(path, requestResponsePrefix, "Request") + if len(s.Properties) > 0 { + requestName := hyphenatedToTitleCase(operationID) + "Request" doc.Components.Schemas[requestName] = s op.RequestBody = &OASRequestBody{ Required: true, @@ -405,12 +416,24 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st }, }, } + } else if p.TakesArbitraryInput { + // When there are no properties, the schema is trivial enough that it makes more sense to write it + // inline, rather than as a named component. + op.RequestBody = &OASRequestBody{ + Required: true, + Content: OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: s, + }, + }, + } } - } - // LIST is represented as GET with a `list` query parameter. - if opType == logical.ListOperation { - // Only accepts List (due to the above skipping of ListOperations that also have ReadOperations) + // For the operation types which map to HTTP methods without a request body, populate query parameters + case logical.ListOperation: + // LIST is represented as GET with a `list` query parameter. 
Code later on in this function will assign + // list operations to a path with an extra trailing slash, ensuring they do not collide with read + // operations. op.Parameters = append(op.Parameters, OASParameter{ Name: "list", Description: "Must be set to `true`", @@ -418,19 +441,37 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st In: "query", Schema: &OASSchema{Type: "string", Enum: []interface{}{"true"}}, }) - } else if opType == logical.ReadOperation && operations[logical.ListOperation] != nil { - // Accepts both Read and List - op.Parameters = append(op.Parameters, OASParameter{ - Name: "list", - Description: "Return a list if `true`", - In: "query", - Schema: &OASSchema{Type: "string"}, + fallthrough + case logical.DeleteOperation: + fallthrough + case logical.ReadOperation: + for name, field := range queryFields { + t := convertType(field.Type) + p := OASParameter{ + Name: name, + Description: cleanString(field.Description), + In: "query", + Schema: &OASSchema{ + Type: t.baseType, + Pattern: t.pattern, + Enum: field.AllowedValues, + Default: field.Default, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + }, + Deprecated: field.Deprecated, + } + op.Parameters = append(op.Parameters, p) + } + + // Sort parameters for a stable output + sort.Slice(op.Parameters, func(i, j int) bool { + return op.Parameters[i].Name < op.Parameters[j].Name }) } // Add tags based on backend type var tags []string - switch backendType { + switch backend.BackendType { case logical.TypeLogical: tags = []string{"secrets"} case logical.TypeCredential: @@ -443,6 +484,9 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st if len(props.Responses) == 0 { if opType == logical.DeleteOperation { op.Responses[204] = OASStdRespNoContent + } else if opType == logical.ListOperation { + op.Responses[200] = OASStdRespListOK + doc.Components.Schemas["StandardListResponse"] = OASStdSchemaStandardListResponse } else { op.Responses[200] = OASStdRespOK } @@ -491,7 +535,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st Enum: field.AllowedValues, Default: field.Default, Deprecated: field.Deprecated, - DisplayAttrs: field.DisplayAttrs, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), } if openapiField.baseType == "array" { p.Items = &OASSchema{ @@ -502,7 +546,7 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st } if len(resp.Fields) != 0 { - responseName := constructRequestResponseName(path, requestResponsePrefix, "Response") + responseName := hyphenatedToTitleCase(operationID) + "Response" doc.Components.Schemas[responseName] = responseSchema content = OASContent{ "application/json": &OASMediaTypeObject{ @@ -521,138 +565,473 @@ func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix st switch opType { case logical.CreateOperation, logical.UpdateOperation: pi.Post = op - case logical.ReadOperation, logical.ListOperation: + case logical.ReadOperation: pi.Get = op case logical.DeleteOperation: pi.Delete = op + case logical.ListOperation: + listOperation = op } } - doc.Paths["/"+path] = &pi - } + // The conventions enforced by the Vault HTTP routing code make it impossible to match a path with a trailing + // slash to anything other than a ListOperation. 
Catch mistakes in path definition, to enforce that if both of + // the two following blocks of code (non-list, and list) write an OpenAPI path to the output document, then the + // first one will definitely not have a trailing slash. + originalPathHasTrailingSlash := strings.HasSuffix(path, "/") + if originalPathHasTrailingSlash && (pi.Get != nil || pi.Post != nil || pi.Delete != nil) { + backend.Logger().Warn( + "OpenAPI spec generation: discarding impossible-to-invoke non-list operations from path with "+ + "required trailing slash; this is a bug in the backend code", "path", path) + pi.Get = nil + pi.Post = nil + pi.Delete = nil + } - return nil -} + // Write the regular, non-list, OpenAPI path to the OpenAPI document, UNLESS we generated a ListOperation, and + // NO OTHER operation types. In that fairly common case (there are lots of list-only endpoints), we avoid + // writing a redundant OpenAPI path for (e.g.) "auth/token/accessors" with no operations, only to then write + // one for "auth/token/accessors/" immediately below. + // + // On the other hand, we do still write the OpenAPI path here if we generated ZERO operation types - this serves + // to provide documentation to a human that an endpoint exists, even if it has no invokable OpenAPI operations. + // Examples of this include kv-v2's ".*" endpoint (regex cannot be translated to OpenAPI parameters), and the + // auth/oci/login endpoint (implements ResolveRoleOperation only, only callable from inside Vault). + if listOperation == nil || pi.Get != nil || pi.Post != nil || pi.Delete != nil { + openAPIPath := "/" + path + if doc.Paths[openAPIPath] != nil { + backend.Logger().Warn( + "OpenAPI spec generation: multiple framework.Path instances generated the same path; "+ + "last processed wins", "path", openAPIPath) + } + doc.Paths[openAPIPath] = &pi + } -// constructRequestResponseName joins the given path with prefix & suffix into -// a CamelCase request or response name. -// -// For example, path=/config/lease/{name}, prefix="secret", suffix="request" -// will result in "SecretConfigLeaseRequest" -func constructRequestResponseName(path, prefix, suffix string) string { - var b strings.Builder + // If there is a ListOperation, write it to a separate OpenAPI path in the document. + if listOperation != nil { + // Append a slash here to disambiguate from the path written immediately above. + // However, if the path already contains a trailing slash, we want to avoid doubling it, and it is + // guaranteed (through the interaction of logic in the last two blocks) that the block immediately above + // will NOT have written a path to the OpenAPI document. + if !originalPathHasTrailingSlash { + path += "/" + } - title := cases.Title(language.English) + listPathItem := OASPathItem{ + Description: pi.Description, + Parameters: pi.Parameters, + DisplayAttrs: pi.DisplayAttrs, - b.WriteString(title.String(prefix)) + // Since the path may now have an extra slash on the end, we need to recalculate the special path + // matches, as the sudo or unauthenticated status may be changed as a result! 
+ Sudo: specialPathMatch(path, sudoPaths), + Unauthenticated: specialPathMatch(path, unauthPaths), - // split the path by / _ - separators - for _, token := range strings.FieldsFunc(path, func(r rune) bool { - return r == '/' || r == '_' || r == '-' - }) { - // exclude request fields - if !strings.ContainsAny(token, "{}") { - b.WriteString(title.String(token)) + Get: listOperation, + } + + openAPIPath := "/" + path + if doc.Paths[openAPIPath] != nil { + backend.Logger().Warn( + "OpenAPI spec generation: multiple framework.Path instances generated the same path; "+ + "last processed wins", "path", openAPIPath) + } + doc.Paths[openAPIPath] = &listPathItem } } - b.WriteString(suffix) + return nil +} - return b.String() +func addFieldToOASSchema(s *OASSchema, name string, field *FieldSchema) { + openapiField := convertType(field.Type) + if field.Required { + s.Required = append(s.Required, name) + } + + p := OASSchema{ + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + } + if openapiField.baseType == "array" { + p.Items = &OASSchema{ + Type: openapiField.items, + } + } + + s.Properties[name] = &p } +// specialPathMatch checks whether the given path matches one of the special +// paths, taking into account * and + wildcards (e.g. foo/+/bar/*) func specialPathMatch(path string, specialPaths []string) bool { - // Test for exact or prefix match of special paths. + // pathMatchesByParts determines if the path matches the special path's + // pattern, accounting for the '+' and '*' wildcards + pathMatchesByParts := func(pathParts []string, specialPathParts []string) bool { + if len(pathParts) < len(specialPathParts) { + return false + } + for i := 0; i < len(specialPathParts); i++ { + var ( + part = pathParts[i] + pattern = specialPathParts[i] + ) + if pattern == "+" { + continue + } + if pattern == "*" { + return true + } + if strings.HasSuffix(pattern, "*") && strings.HasPrefix(part, pattern[0:len(pattern)-1]) { + return true + } + if pattern != part { + return false + } + } + return len(pathParts) == len(specialPathParts) + } + + pathParts := strings.Split(path, "/") + for _, sp := range specialPaths { - if sp == path || - (strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1])) { + // exact match + if sp == path { + return true + } + + // match * + if strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1]) { + return true + } + + // match + + if strings.Contains(sp, "+") && pathMatchesByParts(pathParts, strings.Split(sp, "/")) { return true } } + return false } -// expandPattern expands a regex pattern by generating permutations of any optional parameters -// and changing named parameters into their {openapi} equivalents. -func expandPattern(pattern string) []string { - var paths []string +// constructOperationID joins the given inputs into a hyphen-separated +// lower-case operation id, which is also used as a prefix for request and +// response names. +// +// The OperationPrefix / -Verb / -Suffix found in display attributes will be +// used, if provided. Otherwise, the function falls back to using the path and +// the operation. 
+// +// Examples of generated operation identifiers: +// - kvv2-write +// - kvv2-read +// - google-cloud-login +// - google-cloud-write-role +func constructOperationID( + path string, + pathIndex int, + pathAttributes *DisplayAttributes, + operation logical.Operation, + operationAttributes *DisplayAttributes, + defaultPrefix string, +) string { + var ( + prefix string + verb string + suffix string + ) + + if operationAttributes != nil { + prefix = operationAttributes.OperationPrefix + verb = operationAttributes.OperationVerb + suffix = operationAttributes.OperationSuffix + } - // Determine if the pattern starts with an alternation for multiple roots - // example (root1|root2)/(?P<name>regex) -> match['(root1|root2)/(?P<name>regex)','root1|root2','/(?P<name>regex)'] - match := altRootsRe.FindStringSubmatch(pattern) - if len(match) == 3 { - var expandedRoots []string - for _, root := range strings.Split(match[1], "|") { - expandedRoots = append(expandedRoots, expandPattern(root+match[2])...) + if pathAttributes != nil { + if prefix == "" { + prefix = pathAttributes.OperationPrefix + } + if verb == "" { + verb = pathAttributes.OperationVerb + } + if suffix == "" { + suffix = pathAttributes.OperationSuffix } - return expandedRoots } - // GenericNameRegex adds a regex that complicates our parsing. It is much easier to - // detect and remove it now than to compensate for in the other regexes. + // A single suffix string can contain multiple pipe-delimited strings. To + // determine the actual suffix, we attempt to match it by the index of the + // paths returned from `expandPattern(...)`. For example: + // + // pki/ + // Pattern: "keys/generate/(internal|exported|kms)", + // DisplayAttrs: { + // ... + // OperationSuffix: "internal-key|exported-key|kms-key", + // }, + // + // will expand into three paths and corresponding suffixes: + // + // path 0: "keys/generate/internal" suffix: internal-key + // path 1: "keys/generate/exported" suffix: exported-key + // path 2: "keys/generate/kms" suffix: kms-key // - // example: (?P<foo>\\w(([\\w-.]+)?\\w)?) -> (?P<foo>) - base := GenericNameRegex("") - start := strings.Index(base, ">") - end := strings.LastIndex(base, ")") - regexToRemove := "" - if start != -1 && end != -1 && end > start { - regexToRemove = base[start+1 : end] + pathIndexOutOfRange := false + + if suffixes := strings.Split(suffix, "|"); len(suffixes) > 1 || pathIndex > 0 { + // if the index is out of bounds, fall back to the old logic + if pathIndex >= len(suffixes) { + suffix = "" + pathIndexOutOfRange = true + } else { + suffix = suffixes[pathIndex] + } + } + + // a helper that hyphenates & lower-cases the slice except the empty elements + toLowerHyphenate := func(parts []string) string { + filtered := make([]string, 0, len(parts)) + for _, e := range parts { + if e != "" { + filtered = append(filtered, e) + } + } + return strings.ToLower(strings.Join(filtered, "-")) + } + + // fall back to using the path + operation to construct the operation id + var ( + needPrefix = prefix == "" && verb == "" + needVerb = verb == "" + needSuffix = suffix == "" && (verb == "" || pathIndexOutOfRange) + ) + + if needPrefix { + prefix = defaultPrefix + } + + if needVerb { + if operation == logical.UpdateOperation { + verb = "write" + } else { + verb = string(operation) + } + } + + if needSuffix { + suffix = toLowerHyphenate(nonWordRe.Split(path, -1)) } - pattern = strings.ReplaceAll(pattern, regexToRemove, "") - // Simplify named fields that have limited options, e.g.
(?P<foo>a|b|c) -> (.+) - pattern = altFieldsGroupRe.ReplaceAllStringFunc(pattern, func(s string) string { - return altFieldsRe.ReplaceAllString(s, ".+") - }) + return toLowerHyphenate([]string{prefix, verb, suffix}) +} - // Initialize paths with the original pattern or the halves of an - // alternation, which is also present in some patterns. - matches := altRe.FindAllStringSubmatch(pattern, -1) - if len(matches) > 0 { - paths = []string{matches[0][1], matches[0][2]} - } else { - paths = []string{pattern} +// expandPattern expands a regex pattern by generating permutations of any optional parameters +// and changing named parameters into their {openapi} equivalents. It also returns the names of all capturing groups +// observed in the pattern. +func expandPattern(pattern string) (paths []string, captures map[string]struct{}, err error) { + // Happily, the Go regexp library exposes its underlying "parse to AST" functionality, so we can rely on that to do + // the hard work of interpreting the regexp syntax. + rx, err := syntax.Parse(pattern, syntax.Perl) + if err != nil { + // This should be impossible to reach, since regexps have previously been compiled with MustCompile in + // Backend.init. + panic(err) } - // Expand all optional regex elements into two paths. This approach is really only useful up to 2 optional - // groups, but we probably don't want to deal with the exponential increase beyond that anyway. - for i := 0; i < len(paths); i++ { - p := paths[i] + paths, captures, err = collectPathsFromRegexpAST(rx) + if err != nil { + return nil, nil, err + } - // match is a 2-element slice that will have a start and end index - // for the left-most match of a regex of form: (lease/)? - match := optRe.FindStringIndex(p) + return paths, captures, nil +} - if match != nil { - // create a path that includes the optional element but without - // parenthesis or the '?' character. - paths[i] = p[:match[0]] + p[match[0]+1:match[1]-2] + p[match[1]:] +type pathCollector struct { + strings.Builder + conditionalSlashAppendedAtLength int +} - // create a path that excludes the optional element. - paths = append(paths, p[:match[0]]+p[match[1]:]) - i-- +// collectPathsFromRegexpAST performs a depth-first recursive walk through a regexp AST, collecting an OpenAPI-style +// path as it goes. +// +// Each time it encounters alternation (a|b) or an optional part (a?), it forks its processing to produce additional +// results, to account for each possibility. Note: This does mean that an input pattern with lots of these regexp +// features can produce a lot of different OpenAPI endpoints. At the time of writing, the most complex known example is +// +// "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" +// +// in the PKI secrets engine which expands to 6 separate paths. +// +// Each named capture group - i.e. (?P<name>something here) - is replaced with an OpenAPI parameter - i.e. {name} - and +// the subtree of regexp AST inside the parameter is completely skipped.
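To illustrate the walk described above, a hypothetical in-package sketch (expected results shown as comments) of expanding the PKI pattern quoted in the comment:

package framework

import "fmt"

// expandPatternSketch is illustrative only; it is not part of this change.
func expandPatternSketch() {
	pattern := "issuer/" + GenericNameRegex("issuer_ref") + "/crl(/pem|/der|/delta(/pem|/der)?)?"
	paths, captures, err := expandPattern(pattern)
	if err != nil {
		// Unsupportable operations (e.g. kv-v2's ".*" pattern) surface as
		// errUnsupportableRegexpOperationForOpenAPI instead of paths.
		return
	}
	fmt.Println(len(paths)) // 6: .../crl, /crl/pem, /crl/der, /crl/delta, /crl/delta/pem, /crl/delta/der
	_, ok := captures["issuer_ref"]
	fmt.Println(ok) // true: the named capture became the {issuer_ref} parameter
}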
+func collectPathsFromRegexpAST(rx *syntax.Regexp) (paths []string, captures map[string]struct{}, err error) { + captures = make(map[string]struct{}) + pathCollectors, err := collectPathsFromRegexpASTInternal(rx, []*pathCollector{{}}, captures) + if err != nil { + return nil, nil, err + } + paths = make([]string, 0, len(pathCollectors)) + for _, collector := range pathCollectors { + if collector.conditionalSlashAppendedAtLength != collector.Len() { + paths = append(paths, collector.String()) } } + return paths, captures, nil +} - // Replace named parameters (?P) with {foo} - var replacedPaths []string +var errUnsupportableRegexpOperationForOpenAPI = errors.New("path regexp uses an operation that cannot be translated to an OpenAPI pattern") - for _, path := range paths { - result := reqdRe.FindAllStringSubmatch(path, -1) - if result != nil { - for _, p := range result { - par := p[1] - path = strings.Replace(path, p[0], fmt.Sprintf("{%s}", par), 1) +func collectPathsFromRegexpASTInternal( + rx *syntax.Regexp, + appendingTo []*pathCollector, + captures map[string]struct{}, +) ([]*pathCollector, error) { + var err error + + // Depending on the type of this regexp AST node (its Op, i.e. operation), figure out whether it contributes any + // characters to the URL path, and whether we need to recurse through child AST nodes. + // + // Each element of the appendingTo slice tracks a separate path, defined by the alternatives chosen when traversing + // the | and ? conditional regexp features, and new elements are added as each of these features are traversed. + // + // To share this slice across multiple recursive calls of this function, it is passed down as a parameter to each + // recursive call, potentially modified throughout this switch block, and passed back up as a return value at the + // end of this function - the parent call uses the return value to update its own local variable. + switch rx.Op { + + // These AST operations are leaf nodes (no children), that match zero characters, so require no processing at all + case syntax.OpEmptyMatch: // e.g. (?:) + case syntax.OpBeginLine: // i.e. ^ when (?m) + case syntax.OpEndLine: // i.e. $ when (?m) + case syntax.OpBeginText: // i.e. \A, or ^ when (?-m) + case syntax.OpEndText: // i.e. \z, or $ when (?-m) + case syntax.OpWordBoundary: // i.e. \b + case syntax.OpNoWordBoundary: // i.e. \B + + // OpConcat simply represents multiple parts of the pattern appearing one after the other, so just recurse through + // those pieces. + case syntax.OpConcat: + for _, child := range rx.Sub { + appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo, captures) + if err != nil { + return nil, err + } + } + + // OpLiteral is a literal string in the pattern - append it to the paths we are building. + case syntax.OpLiteral: + for _, collector := range appendingTo { + collector.WriteString(string(rx.Rune)) + } + + // OpAlternate, i.e. a|b, means we clone all of the pathCollector instances we are currently accumulating paths + // into, and independently recurse through each alternate option. + case syntax.OpAlternate: // i.e | + var totalAppendingTo []*pathCollector + lastIndex := len(rx.Sub) - 1 + for index, child := range rx.Sub { + var childAppendingTo []*pathCollector + if index == lastIndex { + // Optimization: last time through this loop, we can simply re-use the existing set of pathCollector + // instances, as we no longer need to preserve them unmodified to make further copies of. 
+ childAppendingTo = appendingTo + } else { + for _, collector := range appendingTo { + newCollector := new(pathCollector) + newCollector.WriteString(collector.String()) + newCollector.conditionalSlashAppendedAtLength = collector.conditionalSlashAppendedAtLength + childAppendingTo = append(childAppendingTo, newCollector) + } + } + childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo, captures) + if err != nil { + return nil, err + } + totalAppendingTo = append(totalAppendingTo, childAppendingTo...) + } + appendingTo = totalAppendingTo + + // OpQuest, i.e. a?, is much like an alternation between exactly two options, one of which is the empty string. + case syntax.OpQuest: + child := rx.Sub[0] + var childAppendingTo []*pathCollector + for _, collector := range appendingTo { + newCollector := new(pathCollector) + newCollector.WriteString(collector.String()) + newCollector.conditionalSlashAppendedAtLength = collector.conditionalSlashAppendedAtLength + childAppendingTo = append(childAppendingTo, newCollector) + } + childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo, captures) + if err != nil { + return nil, err + } + appendingTo = append(appendingTo, childAppendingTo...) + + // Many Vault path patterns end with `/?` to accept paths that end with or without a slash. Our current + // convention for generating the OpenAPI is to strip away these slashes. To do that, this very special case + // detects when we just appended a single conditional slash, and records the length of the path at this point, + // so we can later discard this path variant, if nothing else is appended to it later. + if child.Op == syntax.OpLiteral && string(child.Rune) == "/" { + for _, collector := range childAppendingTo { + collector.conditionalSlashAppendedAtLength = collector.Len() + } + } + + // OpCapture, i.e. ( ) or (?P<name> ), a capturing group + case syntax.OpCapture: + if rx.Name == "" { + // In Vault, an unnamed capturing group is not actually used for capturing. + // We treat it exactly the same as OpConcat. + for _, child := range rx.Sub { + appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo, captures) + if err != nil { + return nil, err + } + } + } else { + // A named capturing group is replaced with the OpenAPI parameter syntax, and the regexp inside the group + // is NOT added to the OpenAPI path. + for _, builder := range appendingTo { + builder.WriteRune('{') + builder.WriteString(rx.Name) + builder.WriteRune('}') } + captures[rx.Name] = struct{}{} } - // Final cleanup - path = cleanSuffixRe.ReplaceAllString(path, "") - path = cleanCharsRe.ReplaceAllString(path, "") - replacedPaths = append(replacedPaths, path) + + // Any other kind of operation is a problem, and will trigger an error, resulting in the pattern being left out of + // the OpenAPI entirely - that's better than generating a path which is incorrect. + // + // The Op types we expect to hit the default condition are: + // + // OpCharClass - i.e. [something] + // OpAnyCharNotNL - i.e. . + // OpAnyChar - i.e. (?s:.) + // OpStar - i.e. * + // OpPlus - i.e. + + // OpRepeat - i.e. {N}, {N,M}, etc. + // + // In any of these conditions, there is no sensible translation of the path to OpenAPI syntax. (Note, this only + // applies to these appearing outside of a named capture group, otherwise they are handled in the previous case.)
+ // + // At the time of writing, the only pattern in the builtin Vault plugins that hits this codepath is the ".*" + // pattern in the KVv2 secrets engine, which is not a valid path, but rather, is a catch-all used to implement + // custom error handling behaviour to guide users who attempt to treat a KVv2 as a KVv1. It is already marked as + // Unpublished, so is withheld from the OpenAPI anyway. + // + // For completeness, one other Op type exists, OpNoMatch, which is never generated by syntax.Parse - only by + // subsequent Simplify in preparation to Compile, which is not used here. + default: + return nil, errUnsupportableRegexpOperationForOpenAPI } - return replacedPaths + return appendingTo, nil } // schemaType is a subset of the JSON Schema elements used as a target @@ -684,8 +1063,8 @@ func convertType(t FieldType) schemaType { ret.baseType = "integer" ret.format = "int64" case TypeDurationSecond, TypeSignedDurationSecond: - ret.baseType = "integer" - ret.format = "seconds" + ret.baseType = "string" + ret.format = "duration" case TypeBool: ret.baseType = "boolean" case TypeMap: @@ -725,52 +1104,96 @@ func cleanString(s string) string { return s } -// splitFields partitions fields into path and body groups -// The input pattern is expected to have been run through expandPattern, -// with paths parameters denotes in {braces}. -func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, bodyFields map[string]*FieldSchema) { +// splitFields partitions fields into path, query and body groups. It uses information on capturing groups previously +// collected by expandPattern, which is necessary to correctly match the treatment in (*Backend).HandleRequest: +// a field counts as a path field if it appears in any capture in the regex, and if that capture was inside an +// alternation or optional part of the regex which does not survive in the OpenAPI path pattern currently being +// processed, that field should NOT be rendered to the OpenAPI spec AT ALL. +func splitFields( + allFields map[string]*FieldSchema, + openAPIPathPattern string, + captures map[string]struct{}, +) (pathFields, queryFields, bodyFields map[string]*FieldSchema) { pathFields = make(map[string]*FieldSchema) + queryFields = make(map[string]*FieldSchema) bodyFields = make(map[string]*FieldSchema) - for _, match := range pathFieldsRe.FindAllStringSubmatch(pattern, -1) { + for _, match := range pathFieldsRe.FindAllStringSubmatch(openAPIPathPattern, -1) { name := match[1] pathFields[name] = allFields[name] } for name, field := range allFields { - if _, ok := pathFields[name]; !ok { + // Any field which relates to a regex capture was already processed above, if it needed to be. + if _, ok := captures[name]; !ok { if field.Query { - pathFields[name] = field + queryFields[name] = field } else { bodyFields[name] = field } } } - return pathFields, bodyFields + return pathFields, queryFields, bodyFields +} + +// withoutOperationHints returns a copy of the given DisplayAttributes without +// OperationPrefix / OperationVerb / OperationSuffix since we don't need these +// fields in the final output. 
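A hypothetical in-package sketch of the partitioning contract described above:

package framework

// splitFieldsSketch is illustrative only: "name" was a regex capture that
// survives as {name} in the OpenAPI path, "detailed" is a Query field, and
// everything else falls through to the request body.
func splitFieldsSketch() {
	allFields := map[string]*FieldSchema{
		"name":     {Type: TypeString},
		"detailed": {Type: TypeBool, Query: true},
		"policy":   {Type: TypeString},
	}
	captures := map[string]struct{}{"name": {}}

	pathFields, queryFields, bodyFields := splitFields(allFields, "roles/{name}", captures)

	_ = pathFields["name"]      // present: matched {name} in the path pattern
	_ = queryFields["detailed"] // present: Query field outside any capture
	_ = bodyFields["policy"]    // present: default body parameter
}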
+func withoutOperationHints(in *DisplayAttributes) *DisplayAttributes {
+	if in == nil {
+		return nil
+	}
+
+	copy := *in
+
+	copy.OperationPrefix = ""
+	copy.OperationVerb = ""
+	copy.OperationSuffix = ""
+
+	// return nil if all fields are empty to avoid empty JSON objects
+	if copy == (DisplayAttributes{}) {
+		return nil
+	}
+
+	return &copy
+}
+
+func hyphenatedToTitleCase(in string) string {
+	var b strings.Builder
+
+	title := cases.Title(language.English, cases.NoLower)
+
+	for _, word := range strings.Split(in, "-") {
+		b.WriteString(title.String(word))
+	}
+
+	return b.String()
+}
 
 // cleanedResponse is identical to logical.Response but with nulls
 // removed from JSON encoding
 type cleanedResponse struct {
-	Secret   *logical.Secret            `json:"secret,omitempty"`
-	Auth     *logical.Auth              `json:"auth,omitempty"`
-	Data     map[string]interface{}     `json:"data,omitempty"`
-	Redirect string                     `json:"redirect,omitempty"`
-	Warnings []string                   `json:"warnings,omitempty"`
-	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"`
-	Headers  map[string][]string        `json:"headers,omitempty"`
+	Secret    *logical.Secret            `json:"secret,omitempty"`
+	Auth      *logical.Auth              `json:"auth,omitempty"`
+	Data      map[string]interface{}     `json:"data,omitempty"`
+	Redirect  string                     `json:"redirect,omitempty"`
+	Warnings  []string                   `json:"warnings,omitempty"`
+	WrapInfo  *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"`
+	Headers   map[string][]string        `json:"headers,omitempty"`
+	MountType string                     `json:"mount_type,omitempty"`
 }
 
 func cleanResponse(resp *logical.Response) *cleanedResponse {
 	return &cleanedResponse{
-		Secret:   resp.Secret,
-		Auth:     resp.Auth,
-		Data:     resp.Data,
-		Redirect: resp.Redirect,
-		Warnings: resp.Warnings,
-		WrapInfo: resp.WrapInfo,
-		Headers:  resp.Headers,
+		Secret:    resp.Secret,
+		Auth:      resp.Auth,
+		Data:      resp.Data,
+		Redirect:  resp.Redirect,
+		Warnings:  resp.Warnings,
+		WrapInfo:  resp.WrapInfo,
+		Headers:   resp.Headers,
+		MountType: resp.MountType,
 	}
 }
 
@@ -784,10 +1207,10 @@ func cleanResponse(resp *logical.Response) *cleanedResponse {
 // postSysToolsRandomUrlbytes_2
 //
 // An optional user-provided suffix ("context") may also be appended.
+//
+// Deprecated: operation IDs are now populated using `constructOperationID`.
+// This function is here for backwards compatibility with older plugins.
 func (d *OASDocument) CreateOperationIDs(context string) {
-	// title caser
-	title := cases.Title(language.English)
-
 	opIDCount := make(map[string]int)
 	var paths []string
 
@@ -814,12 +1237,16 @@ func (d *OASDocument) CreateOperationIDs(context string) {
 			continue
 		}
 
+		if oasOperation.OperationID != "" {
+			continue
+		}
+
 		// Discard "_mount_path" from any {thing_mount_path} parameters
 		path = strings.Replace(path, "_mount_path", "", 1)
 
 		// Space-split on non-words, title case everything, recombine
 		opID := nonWordRe.ReplaceAllString(strings.ToLower(path), " ")
-		opID = title.String(opID)
+		opID = strings.Title(opID)
 		opID = method + strings.ReplaceAll(opID, " ", "")
 
 		// deduplicate operationIds. This is a safeguard, since generated IDs should
diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go
index 00895ca3fb17..4cb94342ffde 100644
--- a/sdk/framework/openapi_test.go
+++ b/sdk/framework/openapi_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
@@ -18,75 +21,6 @@ import (
 )
 
 func TestOpenAPI_Regex(t *testing.T) {
-	t.Run("Required", func(t *testing.T) {
-		tests := []struct {
-			input    string
-			captures []string
-		}{
-			{`/foo/bar/(?P<val>.*)`, []string{"val"}},
-			{`/foo/bar/` + GenericNameRegex("val"), []string{"val"}},
-			{`/foo/bar/` + GenericNameRegex("first") + "/b/" + GenericNameRegex("second"), []string{"first", "second"}},
-			{`/foo/bar`, []string{}},
-		}
-
-		for _, test := range tests {
-			result := reqdRe.FindAllStringSubmatch(test.input, -1)
-			if len(result) != len(test.captures) {
-				t.Fatalf("Capture error (%s): expected %d matches, actual: %d", test.input, len(test.captures), len(result))
-			}
-
-			for i := 0; i < len(result); i++ {
-				if result[i][1] != test.captures[i] {
-					t.Fatalf("Capture error (%s): expected %s, actual: %s", test.input, test.captures[i], result[i][1])
-				}
-			}
-		}
-	})
-	t.Run("Optional", func(t *testing.T) {
-		input := "foo/(maybe/)?bar"
-		expStart := len("foo/")
-		expEnd := len(input) - len("bar")
-
-		match := optRe.FindStringIndex(input)
-		if diff := deep.Equal(match, []int{expStart, expEnd}); diff != nil {
-			t.Fatal(diff)
-		}
-
-		input = "/foo/maybe/bar"
-		match = optRe.FindStringIndex(input)
-		if match != nil {
-			t.Fatalf("Expected nil match (%s), got %+v", input, match)
-		}
-	})
-	t.Run("Alternation", func(t *testing.T) {
-		input := `(raw/?$|raw/(?P<path>.+))`
-
-		matches := altRe.FindAllStringSubmatch(input, -1)
-		exp1 := "raw/?$"
-		exp2 := "raw/(?P<path>.+)"
-		if matches[0][1] != exp1 || matches[0][2] != exp2 {
-			t.Fatalf("Capture error. Expected %s and %s, got %v", exp1, exp2, matches[0][1:])
-		}
-
-		input = `/foo/bar/` + GenericNameRegex("val")
-
-		matches = altRe.FindAllStringSubmatch(input, -1)
-		if matches != nil {
-			t.Fatalf("Expected nil match (%s), got %+v", input, matches)
-		}
-	})
-	t.Run("Alternation Fields", func(t *testing.T) {
-		input := `/foo/bar/(?P<type>auth|database|secret)/(?P<fields>a|b)`
-
-		act := altFieldsGroupRe.ReplaceAllStringFunc(input, func(s string) string {
-			return altFieldsRe.ReplaceAllString(s, ".+")
-		})
-
-		exp := "/foo/bar/(?P<type>.+)/(?P<fields>.+)"
-		if act != exp {
-			t.Fatalf("Replace error. Expected %s, got %v", exp, act)
-		}
-	})
 	t.Run("Path fields", func(t *testing.T) {
 		input := `/foo/bar/{inner}/baz/{outer}`
@@ -111,21 +45,6 @@ func TestOpenAPI_Regex(t *testing.T) {
 			regex  *regexp.Regexp
 			output string
 		}{
-			{
-				input:  `ab?cde^fg(hi?j$k`,
-				regex:  cleanCharsRe,
-				output: "abcdefghijk",
-			},
-			{
-				input:  `abcde/?`,
-				regex:  cleanSuffixRe,
-				output: "abcde",
-			},
-			{
-				input:  `abcde/?$`,
-				regex:  cleanSuffixRe,
-				output: "abcde",
-			},
 			{
 				input:  `abcde`,
 				regex:  wsRe,
@@ -152,136 +71,300 @@ func TestOpenAPI_ExpandPattern(t *testing.T) {
 		inPattern   string
 		outPathlets []string
 	}{
+		// A simple string without regexp metacharacters passes through as is
 		{"rekey/backup", []string{"rekey/backup"}},
+		// A trailing regexp anchor metacharacter is removed
 		{"rekey/backup$", []string{"rekey/backup"}},
+		// As is a leading one
+		{"^rekey/backup", []string{"rekey/backup"}},
+		// Named capture groups become OpenAPI parameters
 		{"auth/(?P<path>.+?)/tune$", []string{"auth/{path}/tune"}},
 		{"auth/(?P<path>.+?)/tune/(?P<more>.*?)$", []string{"auth/{path}/tune/{more}"}},
+		// Even if the capture group contains very complex regexp structure inside it
+		{"something/(?P<something>(a|b(c|d))|e+|f{1,3}[ghi-k]?.*)", []string{"something/{something}"}},
+		// A question-mark results in variants without and with the optional path part
 		{"tools/hash(/(?P<urlalgorithm>.+))?", []string{
 			"tools/hash",
 			"tools/hash/{urlalgorithm}",
 		}},
+		// Multiple question-marks evaluate each possible combination
 		{"(leases/)?renew(/(?P<url_lease_id>.+))?", []string{
 			"leases/renew",
 			"leases/renew/{url_lease_id}",
 			"renew",
 			"renew/{url_lease_id}",
 		}},
+		// GenericNameRegex is one particular way of writing a named capture group, so behaves the same
 		{`config/ui/headers/` + GenericNameRegex("header"), []string{"config/ui/headers/{header}"}},
+		// The question-mark behaviour still works when the question-mark is directly applied to a named capture group
 		{`leases/lookup/(?P<prefix>.+?)?`, []string{
 			"leases/lookup/",
 			"leases/lookup/{prefix}",
 		}},
+		// Optional trailing slashes at the end of the path get stripped - even if appearing deep inside an alternation
 		{`(raw/?$|raw/(?P<path>.+))`, []string{
 			"raw",
 			"raw/{path}",
 		}},
+		// OptionalParamRegex is also another way of writing a named capture group, that is optional
 		{"lookup" + OptionalParamRegex("urltoken"), []string{
 			"lookup",
 			"lookup/{urltoken}",
 		}},
+		// Optional trailing slashes at the end of the path get stripped in simpler cases too
 		{"roles/?$", []string{
 			"roles",
 		}},
 		{"roles/?", []string{
 			"roles",
 		}},
+		// Non-optional trailing slashes remain... although don't do this, it breaks HelpOperation!
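+		// (Help requests are routed without the trailing slash, so a pattern which requires one, such as the
+		// "accessors/$" case below, can never match them - see the Pattern documentation in path.go.)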
+		// (Existing real examples of this pattern being fixed via https://github.com/hashicorp/vault/pull/18571)
 		{"accessors/$", []string{
 			"accessors/",
 		}},
+		// GenericNameRegex and OptionalParamRegex still work when concatenated
 		{"verify/" + GenericNameRegex("name") + OptionalParamRegex("urlalgorithm"), []string{
 			"verify/{name}",
 			"verify/{name}/{urlalgorithm}",
 		}},
+		// Named capture groups that specify enum-like parameters work as expected
 		{"^plugins/catalog/(?P<type>auth|database|secret)/(?P<name>.+)$", []string{
 			"plugins/catalog/{type}/{name}",
 		}},
 		{"^plugins/catalog/(?P<type>auth|database|secret)/?$", []string{
 			"plugins/catalog/{type}",
 		}},
+		// Alternations between various literal path segments work
 		{"(pathOne|pathTwo)/", []string{"pathOne/", "pathTwo/"}},
 		{"(pathOne|pathTwo)/" + GenericNameRegex("name"), []string{"pathOne/{name}", "pathTwo/{name}"}},
 		{
 			"(pathOne|path-2|Path_3)/" + GenericNameRegex("name"),
 			[]string{"Path_3/{name}", "path-2/{name}", "pathOne/{name}"},
 		},
+		// They still work when combined with GenericNameWithAtRegex
+		{"(creds|sts)/" + GenericNameWithAtRegex("name"), []string{
+			"creds/{name}",
+			"sts/{name}",
+		}},
+		// And when they're somewhere other than the start of the pattern
+		{"keys/generate/(internal|exported|kms)", []string{
+			"keys/generate/exported",
+			"keys/generate/internal",
+			"keys/generate/kms",
+		}},
+		// If a plugin author makes their list operation support both singular and plural forms, the OpenAPI notices
+		{"rolesets?/?", []string{"roleset", "rolesets"}},
+		// Complex nested alternation and question-marks are correctly interpreted
+		{"crl(/pem|/delta(/pem)?)?", []string{"crl", "crl/delta", "crl/delta/pem", "crl/pem"}},
+	}
+
+	for i, test := range tests {
+		paths, _, err := expandPattern(test.inPattern)
+		if err != nil {
+			t.Fatal(err)
+		}
+		sort.Strings(paths)
+		if !reflect.DeepEqual(paths, test.outPathlets) {
+			t.Fatalf("Test %d: Expected %v got %v", i, test.outPathlets, paths)
+		}
+	}
+}
+
+func TestOpenAPI_ExpandPattern_ReturnsError(t *testing.T) {
+	tests := []struct {
+		inPattern string
+		outError  error
+	}{
+		// None of these regexp constructs are allowed outside of named capture groups
+		{"[a-z]", errUnsupportableRegexpOperationForOpenAPI},
+		{".", errUnsupportableRegexpOperationForOpenAPI},
+		{"a+", errUnsupportableRegexpOperationForOpenAPI},
+		{"a*", errUnsupportableRegexpOperationForOpenAPI},
+		// So this pattern, which is a combination of two of the above, isn't either - this pattern occurs in the KV
+		// secrets engine for its catch-all error handler, which provides a helpful hint to people treating a KV v2 as
+		// a KV v1.
+ {".*", errUnsupportableRegexpOperationForOpenAPI}, } for i, test := range tests { - out := expandPattern(test.inPattern) - sort.Strings(out) - if !reflect.DeepEqual(out, test.outPathlets) { - t.Fatalf("Test %d: Expected %v got %v", i, test.outPathlets, out) + _, _, err := expandPattern(test.inPattern) + if err != test.outError { + t.Fatalf("Test %d: Expected %q got %q", i, test.outError, err) } } } func TestOpenAPI_SplitFields(t *testing.T) { + paths, captures, err := expandPattern("some/" + GenericNameRegex("a") + "/path" + OptionalParamRegex("e")) + if err != nil { + t.Fatal(err) + } + fields := map[string]*FieldSchema{ "a": {Description: "path"}, "b": {Description: "body"}, "c": {Description: "body"}, "d": {Description: "body"}, "e": {Description: "path"}, + "f": {Description: "query", Query: true}, } - pathFields, bodyFields := splitFields(fields, "some/{a}/path/{e}") + for index, path := range paths { + pathFields, queryFields, bodyFields := splitFields(fields, path, captures) - lp := len(pathFields) - lb := len(bodyFields) - l := len(fields) - if lp+lb != l { - t.Fatalf("split length error: %d + %d != %d", lp, lb, l) - } + numPath := len(pathFields) + numQuery := len(queryFields) + numBody := len(bodyFields) + numExpectedDiscarded := 0 + // The first path generated is expected to be the one omitting the optional parameter field "e" + if index == 0 { + numExpectedDiscarded = 1 + } + l := len(fields) + if numPath+numQuery+numBody+numExpectedDiscarded != l { + t.Fatalf("split length error: %d + %d + %d + %d != %d", numPath, numQuery, numBody, numExpectedDiscarded, l) + } - for name, field := range pathFields { - if field.Description != "path" { - t.Fatalf("expected field %s to be in 'path', found in %s", name, field.Description) + for name, field := range pathFields { + if field.Description != "path" { + t.Fatalf("expected field %s to be in 'path', found in %s", name, field.Description) + } } - } - for name, field := range bodyFields { - if field.Description != "body" { - t.Fatalf("expected field %s to be in 'body', found in %s", name, field.Description) + for name, field := range queryFields { + if field.Description != "query" { + t.Fatalf("expected field %s to be in 'query', found in %s", name, field.Description) + } + } + for name, field := range bodyFields { + if field.Description != "body" { + t.Fatalf("expected field %s to be in 'body', found in %s", name, field.Description) + } } } } func TestOpenAPI_SpecialPaths(t *testing.T) { - tests := []struct { - pattern string - rootPaths []string - root bool - unauthPaths []string - unauth bool + tests := map[string]struct { + pattern string + rootPaths []string + rootExpected bool + unauthenticatedPaths []string + unauthenticatedExpected bool }{ - {"foo", []string{}, false, []string{"foo"}, true}, - {"foo", []string{"foo"}, true, []string{"bar"}, false}, - {"foo/bar", []string{"foo"}, false, []string{"foo/*"}, true}, - {"foo/bar", []string{"foo/*"}, true, []string{"foo"}, false}, - {"foo/", []string{"foo/*"}, true, []string{"a", "b", "foo/"}, true}, - {"foo", []string{"foo*"}, true, []string{"a", "fo*"}, true}, - {"foo/bar", []string{"a", "b", "foo/*"}, true, []string{"foo/baz/*"}, false}, + "empty": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{}, + unauthenticatedExpected: false, + }, + "exact-match-unauthenticated": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: true, + }, + 
"exact-match-root": { + pattern: "foo", + rootPaths: []string{"foo"}, + rootExpected: true, + unauthenticatedPaths: []string{"bar"}, + unauthenticatedExpected: false, + }, + "asterisk-match-unauthenticated": { + pattern: "foo/bar", + rootPaths: []string{"foo"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/*"}, + unauthenticatedExpected: true, + }, + "asterisk-match-root": { + pattern: "foo/bar", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: false, + }, + "path-ends-with-slash": { + pattern: "foo/", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "b", "foo*"}, + unauthenticatedExpected: true, + }, + "asterisk-match-no-slash": { + pattern: "foo", + rootPaths: []string{"foo*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "fo*"}, + unauthenticatedExpected: true, + }, + "multiple-root-paths": { + pattern: "foo/bar", + rootPaths: []string{"a", "b", "foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/baz/*"}, + unauthenticatedExpected: false, + }, + "plus-match-unauthenticated": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/bar"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/+/baz"}, + unauthenticatedExpected: true, + }, + "plus-match-root": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/baz"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, + "plus-and-asterisk": { + pattern: "foo/bar/baz/something", + rootPaths: []string{"foo/+/baz/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/+/baz*"}, + unauthenticatedExpected: true, + }, + "double-plus-good": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/+"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, } - for i, test := range tests { - doc := NewOASDocument("version") - path := Path{ - Pattern: test.pattern, - } - sp := &logical.Paths{ - Root: test.rootPaths, - Unauthenticated: test.unauthPaths, - } - err := documentPath(&path, sp, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - result := test.root - if doc.Paths["/"+test.pattern].Sudo != result { - t.Fatalf("Test (root) %d: Expected %v got %v", i, test.root, result) - } - result = test.unauth - if doc.Paths["/"+test.pattern].Unauthenticated != result { - t.Fatalf("Test (unauth) %d: Expected %v got %v", i, test.unauth, result) - } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + doc := NewOASDocument("version") + path := Path{ + Pattern: test.pattern, + } + backend := &Backend{ + PathsSpecial: &logical.Paths{ + Root: test.rootPaths, + Unauthenticated: test.unauthenticatedPaths, + }, + BackendType: logical.TypeLogical, + } + + if err := documentPath(&path, backend, "kv", doc); err != nil { + t.Fatal(err) + } + + actual := doc.Paths["/"+test.pattern].Sudo + if actual != test.rootExpected { + t.Fatalf("Test (root): expected: %v; got: %v", test.rootExpected, actual) + } + + actual = doc.Paths["/"+test.pattern].Unauthenticated + if actual != test.unauthenticatedExpected { + t.Fatalf("Test (unauth): expected: %v; got: %v", test.unauthenticatedExpected, actual) + } + }) } } @@ -503,66 +586,6 @@ func TestOpenAPI_Paths(t *testing.T) { }) } -func TestOpenAPI_OperationID(t *testing.T) { - path1 := &Path{ - Pattern: "foo/" + GenericNameRegex("id"), - Fields: map[string]*FieldSchema{ - "id": {Type: TypeString}, - }, - 
Operations: map[logical.Operation]OperationHandler{ - logical.ReadOperation: &PathOperation{}, - logical.UpdateOperation: &PathOperation{}, - logical.DeleteOperation: &PathOperation{}, - }, - } - - path2 := &Path{ - Pattern: "Foo/" + GenericNameRegex("id"), - Fields: map[string]*FieldSchema{ - "id": {Type: TypeString}, - }, - Operations: map[logical.Operation]OperationHandler{ - logical.ReadOperation: &PathOperation{}, - }, - } - - for _, context := range []string{"", "bar"} { - doc := NewOASDocument("version") - err := documentPath(path1, nil, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - err = documentPath(path2, nil, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - doc.CreateOperationIDs(context) - - tests := []struct { - path string - op string - opID string - }{ - {"/Foo/{id}", "get", "getFooId"}, - {"/foo/{id}", "get", "getFooId_2"}, - {"/foo/{id}", "post", "postFooId"}, - {"/foo/{id}", "delete", "deleteFooId"}, - } - - for _, test := range tests { - actual := getPathOp(doc.Paths[test.path], test.op).OperationID - expected := test.opID - if context != "" { - expected += "_" + context - } - - if actual != expected { - t.Fatalf("expected %v, got %v", expected, actual) - } - } - } -} - func TestOpenAPI_CustomDecoder(t *testing.T) { p := &Path{ Pattern: "foo", @@ -592,7 +615,7 @@ func TestOpenAPI_CustomDecoder(t *testing.T) { } docOrig := NewOASDocument("version") - err := documentPath(p, nil, "kv", logical.TypeLogical, docOrig) + err := documentPath(p, &Backend{BackendType: logical.TypeLogical}, "kv", docOrig) if err != nil { t.Fatal(err) } @@ -632,13 +655,14 @@ func TestOpenAPI_CleanResponse(t *testing.T) { // logical.Response. This will fail if logical.Response changes without a corresponding // change to cleanResponse() orig = &logical.Response{ - Secret: new(logical.Secret), - Auth: new(logical.Auth), - Data: map[string]interface{}{"foo": 42}, - Redirect: "foo", - Warnings: []string{"foo"}, - WrapInfo: &wrapping.ResponseWrapInfo{Token: "foo"}, - Headers: map[string][]string{"foo": {"bar"}}, + Secret: new(logical.Secret), + Auth: new(logical.Auth), + Data: map[string]interface{}{"foo": 42}, + Redirect: "foo", + Warnings: []string{"foo"}, + WrapInfo: &wrapping.ResponseWrapInfo{Token: "foo"}, + Headers: map[string][]string{"foo": {"bar"}}, + MountType: "mount", } origJSON := mustJSONMarshal(t, orig) @@ -651,11 +675,224 @@ func TestOpenAPI_CleanResponse(t *testing.T) { } } +func TestOpenAPI_constructOperationID(t *testing.T) { + tests := map[string]struct { + path string + pathIndex int + pathAttributes *DisplayAttributes + operation logical.Operation + operationAttributes *DisplayAttributes + defaultPrefix string + expected string + }{ + "empty": { + path: "", + pathIndex: 0, + pathAttributes: nil, + operation: logical.Operation(""), + operationAttributes: nil, + defaultPrefix: "", + expected: "", + }, + "simple-read": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.ReadOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-read-path-to-thing", + }, + "simple-write": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-write-path-to-thing", + }, + "operation-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + 
defaultPrefix: "test", + expected: "do-something", + }, + "operation-verb-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "do-something-else"}, + defaultPrefix: "test", + expected: "do-something-else", + }, + "operation-prefix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-path-to-thing", + }, + "operation-prefix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + "operation-prefix-and-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-my-suffix", + }, + "operation-prefix-and-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-write-better-suffix", + }, + "operation-prefix-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-create-better-suffix", + }, + "operation-prefix-verb-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login-better-suffix", + }, + "operation-prefix-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login", + }, + "operation-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "Login", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "login-better-suffix", + }, + "pipe-delimited-suffix-0": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix0", + }, + "pipe-delimited-suffix-1": { + path: 
"path/to/thing", + pathIndex: 1, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix1", + }, + "pipe-delimited-suffix-2-fallback": { + path: "path/to/thing", + pathIndex: 2, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := constructOperationID( + test.path, + test.pathIndex, + test.pathAttributes, + test.operation, + test.operationAttributes, + test.defaultPrefix, + ) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + +func TestOpenAPI_hyphenatedToTitleCase(t *testing.T) { + tests := map[string]struct { + in string + expected string + }{ + "simple": { + in: "test", + expected: "Test", + }, + "two-words": { + in: "two-words", + expected: "TwoWords", + }, + "three-words": { + in: "one-two-three", + expected: "OneTwoThree", + }, + "not-hyphenated": { + in: "something_like_this", + expected: "Something_like_this", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := hyphenatedToTitleCase(test.in) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) { t.Helper() doc := NewOASDocument("dummyversion") - if err := documentPath(path, sp, "kv", logical.TypeLogical, doc); err != nil { + if err := documentPath(path, &Backend{ + PathsSpecial: sp, + BackendType: logical.TypeLogical, + }, "kv", doc); err != nil { t.Fatal(err) } doc.CreateOperationIDs("") @@ -664,9 +901,6 @@ func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) if err != nil { t.Fatal(err) } - - t.Log(string(docJSON)) - // Compare json by first decoding, then comparing with a deep equality check. var expected, actual interface{} if err := jsonutil.DecodeJSON(docJSON, &actual); err != nil { diff --git a/sdk/framework/path.go b/sdk/framework/path.go index 80f4d5dc6c3e..067b005e0fe5 100644 --- a/sdk/framework/path.go +++ b/sdk/framework/path.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( @@ -53,15 +56,38 @@ type Path struct { // This should be a valid regular expression. Named captures will be // exposed as fields that should map to a schema in Fields. If a named // capture is not a field in the Fields map, then it will be ignored. + // + // The pattern will automatically have a ^ prepended and a $ appended before + // use, if these are not already present, so these may be omitted for clarity. + // + // If a ListOperation is being defined, the pattern must end with /? to match + // a trailing slash optionally, as ListOperations are always processed with a + // trailing slash added to the path if not already present. 
The match must not + // require the presence of a trailing slash, as HelpOperations, even for a + // path which only implements ListOperation, are processed without a trailing + // slash - so failure to make the trailing slash optional will break the + // `vault path-help` command for the path. Pattern string // Fields is the mapping of data fields to a schema describing that - // field. Named captures in the Pattern also map to fields. If a named - // capture name matches a PUT body name, the named capture takes - // priority. + // field. + // + // Field values are obtained from: + // + // - Named captures in the Pattern. // - // Note that only named capture fields are available in every operation, - // whereas all fields are available in the Write operation. + // - Parameters in the HTTP request body, for HTTP methods where a + // request body is expected, i.e. PUT/POST/PATCH. The request body is + // typically formatted as JSON, though + // "application/x-www-form-urlencoded" format can also be accepted. + // + // - Parameters in the HTTP URL query-string, for HTTP methods where + // there is no request body, i.e. GET/LIST/DELETE. The query-string + // is *not* parsed at all for PUT/POST/PATCH requests. + // + // Should the same field be specified both as a named capture and as + // a parameter, the named capture takes precedence, and a warning is + // returned. Fields map[string]*FieldSchema // Operations is the set of operations supported and the associated OperationsHandler. @@ -192,6 +218,11 @@ type DisplayAttributes struct { // Name is the name of the field suitable as a label or documentation heading. Name string `json:"name,omitempty"` + // Description of the field that renders as tooltip help text beside the label (name) in the UI. + // This may be used to replace descriptions that reference comma separation but correspond + // to UI inputs where only arrays are valid. For example params with Type: framework.TypeCommaStringSlice + Description string `json:"description,omitempty"` + // Value is a sample value to display for this field. This may be used // to indicate a default value, but it is for display only and completely separate // from any Default member handling. @@ -212,6 +243,28 @@ type DisplayAttributes struct { // Action is the verb to use for the operation. Action string `json:"action,omitempty"` + // OperationPrefix is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). OperationPrefix is + // typically a human-readable name of the plugin or a prefix shared by + // multiple related endpoints. + OperationPrefix string `json:"operationPrefix,omitempty"` + + // OperationVerb is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). OperationVerb is typically + // an action to be performed (e.g. "generate", "sign", "login", etc.). If + // not specified, the verb defaults to `logical.Operation.String()` + // (e.g. "read", "list", "delete", "write" for Create/Update) + OperationVerb string `json:"operationVerb,omitempty"` + + // OperationSuffix is a hyphenated lower-case string used to construct + // OpenAPI OperationID (prefix + verb + suffix). It is typically the name + // of the resource on which the action is performed (e.g. "role", + // "credentials", etc.). A pipe (|) separator can be used to list different + // suffixes for various permutations of the `Path.Pattern` regular + // expression. If not specified, the suffix defaults to the `Path.Pattern` + // split by dashes. 
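+	//
+	// For example (hypothetical values): if the pattern expands to the two
+	// OpenAPI paths "role" and "role/{name}", in that order, then
+	// OperationSuffix: "roles|role" yields operation IDs ending in "-roles"
+	// for the first path and "-role" for the second.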
+	OperationSuffix string `json:"operationSuffix,omitempty"`
+
 	// EditType is the optional type of form field needed for a property
 	// This is only necessary for a "textarea" or "file"
 	EditType string `json:"editType,omitempty"`
@@ -229,7 +282,7 @@ type RequestExample struct {
 
 // Response describes and optionally demonstrates an operation response.
 type Response struct {
-	Description string                  // summary of the the response and should always be provided
+	Description string                  // summary of the response; should always be provided
 	MediaType   string                  // media type of the response, defaulting to "application/json" if empty
 	Fields      map[string]*FieldSchema // the fields present in this response, used to generate openapi response
 	Example     *logical.Response       // example response data
@@ -246,6 +299,7 @@ type PathOperation struct {
 	Deprecated                  bool
 	ForwardPerformanceSecondary bool
 	ForwardPerformanceStandby   bool
+	DisplayAttrs                *DisplayAttributes
 }
 
 func (p *PathOperation) Handler() OperationFunc {
@@ -262,6 +316,7 @@ func (p *PathOperation) Properties() OperationProperties {
 		Deprecated:                  p.Deprecated,
 		ForwardPerformanceSecondary: p.ForwardPerformanceSecondary,
 		ForwardPerformanceStandby:   p.ForwardPerformanceStandby,
+		DisplayAttrs:                p.DisplayAttrs,
 	}
 }
 
@@ -329,8 +384,12 @@ func (p *Path) helpCallback(b *Backend) OperationFunc {
 			vaultVersion = env.VaultVersion
 		}
 	}
+	redactVersion, _, _, _ := logical.CtxRedactionSettingsValue(ctx)
+	if redactVersion {
+		vaultVersion = ""
+	}
 	doc := NewOASDocument(vaultVersion)
-	if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, b.BackendType, doc); err != nil {
+	if err := documentPath(p, b, requestResponsePrefix, doc); err != nil {
 		b.Logger().Warn("error generating OpenAPI", "error", err)
 	}
 
diff --git a/sdk/framework/path_map.go b/sdk/framework/path_map.go
index 0cba8ea2fb16..46cf4720e96e 100644
--- a/sdk/framework/path_map.go
+++ b/sdk/framework/path_map.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
diff --git a/sdk/framework/path_map_test.go b/sdk/framework/path_map_test.go
index 11e1f37c9d54..3fe6308cbcd2 100644
--- a/sdk/framework/path_map_test.go
+++ b/sdk/framework/path_map_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
diff --git a/sdk/framework/path_struct.go b/sdk/framework/path_struct.go
index 2a2848e58508..cba855065ea2 100644
--- a/sdk/framework/path_struct.go
+++ b/sdk/framework/path_struct.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
diff --git a/sdk/framework/path_struct_test.go b/sdk/framework/path_struct_test.go
index 9e81cc2e301c..88662af5300d 100644
--- a/sdk/framework/path_struct_test.go
+++ b/sdk/framework/path_struct_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
diff --git a/sdk/framework/path_test.go b/sdk/framework/path_test.go
index ca359d1f5776..4541930ed591 100644
--- a/sdk/framework/path_test.go
+++ b/sdk/framework/path_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package framework
 
 import (
diff --git a/sdk/framework/policy_map.go b/sdk/framework/policy_map.go
index 7befb399545c..94accf88ae74 100644
--- a/sdk/framework/policy_map.go
+++ b/sdk/framework/policy_map.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/policy_map_test.go b/sdk/framework/policy_map_test.go index 6a88b8051185..b785fddc783c 100644 --- a/sdk/framework/policy_map_test.go +++ b/sdk/framework/policy_map_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/secret.go b/sdk/framework/secret.go index 0c8f0dfcccdf..095bc12b7246 100644 --- a/sdk/framework/secret.go +++ b/sdk/framework/secret.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/secret_test.go b/sdk/framework/secret_test.go index 83af4753b6d9..29058dc84e3a 100644 --- a/sdk/framework/secret_test.go +++ b/sdk/framework/secret_test.go @@ -1 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework diff --git a/sdk/framework/template.go b/sdk/framework/template.go index 3abdd624c55e..d395c8f8dbd5 100644 --- a/sdk/framework/template.go +++ b/sdk/framework/template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/testdata/legacy.json b/sdk/framework/testdata/legacy.json index 3b4b1c2afd52..548151c6f9e6 100644 --- a/sdk/framework/testdata/legacy.json +++ b/sdk/framework/testdata/legacy.json @@ -21,21 +21,14 @@ "type": "string" }, "required": true - }, - { - "name": "secret_mount_path", - "description": "Path where the backend was mounted; the endpoint path will be offset by the mount path", - "in": "path", - "schema": { - "type": "string", - "default": "secret" - } } ], "get": { - "operationId": "getLookupId", + "operationId": "kv-read-lookup-id", "summary": "Synopsis", - "tags": ["secrets"], + "tags": [ + "secrets" + ], "responses": { "200": { "description": "OK" @@ -43,15 +36,17 @@ } }, "post": { - "operationId": "postLookupId", + "operationId": "kv-write-lookup-id", "summary": "Synopsis", - "tags": ["secrets"], + "tags": [ + "secrets" + ], "requestBody": { "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvLookupRequest" + "$ref": "#/components/schemas/KvWriteLookupIdRequest" } } } @@ -66,7 +61,7 @@ }, "components": { "schemas": { - "KvLookupRequest": { + "KvWriteLookupIdRequest": { "type": "object", "properties": { "token": { @@ -78,4 +73,3 @@ } } } - diff --git a/sdk/framework/testdata/operations.json b/sdk/framework/testdata/operations.json index 0cd198c069d5..8e9ec9b8d0e1 100644 --- a/sdk/framework/testdata/operations.json +++ b/sdk/framework/testdata/operations.json @@ -12,20 +12,7 @@ "paths": { "/foo/{id}": { "description": "Synopsis", - "x-vault-createSupported": true, - "x-vault-sudo": true, - "x-vault-displayAttrs": { - "navigation": true - }, "parameters": [ - { - "name": "format", - "description": "a query param", - "in": "query", - "schema": { - "type": "string" - } - }, { "name": "id", "description": "id path parameter", @@ -34,49 +21,49 @@ "type": "string" }, "required": true - }, - { - "name": "secret_mount_path", - "description": "Path where the backend was mounted; the endpoint path will be offset by the mount path", - "in": "path", - "schema": { - "type": "string", - "default": "secret" - } } ], + "x-vault-sudo": true, + "x-vault-createSupported": true, + "x-vault-displayAttrs": { + "navigation": true + }, "get": { - "operationId": "getFooId", - "tags": ["secrets"], "summary": "My Summary", 
"description": "My Description", - "responses": { - "200": { - "description": "OK" - } - }, + "operationId": "kv-read-foo-id", + "tags": [ + "secrets" + ], "parameters": [ { - "name": "list", - "description": "Return a list if `true`", + "name": "format", + "description": "a query param", "in": "query", "schema": { "type": "string" } } - ] + ], + "responses": { + "200": { + "description": "OK" + } + } }, "post": { - "operationId": "postFooId", - "tags": ["secrets"], "summary": "Update Summary", "description": "Update Description", + "operationId": "kv-write-foo-id", + "tags": [ + "secrets" + ], "requestBody": { "required": true, "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvFooRequest" + "$ref": "#/components/schemas/KvWriteFooIdRequest" } } } @@ -87,14 +74,88 @@ } } } + }, + "/foo/{id}/": { + "description": "Synopsis", + "parameters": [ + { + "name": "id", + "description": "id path parameter", + "in": "path", + "schema": { + "type": "string" + }, + "required": true + } + ], + "x-vault-sudo": true, + "x-vault-displayAttrs": { + "navigation": true + }, + "get": { + "summary": "List Summary", + "description": "List Description", + "operationId": "kv-list-foo-id", + "tags": [ + "secrets" + ], + "parameters": [ + { + "name": "format", + "description": "a query param", + "in": "query", + "schema": { + "type": "string" + } + }, + { + "name": "list", + "description": "Must be set to `true`", + "in": "query", + "schema": { + "type": "string", + "enum": [ + "true" + ] + }, + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StandardListResponse" + } + } + } + } + } + } } }, "components": { "schemas": { - "KvFooRequest": { + "KvWriteFooIdRequest": { "type": "object", - "required": ["age"], "properties": { + "age": { + "type": "integer", + "description": "the age", + "enum": [ + 1, + 2, + 3 + ], + "x-vault-displayAttrs": { + "name": "Age", + "value": 7, + "sensitive": true, + "group": "Some Group" + } + }, "flavors": { "type": "array", "description": "the flavors", @@ -102,35 +163,46 @@ "type": "string" } }, - "age": { + "format": { + "type": "string", + "description": "a query param" + }, + "maximum": { "type": "integer", - "description": "the age", - "enum": [1, 2, 3], - "x-vault-displayAttrs": { - "name": "Age", - "sensitive": true, - "group": "Some Group", - "value": 7 - } + "description": "a maximum value", + "format": "int64" }, "name": { "type": "string", "description": "the name", - "default": "Larry", - "pattern": "\\w([\\w-.]*\\w)?" 
+ "pattern": "\\w([\\w-.]*\\w)?", + "default": "Larry" }, "x-abc-token": { "type": "string", "description": "a header value", - "enum": ["a", "b", "c"] - }, - "maximum" : { - "type": "integer", - "description": "a maximum value", - "format": "int64" + "enum": [ + "a", + "b", + "c" + ] + } + }, + "required": [ + "age" + ] + }, + "StandardListResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } } } } } } -} +} \ No newline at end of file diff --git a/sdk/framework/testdata/operations_list.json b/sdk/framework/testdata/operations_list.json index f9616c8e1271..feb7b2ccba08 100644 --- a/sdk/framework/testdata/operations_list.json +++ b/sdk/framework/testdata/operations_list.json @@ -10,21 +10,9 @@ } }, "paths": { - "/foo/{id}": { + "/foo/{id}/": { "description": "Synopsis", - "x-vault-sudo": true, - "x-vault-displayAttrs": { - "navigation": true - }, "parameters": [ - { - "name": "format", - "description": "a query param", - "in": "query", - "schema": { - "type": "string" - } - }, { "name": "id", "description": "id path parameter", @@ -33,44 +21,69 @@ "type": "string" }, "required": true - }, - { - "name": "secret_mount_path", - "description": "Path where the backend was mounted; the endpoint path will be offset by the mount path", - "in": "path", - "schema": { - "type": "string", - "default": "secret" - } } ], + "x-vault-sudo": true, + "x-vault-displayAttrs": { + "navigation": true + }, "get": { - "operationId": "getFooId", - "tags": ["secrets"], "summary": "List Summary", "description": "List Description", - "responses": { - "200": { - "description": "OK" - } - }, + "operationId": "kv-list-foo-id", + "tags": [ + "secrets" + ], "parameters": [ + { + "name": "format", + "description": "a query param", + "in": "query", + "schema": { + "type": "string" + } + }, { "name": "list", "description": "Must be set to `true`", - "required": true, "in": "query", "schema": { "type": "string", - "enum": ["true"] + "enum": [ + "true" + ] + }, + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/StandardListResponse" + } + } } } - ] + } } } }, "components": { "schemas": { + "StandardListResponse": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "string" + } + } + } + } } } -} +} \ No newline at end of file diff --git a/sdk/framework/testdata/responses.json b/sdk/framework/testdata/responses.json index b9cb5d152baa..98d501ec5e89 100644 --- a/sdk/framework/testdata/responses.json +++ b/sdk/framework/testdata/responses.json @@ -12,21 +12,12 @@ "paths": { "/foo": { "description": "Synopsis", - "parameters": [ - { - "name": "secret_mount_path", - "description": "Path where the backend was mounted; the endpoint path will be offset by the mount path", - "in": "path", - "schema": { - "type": "string", - "default": "secret" - } - } - ], "x-vault-unauthenticated": true, "delete": { - "operationId": "deleteFoo", - "tags": ["secrets"], + "operationId": "kv-delete-foo", + "tags": [ + "secrets" + ], "summary": "Delete stuff", "responses": { "204": { @@ -35,8 +26,10 @@ } }, "get": { - "operationId": "getFoo", - "tags": ["secrets"], + "operationId": "kv-read-foo", + "tags": [ + "secrets" + ], "summary": "My Summary", "description": "My Description", "responses": { @@ -45,7 +38,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/KvFooResponse" + "$ref": 
"#/components/schemas/KvReadFooResponse" } } } @@ -56,7 +49,7 @@ }, "components": { "schemas": { - "KvFooResponse": { + "KvReadFooResponse": { "type": "object", "properties": { "field_a": { @@ -72,4 +65,3 @@ } } } - diff --git a/sdk/framework/testing.go b/sdk/framework/testing.go index a00a3241cf82..d2035d676f0c 100644 --- a/sdk/framework/testing.go +++ b/sdk/framework/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/wal.go b/sdk/framework/wal.go index 7e7bb1afa959..b090f03e7bf3 100644 --- a/sdk/framework/wal.go +++ b/sdk/framework/wal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/framework/wal_test.go b/sdk/framework/wal_test.go index 958be7e79ecd..040749239c88 100644 --- a/sdk/framework/wal_test.go +++ b/sdk/framework/wal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package framework import ( diff --git a/sdk/go.mod b/sdk/go.mod index 002ec3f4fa59..bd44fb57b87c 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -3,63 +3,122 @@ module github.com/hashicorp/vault/sdk go 1.19 require ( - github.com/armon/go-metrics v0.3.9 + cloud.google.com/go/cloudsqlconn v1.4.3 + github.com/armon/go-metrics v0.4.1 github.com/armon/go-radix v1.0.0 - github.com/evanphx/json-patch/v5 v5.5.0 + github.com/cenkalti/backoff/v3 v3.2.2 + github.com/docker/docker v25.0.5+incompatible + github.com/docker/go-connections v0.4.0 + github.com/evanphx/json-patch/v5 v5.6.0 github.com/fatih/structs v1.1.0 - github.com/go-ldap/ldap/v3 v3.1.10 - github.com/go-test/deep v1.0.2 - github.com/golang/protobuf v1.5.2 + github.com/go-ldap/ldap/v3 v3.4.4 + github.com/go-test/deep v1.1.0 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 + github.com/google/tink/go v1.6.1 + github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-plugin v1.4.5 - github.com/hashicorp/go-retryablehttp v0.5.3 - github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 - github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 + github.com/hashicorp/go-plugin v1.6.0 + github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 github.com/hashicorp/go-secure-stdlib/password v0.1.1 + github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 github.com/hashicorp/go-sockaddr v1.0.2 - github.com/hashicorp/go-uuid v1.0.2 - github.com/hashicorp/go-version v1.2.0 + github.com/hashicorp/go-uuid v1.0.3 + github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/golang-lru v0.5.4 - github.com/hashicorp/hcl v1.0.0 - github.com/mitchellh/copystructure v1.0.0 - github.com/mitchellh/go-testing-interface v1.0.0 + github.com/hashicorp/hcl v1.0.1-vault-5 + github.com/hashicorp/vault/api v1.9.1 + github.com/mitchellh/copystructure v1.2.0 + 
github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/mapstructure v1.5.0 - github.com/pierrec/lz4 v2.5.2+incompatible + github.com/pierrec/lz4 v2.6.1+incompatible github.com/ryanuber/go-glob v1.0.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.4 go.uber.org/atomic v1.9.0 - golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 - golang.org/x/text v0.3.3 - google.golang.org/grpc v1.41.0 - google.golang.org/protobuf v1.26.0 + golang.org/x/crypto v0.23.0 + golang.org/x/net v0.25.0 + golang.org/x/text v0.15.0 + google.golang.org/grpc v1.60.1 + google.golang.org/protobuf v1.34.1 ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/frankban/quicktest v1.10.0 // indirect - github.com/go-asn1-ber/asn1-ber v1.3.1 // indirect - github.com/hashicorp/go-cleanhttp v0.5.0 // indirect - github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect - github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mitchellh/reflectwalk v1.0.0 // indirect - github.com/oklog/run v1.0.0 // indirect + cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/frankban/quicktest v1.14.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.3 // indirect + github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/sys/user v0.1.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/oklog/run v1.1.0 // indirect + 
github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.1.1 // indirect - golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect - golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/stretchr/objx v0.5.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.10.0 // indirect + google.golang.org/api v0.134.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/go.sum b/sdk/go.sum index 2c9a7fd11f0c..b69fa2f0e8d5 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -1,66 +1,183 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/cloudsqlconn v1.4.3 h1:/WYFbB1NtMtoMxCbqpzzTFPDkxxlLTPme390KEGaEPc= +cloud.google.com/go/cloudsqlconn v1.4.3/go.mod h1:QL3tuStVOO70txb3rs4G8j5uMfo5ztZii8K3oGD3VYA= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= -github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 
h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= +github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= +github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.5.0 h1:bAmFiUJ+o0o2B4OiTFeE3MqCOtyo+jjPP9iZ0VRxYUc= -github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-asn1-ber/asn1-ber v1.3.1 h1:gvPdv/Hr++TRFCl0UbPFHC54P9N9jgsRPnmnr419Uck= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= +github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.1.10 h1:7WsKqasmPThNvdl0Q5GPpbTDD/ZD98CfuawrMIuh7qQ= -github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-ldap/ldap/v3 v3.4.4 h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs= +github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= 
+github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -70,48 +187,98 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/tink/go v1.6.1 h1:t7JHqO8Ath2w2ig5vjwQYJzhGEZymedQc90lQXUBa4I= +github.com/google/tink/go v1.6.1/go.mod h1:IGW53kTgag+st5yPhKKwJ6u2l+SSp5/v9XF7spovjlY= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 
h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7 h1:jgVdtp5YMn++PxnYhAFfrURfLf+nlqzBeddbvRG+tTg= +github.com/hashicorp/cap/ldap v0.0.0-20230914221201-c4eecc7e31f7/go.mod h1:q+c9XV1VqloZFZMu+zdvfb0cm7UrvKbvtmTF5wX5Q9o= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo= 
-github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1 h1:6KMBnfEv0/kLAz0O76sliN5mXbCDcLfs2kP7ssP7+DQ= -github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= -github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0 h1:KMWpBsC65ZBXDpoxJ0n2/zVfZaZIW73k2d8cy5Dv/Kk= +github.com/hashicorp/go-secure-stdlib/plugincontainer v0.3.0/go.mod h1:qKYwSZ2EOpppko5ud+Sh9TrUgiTAZSaQCr8XWIYXsbM= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= @@ -120,69 +287,179 @@ github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+Er github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod 
h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38= +github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod 
h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod 
h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jimlambrt/gldap v0.1.4 h1:PoB5u4ND0E+6W99JtQJvcjGFw+iKi3Gx3M60oOJBOqE= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4= +github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg= +github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microsoft/go-mssqldb v1.5.0 h1:CgENxkwtOBNj3Jg6T1X209y2blCfTTcwuOlznd2k9fk= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod 
h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 
v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -197,33 +474,133 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 
h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/metric v1.19.0 
h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -231,62 +608,268 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 
h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod 
h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= +google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.41.0 
h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -295,20 +878,41 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c 
h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/sdk/helper/authmetadata/auth_metadata.go b/sdk/helper/authmetadata/auth_metadata.go index 0fd2bd50f830..e490ab359aba 100644 --- a/sdk/helper/authmetadata/auth_metadata.go +++ b/sdk/helper/authmetadata/auth_metadata.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package authmetadata /* diff --git a/sdk/helper/authmetadata/auth_metadata_acc_test.go b/sdk/helper/authmetadata/auth_metadata_acc_test.go index 39888c69a16c..189c960098d3 100644 --- a/sdk/helper/authmetadata/auth_metadata_acc_test.go +++ b/sdk/helper/authmetadata/auth_metadata_acc_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package authmetadata import ( diff --git a/sdk/helper/authmetadata/auth_metadata_test.go b/sdk/helper/authmetadata/auth_metadata_test.go index 62341ebc85fb..a82044f9bc43 100644 --- a/sdk/helper/authmetadata/auth_metadata_test.go +++ b/sdk/helper/authmetadata/auth_metadata_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package authmetadata import ( diff --git a/sdk/helper/backoff/backoff.go b/sdk/helper/backoff/backoff.go new file mode 100644 index 000000000000..ebf9aaa7e721 --- /dev/null +++ b/sdk/helper/backoff/backoff.go @@ -0,0 +1,107 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backoff + +import ( + "errors" + "math" + "math/rand" + "time" +) + +var ErrMaxRetry = errors.New("exceeded maximum number of retries") + +const maxJitter = 0.25 + +// Backoff is used to do capped exponential backoff with jitter, with a maximum number of retries. +// Generally, use this struct by calling Next() or NextSleep() after a failure. +// If configured for N max retries, Next() and NextSleep() will return an error on the call N+1. +// The jitter is set to 25%, so values returned will have up to 25% less than twice the previous value. +// The min value will also include jitter, so the first call will almost always be less than the requested minimum value. +// Backoff is not thread-safe. 
+type Backoff struct { + currentAttempt int + maxRetries int + min time.Duration + max time.Duration + current time.Duration +} + +// NewBackoff creates a new exponential backoff with the given number of maximum retries and min/max durations. +func NewBackoff(maxRetries int, min, max time.Duration) *Backoff { + b := &Backoff{ + maxRetries: maxRetries, + max: max, + min: min, + } + b.Reset() + return b +} + +// Current returns the next time that will be returned by Next() (or slept in NextSleep()). +func (b *Backoff) Current() time.Duration { + return b.current +} + +// Next determines the next backoff duration that is roughly twice +// the current value, capped to a max value, with a measure of randomness. +// It returns an error if there are no more retries left. +func (b *Backoff) Next() (time.Duration, error) { + if b.currentAttempt >= b.maxRetries { + return time.Duration(-1), ErrMaxRetry + } + defer func() { + b.currentAttempt += 1 + }() + if b.currentAttempt == 0 { + return b.current, nil + } + next := 2 * b.current + if next > b.max { + next = b.max + } + next = jitter(next) + b.current = next + return next, nil +} + +// NextSleep will synchronously sleep the next backoff amount (see Next()). +// It returns an error if there are no more retries left. +func (b *Backoff) NextSleep() error { + next, err := b.Next() + if err != nil { + return err + } + time.Sleep(next) + return nil +} + +// Reset resets the state to the initial backoff amount and 0 retries. +func (b *Backoff) Reset() { + b.current = b.min + b.current = jitter(b.current) + b.currentAttempt = 0 +} + +func jitter(t time.Duration) time.Duration { + f := float64(t) * (1.0 - maxJitter*rand.Float64()) + return time.Duration(math.Floor(f)) +} + +// Retry calls the given function until it does not return an error, at least once and up to max_retries + 1 times. +// If the number of retries is exceeded, Retry() will return the last error seen joined with ErrMaxRetry. +func (b *Backoff) Retry(f func() error) error { + for { + err := f() + if err == nil { + return nil + } + + maxRetryErr := b.NextSleep() + if maxRetryErr != nil { + return errors.Join(maxRetryErr, err) + } + } + return nil // unreachable +} diff --git a/sdk/helper/backoff/backoff_test.go b/sdk/helper/backoff/backoff_test.go new file mode 100644 index 000000000000..46b85257bad5 --- /dev/null +++ b/sdk/helper/backoff/backoff_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backoff + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +// TestBackoff_Basic tests that basic exponential backoff works as expected up to a max of 3 times. +func TestBackoff_Basic(t *testing.T) { + for i := 0; i < 100; i++ { + b := NewBackoff(3, 1*time.Millisecond, 10*time.Millisecond) + x, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x, 1*time.Millisecond) + assert.GreaterOrEqual(t, x, 750*time.Microsecond) + + x2, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x2, x*2) + assert.GreaterOrEqual(t, x2, x*3/4) + + x3, err := b.Next() + assert.Nil(t, err) + assert.LessOrEqual(t, x3, x2*2) + assert.GreaterOrEqual(t, x3, x2*3/4) + + _, err = b.Next() + assert.NotNil(t, err) + } +} + +// TestBackoff_ZeroRetriesAlwaysFails checks that if retries is set to zero, then an error is returned immediately. 
+func TestBackoff_ZeroRetriesAlwaysFails(t *testing.T) { + b := NewBackoff(0, 1*time.Millisecond, 10*time.Millisecond) + _, err := b.Next() + assert.NotNil(t, err) +} + +// TestBackoff_MaxIsEnforced checks that the maximum backoff is enforced. +func TestBackoff_MaxIsEnforced(t *testing.T) { + b := NewBackoff(1001, 1*time.Millisecond, 2*time.Millisecond) + for i := 0; i < 1000; i++ { + x, err := b.Next() + assert.LessOrEqual(t, x, 2*time.Millisecond) + assert.Nil(t, err) + } +} diff --git a/sdk/helper/base62/base62.go b/sdk/helper/base62/base62.go index 981face425d4..7d2c7d5ba158 100644 --- a/sdk/helper/base62/base62.go +++ b/sdk/helper/base62/base62.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package base62 diff --git a/sdk/helper/certutil/certutil_test.go b/sdk/helper/certutil/certutil_test.go index 9c10f38a818a..8b550946121d 100644 --- a/sdk/helper/certutil/certutil_test.go +++ b/sdk/helper/certutil/certutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package certutil import ( @@ -10,6 +13,7 @@ import ( "crypto/rsa" "crypto/x509" "crypto/x509/pkix" + "encoding/asn1" "encoding/json" "encoding/pem" "fmt" @@ -942,6 +946,66 @@ func TestSignatureAlgorithmRoundTripping(t *testing.T) { } } +// TestParseBasicConstraintExtension Verify extension generation and parsing of x509 basic constraint extensions +// works as expected. +func TestBasicConstraintExtension(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + isCA bool + maxPathLen int + }{ + {"empty-seq", false, -1}, + {"just-ca-true", true, -1}, + {"just-ca-with-maxpathlen", true, 2}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ext, err := CreateBasicConstraintExtension(tt.isCA, tt.maxPathLen) + if err != nil { + t.Fatalf("failed generating basic extension: %v", err) + } + + gotIsCa, gotMaxPathLen, err := ParseBasicConstraintExtension(ext) + if err != nil { + t.Fatalf("failed parsing basic extension: %v", err) + } + + if tt.isCA != gotIsCa { + t.Fatalf("expected isCa (%v) got isCa (%v)", tt.isCA, gotIsCa) + } + + if tt.maxPathLen != gotMaxPathLen { + t.Fatalf("expected maxPathLen (%v) got maxPathLen (%v)", tt.maxPathLen, gotMaxPathLen) + } + }) + } + + t.Run("bad-extension-oid", func(t *testing.T) { + // Test invalid type errors out + _, _, err := ParseBasicConstraintExtension(pkix.Extension{}) + if err == nil { + t.Fatalf("should have failed parsing non-basic constraint extension") + } + }) + + t.Run("garbage-value", func(t *testing.T) { + extraBytes, err := asn1.Marshal("a string") + if err != nil { + t.Fatalf("failed encoding the struct: %v", err) + } + ext := pkix.Extension{ + Id: ExtensionBasicConstraintsOID, + Value: extraBytes, + } + _, _, err = ParseBasicConstraintExtension(ext) + if err == nil { + t.Fatalf("should have failed parsing basic constraint with extra information") + } + }) +} + func genRsaKey(t *testing.T) *rsa.PrivateKey { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { diff --git a/sdk/helper/certutil/cieps.go b/sdk/helper/certutil/cieps.go new file mode 100644 index 000000000000..9943cf3977a5 --- /dev/null +++ b/sdk/helper/certutil/cieps.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package certutil
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+)
+
+// Source of the issuance request: sign implies that the key material was
+// generated by the user and submitted via a CSR request but only ACL level
+// validation was applied; issue implies that Vault created the key material
+// on behalf of the user with ACL level validation occurring; ACME implies
+// that the user submitted a CSR and that additional ACME validation has
+// occurred before sending the request to the external service for
+// construction.
+type CIEPSIssuanceMode string
+
+const (
+	SignCIEPSMode  = "sign"
+	IssueCIEPSMode = "issue"
+	ACMECIEPSMode  = "acme"
+	ICACIEPSMode   = "ica"
+)
+
+// Configuration of the issuer and mount at the time of this request;
+// states the issuer's templated AIA information (falling back to the
+// mount-global config if no per-issuer AIA info is set), the issuer's
+// leaf_not_after_behavior (permit/truncate/err) for TTLs exceeding the
+// issuer's validity period, and the mount's default and max TTL.
+type CIEPSIssuanceConfig struct {
+	AIAValues            *URLEntries `json:"aia_values"`
+	LeafNotAfterBehavior string      `json:"leaf_not_after_behavior"`
+	MountDefaultTTL      string      `json:"mount_default_ttl"`
+	MountMaxTTL          string      `json:"mount_max_ttl"`
+}
+
+// Structured parameters sent by Vault or explicitly validated by Vault
+// prior to sending.
+type CIEPSVaultParams struct {
+	PolicyName string `json:"policy_name,omitempty"`
+	Mount      string `json:"mount"`
+	Namespace  string `json:"ns"`
+
+	// These indicate the type of the cluster node talking to the CIEPS
+	// service. When IsPerfStandby=true, setting StoreCert=true in the
+	// response will result in Vault forwarding the client's request
+	// up to the Performance Secondary's active node and re-trying the
+	// operation (including re-submitting the request to the CIEPS
+	// service).
+	//
+	// Any response returned by the CIEPS service in this case will be
+	// ignored and not signed by the CA's keys.
+	//
+	// IsPRSecondary is set to false when a local mount is used on a
+	// PR Secondary; in this scenario, PR Secondary nodes behave like
+	// PR Primary nodes. From a CIEPS service perspective, no behavior
+	// difference is expected between PR Primary and PR Secondary nodes;
+	// both will issue and store certificates on their active nodes.
+	// This information is included for audit tracking purposes.
+	IsPerfStandby bool `json:"vault_is_performance_standby"`
+	IsPRSecondary bool `json:"vault_is_performance_secondary"`
+
+	IssuanceMode CIEPSIssuanceMode `json:"issuance_mode"`
+
+	GeneratedKey bool `json:"vault_generated_private_key"`
+
+	IssuerName string `json:"requested_issuer_name"`
+	IssuerID   string `json:"requested_issuer_id"`
+	IssuerCert string `json:"requested_issuer_cert"`
+
+	Config CIEPSIssuanceConfig `json:"requested_issuance_config"`
+}
+
+// Outer request object sent by Vault to the external CIEPS service.
+//
+// The top-level fields denote properties about the CIEPS request,
+// with various request fields containing untrusted and trusted input
+// respectively.
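+//
+// For illustration only, a synchronous request might serialize roughly as
+// follows (field names are taken from the struct tags below; the values
+// shown here are hypothetical):
+//
+//	{
+//	  "request_version": 1,
+//	  "request_uuid": "8b2d813a-f669-4f1a-a588-1c9d7498db94",
+//	  "synchronous": true,
+//	  "user_request_key_values": {"csr": "-----BEGIN CERTIFICATE REQUEST-----\n..."},
+//	  "vault_request_values": {"mount": "pki", "ns": "root", "issuance_mode": "sign"}
+//	}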
+type CIEPSRequest struct { + Version int `json:"request_version"` + UUID string `json:"request_uuid"` + Sync bool `json:"synchronous"` + + UserRequestKV map[string]interface{} `json:"user_request_key_values"` + IdentityRequestKV map[string]interface{} `json:"identity_request_key_values,omitempty"` + ACMERequestKV map[string]interface{} `json:"acme_request_key_values,omitempty"` + VaultRequestKV CIEPSVaultParams `json:"vault_request_values"` + + // Vault guarantees that UserRequestKV will contain a csr parameter + // for all request types; this field is useful for engine implementations + // to have in parsed format. We assume that this is sent in PEM format, + // aligning with other Vault requests. + ParsedCSR *x509.CertificateRequest `json:"-"` +} + +func (req *CIEPSRequest) ParseUserCSR() error { + csrValueRaw, present := req.UserRequestKV["csr"] + if !present { + return fmt.Errorf("missing expected 'csr' attribute on the request") + } + + csrValue, ok := csrValueRaw.(string) + if !ok { + return fmt.Errorf("unexpected type of 'csr' attribute: %T", csrValueRaw) + } + + if csrValue == "" { + return fmt.Errorf("unexpectedly empty 'csr' attribute on the request") + } + + block, rest := pem.Decode([]byte(csrValue)) + if len(rest) > 0 { + return fmt.Errorf("failed to decode 'csr': %v bytes of trailing data after PEM block", len(rest)) + } + if block == nil { + return fmt.Errorf("failed to decode 'csr' PEM block") + } + + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return fmt.Errorf("failed to parse certificate request: %w", err) + } + + req.ParsedCSR = csr + return nil +} + +// Expected response object from the external CIEPS service. +// +// When parsing, Vault will disallow unknown fields, failing the +// parse if unknown fields are sent. +type CIEPSResponse struct { + UUID string `json:"request_uuid"` + Error string `json:"error,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Certificate string `json:"certificate"` + ParsedCertificate *x509.Certificate `json:"-"` + IssuerRef string `json:"issuer_ref"` + StoreCert bool `json:"store_certificate"` + GenerateLease bool `json:"generate_lease"` +} + +func (c *CIEPSResponse) MarshalCertificate() error { + if c.ParsedCertificate == nil || len(c.ParsedCertificate.Raw) == 0 { + return fmt.Errorf("no certificate present") + } + + pem := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: c.ParsedCertificate.Raw, + }) + if len(pem) == 0 { + return fmt.Errorf("failed to generate PEM: no body") + } + c.Certificate = string(pem) + + return nil +} diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index eace1aafd1fe..b7fc22db0262 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package certutil import ( @@ -10,9 +13,12 @@ import ( "crypto/rand" "crypto/rsa" "crypto/sha1" + "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" + "encoding/binary" + "encoding/hex" "encoding/pem" "errors" "fmt" @@ -78,11 +84,29 @@ var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ x509.PureEd25519: "Ed25519", } +// OIDs for X.509 SAN Extension +var OidExtensionSubjectAltName = asn1.ObjectIdentifier([]int{2, 5, 29, 17}) + +// OID for RFC 5280 CRL Number extension. +// +// > id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } +var CRLNumberOID = asn1.ObjectIdentifier([]int{2, 5, 29, 20}) + // OID for RFC 5280 Delta CRL Indicator CRL extension. 
// // > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) +// OID for KeyUsage from RFC 2459 : https://www.rfc-editor.org/rfc/rfc2459.html#section-4.2.1.3 +// +// > id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 } +var KeyUsageOID = asn1.ObjectIdentifier([]int{2, 5, 29, 15}) + +// OID for Extended Key Usage from RFC 5280 : https://www.rfc-editor.org/rfc/rfc5280#section-4.2.1.12 +// +// id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } +var ExtendedKeyUsageOID = asn1.ObjectIdentifier([]int{2, 5, 29, 37}) + // GetHexFormatted returns the byte buffer formatted in hex with // the specified separator between bytes. func GetHexFormatted(buf []byte, sep string) string { @@ -118,7 +142,7 @@ func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { if privateKey == nil { return nil, errutil.InternalError{Err: "passed-in private key is nil"} } - return getSubjectKeyID(privateKey.Public()) + return GetSubjectKeyID(privateKey.Public()) } // Returns the explicit SKID when used for cross-signing, else computes a new @@ -128,10 +152,10 @@ func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) { return data.Params.SKID, nil } - return getSubjectKeyID(data.CSR.PublicKey) + return GetSubjectKeyID(data.CSR.PublicKey) } -func getSubjectKeyID(pub interface{}) ([]byte, error) { +func GetSubjectKeyID(pub interface{}) ([]byte, error) { var publicKeyBytes []byte switch pub := pub.(type) { case *rsa.PublicKey: @@ -296,6 +320,18 @@ func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { return parsedBundle, nil } +func (p *ParsedCertBundle) ToTLSCertificate() tls.Certificate { + var cert tls.Certificate + cert.Certificate = append(cert.Certificate, p.CertificateBytes) + cert.Leaf = p.Certificate + cert.PrivateKey = p.PrivateKey + for _, ca := range p.CAChain { + cert.Certificate = append(cert.Certificate, ca.Bytes) + } + + return cert +} + // GeneratePrivateKey generates a private key with the specified type and key bits. 
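//
// A minimal usage sketch (assuming a *ParsedCertBundle as the
// ParsedPrivateKeyContainer, and "rsa" as one of the key type names this
// package accepts):
//
//	bundle := &ParsedCertBundle{}
//	if err := GeneratePrivateKey("rsa", 2048, bundle); err != nil {
//		// handle the error
//	}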
func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { return generatePrivateKey(keyType, keyBits, container, nil) @@ -1034,8 +1070,8 @@ func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x } var ( - oidExtensionBasicConstraints = []int{2, 5, 29, 19} - oidExtensionSubjectAltName = []int{2, 5, 29, 17} + ExtensionBasicConstraintsOID = []int{2, 5, 29, 19} + ExtensionSubjectAltNameOID = []int{2, 5, 29, 17} ) // CreateCSR creates a CSR with the default rand.Reader to @@ -1090,7 +1126,7 @@ func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Rea return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} } ext := pkix.Extension{ - Id: oidExtensionBasicConstraints, + Id: ExtensionBasicConstraintsOID, Value: val, Critical: true, } @@ -1211,7 +1247,7 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.URIs = data.CSR.URIs for _, name := range data.CSR.Extensions { - if !name.Id.Equal(oidExtensionBasicConstraints) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(oidExtensionSubjectAltName)) { + if !name.Id.Equal(ExtensionBasicConstraintsOID) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(ExtensionSubjectAltNameOID)) { certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) } } @@ -1263,7 +1299,6 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun } certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) - if err != nil { return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} } @@ -1284,7 +1319,7 @@ func NewCertPool(reader io.Reader) (*x509.CertPool, error) { if err != nil { return nil, err } - certs, err := parseCertsPEM(pemBlock) + certs, err := ParseCertsPEM(pemBlock) if err != nil { return nil, fmt.Errorf("error reading certs: %s", err) } @@ -1295,9 +1330,9 @@ func NewCertPool(reader io.Reader) (*x509.CertPool, error) { return pool, nil } -// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates -func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { ok := false certs := []*x509.Certificate{} for len(pemCerts) > 0 { @@ -1384,3 +1419,624 @@ func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) Value: bigNumValue, }, nil } + +// ParseBasicConstraintExtension parses a basic constraint pkix.Extension, useful if attempting to validate +// CSRs are requesting CA privileges as Go does not expose its implementation. Values returned are +// IsCA, MaxPathLen or error. If MaxPathLen was not set, a value of -1 will be returned. +func ParseBasicConstraintExtension(ext pkix.Extension) (bool, int, error) { + if !ext.Id.Equal(ExtensionBasicConstraintsOID) { + return false, -1, fmt.Errorf("passed in extension was not a basic constraint extension") + } + + // All elements are set to optional here, as it is possible that we receive a CSR with the extension + // containing an empty sequence by spec. 
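+	//
+	// For reference, RFC 5280 section 4.2.1.9 defines the encoding as:
+	//
+	//	BasicConstraints ::= SEQUENCE {
+	//	    cA                      BOOLEAN DEFAULT FALSE,
+	//	    pathLenConstraint       INTEGER (0..MAX) OPTIONAL }
+	//
+	// so either field may legitimately be absent from the sequence.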
+	type basicConstraints struct {
+		IsCA       bool `asn1:"optional"`
+		MaxPathLen int  `asn1:"optional,default:-1"`
+	}
+	bc := &basicConstraints{}
+	leftOver, err := asn1.Unmarshal(ext.Value, bc)
+	if err != nil {
+		return false, -1, fmt.Errorf("failed unmarshalling extension value: %w", err)
+	}
+
+	numLeftOver := len(bytes.TrimSpace(leftOver))
+	if numLeftOver > 0 {
+		return false, -1, fmt.Errorf("%d extra bytes within basic constraints value extension", numLeftOver)
+	}
+
+	return bc.IsCA, bc.MaxPathLen, nil
+}
+
+// CreateBasicConstraintExtension creates a basic constraint extension based on its inputs:
+// if isCa is false, an empty value sequence is returned and maxPath is
+// ignored. If isCa is true, maxPath can be set to -1 to leave the max path length unset.
+func CreateBasicConstraintExtension(isCa bool, maxPath int) (pkix.Extension, error) {
+	var asn1Bytes []byte
+	var err error
+
+	switch {
+	case isCa && maxPath >= 0:
+		CaAndMaxPathLen := struct {
+			IsCa       bool `asn1:""`
+			MaxPathLen int  `asn1:""`
+		}{
+			IsCa:       isCa,
+			MaxPathLen: maxPath,
+		}
+		asn1Bytes, err = asn1.Marshal(CaAndMaxPathLen)
+	case isCa && maxPath < 0:
+		justCa := struct {
+			IsCa bool `asn1:""`
+		}{IsCa: isCa}
+		asn1Bytes, err = asn1.Marshal(justCa)
+	default:
+		asn1Bytes, err = asn1.Marshal(struct{}{})
+	}
+
+	if err != nil {
+		return pkix.Extension{}, err
+	}
+
+	return pkix.Extension{
+		Id:       ExtensionBasicConstraintsOID,
+		Critical: true,
+		Value:    asn1Bytes,
+	}, nil
+}
+
+// GetOtherSANsFromX509Extensions is used to find all the extensions which have the identifier (OID) of
+// a SAN (Subject Alternative Name), and then look at each extension to find out if it is one of a set of
+// well-known types (like IP SANs) or "other". Currently, the only OtherSANs Vault supports are of type UTF8.
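+//
+// A minimal usage sketch (hypothetical; cert is a parsed *x509.Certificate):
+//
+//	names, err := GetOtherSANsFromX509Extensions(cert.Extensions)
+//	if err == nil {
+//		for _, name := range names {
+//			fmt.Println(name.String()) // e.g. "1.3.6.1.4.1.311.20.2.3;UTF-8:user@example.com"
+//		}
+//	}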
+func GetOtherSANsFromX509Extensions(exts []pkix.Extension) ([]OtherNameUtf8, error) { + var ret []OtherNameUtf8 + for _, ext := range exts { + if !ext.Id.Equal(OidExtensionSubjectAltName) { + continue + } + err := forEachSAN(ext.Value, func(tag int, data []byte) error { + if tag != 0 { + return nil + } + + var other OtherNameRaw + _, err := asn1.UnmarshalWithParams(data, &other, "tag:0") + if err != nil { + return fmt.Errorf("could not parse requested other SAN: %w", err) + } + val, err := other.ExtractUTF8String() + if err != nil { + return err + } + ret = append(ret, *val) + return nil + }) + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.FullBytes); err != nil { + return err + } + } + + return nil +} + +// otherNameRaw describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// Value [0] EXPLICIT ANY DEFINED BY type-id } +type OtherNameRaw struct { + TypeID asn1.ObjectIdentifier + Value asn1.RawValue +} + +type OtherNameUtf8 struct { + Oid string + Value string +} + +// String() turns an OtherNameUtf8 object into the storage or field-value used to assign that name +// to a certificate in an API call +func (o OtherNameUtf8) String() string { + return fmt.Sprintf("%s;%s:%s", o.Oid, "UTF-8", o.Value) +} + +// ExtractUTF8String returns the UTF8 string contained in the Value, or an error +// if none is present. 
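+//
+// For example (hypothetical), given an OtherNameRaw decoded from a SAN
+// extension:
+//
+//	name, err := other.ExtractUTF8String()
+//	if err == nil {
+//		_ = name.Value // the decoded UTF-8 string
+//	}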
+func (oraw *OtherNameRaw) ExtractUTF8String() (*OtherNameUtf8, error) { + svalue := cryptobyte.String(oraw.Value.Bytes) + var outTag cbasn1.Tag + var val cryptobyte.String + read := svalue.ReadAnyASN1(&val, &outTag) + + if read && outTag == asn1.TagUTF8String { + return &OtherNameUtf8{Oid: oraw.TypeID.String(), Value: string(val)}, nil + } + return nil, fmt.Errorf("no UTF-8 string found in OtherName") +} + +func getOtherSANsStringFromExtensions(exts []pkix.Extension) (string, error) { + otherNames, err := GetOtherSANsFromX509Extensions(exts) + if err != nil { + return "", err + } + + otherSansList := make([]string, len(otherNames)) + for i, otherName := range otherNames { + otherSansList[i] = otherName.String() + } + + otherSans := strings.Join(otherSansList, ",") + + return otherSans, nil +} + +func getOtherSANsMapFromExtensions(exts []pkix.Extension) (map[string][]string, error) { + otherNames, err := GetOtherSANsFromX509Extensions(exts) + if err != nil { + return nil, err + } + + otherSans := make(map[string][]string) + for _, name := range otherNames { + if otherSans[name.Oid] == nil { + otherSans[name.Oid] = []string{name.Value} + } else { + otherSans[name.Oid] = append(otherSans[name.Oid], name.Value) + } + } + + return otherSans, nil +} + +func getKeyUsage(exts []pkix.Extension) (x509.KeyUsage, error) { + keyUsage := x509.KeyUsage(0) + for _, ext := range exts { + if ext.Id.Equal(KeyUsageOID) { + // ASN1 is Big Endian + // another example of equivalent code: https://cs.opensource.google/go/go/+/master:src/crypto/x509/parser.go;drc=dd84bb682482390bb8465482cb7b13d2e3b17297;l=319 + buf := bytes.NewReader(ext.Value) + err := binary.Read(buf, binary.BigEndian, &keyUsage) + if err != nil { + return keyUsage, err + } + return keyUsage, nil + } + } + return keyUsage, nil +} + +func getExtKeyUsageOids(exts []pkix.Extension) ([]string, error) { + keyUsageOidStrings := make([]string, 0) + keyUsageOids := make([]asn1.ObjectIdentifier, 0) + for _, ext := range exts { + if ext.Id.Equal(ExtendedKeyUsageOID) { + _, err := asn1.Unmarshal(ext.Value, &keyUsageOids) + if err != nil { + return nil, fmt.Errorf("unable to unmarshal KeyUsageOid extension: %w", err) + } + for _, oid := range keyUsageOids { + keyUsageOidStrings = append(keyUsageOidStrings, oid.String()) + } + return keyUsageOidStrings, nil + } + } + return nil, nil +} + +func getPolicyIdentifiers(exts []pkix.Extension) ([]string, error) { + policyIdentifiers := make([]string, 0) + for _, ext := range exts { + if ext.Id.Equal(policyInformationOid) { + // PolicyInformation ::= SEQUENCE { + // policyIdentifier CertPolicyId, + // policyQualifiers SEQUENCE SIZE (1..MAX) OF + // PolicyQualifierInfo OPTIONAL } + type policyInformation struct { + PolicyIdentifier asn1.ObjectIdentifier `asn1:"optional"` + PolicyQualifier any `asn1:"optional"` + } + policies := make([]policyInformation, 0) + _, err := asn1.Unmarshal(ext.Value, &policies) + if err != nil { + return nil, err + } + for _, policy := range policies { + policyIdentifiers = append(policyIdentifiers, policy.PolicyIdentifier.String()) + } + return policyIdentifiers, nil + } + } + return nil, nil +} + +// Translate Certificates and CSRs into Certificate Template +// Four "Types" Here: Certificates; Certificate Signing Requests; Fields map[string]interface{}; Creation Parameters + +func ParseCertificateToCreationParameters(certificate x509.Certificate) (creationParameters CreationParameters, err error) { + otherSans, err := getOtherSANsMapFromExtensions(certificate.Extensions) + if err != nil { 
+ return CreationParameters{}, err + } + + creationParameters = CreationParameters{ + Subject: removeNames(certificate.Subject), + DNSNames: certificate.DNSNames, + EmailAddresses: certificate.EmailAddresses, + IPAddresses: certificate.IPAddresses, + URIs: certificate.URIs, + OtherSANs: otherSans, + IsCA: certificate.IsCA, + KeyType: GetKeyType(certificate.PublicKeyAlgorithm.String()), + KeyBits: FindBitLength(certificate.PublicKey), + NotAfter: certificate.NotAfter, + KeyUsage: certificate.KeyUsage, + // ExtKeyUsage: We use ExtKeyUsageOIDs instead as the more general field + // ExtKeyUsageOIDs: this is an extension that may not be set, so is handled below + // PolicyIdentifiers: this is an extension that may not be set, so is handled below + BasicConstraintsValidForNonCA: certificate.BasicConstraintsValid, + SignatureBits: FindSignatureBits(certificate.SignatureAlgorithm), + UsePSS: IsPSS(certificate.SignatureAlgorithm), + // The following two values are on creation parameters, but are impossible to parse from the certificate + // ForceAppendCaChain + // UseCSRValues + PermittedDNSDomains: certificate.PermittedDNSDomains, + // URLs: punting on this for now + MaxPathLength: certificate.MaxPathLen, + NotBeforeDuration: time.Now().Sub(certificate.NotBefore), // Assumes Certificate was created this moment + SKID: certificate.SubjectKeyId, + } + + extKeyUsageOIDS, err := getExtKeyUsageOids(certificate.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.ExtKeyUsageOIDs = extKeyUsageOIDS + + policyInformationOids, err := getPolicyIdentifiers(certificate.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.PolicyIdentifiers = policyInformationOids + + return creationParameters, err +} + +func removeNames(name pkix.Name) pkix.Name { + name.Names = nil + return name +} + +func ParseCsrToCreationParameters(csr x509.CertificateRequest) (CreationParameters, error) { + otherSANs, err := getOtherSANsMapFromExtensions(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + + creationParameters := CreationParameters{ + Subject: removeNames(csr.Subject), + DNSNames: csr.DNSNames, + EmailAddresses: csr.EmailAddresses, + IPAddresses: csr.IPAddresses, + URIs: csr.URIs, + OtherSANs: otherSANs, + // IsCA: is handled below since the basic constraint it comes from might not be set on the CSR + KeyType: GetKeyType(csr.PublicKeyAlgorithm.String()), + KeyBits: FindBitLength(csr.PublicKey), + // NotAfter: this is not set on a CSR + // KeyUsage: handled below since this may not be set + // ExtKeyUsage: We use exclusively ExtKeyUsageOIDs here + // ExtKeyUsageOIDs: handled below since this may not be set + // PolicyIdentifiers: handled below since this may not be set + // BasicConstraintsValidForNonCA is handled below, since it may or may not be set on the CSR + SignatureBits: FindSignatureBits(csr.SignatureAlgorithm), + UsePSS: IsPSS(csr.SignatureAlgorithm), + // The following two values are on creation parameters, but are impossible to parse from the csr + // ForceAppendCaChain + // UseCSRValues + // PermittedDNSDomains : omitted, this generally isn't on a CSR + // URLs : omitted, this generally isn't on a CSR + // MaxPathLength is handled below since the basic constraint it comes from may not be set on the CSR + // NotBeforeDuration : this is not set on a CSR + // SKID: this is generally not set on a CSR, but calculated from the Key information itself + } + + keyUsage, err := getKeyUsage(csr.Extensions) + if err != nil { + 
return CreationParameters{}, err + } + creationParameters.KeyUsage = keyUsage + + extKeyUsageOIDS, err := getExtKeyUsageOids(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.ExtKeyUsageOIDs = extKeyUsageOIDS + + policyInformationOids, err := getPolicyIdentifiers(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + creationParameters.PolicyIdentifiers = policyInformationOids + + found, isCA, maxPathLength, err := getBasicConstraintsFromExtension(csr.Extensions) + if err != nil { + return CreationParameters{}, err + } + if found { + creationParameters.IsCA = isCA + creationParameters.BasicConstraintsValidForNonCA = (isCA && maxPathLength != 0) || (!isCA && (maxPathLength == 0)) + if isCA { // MaxPathLength Only Has a Meaning on a Certificate Authority + creationParameters.MaxPathLength = maxPathLength + } + } + + return creationParameters, err +} + +func ParseCsrToFields(csr x509.CertificateRequest) (map[string]interface{}, error) { + otherSans, err := getOtherSANsStringFromExtensions(csr.Extensions) + if err != nil { + return nil, err + } + + templateData := map[string]interface{}{ + "common_name": csr.Subject.CommonName, + "alt_names": MakeAltNamesCommaSeparatedString(csr.DNSNames, csr.EmailAddresses), + "ip_sans": MakeIpAddressCommaSeparatedString(csr.IPAddresses), + "uri_sans": MakeUriCommaSeparatedString(csr.URIs), + "other_sans": otherSans, + "signature_bits": FindSignatureBits(csr.SignatureAlgorithm), + "exclude_cn_from_sans": DetermineExcludeCnFromCsrSans(csr), + "ou": makeCommaSeparatedString(csr.Subject.OrganizationalUnit), + "organization": makeCommaSeparatedString(csr.Subject.Organization), + "country": makeCommaSeparatedString(csr.Subject.Country), + "locality": makeCommaSeparatedString(csr.Subject.Locality), + "province": makeCommaSeparatedString(csr.Subject.Province), + "street_address": makeCommaSeparatedString(csr.Subject.StreetAddress), + "postal_code": makeCommaSeparatedString(csr.Subject.PostalCode), + "serial_number": csr.Subject.SerialNumber, + // There is no "TTL" on a CSR, that is always set by the signer + // max_path_length is handled below + // permitted_dns_domains is a CA thing, it generally does not appear on a CSR + "use_pss": IsPSS(csr.SignatureAlgorithm), + // skid could be calculated, but does not directly exist on a csr, so punting for now + "key_type": GetKeyType(csr.PublicKeyAlgorithm.String()), + "key_bits": FindBitLength(csr.PublicKey), + } + + // isCA is not a field in our data call - that is represented inside vault by using a different endpoint + found, _, _, err := getBasicConstraintsFromExtension(csr.Extensions) + if err != nil { + return nil, err + } + templateData["add_basic_constraints"] = found + + return templateData, nil +} + +func ParseCertificateToFields(certificate x509.Certificate) (map[string]interface{}, error) { + otherSans, err := getOtherSANsStringFromExtensions(certificate.Extensions) + if err != nil { + return nil, err + } + + templateData := map[string]interface{}{ + "common_name": certificate.Subject.CommonName, + "alt_names": MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": MakeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": MakeUriCommaSeparatedString(certificate.URIs), + "other_sans": otherSans, + "signature_bits": FindSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": DetermineExcludeCnFromCertSans(certificate), + "ou": 
makeCommaSeparatedString(certificate.Subject.OrganizationalUnit), + "organization": makeCommaSeparatedString(certificate.Subject.Organization), + "country": makeCommaSeparatedString(certificate.Subject.Country), + "locality": makeCommaSeparatedString(certificate.Subject.Locality), + "province": makeCommaSeparatedString(certificate.Subject.Province), + "street_address": makeCommaSeparatedString(certificate.Subject.StreetAddress), + "postal_code": makeCommaSeparatedString(certificate.Subject.PostalCode), + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), + "use_pss": IsPSS(certificate.SignatureAlgorithm), + "skid": hex.EncodeToString(certificate.SubjectKeyId), + "key_type": GetKeyType(certificate.PublicKeyAlgorithm.String()), + "key_bits": FindBitLength(certificate.PublicKey), + } + + return templateData, nil +} + +func getBasicConstraintsFromExtension(exts []pkix.Extension) (found bool, isCA bool, maxPathLength int, err error) { + for _, ext := range exts { + if ext.Id.Equal(ExtensionBasicConstraintsOID) { + isCA, maxPathLength, err = ParseBasicConstraintExtension(ext) + if err != nil { + return false, false, -1, err + } + return true, isCA, maxPathLength, nil + } + } + + return false, false, -1, nil +} + +func MakeAltNamesCommaSeparatedString(names []string, emails []string) string { + return strings.Join(append(names, emails...), ",") +} + +func MakeUriCommaSeparatedString(uris []*url.URL) string { + stringAddresses := make([]string, len(uris)) + for i, uri := range uris { + stringAddresses[i] = uri.String() + } + return strings.Join(stringAddresses, ",") +} + +func MakeIpAddressCommaSeparatedString(addresses []net.IP) string { + stringAddresses := make([]string, len(addresses)) + for i, address := range addresses { + stringAddresses[i] = address.String() + } + return strings.Join(stringAddresses, ",") +} + +func makeCommaSeparatedString(values []string) string { + return strings.Join(values, ",") +} + +func DetermineExcludeCnFromCertSans(certificate x509.Certificate) bool { + cn := certificate.Subject.CommonName + if cn == "" { + return false + } + + emails := certificate.EmailAddresses + for _, email := range emails { + if email == cn { + return false + } + } + + dnses := certificate.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func DetermineExcludeCnFromCsrSans(csr x509.CertificateRequest) bool { + cn := csr.Subject.CommonName + if cn == "" { + return false + } + + emails := csr.EmailAddresses + for _, email := range emails { + if email == cn { + return false + } + } + + dnses := csr.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func FindBitLength(publicKey any) int { + if publicKey == nil { + return 0 + } + switch pub := publicKey.(type) { + case *rsa.PublicKey: + return pub.N.BitLen() + case *ecdsa.PublicKey: + switch pub.Curve { + case elliptic.P224(): + return 224 + case elliptic.P256(): + return 256 + case elliptic.P384(): + return 384 + case elliptic.P521(): + return 521 + default: + return 0 + } + default: + return 0 + } +} + +func FindSignatureBits(algo x509.SignatureAlgorithm) int { + switch algo { + case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: + return -1 + case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, 
x509.SHA256WithRSAPSS: + return 256 + case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: + return 384 + case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: + return 512 + case x509.PureEd25519: + return 0 + default: + return -1 + } +} + +func GetKeyType(goKeyType string) string { + switch goKeyType { + case "RSA": + return "rsa" + case "ECDSA": + return "ec" + case "Ed25519": + return "ed25519" + default: + return "" + } +} + +func IsPSS(algorithm x509.SignatureAlgorithm) bool { + switch algorithm { + case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: + return true + default: + return false + } +} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 15b816f0c8ea..bfdc153c4852 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package certutil contains helper functions that are mostly used // with the PKI backend but can be generally useful. Functionality // includes helpers for converting a certificate/private key bundle @@ -160,6 +163,21 @@ func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { return UnknownPrivateKey } +// GetPrivateKeyTypeFromPublicKey based on the public key, return the PrivateKeyType +// that would be associated with it, returning UnknownPrivateKey for unsupported types +func GetPrivateKeyTypeFromPublicKey(pubKey crypto.PublicKey) PrivateKeyType { + switch pubKey.(type) { + case *rsa.PublicKey: + return RSAPrivateKey + case *ecdsa.PublicKey: + return ECPrivateKey + case ed25519.PublicKey: + return Ed25519PrivateKey + default: + return UnknownPrivateKey + } +} + // ToPEMBundle converts a string-based certificate bundle // to a PEM-based string certificate bundle in trust path // order, leaf certificate first @@ -1013,3 +1031,6 @@ func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []stri Value: asn1Bytes, }, nil } + +// Subject Attribute OIDs +var SubjectPilotUserIDAttributeOID = asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 1} diff --git a/sdk/helper/certutil/types_test.go b/sdk/helper/certutil/types_test.go new file mode 100644 index 000000000000..2cf383afaa02 --- /dev/null +++ b/sdk/helper/certutil/types_test.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package certutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "testing" +) + +func TestGetPrivateKeyTypeFromPublicKey(t *testing.T) { + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("error generating rsa key: %s", err) + } + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + t.Fatalf("error generating ecdsa key: %s", err) + } + + publicKey, _, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatalf("error generating ed25519 key: %s", err) + } + + testCases := map[string]struct { + publicKey crypto.PublicKey + expectedKeyType PrivateKeyType + }{ + "rsa": { + publicKey: rsaKey.Public(), + expectedKeyType: RSAPrivateKey, + }, + "ecdsa": { + publicKey: ecdsaKey.Public(), + expectedKeyType: ECPrivateKey, + }, + "ed25519": { + publicKey: publicKey, + expectedKeyType: Ed25519PrivateKey, + }, + "bad key type": { + publicKey: []byte{}, + expectedKeyType: UnknownPrivateKey, + }, + } + + for name, tt := range testCases { + t.Run(name, func(t *testing.T) { + keyType := GetPrivateKeyTypeFromPublicKey(tt.publicKey) + + if keyType != tt.expectedKeyType { + t.Fatalf("key type mismatch: expected %s, got %s", tt.expectedKeyType, keyType) + } + }) + } +} diff --git a/sdk/helper/cidrutil/cidr.go b/sdk/helper/cidrutil/cidr.go index 7e48c2be5034..9d2a41829c4c 100644 --- a/sdk/helper/cidrutil/cidr.go +++ b/sdk/helper/cidrutil/cidr.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package cidrutil import ( diff --git a/sdk/helper/cidrutil/cidr_test.go b/sdk/helper/cidrutil/cidr_test.go index 6a8662cdf17f..e6fc5764452f 100644 --- a/sdk/helper/cidrutil/cidr_test.go +++ b/sdk/helper/cidrutil/cidr_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package cidrutil import ( diff --git a/sdk/helper/clientcountutil/clientcountutil.go b/sdk/helper/clientcountutil/clientcountutil.go new file mode 100644 index 000000000000..dfafd4bee873 --- /dev/null +++ b/sdk/helper/clientcountutil/clientcountutil.go @@ -0,0 +1,405 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package clientcountutil provides a library to generate activity log data for +// testing. +package clientcountutil + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "google.golang.org/protobuf/encoding/protojson" +) + +// ActivityLogDataGenerator holds an ActivityLogMockInput. Users can create the +// generator with NewActivityLogData(), add content to the generator using +// the fluent API methods, and generate and write the JSON representation of the +// input to the Vault API. +type ActivityLogDataGenerator struct { + data *generation.ActivityLogMockInput + addingToMonth *generation.Data + addingToSegment *generation.Segment + client *api.Client +} + +// NewActivityLogData creates a new instance of an activity log data generator +// The type returned by this function cannot be called concurrently +func NewActivityLogData(client *api.Client) *ActivityLogDataGenerator { + return &ActivityLogDataGenerator{ + client: client, + data: new(generation.ActivityLogMockInput), + } +} + +// NewCurrentMonthData opens a new month of data for the current month. 
All
+// clients will continue to be added to this month until a new month is created
+// with NewPreviousMonthData.
+func (d *ActivityLogDataGenerator) NewCurrentMonthData() *ActivityLogDataGenerator {
+	return d.newMonth(&generation.Data{Month: &generation.Data_CurrentMonth{CurrentMonth: true}})
+}
+
+// NewPreviousMonthData opens a new month of data, where the clients will be
+// recorded as having been seen monthsAgo months ago. All clients will continue
+// to be added to this month until a new month is created with
+// NewPreviousMonthData or NewCurrentMonthData.
+func (d *ActivityLogDataGenerator) NewPreviousMonthData(monthsAgo int) *ActivityLogDataGenerator {
+	return d.newMonth(&generation.Data{Month: &generation.Data_MonthsAgo{MonthsAgo: int32(monthsAgo)}})
+}
+
+func (d *ActivityLogDataGenerator) newMonth(newMonth *generation.Data) *ActivityLogDataGenerator {
+	d.data.Data = append(d.data.Data, newMonth)
+	d.addingToMonth = newMonth
+	d.addingToSegment = nil
+	return d
+}
+
+// MonthOption holds an option that can be set for the entire month
+type MonthOption func(m *generation.Data)
+
+// WithMaximumSegmentIndex sets the maximum segment index for the segments in
+// the open month. Set this value to control how many indexes the data should
+// be split across. This must include any empty or skipped indexes. For
+// example, say that you would like all of your data split across indexes 0 and
+// 3, with the following empty and skipped indexes:
+//
+//	empty indexes: [2]
+//	skipped indexes: [1]
+//
+// To accomplish that, you will need to call WithMaximumSegmentIndex(3).
+// This value will be ignored if you have called Segment() for the open month.
+// If not set, all data will be in 1 segment.
+func WithMaximumSegmentIndex(n int) MonthOption {
+	return func(m *generation.Data) {
+		m.NumSegments = int32(n)
+	}
+}
+
+// WithEmptySegmentIndexes sets which segment indexes should be empty for the
+// segments in the open month. If you use this option, you must either:
+//  1. ensure that you've called Segment() for the open month
+//  2. use WithMaximumSegmentIndex() to set the total number of segments
+//
+// If you haven't set either of those values then this option will be ignored,
+// unless you included 0 as an empty segment index in which case only an empty
+// segment will be created.
+func WithEmptySegmentIndexes(i ...int) MonthOption {
+	return func(m *generation.Data) {
+		indexes := make([]int32, 0, len(i))
+		for _, index := range i {
+			indexes = append(indexes, int32(index))
+		}
+		m.EmptySegmentIndexes = indexes
+	}
+}
+
+// WithSkipSegmentIndexes sets which segment indexes should be skipped for the
+// segments in the open month. If you use this option, you must either:
+//  1. ensure that you've called Segment() for the open month
+//  2. use WithMaximumSegmentIndex() to set the total number of segments
+//
+// If you haven't set either of those values then this option will be ignored,
+// unless you included 0 as a skipped segment index in which case no segments
+// will be created.
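+//
+// As a sketch, splitting a month's data across indexes 0 and 3, with index 2
+// empty and index 1 skipped (client is an assumed, pre-configured *api.Client;
+// the counts and indexes are illustrative values) could look like:
+//
+//	gen := NewActivityLogData(client).
+//		NewCurrentMonthData().
+//		NewClientsSeen(5).
+//		SetMonthOptions(
+//			WithMaximumSegmentIndex(3),
+//			WithEmptySegmentIndexes(2),
+//			WithSkipSegmentIndexes(1),
+//		)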
+func WithSkipSegmentIndexes(i ...int) MonthOption { + return func(m *generation.Data) { + indexes := make([]int32, 0, len(i)) + for _, index := range i { + indexes = append(indexes, int32(index)) + } + m.SkipSegmentIndexes = indexes + } +} + +// SetMonthOptions can be called at any time to set options for the open month +func (d *ActivityLogDataGenerator) SetMonthOptions(opts ...MonthOption) *ActivityLogDataGenerator { + for _, opt := range opts { + opt(d.addingToMonth) + } + return d +} + +// ClientOption defines additional options for the client +// This type and the functions that return it are here for ease of use. A user +// could also choose to create the *generation.Client themselves, without using +// a ClientOption +type ClientOption func(client *generation.Client) + +// WithClientNamespace sets the namespace for the client +func WithClientNamespace(n string) ClientOption { + return func(client *generation.Client) { + client.Namespace = n + } +} + +// WithClientMount sets the mount path for the client +func WithClientMount(m string) ClientOption { + return func(client *generation.Client) { + client.Mount = m + } +} + +// WithClientIsNonEntity sets whether the client is an entity client or a non- +// entity token client +func WithClientIsNonEntity() ClientOption { + return WithClientType("non-entity") +} + +// WithClientType sets the client type to the given string. If this client type +// is not "entity", then the client will be counted in the activity log as a +// non-entity client +func WithClientType(typ string) ClientOption { + return func(client *generation.Client) { + client.ClientType = typ + } +} + +// WithClientID sets the ID for the client +func WithClientID(id string) ClientOption { + return func(client *generation.Client) { + client.Id = id + } +} + +// ClientsSeen adds clients to the month that was most recently opened with +// NewPreviousMonthData or NewCurrentMonthData. +func (d *ActivityLogDataGenerator) ClientsSeen(clients ...*generation.Client) *ActivityLogDataGenerator { + if d.addingToSegment == nil { + if d.addingToMonth.Clients == nil { + d.addingToMonth.Clients = &generation.Data_All{All: &generation.Clients{}} + } + d.addingToMonth.GetAll().Clients = append(d.addingToMonth.GetAll().Clients, clients...) + return d + } + d.addingToSegment.Clients.Clients = append(d.addingToSegment.Clients.Clients, clients...) + return d +} + +// NewClientSeen adds 1 new client with the given options to the most recently +// opened month. +func (d *ActivityLogDataGenerator) NewClientSeen(opts ...ClientOption) *ActivityLogDataGenerator { + return d.NewClientsSeen(1, opts...) +} + +// NewClientsSeen adds n new clients with the given options to the most recently +// opened month. +func (d *ActivityLogDataGenerator) NewClientsSeen(n int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// RepeatedClientSeen adds 1 client that was seen in the previous month to +// the month that was most recently opened. This client will have the attributes +// described by the provided options. +func (d *ActivityLogDataGenerator) RepeatedClientSeen(opts ...ClientOption) *ActivityLogDataGenerator { + return d.RepeatedClientsSeen(1, opts...) +} + +// RepeatedClientsSeen adds n clients that were seen in the previous month to +// the month that was most recently opened. These clients will have the +// attributes described by provided options. 
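+//
+// For example, a sketch (the month offsets, counts, and namespace are
+// illustrative; client is an assumed, pre-configured *api.Client):
+//
+//	gen := NewActivityLogData(client).
+//		NewPreviousMonthData(2).
+//		NewClientsSeen(10, WithClientNamespace("ns1")).
+//		NewPreviousMonthData(1).
+//		RepeatedClientsSeen(4, WithClientNamespace("ns1"))
+//
+// adds 4 clients to the month one month ago that repeat clients seen two
+// months ago (that month's previous month).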
+func (d *ActivityLogDataGenerator) RepeatedClientsSeen(n int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.Repeated = true + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// RepeatedClientSeenFromMonthsAgo adds 1 client that was seen in monthsAgo +// month to the month that was most recently opened. This client will have the +// attributes described by provided options. +func (d *ActivityLogDataGenerator) RepeatedClientSeenFromMonthsAgo(monthsAgo int, opts ...ClientOption) *ActivityLogDataGenerator { + return d.RepeatedClientsSeenFromMonthsAgo(1, monthsAgo, opts...) +} + +// RepeatedClientsSeenFromMonthsAgo adds n clients that were seen in monthsAgo +// month to the month that was most recently opened. These clients will have the +// attributes described by provided options. +func (d *ActivityLogDataGenerator) RepeatedClientsSeenFromMonthsAgo(n, monthsAgo int, opts ...ClientOption) *ActivityLogDataGenerator { + c := new(generation.Client) + for _, opt := range opts { + opt(c) + } + c.RepeatedFromMonth = int32(monthsAgo) + c.Count = int32(n) + return d.ClientsSeen(c) +} + +// SegmentOption defines additional options for the segment +type SegmentOption func(segment *generation.Segment) + +// WithSegmentIndex sets the index for the segment to n. If this option is not +// provided, the segment will be given the next consecutive index +func WithSegmentIndex(n int) SegmentOption { + return func(segment *generation.Segment) { + index := int32(n) + segment.SegmentIndex = &index + } +} + +// Segment starts a segment within the current month. All clients will be added +// to this segment, until either Segment is called again to create a new open +// segment, or NewPreviousMonthData or NewCurrentMonthData is called to open a +// new month. +func (d *ActivityLogDataGenerator) Segment(opts ...SegmentOption) *ActivityLogDataGenerator { + s := &generation.Segment{ + Clients: &generation.Clients{}, + } + for _, opt := range opts { + opt(s) + } + if d.addingToMonth.GetSegments() == nil { + d.addingToMonth.Clients = &generation.Data_Segments{Segments: &generation.Segments{}} + } + d.addingToMonth.GetSegments().Segments = append(d.addingToMonth.GetSegments().Segments, s) + d.addingToSegment = s + return d +} + +// ToJSON returns the JSON representation of the data +func (d *ActivityLogDataGenerator) ToJSON() ([]byte, error) { + return protojson.Marshal(d.data) +} + +// ToProto returns the ActivityLogMockInput protobuf +func (d *ActivityLogDataGenerator) ToProto() *generation.ActivityLogMockInput { + return d.data +} + +// Write writes the data to the API with the given write options. The method +// returns the new paths that have been written. Note that the API endpoint will +// only be present when Vault has been compiled with the "testonly" flag. 
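+//
+// A minimal usage sketch (assuming ctx is a context.Context and client is a
+// configured *api.Client; error handling elided), mirroring the flow
+// exercised in the package tests:
+//
+//	paths, err := NewActivityLogData(client).
+//		NewPreviousMonthData(1).
+//		NewClientSeen().
+//		NewCurrentMonthData().
+//		RepeatedClientSeen().
+//		Write(ctx, generation.WriteOptions_WRITE_ENTITIES)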
+func (d *ActivityLogDataGenerator) Write(ctx context.Context, writeOptions ...generation.WriteOptions) ([]string, error) {
+	d.data.Write = writeOptions
+	err := VerifyInput(d.data)
+	if err != nil {
+		return nil, err
+	}
+	data, err := d.ToJSON()
+	if err != nil {
+		return nil, err
+	}
+	resp, err := d.client.Logical().WriteWithContext(ctx, "sys/internal/counters/activity/write", map[string]interface{}{"input": string(data)})
+	if err != nil {
+		return nil, err
+	}
+	// guard against a nil response so that dereferencing resp.Data cannot panic
+	if resp == nil || resp.Data == nil {
+		return nil, fmt.Errorf("received no data")
+	}
+	paths := resp.Data["paths"]
+	castedPaths, ok := paths.([]interface{})
+	if !ok {
+		return nil, fmt.Errorf("invalid paths data: %v", paths)
+	}
+	returnPaths := make([]string, 0, len(castedPaths))
+	for _, path := range castedPaths {
+		strPath, ok := path.(string)
+		if !ok {
+			return nil, fmt.Errorf("invalid path: %v", path)
+		}
+		returnPaths = append(returnPaths, strPath)
+	}
+	return returnPaths, nil
+}
+
+// VerifyInput checks that the input data is valid
+func VerifyInput(input *generation.ActivityLogMockInput) error {
+	// mapping from monthsAgo to the month's data
+	months := make(map[int32]*generation.Data)
+
+	// this keeps track of the index of the earliest month. We need to verify
+	// that this month doesn't have any repeated clients
+	earliestMonthsAgo := int32(0)
+
+	// this map holds a set of the month indexes for any RepeatedFromMonth
+	// values. Each element will be checked to ensure that the month being
+	// repeated from exists in the input data
+	repeatedFromMonths := make(map[int32]struct{})
+
+	for _, month := range input.Data {
+		monthsAgo := month.GetMonthsAgo()
+		if monthsAgo > earliestMonthsAgo {
+			earliestMonthsAgo = monthsAgo
+		}
+
+		// verify that no monthsAgo value is repeated
+		if _, seen := months[monthsAgo]; seen {
+			return fmt.Errorf("multiple months with monthsAgo %d", monthsAgo)
+		}
+		months[monthsAgo] = month
+
+		// the declared number of segments must leave room for at least one
+		// segment that is neither empty nor skipped
+		if month.NumSegments > 0 && int(month.NumSegments)-len(month.GetSkipSegmentIndexes())-len(month.GetEmptySegmentIndexes()) <= 0 {
+			return fmt.Errorf("number of segments %d is too small.
It must be large enough to include the empty (%v) and skipped (%v) segments", month.NumSegments, month.GetSkipSegmentIndexes(), month.GetEmptySegmentIndexes()) + } + + if segments := month.GetSegments(); segments != nil { + if month.NumSegments > 0 { + return errors.New("cannot specify both number of segments and create segmented data") + } + + segmentIndexes := make(map[int32]struct{}) + for _, segment := range segments.Segments { + + // collect any RepeatedFromMonth values + for _, client := range segment.GetClients().GetClients() { + if repeatFrom := client.RepeatedFromMonth; repeatFrom > 0 { + repeatedFromMonths[repeatFrom] = struct{}{} + } + } + + // verify that no segment indexes are repeated + segmentIndex := segment.SegmentIndex + if segmentIndex == nil { + continue + } + if _, seen := segmentIndexes[*segmentIndex]; seen { + return fmt.Errorf("cannot have repeated segment index %d", *segmentIndex) + } + segmentIndexes[*segmentIndex] = struct{}{} + } + } else { + for _, client := range month.GetAll().GetClients() { + // collect any RepeatedFromMonth values + if repeatFrom := client.RepeatedFromMonth; repeatFrom > 0 { + repeatedFromMonths[repeatFrom] = struct{}{} + } + } + } + } + + // check that the corresponding month exists for all the RepeatedFromMonth + // values + for repeated := range repeatedFromMonths { + if _, ok := months[repeated]; !ok { + return fmt.Errorf("cannot repeat from %d months ago", repeated) + } + } + // the earliest month can't have any repeated clients, because there are no + // earlier months to repeat from + earliestMonth := months[earliestMonthsAgo] + repeatedClients := false + if all := earliestMonth.GetAll(); all != nil { + for _, client := range all.GetClients() { + repeatedClients = repeatedClients || client.Repeated || client.RepeatedFromMonth != 0 + } + } else { + for _, segment := range earliestMonth.GetSegments().GetSegments() { + for _, client := range segment.GetClients().GetClients() { + repeatedClients = repeatedClients || client.Repeated || client.RepeatedFromMonth != 0 + } + } + } + + if repeatedClients { + return fmt.Errorf("%d months ago cannot have repeated clients, because it is the earliest month", earliestMonthsAgo) + } + + return nil +} diff --git a/sdk/helper/clientcountutil/clientcountutil_test.go b/sdk/helper/clientcountutil/clientcountutil_test.go new file mode 100644 index 000000000000..6a5b224bc675 --- /dev/null +++ b/sdk/helper/clientcountutil/clientcountutil_test.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package clientcountutil + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/clientcountutil/generation" + "github.com/stretchr/testify/require" +) + +// TestNewCurrentMonthData verifies that current month is set correctly and that +// there are no open segments +func TestNewCurrentMonthData(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData() + require.True(t, generator.data.Data[0].GetCurrentMonth()) + require.True(t, generator.addingToMonth.GetCurrentMonth()) + require.Nil(t, generator.addingToSegment) +} + +// TestNewMonthDataMonthsAgo verifies that months ago is set correctly and that +// there are no open segments +func TestNewMonthDataMonthsAgo(t *testing.T) { + generator := NewActivityLogData(nil).NewPreviousMonthData(3) + require.Equal(t, int32(3), generator.data.Data[0].GetMonthsAgo()) + require.Equal(t, int32(3), generator.addingToMonth.GetMonthsAgo()) + require.Nil(t, generator.addingToSegment) +} + +// TestNewMonthData_MultipleMonths opens a month 3 months ago then 2 months ago. +// The test verifies that the generator is set to add to the correct month. We +// then open a current month, and verify that the generator will add to the +// current month. +func TestNewMonthData_MultipleMonths(t *testing.T) { + generator := NewActivityLogData(nil).NewPreviousMonthData(3).NewPreviousMonthData(2) + require.Equal(t, int32(2), generator.data.Data[1].GetMonthsAgo()) + require.Equal(t, int32(2), generator.addingToMonth.GetMonthsAgo()) + generator = generator.NewCurrentMonthData() + require.True(t, generator.data.Data[2].GetCurrentMonth()) + require.True(t, generator.addingToMonth.GetCurrentMonth()) +} + +// TestNewCurrentMonthData_ClientsSeen calls ClientsSeen with 3 clients, and +// verifies that they are added to the input data +func TestNewCurrentMonthData_ClientsSeen(t *testing.T) { + wantClients := []*generation.Client{ + { + Id: "1", + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }, + { + Id: "2", + }, + { + Id: "3", + Count: int32(3), + }, + } + generator := NewActivityLogData(nil).NewCurrentMonthData().ClientsSeen(wantClients...) + require.Equal(t, generator.data.Data[0].GetAll().Clients, wantClients) + require.True(t, generator.data.Data[0].GetCurrentMonth()) +} + +// TestSegment_AddClients adds clients in a variety of ways to an open segment +// and verifies that the clients are present in the segment with the correct +// options +func TestSegment_AddClients(t *testing.T) { + testAddClients(t, func() *ActivityLogDataGenerator { + return NewActivityLogData(nil).NewCurrentMonthData().Segment() + }, func(g *ActivityLogDataGenerator) *generation.Client { + return g.data.Data[0].GetSegments().Segments[0].Clients.Clients[0] + }) +} + +// TestSegment_MultipleSegments opens a current month and adds a client to an +// un-indexed segment, then opens an indexed segment and adds a client. 
The test
+// verifies that clients are present in both segments, and that the segment
+// index is correctly recorded
+func TestSegment_MultipleSegments(t *testing.T) {
+	generator := NewActivityLogData(nil).NewCurrentMonthData().Segment().NewClientSeen().Segment(WithSegmentIndex(2)).NewClientSeen()
+	require.Len(t, generator.data.Data[0].GetSegments().Segments[0].Clients.Clients, 1)
+	require.Len(t, generator.data.Data[0].GetSegments().Segments[1].Clients.Clients, 1)
+	require.Equal(t, int32(2), *generator.data.Data[0].GetSegments().Segments[1].SegmentIndex)
+	require.Equal(t, int32(2), *generator.addingToSegment.SegmentIndex)
+}
+
+// TestSegment_NewMonth adds a client to a segment, then starts a new month. The
+// test verifies that there are no open segments
+func TestSegment_NewMonth(t *testing.T) {
+	generator := NewActivityLogData(nil).NewCurrentMonthData().Segment().NewClientSeen().NewPreviousMonthData(1)
+	require.Nil(t, generator.addingToSegment)
+}
+
+// TestNewCurrentMonthData_AddClients adds clients in a variety of ways to
+// the current month and verifies that the clients are present in the month with
+// the correct options
+func TestNewCurrentMonthData_AddClients(t *testing.T) {
+	testAddClients(t, func() *ActivityLogDataGenerator {
+		return NewActivityLogData(nil).NewCurrentMonthData()
+	}, func(g *ActivityLogDataGenerator) *generation.Client {
+		return g.data.Data[0].GetAll().Clients[0]
+	})
+}
+
+// TestWrite creates a mock http server and writes generated data to it. The
+// test verifies that the returned paths are parsed correctly, and that the JSON
+// sent to the server is correct.
+func TestWrite(t *testing.T) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		_, err := io.WriteString(w, `{"data":{"paths":["path1","path2"]}}`)
+		require.NoError(t, err)
+		body, err := io.ReadAll(r.Body)
+		require.NoError(t, err)
+		raw := map[string]string{}
+		err = json.Unmarshal(body, &raw)
+		require.NoError(t, err)
+		require.JSONEq(t, `{"write":["WRITE_ENTITIES"],"data":[{"monthsAgo":3,"all":{"clients":[{"count":1}]}},{"monthsAgo":2,"segments":{"segments":[{"segmentIndex":2,"clients":{"clients":[{"count":1,"repeated":true}]}}]}},{"currentMonth":true}]}`, raw["input"])
+	}))
+	defer ts.Close()
+
+	client, err := api.NewClient(&api.Config{
+		Address: ts.URL,
+	})
+	require.NoError(t, err)
+	paths, err := NewActivityLogData(client).
+		NewPreviousMonthData(3).
+		NewClientSeen().
+		NewPreviousMonthData(2).
+		Segment(WithSegmentIndex(2)).
+		RepeatedClientSeen().
+		NewCurrentMonthData().Write(context.Background(), generation.WriteOptions_WRITE_ENTITIES)
+
+	require.NoError(t, err)
+	require.Equal(t, []string{"path1", "path2"}, paths)
+}
+
+func testAddClients(t *testing.T, makeGenerator func() *ActivityLogDataGenerator, getClient func(data *ActivityLogDataGenerator) *generation.Client) {
+	t.Helper()
+	clientOptions := []ClientOption{
+		WithClientNamespace("ns"), WithClientMount("mount"), WithClientIsNonEntity(), WithClientID("1"),
+	}
+	generator := makeGenerator().NewClientSeen(clientOptions...)
+	require.Equal(t, getClient(generator), &generation.Client{
+		Id:         "1",
+		Count:      1,
+		Namespace:  "ns",
+		Mount:      "mount",
+		ClientType: "non-entity",
+	})
+
+	generator = makeGenerator().NewClientsSeen(4, clientOptions...)
+	require.Equal(t, getClient(generator), &generation.Client{
+		Id:         "1",
+		Count:      4,
+		Namespace:  "ns",
+		Mount:      "mount",
+		ClientType: "non-entity",
+	})
+
+	generator = makeGenerator().RepeatedClientSeen(clientOptions...)
+ require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 1, + Repeated: true, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientsSeen(4, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 4, + Repeated: true, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientSeenFromMonthsAgo(3, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 1, + RepeatedFromMonth: 3, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) + + generator = makeGenerator().RepeatedClientsSeenFromMonthsAgo(4, 3, clientOptions...) + require.Equal(t, getClient(generator), &generation.Client{ + Id: "1", + Count: 4, + RepeatedFromMonth: 3, + Namespace: "ns", + Mount: "mount", + ClientType: "non-entity", + }) +} + +// TestSetMonthOptions sets month options and verifies that they are saved +func TestSetMonthOptions(t *testing.T) { + generator := NewActivityLogData(nil).NewCurrentMonthData().SetMonthOptions(WithEmptySegmentIndexes(3, 4), + WithMaximumSegmentIndex(7), WithSkipSegmentIndexes(1, 2)) + require.Equal(t, int32(7), generator.data.Data[0].NumSegments) + require.Equal(t, []int32{3, 4}, generator.data.Data[0].EmptySegmentIndexes) + require.Equal(t, []int32{1, 2}, generator.data.Data[0].SkipSegmentIndexes) +} + +// TestVerifyInput constructs invalid inputs and ensures that VerifyInput +// returns an error +func TestVerifyInput(t *testing.T) { + cases := []struct { + name string + generator *ActivityLogDataGenerator + }{ + { + name: "repeated client with only 1 month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + RepeatedClientSeen(), + }, + { + name: "repeated client with segment", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(). + RepeatedClientSeen(), + }, + { + name: "repeated client with earliest month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + NewClientSeen(). + NewPreviousMonthData(2). + RepeatedClientSeen(), + }, + { + name: "repeated month", + generator: NewActivityLogData(nil). + NewPreviousMonthData(1). + NewPreviousMonthData(1), + }, + { + name: "repeated current month", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + NewCurrentMonthData(), + }, + { + name: "repeated segment index", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(WithSegmentIndex(1)). + Segment(WithSegmentIndex(1)), + }, + { + name: "segment with num segments", + generator: NewActivityLogData(nil). + NewCurrentMonthData(). + Segment(). + SetMonthOptions(WithMaximumSegmentIndex(1)), + }, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + require.Error(t, VerifyInput(tc.generator.data)) + }) + } +} diff --git a/sdk/helper/clientcountutil/generation/generate_data.pb.go b/sdk/helper/clientcountutil/generation/generate_data.pb.go new file mode 100644 index 000000000000..dfe5eba189aa --- /dev/null +++ b/sdk/helper/clientcountutil/generation/generate_data.pb.go @@ -0,0 +1,753 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.1 +// protoc (unknown) +// source: sdk/helper/clientcountutil/generation/generate_data.proto + +package generation + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WriteOptions int32 + +const ( + WriteOptions_WRITE_UNKNOWN WriteOptions = 0 + WriteOptions_WRITE_PRECOMPUTED_QUERIES WriteOptions = 1 + WriteOptions_WRITE_DISTINCT_CLIENTS WriteOptions = 2 + WriteOptions_WRITE_ENTITIES WriteOptions = 3 + WriteOptions_WRITE_DIRECT_TOKENS WriteOptions = 4 + WriteOptions_WRITE_INTENT_LOGS WriteOptions = 5 +) + +// Enum value maps for WriteOptions. +var ( + WriteOptions_name = map[int32]string{ + 0: "WRITE_UNKNOWN", + 1: "WRITE_PRECOMPUTED_QUERIES", + 2: "WRITE_DISTINCT_CLIENTS", + 3: "WRITE_ENTITIES", + 4: "WRITE_DIRECT_TOKENS", + 5: "WRITE_INTENT_LOGS", + } + WriteOptions_value = map[string]int32{ + "WRITE_UNKNOWN": 0, + "WRITE_PRECOMPUTED_QUERIES": 1, + "WRITE_DISTINCT_CLIENTS": 2, + "WRITE_ENTITIES": 3, + "WRITE_DIRECT_TOKENS": 4, + "WRITE_INTENT_LOGS": 5, + } +) + +func (x WriteOptions) Enum() *WriteOptions { + p := new(WriteOptions) + *p = x + return p +} + +func (x WriteOptions) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WriteOptions) Descriptor() protoreflect.EnumDescriptor { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes[0].Descriptor() +} + +func (WriteOptions) Type() protoreflect.EnumType { + return &file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes[0] +} + +func (x WriteOptions) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WriteOptions.Descriptor instead. +func (WriteOptions) EnumDescriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{0} +} + +type ActivityLogMockInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Write []WriteOptions `protobuf:"varint,1,rep,packed,name=write,proto3,enum=generation.WriteOptions" json:"write,omitempty"` + Data []*Data `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty"` +} + +func (x *ActivityLogMockInput) Reset() { + *x = ActivityLogMockInput{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ActivityLogMockInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityLogMockInput) ProtoMessage() {} + +func (x *ActivityLogMockInput) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityLogMockInput.ProtoReflect.Descriptor instead. 
+func (*ActivityLogMockInput) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityLogMockInput) GetWrite() []WriteOptions { + if x != nil { + return x.Write + } + return nil +} + +func (x *ActivityLogMockInput) GetData() []*Data { + if x != nil { + return x.Data + } + return nil +} + +type Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Month: + // + // *Data_CurrentMonth + // *Data_MonthsAgo + Month isData_Month `protobuf_oneof:"month"` + // Types that are assignable to Clients: + // + // *Data_All + // *Data_Segments + Clients isData_Clients `protobuf_oneof:"clients"` + EmptySegmentIndexes []int32 `protobuf:"varint,5,rep,packed,name=empty_segment_indexes,json=emptySegmentIndexes,proto3" json:"empty_segment_indexes,omitempty"` + SkipSegmentIndexes []int32 `protobuf:"varint,6,rep,packed,name=skip_segment_indexes,json=skipSegmentIndexes,proto3" json:"skip_segment_indexes,omitempty"` + NumSegments int32 `protobuf:"varint,7,opt,name=num_segments,json=numSegments,proto3" json:"num_segments,omitempty"` +} + +func (x *Data) Reset() { + *x = Data{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Data) ProtoMessage() {} + +func (x *Data) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Data.ProtoReflect.Descriptor instead. 
+func (*Data) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{1} +} + +func (m *Data) GetMonth() isData_Month { + if m != nil { + return m.Month + } + return nil +} + +func (x *Data) GetCurrentMonth() bool { + if x, ok := x.GetMonth().(*Data_CurrentMonth); ok { + return x.CurrentMonth + } + return false +} + +func (x *Data) GetMonthsAgo() int32 { + if x, ok := x.GetMonth().(*Data_MonthsAgo); ok { + return x.MonthsAgo + } + return 0 +} + +func (m *Data) GetClients() isData_Clients { + if m != nil { + return m.Clients + } + return nil +} + +func (x *Data) GetAll() *Clients { + if x, ok := x.GetClients().(*Data_All); ok { + return x.All + } + return nil +} + +func (x *Data) GetSegments() *Segments { + if x, ok := x.GetClients().(*Data_Segments); ok { + return x.Segments + } + return nil +} + +func (x *Data) GetEmptySegmentIndexes() []int32 { + if x != nil { + return x.EmptySegmentIndexes + } + return nil +} + +func (x *Data) GetSkipSegmentIndexes() []int32 { + if x != nil { + return x.SkipSegmentIndexes + } + return nil +} + +func (x *Data) GetNumSegments() int32 { + if x != nil { + return x.NumSegments + } + return 0 +} + +type isData_Month interface { + isData_Month() +} + +type Data_CurrentMonth struct { + CurrentMonth bool `protobuf:"varint,1,opt,name=current_month,json=currentMonth,proto3,oneof"` +} + +type Data_MonthsAgo struct { + MonthsAgo int32 `protobuf:"varint,2,opt,name=months_ago,json=monthsAgo,proto3,oneof"` +} + +func (*Data_CurrentMonth) isData_Month() {} + +func (*Data_MonthsAgo) isData_Month() {} + +type isData_Clients interface { + isData_Clients() +} + +type Data_All struct { + All *Clients `protobuf:"bytes,3,opt,name=all,proto3,oneof"` // you can’t have repeated fields in a oneof, which is why these are separate message types +} + +type Data_Segments struct { + Segments *Segments `protobuf:"bytes,4,opt,name=segments,proto3,oneof"` +} + +func (*Data_All) isData_Clients() {} + +func (*Data_Segments) isData_Clients() {} + +type Segments struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Segments []*Segment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"` +} + +func (x *Segments) Reset() { + *x = Segments{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Segments) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Segments) ProtoMessage() {} + +func (x *Segments) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Segments.ProtoReflect.Descriptor instead. 
+func (*Segments) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{2} +} + +func (x *Segments) GetSegments() []*Segment { + if x != nil { + return x.Segments + } + return nil +} + +type Segment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SegmentIndex *int32 `protobuf:"varint,1,opt,name=segment_index,json=segmentIndex,proto3,oneof" json:"segment_index,omitempty"` + Clients *Clients `protobuf:"bytes,2,opt,name=clients,proto3" json:"clients,omitempty"` +} + +func (x *Segment) Reset() { + *x = Segment{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Segment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Segment) ProtoMessage() {} + +func (x *Segment) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Segment.ProtoReflect.Descriptor instead. +func (*Segment) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{3} +} + +func (x *Segment) GetSegmentIndex() int32 { + if x != nil && x.SegmentIndex != nil { + return *x.SegmentIndex + } + return 0 +} + +func (x *Segment) GetClients() *Clients { + if x != nil { + return x.Clients + } + return nil +} + +type Clients struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Clients []*Client `protobuf:"bytes,1,rep,name=clients,proto3" json:"clients,omitempty"` +} + +func (x *Clients) Reset() { + *x = Clients{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Clients) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Clients) ProtoMessage() {} + +func (x *Clients) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Clients.ProtoReflect.Descriptor instead. 
+func (*Clients) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{4} +} + +func (x *Clients) GetClients() []*Client { + if x != nil { + return x.Clients + } + return nil +} + +type Client struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Count int32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Repeated bool `protobuf:"varint,3,opt,name=repeated,proto3" json:"repeated,omitempty"` + RepeatedFromMonth int32 `protobuf:"varint,4,opt,name=repeated_from_month,json=repeatedFromMonth,proto3" json:"repeated_from_month,omitempty"` + Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"` + Mount string `protobuf:"bytes,6,opt,name=mount,proto3" json:"mount,omitempty"` + ClientType string `protobuf:"bytes,7,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` +} + +func (x *Client) Reset() { + *x = Client{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Client) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Client) ProtoMessage() {} + +func (x *Client) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Client.ProtoReflect.Descriptor instead. 
+func (*Client) Descriptor() ([]byte, []int) { + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP(), []int{5} +} + +func (x *Client) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Client) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *Client) GetRepeated() bool { + if x != nil { + return x.Repeated + } + return false +} + +func (x *Client) GetRepeatedFromMonth() int32 { + if x != nil { + return x.RepeatedFromMonth + } + return 0 +} + +func (x *Client) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Client) GetMount() string { + if x != nil { + return x.Mount + } + return "" +} + +func (x *Client) GetClientType() string { + if x != nil { + return x.ClientType + } + return "" +} + +var File_sdk_helper_clientcountutil_generation_generate_data_proto protoreflect.FileDescriptor + +var file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x14, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x67, 0x4d, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, + 0x2e, 0x0a, 0x05, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, + 0x24, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc8, 0x02, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x25, + 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0a, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x5f, + 0x61, 0x67, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x6f, 0x6e, + 0x74, 0x68, 0x73, 0x41, 0x67, 0x6f, 0x12, 0x27, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x48, 0x01, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, + 0x32, 0x0a, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, + 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x48, 0x01, 0x52, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x73, 0x65, 0x67, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x05, 0x52, 0x13, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x6b, 0x69, 
0x70, 0x5f, + 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x05, 0x52, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x65, 0x67, 0x6d, 0x65, + 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, + 0x5f, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0b, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x07, 0x0a, 0x05, + 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x3b, 0x0a, 0x08, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x08, + 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, + 0x07, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x67, 0x6d, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, + 0x00, 0x52, 0x0c, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x88, + 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x73, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x22, 0x37, 0x0a, 0x07, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, + 0x0a, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x52, 0x07, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, + 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6d, 0x6f, 0x6e, 0x74, 0x68, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x72, 0x6f, 0x6d, 0x4d, 0x6f, 0x6e, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2a, 0xa0, + 0x01, 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x11, 0x0a, 0x0d, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 
0x12, 0x1d, 0x0a, 0x19, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x43, + 0x4f, 0x4d, 0x50, 0x55, 0x54, 0x45, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, + 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x49, 0x53, 0x54, 0x49, + 0x4e, 0x43, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x53, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x49, 0x45, 0x53, 0x10, + 0x03, 0x12, 0x17, 0x0a, 0x13, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, + 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x53, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x53, 0x10, + 0x05, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, + 0x73, 0x64, 0x6b, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x75, + 0x74, 0x69, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescOnce sync.Once + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData = file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc +) + +func file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescGZIP() []byte { + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescOnce.Do(func() { + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData) + }) + return file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDescData +} + +var file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes = []interface{}{ + (WriteOptions)(0), // 0: generation.WriteOptions + (*ActivityLogMockInput)(nil), // 1: generation.ActivityLogMockInput + (*Data)(nil), // 2: generation.Data + (*Segments)(nil), // 3: generation.Segments + (*Segment)(nil), // 4: generation.Segment + (*Clients)(nil), // 5: generation.Clients + (*Client)(nil), // 6: generation.Client +} +var file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs = []int32{ + 0, // 0: generation.ActivityLogMockInput.write:type_name -> generation.WriteOptions + 2, // 1: generation.ActivityLogMockInput.data:type_name -> generation.Data + 5, // 2: generation.Data.all:type_name -> generation.Clients + 3, // 3: generation.Data.segments:type_name -> generation.Segments + 4, // 4: generation.Segments.segments:type_name -> generation.Segment + 5, // 5: generation.Segment.clients:type_name -> generation.Clients + 6, // 6: generation.Clients.clients:type_name -> generation.Client + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_sdk_helper_clientcountutil_generation_generate_data_proto_init() } +func 
file_sdk_helper_clientcountutil_generation_generate_data_proto_init() { + if File_sdk_helper_clientcountutil_generation_generate_data_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ActivityLogMockInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Segments); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Segment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Clients); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Client); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Data_CurrentMonth)(nil), + (*Data_MonthsAgo)(nil), + (*Data_All)(nil), + (*Data_Segments)(nil), + } + file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes[3].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc, + NumEnums: 1, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes, + DependencyIndexes: file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs, + EnumInfos: file_sdk_helper_clientcountutil_generation_generate_data_proto_enumTypes, + MessageInfos: file_sdk_helper_clientcountutil_generation_generate_data_proto_msgTypes, + }.Build() + File_sdk_helper_clientcountutil_generation_generate_data_proto = out.File + file_sdk_helper_clientcountutil_generation_generate_data_proto_rawDesc = nil + file_sdk_helper_clientcountutil_generation_generate_data_proto_goTypes = nil + file_sdk_helper_clientcountutil_generation_generate_data_proto_depIdxs = nil +} diff --git a/sdk/helper/clientcountutil/generation/generate_data.proto b/sdk/helper/clientcountutil/generation/generate_data.proto new file mode 100644 index 000000000000..0a48b7bcafe7 --- /dev/null +++ b/sdk/helper/clientcountutil/generation/generate_data.proto @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +package generation; + +option go_package = "github.com/hashicorp/vault/sdk/clientcountutil/generation"; +enum WriteOptions { + WRITE_UNKNOWN = 0; + WRITE_PRECOMPUTED_QUERIES = 1; + WRITE_DISTINCT_CLIENTS = 2; + WRITE_ENTITIES = 3; + WRITE_DIRECT_TOKENS = 4; + WRITE_INTENT_LOGS = 5; +} +message ActivityLogMockInput { + repeated WriteOptions write = 1; + repeated Data data = 2; +} +message Data { + oneof month { + bool current_month = 1; + int32 months_ago = 2; + } + oneof clients { + Clients all = 3; // you can’t have repeated fields in a oneof, which is why these are separate message types + Segments segments = 4; + } + repeated int32 empty_segment_indexes = 5; + repeated int32 skip_segment_indexes = 6; + int32 num_segments = 7; +} + +message Segments { + repeated Segment segments = 1; +} + +message Segment { + optional int32 segment_index = 1; + Clients clients = 2; +} + +message Clients { + repeated Client clients = 1; +} + +message Client { + string id = 1; + int32 count = 2; + bool repeated = 3; + int32 repeated_from_month = 4; + string namespace = 5; + string mount = 6; + string client_type = 7; +} diff --git a/sdk/helper/compressutil/compress.go b/sdk/helper/compressutil/compress.go index 924f82a2a1ba..2e096f1509ce 100644 --- a/sdk/helper/compressutil/compress.go +++ b/sdk/helper/compressutil/compress.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package compressutil import ( @@ -8,7 +11,6 @@ import ( "io" "github.com/golang/snappy" - "github.com/hashicorp/errwrap" "github.com/pierrec/lz4" ) @@ -31,7 +33,7 @@ const ( CompressionCanaryLZ4 byte = '4' ) -// SnappyReadCloser embeds the snappy reader which implements the io.Reader +// CompressUtilReadCloser embeds the snappy reader which implements the io.Reader // interface. The decompress procedure in this utility expects an // io.ReadCloser. This type implements the io.Closer interface to retain the // generic way of decompression. @@ -95,7 +97,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { // These are valid compression levels default: // If compression level is set to NoCompression or to - // any invalid value, fallback to Defaultcompression + // any invalid value, fallback to DefaultCompression config.GzipCompressionLevel = gzip.DefaultCompression } writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) @@ -113,7 +115,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { } if err != nil { - return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) + return nil, fmt.Errorf("failed to create a compression writer: %w", err) } if writer == nil { @@ -123,7 +125,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { // Compress the input and place it in the same buffer containing the // canary byte. 
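+	// At this point buf is assumed to already hold the compression
+	// canary byte, with the compressed stream appended directly after
+	// it; DecompressWithCanary later inspects that first byte to select
+	// the matching reader.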
 if _, err = writer.Write(data); err != nil {
-		return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err)
+		return nil, fmt.Errorf("failed to compress input data: %w", err)
 	}
 
 	// Close the io.WriteCloser
@@ -203,7 +205,7 @@ func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
 			return nil, "", true, nil
 		}
 		if err != nil {
-			return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
+			return nil, "", false, fmt.Errorf("failed to create a compression reader: %w", err)
 		}
 		if reader == nil {
 			return nil, "", false, fmt.Errorf("failed to create a compression reader")
@@ -214,8 +216,18 @@ func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
 
 	// Read all the compressed data into a buffer
 	var buf bytes.Buffer
-	if _, err = io.Copy(&buf, reader); err != nil {
-		return nil, "", false, err
+
+	// Read the compressed data into the buffer in fixed 1 KiB chunks
+	// rather than as one unbounded copy, as a partial guard against
+	// decompression bombs (e.g. zip bombs).
+	for {
+		_, err := io.CopyN(&buf, reader, 1024)
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, "", false, err
+		}
+	}
 
 	return buf.Bytes(), compressionType, false, nil
diff --git a/sdk/helper/compressutil/compress_test.go b/sdk/helper/compressutil/compress_test.go
index f85f3c935ba1..28117d8c2258 100644
--- a/sdk/helper/compressutil/compress_test.go
+++ b/sdk/helper/compressutil/compress_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package compressutil
 
 import (
@@ -113,3 +116,40 @@ func TestCompressUtil_InvalidConfigurations(t *testing.T) {
 		t.Fatal("expected an error")
 	}
 }
+
+// TestDecompressWithCanaryLargeInput tests that DecompressWithCanary works
+// as expected even with large values.
+func TestDecompressWithCanaryLargeInput(t *testing.T) {
+	t.Parallel()
+
+	inputJSON := `{"sample":"data`
+	for i := 0; i < 100000; i++ {
+		inputJSON += " and data"
+	}
+	inputJSON += `"}`
+	inputJSONBytes := []byte(inputJSON)
+
+	compressedJSONBytes, err := Compress(inputJSONBytes, &CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestCompression})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Ensure the input was actually recognized as compressed
+	if wasNotCompressed {
+		t.Fatalf("bytes were not compressed as expected")
+	}
+
+	if len(decompressedJSONBytes) == 0 {
+		t.Fatalf("decompressed bytes are unexpectedly empty")
+	}
+
+	// Compare the value after decompression
+	if !bytes.Equal(inputJSONBytes, decompressedJSONBytes) {
+		t.Fatalf("decompressed value differs from input;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes))
+	}
+}
diff --git a/sdk/helper/consts/agent.go b/sdk/helper/consts/agent.go
index 92207e3d818b..53b8b8e2e76e 100644
--- a/sdk/helper/consts/agent.go
+++ b/sdk/helper/consts/agent.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package consts
 
 // AgentPathCacheClear is the path that the agent will use as its cache-clear
diff --git a/sdk/helper/consts/consts.go b/sdk/helper/consts/consts.go
index a4b7c5040422..ccc7494a281c 100644
--- a/sdk/helper/consts/consts.go
+++ b/sdk/helper/consts/consts.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package consts
 
 const (
@@ -16,6 +19,10 @@ const (
 	// SSRF protection.
RequestHeaderName = "X-Vault-Request" + // WrapTTLHeaderName is the name of the header containing a directive to + // wrap the response + WrapTTLHeaderName = "X-Vault-Wrap-TTL" + // PerformanceReplicationALPN is the negotiated protocol used for // performance replication. PerformanceReplicationALPN = "replication_v1" @@ -36,4 +43,8 @@ const ( VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK" VaultDisableUserLockout = "VAULT_DISABLE_USER_LOCKOUT" + + PerformanceReplicationPathTarget = "performance" + + DRReplicationPathTarget = "dr" ) diff --git a/sdk/helper/consts/deprecation_status.go b/sdk/helper/consts/deprecation_status.go index 656d6cc992a7..e72292bee6c3 100644 --- a/sdk/helper/consts/deprecation_status.go +++ b/sdk/helper/consts/deprecation_status.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package consts // EnvVaultAllowPendingRemovalMounts allows Pending Removal builtins to be diff --git a/sdk/helper/consts/error.go b/sdk/helper/consts/error.go index 1a9175c6392d..5bd3f5e6e261 100644 --- a/sdk/helper/consts/error.go +++ b/sdk/helper/consts/error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package consts import "errors" diff --git a/sdk/helper/consts/plugin_runtime_types.go b/sdk/helper/consts/plugin_runtime_types.go new file mode 100644 index 000000000000..1b7714d0b11f --- /dev/null +++ b/sdk/helper/consts/plugin_runtime_types.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// NOTE: this file has been copied to +// https://github.com/hashicorp/vault/blob/main/api/plugin_runtime_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginRuntimeTypes = _PluginRuntimeTypeValues + +//go:generate enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake +type PluginRuntimeType uint32 + +// This is a list of PluginRuntimeTypes used by Vault. +const ( + DefaultContainerPluginOCIRuntime = "runsc" + + PluginRuntimeTypeUnsupported PluginRuntimeType = iota + PluginRuntimeTypeContainer +) + +// ParsePluginRuntimeType is a wrapper around PluginRuntimeTypeString kept for backwards compatibility. +func ParsePluginRuntimeType(PluginRuntimeType string) (PluginRuntimeType, error) { + t, err := PluginRuntimeTypeString(PluginRuntimeType) + if err != nil { + return PluginRuntimeTypeUnsupported, fmt.Errorf("%q is not a supported plugin runtime type", PluginRuntimeType) + } + return t, nil +} diff --git a/sdk/helper/consts/plugin_types.go b/sdk/helper/consts/plugin_types.go index e0a00e4860c6..a7a383827312 100644 --- a/sdk/helper/consts/plugin_types.go +++ b/sdk/helper/consts/plugin_types.go @@ -1,6 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package consts -import "fmt" +// NOTE: this file has been copied to +// https://github.com/hashicorp/vault/blob/main/api/plugin_types.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "fmt" +) var PluginTypes = []PluginType{ PluginTypeUnknown, @@ -57,3 +67,34 @@ func ParsePluginType(pluginType string) (PluginType, error) { return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) } } + +// UnmarshalJSON implements json.Unmarshaler. It supports unmarshaling either a +// string or a uint32. 
All new serialization will be as a string, but we +// previously serialized as a uint32 so we need to support that for backwards +// compatibility. +func (p *PluginType) UnmarshalJSON(data []byte) error { + var asString string + err := json.Unmarshal(data, &asString) + if err == nil { + *p, err = ParsePluginType(asString) + return err + } + + var asUint32 uint32 + err = json.Unmarshal(data, &asUint32) + if err != nil { + return err + } + *p = PluginType(asUint32) + switch *p { + case PluginTypeUnknown, PluginTypeCredential, PluginTypeDatabase, PluginTypeSecrets: + return nil + default: + return fmt.Errorf("%d is not a supported plugin type", asUint32) + } +} + +// MarshalJSON implements json.Marshaler. +func (p PluginType) MarshalJSON() ([]byte, error) { + return json.Marshal(p.String()) +} diff --git a/sdk/helper/consts/plugin_types_test.go b/sdk/helper/consts/plugin_types_test.go new file mode 100644 index 000000000000..ff1299f2e465 --- /dev/null +++ b/sdk/helper/consts/plugin_types_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// NOTE: this file has been copied to +// https://github.com/hashicorp/vault/blob/main/api/plugin_types_test.go +// Any changes made should be made to both files at the same time. + +import ( + "encoding/json" + "testing" +) + +type testType struct { + PluginType PluginType `json:"plugin_type"` +} + +func TestPluginTypeJSONRoundTrip(t *testing.T) { + for _, pluginType := range PluginTypes { + original := testType{ + PluginType: pluginType, + } + asBytes, err := json.Marshal(original) + if err != nil { + t.Fatal(err) + } + + var roundTripped testType + err = json.Unmarshal(asBytes, &roundTripped) + if err != nil { + t.Fatal(err) + } + + if original != roundTripped { + t.Fatalf("expected %v, got %v", original, roundTripped) + } + } +} + +func TestPluginTypeJSONUnmarshal(t *testing.T) { + // Failure/unsupported cases. + for name, tc := range map[string]string{ + "unsupported": `{"plugin_type":"unsupported"}`, + "random string": `{"plugin_type":"foo"}`, + "boolean": `{"plugin_type":true}`, + "empty": `{"plugin_type":""}`, + "negative": `{"plugin_type":-1}`, + "out of range": `{"plugin_type":10}`, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc), &result) + if err == nil { + t.Fatal("expected error") + } + }) + } + + // Valid cases. 
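+	// Both accepted encodings are exercised below: the current string
+	// form (e.g. {"plugin_type":"auth"}) and the legacy integer form
+	// (e.g. {"plugin_type":1}) produced by older serializations.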
+ for name, tc := range map[string]struct { + json string + expected PluginType + }{ + "unknown": {`{"plugin_type":"unknown"}`, PluginTypeUnknown}, + "auth": {`{"plugin_type":"auth"}`, PluginTypeCredential}, + "secret": {`{"plugin_type":"secret"}`, PluginTypeSecrets}, + "database": {`{"plugin_type":"database"}`, PluginTypeDatabase}, + "absent": {`{}`, PluginTypeUnknown}, + "integer unknown": {`{"plugin_type":0}`, PluginTypeUnknown}, + "integer auth": {`{"plugin_type":1}`, PluginTypeCredential}, + "integer db": {`{"plugin_type":2}`, PluginTypeDatabase}, + "integer secret": {`{"plugin_type":3}`, PluginTypeSecrets}, + } { + t.Run(name, func(t *testing.T) { + var result testType + err := json.Unmarshal([]byte(tc.json), &result) + if err != nil { + t.Fatal(err) + } + if tc.expected != result.PluginType { + t.Fatalf("expected %v, got %v", tc.expected, result.PluginType) + } + }) + } +} + +func TestUnknownTypeExcludedWithOmitEmpty(t *testing.T) { + type testTypeOmitEmpty struct { + Type PluginType `json:"type,omitempty"` + } + bytes, err := json.Marshal(testTypeOmitEmpty{}) + if err != nil { + t.Fatal(err) + } + m := map[string]any{} + json.Unmarshal(bytes, &m) + if _, exists := m["type"]; exists { + t.Fatal("type should not be present") + } +} diff --git a/sdk/helper/consts/pluginruntimetype_enumer.go b/sdk/helper/consts/pluginruntimetype_enumer.go new file mode 100644 index 000000000000..337afc29c3e2 --- /dev/null +++ b/sdk/helper/consts/pluginruntimetype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=PluginRuntimeType -trimprefix=PluginRuntimeType -transform=snake"; DO NOT EDIT. + +package consts + +import ( + "fmt" +) + +const _PluginRuntimeTypeName = "unsupportedcontainer" + +var _PluginRuntimeTypeIndex = [...]uint8{0, 11, 20} + +func (i PluginRuntimeType) String() string { + i -= 1 + if i >= PluginRuntimeType(len(_PluginRuntimeTypeIndex)-1) { + return fmt.Sprintf("PluginRuntimeType(%d)", i+1) + } + return _PluginRuntimeTypeName[_PluginRuntimeTypeIndex[i]:_PluginRuntimeTypeIndex[i+1]] +} + +var _PluginRuntimeTypeValues = []PluginRuntimeType{1, 2} + +var _PluginRuntimeTypeNameToValueMap = map[string]PluginRuntimeType{ + _PluginRuntimeTypeName[0:11]: 1, + _PluginRuntimeTypeName[11:20]: 2, +} + +// PluginRuntimeTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PluginRuntimeTypeString(s string) (PluginRuntimeType, error) { + if val, ok := _PluginRuntimeTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to PluginRuntimeType values", s) +} + +// PluginRuntimeTypeValues returns all values of the enum +func PluginRuntimeTypeValues() []PluginRuntimeType { + return _PluginRuntimeTypeValues +} + +// IsAPluginRuntimeType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i PluginRuntimeType) IsAPluginRuntimeType() bool { + for _, v := range _PluginRuntimeTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/consts/proxy.go b/sdk/helper/consts/proxy.go new file mode 100644 index 000000000000..0fc4117ccc1d --- /dev/null +++ b/sdk/helper/consts/proxy.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// ProxyPathCacheClear is the path that the proxy will use as its cache-clear +// endpoint. +const ProxyPathCacheClear = "/proxy/v1/cache-clear" + +// ProxyPathMetrics is the path the proxy will use to expose its internal +// metrics. 
+const ProxyPathMetrics = "/proxy/v1/metrics" + +// ProxyPathQuit is the path that the proxy will use to trigger stopping it. +const ProxyPathQuit = "/proxy/v1/quit" diff --git a/sdk/helper/consts/replication.go b/sdk/helper/consts/replication.go index f72c2f47aee2..2a1511a9a933 100644 --- a/sdk/helper/consts/replication.go +++ b/sdk/helper/consts/replication.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package consts const ( diff --git a/sdk/helper/consts/token_consts.go b/sdk/helper/consts/token_consts.go index 2b4e0278bf28..108e7ba42d78 100644 --- a/sdk/helper/consts/token_consts.go +++ b/sdk/helper/consts/token_consts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package consts const ( diff --git a/sdk/helper/cryptoutil/cryptoutil.go b/sdk/helper/cryptoutil/cryptoutil.go index a37086c645d8..956dad340878 100644 --- a/sdk/helper/cryptoutil/cryptoutil.go +++ b/sdk/helper/cryptoutil/cryptoutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package cryptoutil import "golang.org/x/crypto/blake2b" diff --git a/sdk/helper/cryptoutil/cryptoutil_test.go b/sdk/helper/cryptoutil/cryptoutil_test.go index a277e4fcee40..35799e42a2ea 100644 --- a/sdk/helper/cryptoutil/cryptoutil_test.go +++ b/sdk/helper/cryptoutil/cryptoutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package cryptoutil import "testing" diff --git a/sdk/helper/custommetadata/custom_metadata.go b/sdk/helper/custommetadata/custom_metadata.go index 7d4ff8763d11..81d4c27035d2 100644 --- a/sdk/helper/custommetadata/custom_metadata.go +++ b/sdk/helper/custommetadata/custom_metadata.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package custommetadata import ( diff --git a/sdk/helper/custommetadata/custom_metadata_test.go b/sdk/helper/custommetadata/custom_metadata_test.go index e71bd59462fe..2b25d991203c 100644 --- a/sdk/helper/custommetadata/custom_metadata_test.go +++ b/sdk/helper/custommetadata/custom_metadata_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package custommetadata import ( diff --git a/sdk/helper/dbtxn/dbtxn.go b/sdk/helper/dbtxn/dbtxn.go index 133b360e73e8..12288d5b37c8 100644 --- a/sdk/helper/dbtxn/dbtxn.go +++ b/sdk/helper/dbtxn/dbtxn.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package dbtxn import ( diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go new file mode 100644 index 000000000000..f1fefd65cd70 --- /dev/null +++ b/sdk/helper/docker/testhelpers.go @@ -0,0 +1,936 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-uuid" +) + +const DockerAPIVersion = "1.40" + +type Runner struct { + DockerAPI *client.Client + RunOptions RunOptions +} + +type RunOptions struct { + ImageRepo string + ImageTag string + ContainerName string + Cmd []string + Entrypoint []string + Env []string + NetworkName string + NetworkID string + CopyFromTo map[string]string + Ports []string + DoNotAutoRemove bool + AuthUsername string + AuthPassword string + OmitLogTimestamps bool + LogConsumer func(string) + Capabilities []string + PreDelete bool + PostStart func(string, string) error + LogStderr io.Writer + LogStdout io.Writer + VolumeNameToMountPoint map[string]string +} + +func NewDockerAPI() (*client.Client, error) { + return client.NewClientWithOpts(client.FromEnv, client.WithVersion(DockerAPIVersion)) +} + +func NewServiceRunner(opts RunOptions) (*Runner, error) { + dapi, err := NewDockerAPI() + if err != nil { + return nil, err + } + + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + if opts.NetworkName != "" { + nets, err := dapi.NetworkList(context.TODO(), types.NetworkListOptions{ + Filters: filters.NewArgs(filters.Arg("name", opts.NetworkName)), + }) + if err != nil { + return nil, err + } + if len(nets) != 1 { + return nil, fmt.Errorf("expected exactly one docker network named %q, got %d", opts.NetworkName, len(nets)) + } + opts.NetworkID = nets[0].ID + } + if opts.NetworkID == "" { + opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") + } + if opts.ContainerName == "" { + if strings.Contains(opts.ImageRepo, "/") { + return nil, fmt.Errorf("ContainerName is required for non-library images") + } + // If there's no slash in the repo it's almost certainly going to be + // a good container name. 
+		opts.ContainerName = opts.ImageRepo
+	}
+	return &Runner{
+		DockerAPI:  dapi,
+		RunOptions: opts,
+	}, nil
+}
+
+type ServiceConfig interface {
+	Address() string
+	URL() *url.URL
+}
+
+func NewServiceHostPort(host string, port int) *ServiceHostPort {
+	return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)}
+}
+
+func NewServiceHostPortParse(s string) (*ServiceHostPort, error) {
+	pieces := strings.Split(s, ":")
+	if len(pieces) != 2 {
+		return nil, fmt.Errorf("address must be of the form host:port, got: %v", s)
+	}
+
+	port, err := strconv.Atoi(pieces[1])
+	if err != nil || port < 1 {
+		return nil, fmt.Errorf("address must be of the form host:port, got: %v", s)
+	}
+
+	return &ServiceHostPort{s}, nil
+}
+
+type ServiceHostPort struct {
+	address string
+}
+
+func (s ServiceHostPort) Address() string {
+	return s.address
+}
+
+func (s ServiceHostPort) URL() *url.URL {
+	return &url.URL{Host: s.address}
+}
+
+func NewServiceURLParse(s string) (*ServiceURL, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	return &ServiceURL{u: *u}, nil
+}
+
+func NewServiceURL(u url.URL) *ServiceURL {
+	return &ServiceURL{u: u}
+}
+
+type ServiceURL struct {
+	u url.URL
+}
+
+func (s ServiceURL) Address() string {
+	return s.u.Host
+}
+
+func (s ServiceURL) URL() *url.URL {
+	return &s.u
+}
+
+// ServiceAdapter verifies connectivity to the service, then returns either a
+// non-nil ServiceConfig and a nil error, or a nil config and an error.
+type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error)
+
+// StartService will start the runner's configured docker container with a
+// random UUID suffix appended to the name to make it unique and will return
+// either a hostname or local address depending on whether a Docker network
+// was given.
+//
+// Most tests can default to using this.
+func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) {
+	serv, _, err := d.StartNewService(ctx, true, false, connect)
+
+	return serv, err
+}
+
+type LogConsumerWriter struct {
+	consumer func(string)
+}
+
+func (l LogConsumerWriter) Write(p []byte) (n int, err error) {
+	// TODO this assumes that we're never passed partial log lines, which
+	// seems a safe assumption for now based on how docker appears to
+	// implement logging, but might change in the future.
+	scanner := bufio.NewScanner(bytes.NewReader(p))
+	scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize)
+	for scanner.Scan() {
+		l.consumer(scanner.Text())
+	}
+	return len(p), nil
+}
+
+var _ io.Writer = &LogConsumerWriter{}
+
+// StartNewService will start the runner's configured docker container but with the
+// ability to control adding a name suffix or forcing a local address to be returned.
+// 'addSuffix' will add a random UUID to the end of the container name.
+// 'forceLocalAddr' will force the container address returned to be in the
+// form of '127.0.0.1:1234' where 1234 is the mapped container port.
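+//
+// A minimal sketch of typical usage from a test. The "redis" image, the
+// port, and the connect callback are illustrative assumptions, not part of
+// this package; t and ctx are assumed to be in scope:
+//
+//	runner, err := NewServiceRunner(RunOptions{
+//		ImageRepo: "redis",
+//		ImageTag:  "latest",
+//		Ports:     []string{"6379/tcp"},
+//	})
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	svc, _, err := runner.StartNewService(ctx, true, false,
+//		func(ctx context.Context, host string, port int) (ServiceConfig, error) {
+//			// A real adapter would dial the service here before returning.
+//			return NewServiceHostPort(host, port), nil
+//		})
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer svc.Cleanup()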
+func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) {
+	if d.RunOptions.PreDelete {
+		name := d.RunOptions.ContainerName
+		matches, err := d.DockerAPI.ContainerList(ctx, container.ListOptions{
+			All: true,
+			// TODO use labels to ensure we don't delete anything we shouldn't
+			Filters: filters.NewArgs(
+				filters.Arg("name", name),
+			),
+		})
+		if err != nil {
+			return nil, "", fmt.Errorf("failed to list containers named %q: %w", name, err)
+		}
+		for _, cont := range matches {
+			err = d.DockerAPI.ContainerRemove(ctx, cont.ID, container.RemoveOptions{Force: true})
+			if err != nil {
+				return nil, "", fmt.Errorf("failed to pre-delete container named %q: %w", name, err)
+			}
+		}
+	}
+	result, err := d.Start(context.Background(), addSuffix, forceLocalAddr)
+	if err != nil {
+		return nil, "", err
+	}
+
+	// The waitgroup wg is used here to support the PostStart hook that
+	// NewDockerCluster passes in. We can't generate the PKI cert for the
+	// https listener until we know the container's address, meaning we must
+	// first start the container, then generate the cert, then copy it into
+	// the container, then signal Vault to reload its config/certs. However,
+	// if we SIGHUP Vault before Vault has installed its signal handler, that
+	// will kill Vault, since the default behaviour for HUP is termination.
+	// So the PostStart that NewDockerCluster passes in (which does all that
+	// PKI cert stuff) waits to see output from Vault on stdout/stderr before
+	// it sends the signal, and we don't want to run the PostStart until
+	// we've hooked into the docker logs.
+	var wg sync.WaitGroup
+	logConsumer := d.createLogConsumer(result.Container.ID, &wg)
+
+	if logConsumer != nil {
+		wg.Add(1)
+		go logConsumer()
+	}
+	wg.Wait()
+
+	if d.RunOptions.PostStart != nil {
+		if err := d.RunOptions.PostStart(result.Container.ID, result.RealIP); err != nil {
+			return nil, "", fmt.Errorf("poststart failed: %w", err)
+		}
+	}
+
+	cleanup := func() {
+		for i := 0; i < 10; i++ {
+			err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, container.RemoveOptions{Force: true})
+			if err == nil || client.IsErrNotFound(err) {
+				return
+			}
+			time.Sleep(1 * time.Second)
+		}
+	}
+
+	bo := backoff.NewExponentialBackOff()
+	bo.MaxInterval = time.Second * 5
+	bo.MaxElapsedTime = 2 * time.Minute
+
+	pieces := strings.Split(result.Addrs[0], ":")
+	portInt, err := strconv.Atoi(pieces[1])
+	if err != nil {
+		return nil, "", err
+	}
+
+	var config ServiceConfig
+	err = backoff.Retry(func() error {
+		container, err := d.DockerAPI.ContainerInspect(ctx, result.Container.ID)
+		if err != nil || !container.State.Running {
+			return backoff.Permanent(fmt.Errorf("failed to inspect container %q, or it is not running: %w", result.Container.ID, err))
+		}
+
+		c, err := connect(ctx, pieces[0], portInt)
+		if err != nil {
+			return err
+		}
+		if c == nil {
+			return fmt.Errorf("service adapter returned a nil config without an error")
+		}
+		config = c
+		return nil
+	}, bo)
+	if err != nil {
+		if !d.RunOptions.DoNotAutoRemove {
+			cleanup()
+		}
+		return nil, "", err
+	}
+
+	return &Service{
+		Config:      config,
+		Cleanup:     cleanup,
+		Container:   result.Container,
+		StartResult: result,
+	}, result.Container.ID, nil
+}
+
+// createLogConsumer returns a function to consume the logs of the container with the given ID.
+// If a wait group is given, `WaitGroup.Done()` will be called as soon as the
+// ContainerLogs Docker API call has been made.
+// The returned function will block, so it should be run on a goroutine.
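+//
+// A sketch of the intended call pattern, mirroring StartNewService above
+// (containerID is assumed to be in scope):
+//
+//	var wg sync.WaitGroup
+//	if consume := d.createLogConsumer(containerID, &wg); consume != nil {
+//		wg.Add(1)
+//		go consume()
+//	}
+//	wg.Wait() // unblocks once the ContainerLogs API call has been issued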
+func (d *Runner) createLogConsumer(containerId string, wg *sync.WaitGroup) func() { + if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil { + return func() { + d.consumeLogs(containerId, wg, d.RunOptions.LogStdout, d.RunOptions.LogStderr) + } + } + if d.RunOptions.LogConsumer != nil { + return func() { + d.consumeLogs(containerId, wg, &LogConsumerWriter{d.RunOptions.LogConsumer}, &LogConsumerWriter{d.RunOptions.LogConsumer}) + } + } + return nil +} + +// consumeLogs is the function called by the function returned by createLogConsumer. +func (d *Runner) consumeLogs(containerId string, wg *sync.WaitGroup, logStdout, logStderr io.Writer) { + // We must run inside a goroutine because we're using Follow:true, + // and StdCopy will block until the log stream is closed. + stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, container.LogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: !d.RunOptions.OmitLogTimestamps, + Details: true, + Follow: true, + }) + wg.Done() + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err)) + } else { + _, err := stdcopy.StdCopy(logStdout, logStderr, stream) + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err)) + } + } +} + +type Service struct { + Config ServiceConfig + Cleanup func() + Container *types.ContainerJSON + StartResult *StartResult +} + +type StartResult struct { + Container *types.ContainerJSON + Addrs []string + RealIP string +} + +func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*StartResult, error) { + name := d.RunOptions.ContainerName + if addSuffix { + suffix, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + name += "-" + suffix + } + + cfg := &container.Config{ + Hostname: name, + Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), + Env: d.RunOptions.Env, + Cmd: d.RunOptions.Cmd, + } + if len(d.RunOptions.Ports) > 0 { + cfg.ExposedPorts = make(map[nat.Port]struct{}) + for _, p := range d.RunOptions.Ports { + cfg.ExposedPorts[nat.Port(p)] = struct{}{} + } + } + if len(d.RunOptions.Entrypoint) > 0 { + cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) + } + + hostConfig := &container.HostConfig{ + AutoRemove: !d.RunOptions.DoNotAutoRemove, + PublishAllPorts: true, + } + if len(d.RunOptions.Capabilities) > 0 { + hostConfig.CapAdd = d.RunOptions.Capabilities + } + + netConfig := &network.NetworkingConfig{} + if d.RunOptions.NetworkID != "" { + netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + d.RunOptions.NetworkID: {}, + } + } + + // best-effort pull + var opts types.ImageCreateOptions + if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { + var buf bytes.Buffer + auth := map[string]string{ + "username": d.RunOptions.AuthUsername, + "password": d.RunOptions.AuthPassword, + } + if err := json.NewEncoder(&buf).Encode(auth); err != nil { + return nil, err + } + opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes()) + } + resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts) + if resp != nil { + _, _ = ioutil.ReadAll(resp) + } + + for vol, mtpt := range d.RunOptions.VolumeNameToMountPoint { + hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{ + Type: "volume", + Source: vol, + Target: mtpt, + ReadOnly: false, + }) + } + + c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname) + if err != nil { + return nil, fmt.Errorf("container create 
failed: %v", err) + } + + for from, to := range d.RunOptions.CopyFromTo { + if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, err + } + } + + err = d.DockerAPI.ContainerStart(ctx, c.ID, container.StartOptions{}) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, fmt.Errorf("container start failed: %v", err) + } + + inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID) + if err != nil { + _ = d.DockerAPI.ContainerRemove(ctx, c.ID, container.RemoveOptions{}) + return nil, err + } + + var addrs []string + for _, port := range d.RunOptions.Ports { + pieces := strings.Split(port, "/") + if len(pieces) < 2 { + return nil, fmt.Errorf("expected port of the form 1234/tcp, got: %s", port) + } + if d.RunOptions.NetworkID != "" && !forceLocalAddr { + addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0])) + } else { + mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)] + if !ok || len(mapped) == 0 { + return nil, fmt.Errorf("no port mapping found for %s", port) + } + addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort)) + } + } + + var realIP string + if d.RunOptions.NetworkID == "" { + if len(inspect.NetworkSettings.Networks) > 1 { + return nil, fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", inspect.NetworkSettings.Networks) + } + for _, network := range inspect.NetworkSettings.Networks { + realIP = network.IPAddress + break + } + } else { + realIP = inspect.NetworkSettings.Networks[d.RunOptions.NetworkName].IPAddress + } + + return &StartResult{ + Container: &inspect, + Addrs: addrs, + RealIP: realIP, + }, nil +} + +func (d *Runner) RefreshFiles(ctx context.Context, containerID string) error { + for from, to := range d.RunOptions.CopyFromTo { + if err := copyToContainer(ctx, d.DockerAPI, containerID, from, to); err != nil { + // TODO too drastic? + _ = d.DockerAPI.ContainerRemove(ctx, containerID, container.RemoveOptions{}) + return err + } + } + return d.DockerAPI.ContainerKill(ctx, containerID, "SIGHUP") +} + +func (d *Runner) Stop(ctx context.Context, containerID string) error { + if d.RunOptions.NetworkID != "" { + if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { + return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) + } + } + + // timeout in seconds + timeout := 5 + options := container.StopOptions{ + Timeout: &timeout, + } + if err := d.DockerAPI.ContainerStop(ctx, containerID, options); err != nil { + return fmt.Errorf("error stopping container: %v", err) + } + + return nil +} + +func (d *Runner) RestartContainerWithTimeout(ctx context.Context, containerID string, timeout int) error { + err := d.DockerAPI.ContainerRestart(ctx, containerID, container.StopOptions{Timeout: &timeout}) + if err != nil { + return fmt.Errorf("failed to restart container: %s", err) + } + var wg sync.WaitGroup + logConsumer := d.createLogConsumer(containerID, &wg) + if logConsumer != nil { + wg.Add(1) + go logConsumer() + } + // we don't really care about waiting for logs to start showing up, do we? 
+ return nil +} + +func (d *Runner) Restart(ctx context.Context, containerID string) error { + if err := d.DockerAPI.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil { + return err + } + + ends := &network.EndpointSettings{ + NetworkID: d.RunOptions.NetworkID, + } + + return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) +} + +func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { + srcInfo, err := archive.CopyInfoSourcePath(from, false) + if err != nil { + return fmt.Errorf("error copying from source %q: %v", from, err) + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return fmt.Errorf("error creating tar from source %q: %v", from, err) + } + defer srcArchive.Close() + + dstInfo := archive.CopyInfo{Path: to} + + dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) + } + defer content.Close() + err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) + if err != nil { + return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) + } + + return nil +} + +type RunCmdOpt interface { + Apply(cfg *types.ExecConfig) error +} + +type RunCmdUser string + +var _ RunCmdOpt = (*RunCmdUser)(nil) + +func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { + cfg.User = string(u) + return nil +} + +func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { + return RunCmdWithOutput(d.DockerAPI, ctx, container, cmd, opts...) +} + +func RunCmdWithOutput(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { + runCfg := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + + for index, opt := range opts { + if err := opt.Apply(&runCfg); err != nil { + return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err) + } + } + + ret, err := api.ContainerExecCreate(ctx, container, runCfg) + if err != nil { + return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg) + } + + resp, err := api.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{}) + if err != nil { + return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret) + } + defer resp.Close() + + var stdoutB bytes.Buffer + var stderrB bytes.Buffer + if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil { + return nil, nil, -1, fmt.Errorf("error reading command output: %v", err) + } + + stdout := stdoutB.Bytes() + stderr := stderrB.Bytes() + + // Fetch return code. + info, err := api.ContainerExecInspect(ctx, ret.ID) + if err != nil { + return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err) + } + + return stdout, stderr, info.ExitCode, nil +} + +func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) { + return RunCmdInBackground(d.DockerAPI, ctx, container, cmd, opts...) 
+}
+
+func RunCmdInBackground(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
+	runCfg := types.ExecConfig{
+		AttachStdout: true,
+		AttachStderr: true,
+		Cmd:          cmd,
+	}
+
+	for index, opt := range opts {
+		if err := opt.Apply(&runCfg); err != nil {
+			return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	ret, err := api.ContainerExecCreate(ctx, container, runCfg)
+	if err != nil {
+		return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg)
+	}
+
+	err = api.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret)
+	}
+
+	return ret.ID, nil
+}
+
+// PathContents represents the contents stored at a single path; a
+// BuildContext maps path -> PathContents.
+type PathContents interface {
+	UpdateHeader(header *tar.Header) error
+	Get() ([]byte, error)
+	SetMode(mode int64)
+	SetOwners(uid int, gid int)
+}
+
+type FileContents struct {
+	Data []byte
+	Mode int64
+	UID  int
+	GID  int
+}
+
+func (b FileContents) UpdateHeader(header *tar.Header) error {
+	header.Mode = b.Mode
+	header.Uid = b.UID
+	header.Gid = b.GID
+	return nil
+}
+
+func (b FileContents) Get() ([]byte, error) {
+	return b.Data, nil
+}
+
+func (b *FileContents) SetMode(mode int64) {
+	b.Mode = mode
+}
+
+func (b *FileContents) SetOwners(uid int, gid int) {
+	b.UID = uid
+	b.GID = gid
+}
+
+func PathContentsFromBytes(data []byte) PathContents {
+	return &FileContents{
+		Data: data,
+		Mode: 0o644,
+	}
+}
+
+func PathContentsFromString(data string) PathContents {
+	return PathContentsFromBytes([]byte(data))
+}
+
+type BuildContext map[string]PathContents
+
+func NewBuildContext() BuildContext {
+	return BuildContext{}
+}
+
+func BuildContextFromTarball(reader io.Reader) (BuildContext, error) {
+	archive := tar.NewReader(reader)
+	bCtx := NewBuildContext()
+
+	for {
+		header, err := archive.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+
+			return nil, fmt.Errorf("failed to parse provided tarball: %v", err)
+		}
+
+		data := make([]byte, int(header.Size))
+		read, err := archive.Read(data)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read file contents from provided tarball: %v", err)
+		}
+
+		if read != int(header.Size) {
+			return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size)
+		}
+
+		bCtx[header.Name] = &FileContents{
+			Data: data,
+			Mode: header.Mode,
+			UID:  header.Uid,
+			GID:  header.Gid,
+		}
+	}
+
+	return bCtx, nil
+}
+
+func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
+	var err error
+	buffer := new(bytes.Buffer)
+	tarBuilder := tar.NewWriter(buffer)
+	defer tarBuilder.Close()
+
+	now := time.Now()
+	for filepath, contents := range *bCtx {
+		fileHeader := &tar.Header{
+			Name:       filepath,
+			ModTime:    now,
+			AccessTime: now,
+			ChangeTime: now,
+		}
+		if contents == nil && !strings.HasSuffix(filepath, "/") {
+			return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath)
+		}
+
+		// nil contents indicates a directory entry, which has no file
+		// metadata to merge into the header.
+		if contents != nil {
+			if err := contents.UpdateHeader(fileHeader); err != nil {
+				return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err)
+			}
+		}
+
+		var rawContents []byte
+		if contents != nil {
+			rawContents, err = contents.Get()
+			if err != nil {
+				return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err)
+			}
+
+			fileHeader.Size = int64(len(rawContents))
+		}
+
+		if err := tarBuilder.WriteHeader(fileHeader); err != nil {
+			return nil, fmt.Errorf("failed
to write tar header entry for %v: %w", filepath, err) + } + + if contents != nil { + if _, err := tarBuilder.Write(rawContents); err != nil { + return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err) + } + } + } + + return bytes.NewReader(buffer.Bytes()), nil +} + +type BuildOpt interface { + Apply(cfg *types.ImageBuildOptions) error +} + +type BuildRemove bool + +var _ BuildOpt = (*BuildRemove)(nil) + +func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error { + cfg.Remove = bool(u) + return nil +} + +type BuildForceRemove bool + +var _ BuildOpt = (*BuildForceRemove)(nil) + +func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error { + cfg.ForceRemove = bool(u) + return nil +} + +type BuildPullParent bool + +var _ BuildOpt = (*BuildPullParent)(nil) + +func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error { + cfg.PullParent = bool(u) + return nil +} + +type BuildArgs map[string]*string + +var _ BuildOpt = (*BuildArgs)(nil) + +func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error { + cfg.BuildArgs = u + return nil +} + +type BuildTags []string + +var _ BuildOpt = (*BuildTags)(nil) + +func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error { + cfg.Tags = u + return nil +} + +const containerfilePath = "_containerfile" + +func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { + return BuildImage(ctx, d.DockerAPI, containerfile, containerContext, opts...) +} + +func BuildImage(ctx context.Context, api *client.Client, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) { + var cfg types.ImageBuildOptions + + // Build container context tarball, provisioning containerfile in. + containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) + tar, err := containerContext.ToTarball() + if err != nil { + return nil, fmt.Errorf("failed to create build image context tarball: %w", err) + } + cfg.Dockerfile = "/" + containerfilePath + + // Apply all given options + for index, opt := range opts { + if err := opt.Apply(&cfg); err != nil { + return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) + } + } + + resp, err := api.ImageBuild(ctx, tar, cfg) + if err != nil { + return nil, fmt.Errorf("failed to build image: %v", err) + } + + output, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read image build output: %w", err) + } + + return output, nil +} + +func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { + // XXX: currently we use the default options but we might want to allow + // modifying cfg.CopyUIDGID in the future. + var cfg types.CopyToContainerOptions + + // Convert our provided contents to a tarball to ship up. 
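+	// (Docker's copy API accepts only tar streams, so even a single file
+	// must be wrapped in an archive before upload.)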
+ tar, err := contents.ToTarball() + if err != nil { + return fmt.Errorf("failed to build contents into tarball: %v", err) + } + + return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) +} + +func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { + reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) + if err != nil { + return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) + } + + result, err := BuildContextFromTarball(reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) + } + + return result, &stat, nil +} + +func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { + response, err := d.DockerAPI.ContainerInspect(context.Background(), container) + if err != nil { + return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) + } + + if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { + return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) + } + + ret := make(map[string]string) + ns := response.NetworkSettings.Networks + for network, data := range ns { + if data == nil { + continue + } + + ret[network] = data.IPAddress + } + + if len(ret) == 0 { + return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) + } + + return ret, nil +} diff --git a/sdk/helper/errutil/error.go b/sdk/helper/errutil/error.go index 0b95efb40e3a..1866343b5183 100644 --- a/sdk/helper/errutil/error.go +++ b/sdk/helper/errutil/error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package errutil // UserError represents an error generated due to invalid user input diff --git a/sdk/helper/hclutil/hcl.go b/sdk/helper/hclutil/hcl.go index 0b120367d5a6..a78d820087d4 100644 --- a/sdk/helper/hclutil/hcl.go +++ b/sdk/helper/hclutil/hcl.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package hclutil import ( diff --git a/sdk/helper/identitytpl/templating.go b/sdk/helper/identitytpl/templating.go index 6d84df8241de..4cbf1e22f07d 100644 --- a/sdk/helper/identitytpl/templating.go +++ b/sdk/helper/identitytpl/templating.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package identitytpl import ( @@ -9,6 +12,7 @@ import ( "time" "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/vault/sdk/logical" ) @@ -327,7 +331,7 @@ func performTemplating(input string, p *PopulateStringInput) (string, error) { return "", errors.New("missing time operand") case 3: - duration, err := time.ParseDuration(opsSplit[2]) + duration, err := parseutil.ParseDurationSecond(opsSplit[2]) if err != nil { return "", errwrap.Wrapf("invalid duration: {{err}}", err) } diff --git a/sdk/helper/identitytpl/templating_test.go b/sdk/helper/identitytpl/templating_test.go index 15bfc812387c..d17409e78ae6 100644 --- a/sdk/helper/identitytpl/templating_test.go +++ b/sdk/helper/identitytpl/templating_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package identitytpl import ( diff --git a/sdk/helper/jsonutil/json.go b/sdk/helper/jsonutil/json.go index c03a4f8c8d14..1abd9fafebdc 100644 --- a/sdk/helper/jsonutil/json.go +++ b/sdk/helper/jsonutil/json.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package jsonutil import ( diff --git a/sdk/helper/jsonutil/json_test.go b/sdk/helper/jsonutil/json_test.go index dd33f9bf179a..10aabf1b93ea 100644 --- a/sdk/helper/jsonutil/json_test.go +++ b/sdk/helper/jsonutil/json_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package jsonutil import ( diff --git a/sdk/helper/kdf/kdf.go b/sdk/helper/kdf/kdf.go index 9d3e0e858585..e9964ba28c4f 100644 --- a/sdk/helper/kdf/kdf.go +++ b/sdk/helper/kdf/kdf.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // This package is used to implement Key Derivation Functions (KDF) // based on the recommendations of NIST SP 800-108. These are useful // for generating unique-per-transaction keys, or situations in which diff --git a/sdk/helper/kdf/kdf_test.go b/sdk/helper/kdf/kdf_test.go index 2148257f357c..ed5c0a13d36b 100644 --- a/sdk/helper/kdf/kdf_test.go +++ b/sdk/helper/kdf/kdf_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package kdf import ( diff --git a/sdk/helper/keysutil/cache.go b/sdk/helper/keysutil/cache.go index 7da9c202fa58..fb55091e40a8 100644 --- a/sdk/helper/keysutil/cache.go +++ b/sdk/helper/keysutil/cache.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil type Cache interface { diff --git a/sdk/helper/keysutil/consts.go b/sdk/helper/keysutil/consts.go index e6c657b9115e..6262b477c220 100644 --- a/sdk/helper/keysutil/consts.go +++ b/sdk/helper/keysutil/consts.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( @@ -25,11 +28,12 @@ const ( HashTypeSHA3512 ) +//go:generate enumer -type=MarshalingType -trimprefix=MarshalingType -transform=snake type MarshalingType uint32 const ( - _ = iota - MarshalingTypeASN1 MarshalingType = iota + _ MarshalingType = iota + MarshalingTypeASN1 MarshalingTypeJWS ) @@ -73,8 +77,5 @@ var ( HashTypeSHA3512: crypto.SHA3_512, } - MarshalingTypeMap = map[string]MarshalingType{ - "asn1": MarshalingTypeASN1, - "jws": MarshalingTypeJWS, - } + MarshalingTypeMap = _MarshalingTypeNameToValueMap ) diff --git a/sdk/helper/keysutil/encrypted_key_storage.go b/sdk/helper/keysutil/encrypted_key_storage.go index 90eaaf0bbae1..7314758bc167 100644 --- a/sdk/helper/keysutil/encrypted_key_storage.go +++ b/sdk/helper/keysutil/encrypted_key_storage.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( diff --git a/sdk/helper/keysutil/encrypted_key_storage_test.go b/sdk/helper/keysutil/encrypted_key_storage_test.go index 2f29d14b7ad7..5147027fc883 100644 --- a/sdk/helper/keysutil/encrypted_key_storage_test.go +++ b/sdk/helper/keysutil/encrypted_key_storage_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go index a60cf69d53f2..6d2881e0d8da 100644 --- a/sdk/helper/keysutil/lock_manager.go +++ b/sdk/helper/keysutil/lock_manager.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( @@ -59,6 +62,12 @@ type PolicyRequest struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool + + // Indicates whether a private or public key is imported/upserted + IsPrivateKey bool + + // The UUID of the managed key, if using one + ManagedKeyUUID string } type LockManager struct { @@ -382,6 +391,12 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) } + case KeyType_MANAGED_KEY: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + default: cleanup() return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) @@ -412,7 +427,11 @@ func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io } // Performs the actual persist and does setup - err = p.Rotate(ctx, req.Storage, rand) + if p.Type == KeyType_MANAGED_KEY { + err = p.RotateManagedKey(ctx, req.Storage, req.ManagedKeyUUID) + } else { + err = p.Rotate(ctx, req.Storage, rand) + } if err != nil { cleanup() return nil, false, err @@ -495,7 +514,7 @@ func (lm *LockManager) ImportPolicy(ctx context.Context, req PolicyRequest, key } } - err = p.Import(ctx, req.Storage, key, rand) + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, req.IsPrivateKey, rand) if err != nil { return fmt.Errorf("error importing key: %s", err) } diff --git a/sdk/helper/keysutil/managed_key_util.go b/sdk/helper/keysutil/managed_key_util.go new file mode 100644 index 000000000000..bb3c0b2968b7 --- /dev/null +++ b/sdk/helper/keysutil/managed_key_util.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package keysutil + +import ( + "context" + "errors" + + "github.com/hashicorp/vault/sdk/logical" +) + +type ManagedKeyParameters struct { + ManagedKeySystemView logical.ManagedKeySystemView + BackendUUID string + Context context.Context +} + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func (p *Policy) decryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, ciphertext []byte, nonce []byte, aad []byte) (plaintext []byte, err error) { + return nil, errEntOnly +} + +func (p *Policy) encryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, plaintext []byte, nonce []byte, aad []byte) (ciphertext []byte, err error) { + return nil, errEntOnly +} + +func (p *Policy) signWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input []byte) (sig []byte, err error) { + return nil, errEntOnly +} + +func (p *Policy) verifyWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input, sig []byte) (verified bool, err error) { + return false, errEntOnly +} + +func (p *Policy) HMACWithManagedKey(ctx context.Context, ver int, managedKeySystemView logical.ManagedKeySystemView, backendUUID string, algorithm string, data []byte) (hmacBytes []byte, err error) { + return nil, errEntOnly +} + +func (p *Policy) RotateManagedKey(ctx context.Context, storage logical.Storage, managedKeyUUID string) error { + return errEntOnly +} diff --git a/sdk/helper/keysutil/marshalingtype_enumer.go b/sdk/helper/keysutil/marshalingtype_enumer.go new file mode 100644 index 000000000000..93b5c2f1f94b --- /dev/null +++ b/sdk/helper/keysutil/marshalingtype_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=MarshalingType -trimprefix=MarshalingType -transform=snake"; DO NOT EDIT. + +package keysutil + +import ( + "fmt" +) + +const _MarshalingTypeName = "asn1jws" + +var _MarshalingTypeIndex = [...]uint8{0, 4, 7} + +func (i MarshalingType) String() string { + i -= 1 + if i >= MarshalingType(len(_MarshalingTypeIndex)-1) { + return fmt.Sprintf("MarshalingType(%d)", i+1) + } + return _MarshalingTypeName[_MarshalingTypeIndex[i]:_MarshalingTypeIndex[i+1]] +} + +var _MarshalingTypeValues = []MarshalingType{1, 2} + +var _MarshalingTypeNameToValueMap = map[string]MarshalingType{ + _MarshalingTypeName[0:4]: 1, + _MarshalingTypeName[4:7]: 2, +} + +// MarshalingTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func MarshalingTypeString(s string) (MarshalingType, error) { + if val, ok := _MarshalingTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to MarshalingType values", s) +} + +// MarshalingTypeValues returns all values of the enum +func MarshalingTypeValues() []MarshalingType { + return _MarshalingTypeValues +} + +// IsAMarshalingType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i MarshalingType) IsAMarshalingType() bool { + for _, v := range _MarshalingTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go index 3417c2992214..f70d4497e002 100644 --- a/sdk/helper/keysutil/policy.go +++ b/sdk/helper/keysutil/policy.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( @@ -19,6 +22,7 @@ import ( "encoding/pem" "errors" "fmt" + "hash" "io" "math/big" "path" @@ -33,11 +37,13 @@ import ( "golang.org/x/crypto/hkdf" "github.com/hashicorp/errwrap" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/sdk/helper/errutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/kdf" "github.com/hashicorp/vault/sdk/logical" + + "github.com/google/tink/go/kwp/subtle" ) // Careful with iota; don't put anything before it in this const block because @@ -83,6 +89,10 @@ type AssociatedDataFactory interface { GetAssociatedData() ([]byte, error) } +type ManagedKeyFactory interface { + GetManagedKeyParameters() ManagedKeyParameters +} + type RestoreInfo struct { Time time.Time `json:"time"` Version int `json:"version"` @@ -94,10 +104,11 @@ type BackupInfo struct { } type SigningOptions struct { - HashAlgorithm HashType - Marshaling MarshalingType - SaltLength int - SigAlgorithm string + HashAlgorithm HashType + Marshaling MarshalingType + SaltLength int + SigAlgorithm string + ManagedKeyParams ManagedKeyParameters } type SigningResult struct { @@ -113,7 +124,7 @@ type KeyType int func (kt KeyType) EncryptionSupported() bool { switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: return true } return false @@ -121,7 +132,7 @@ func (kt KeyType) EncryptionSupported() bool { func (kt KeyType) DecryptionSupported() bool { switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: return true } return false @@ -129,7 +140,7 @@ func (kt KeyType) DecryptionSupported() bool { func (kt KeyType) SigningSupported() bool { switch kt { - case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: return true } return false @@ -137,7 +148,7 @@ func (kt KeyType) SigningSupported() bool { func (kt KeyType) HashSignatureInput() bool { switch kt { - case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: return true } return false @@ -153,7 +164,15 @@ func (kt KeyType) DerivationSupported() bool { func (kt KeyType) AssociatedDataSupported() bool { switch kt { - case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) ImportPublicKeySupported() bool { + switch kt { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519: return true } return false @@ -183,6 +202,8 @@ func (kt KeyType) String() string { return "rsa-4096" case 
KeyType_HMAC: return "hmac" + case KeyType_MANAGED_KEY: + return "managed_key" } return "[unknown]" @@ -208,7 +229,8 @@ type KeyEntry struct { EC_Y *big.Int `json:"ec_y"` EC_D *big.Int `json:"ec_d"` - RSAKey *rsa.PrivateKey `json:"rsa_key"` + RSAKey *rsa.PrivateKey `json:"rsa_key"` + RSAPublicKey *rsa.PublicKey `json:"rsa_public_key"` // The public key in an appropriate format for the type of key FormattedPublicKey string `json:"public_key"` @@ -220,6 +242,20 @@ type KeyEntry struct { // This is deprecated (but still filled) in favor of the value above which // is more precise DeprecatedCreationTime int64 `json:"creation_time"` + + ManagedKeyUUID string `json:"managed_key_id,omitempty"` + + // Key entry certificate chain. If set, the leaf certificate key matches the + // KeyEntry key + CertificateChain [][]byte `json:"certificate_chain"` +} + +func (ke *KeyEntry) IsPrivateKeyMissing() bool { + if ke.RSAKey != nil || ke.EC_D != nil || len(ke.Key) != 0 || len(ke.ManagedKeyUUID) != 0 { + return false + } + + return true } // deprecatedKeyEntryMap is used to allow JSON marshal/unmarshal @@ -323,6 +359,19 @@ func LoadPolicy(ctx context.Context, s logical.Storage, path string) (*Policy, e return nil, err } + // Migrate RSA private keys to include their public counterpart. This lets + // us reference RSAPublicKey whenever we need to, without necessarily + // needing the private key handy, synchronizing the behavior with EC and + // Ed25519 key pairs. + switch policy.Type { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + for name, entry := range policy.Keys { + if entry.RSAPublicKey == nil && entry.RSAKey != nil { + entry.RSAPublicKey = entry.RSAKey.Public().(*rsa.PublicKey) + policy.Keys[name] = entry + } + } + } + policy.l = new(sync.RWMutex) return &policy, nil @@ -419,8 +468,6 @@ type Policy struct { // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault AllowImportedKeyRotation bool - - ManagedKeyName string `json:"managed_key_name,omitempty"` } func (p *Policy) Lock(exclusive bool) { @@ -724,6 +771,10 @@ func (p *Policy) Upgrade(ctx context.Context, storage logical.Storage, randReade entry.HMACKey = hmacKey + + // HMAC-type keys use the key itself as the HMAC key; mirror it here, + // before the entry is stored back into the map below. + if p.Type == KeyType_HMAC { + entry.HMACKey = entry.Key + } p.Keys[strconv.Itoa(p.LatestVersion)] = entry persistNeeded = true } if persistNeeded { @@ -943,6 +994,7 @@ func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factori if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)} } + case ManagedKeyFactory: default: return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)} } @@ -958,10 +1010,40 @@ func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factori return "", err } key := keyEntry.RSAKey + if key == nil { + return "", errutil.InternalError{Err: "cannot decrypt ciphertext, key version does not have a private counterpart"} + } plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} } + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + var aad []byte + var managedKeyFactory ManagedKeyFactory + for _, f := range factories { + switch factory := f.(type) { + case AssociatedDataFactory: + aad, err = factory.GetAssociatedData() + if err != nil { + return "", err + } + case ManagedKeyFactory:
+ managedKeyFactory = factory + } + } + + if managedKeyFactory == nil { + return "", errors.New("key type is managed_key, but managed key parameters were not provided") + } + + plain, err = p.decryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, decoded, nonce, aad) + if err != nil { + return "", err + } default: return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} @@ -1005,13 +1087,13 @@ func (p *Policy) minRSAPSSSaltLength() int { return rsa.PSSSaltLengthEqualsHash } -func (p *Policy) maxRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash) int { +func (p *Policy) maxRSAPSSSaltLength(keyBitLen int, hash crypto.Hash) int { // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=288 - return (priv.N.BitLen()-1+7)/8 - 2 - hash.Size() + return (keyBitLen-1+7)/8 - 2 - hash.Size() } -func (p *Policy) validRSAPSSSaltLength(priv *rsa.PrivateKey, hash crypto.Hash, saltLength int) bool { - return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(priv, hash) +func (p *Policy) validRSAPSSSaltLength(keyBitLen int, hash crypto.Hash, saltLength int) bool { + return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(keyBitLen, hash) } func (p *Policy) SignWithOptions(ver int, context, input []byte, options *SigningOptions) (*SigningResult, error) { @@ -1038,6 +1120,11 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, err } + // Before signing, check if key has its private part, if not return error + if keyParams.IsPrivateKeyMissing() { + return nil, errutil.UserError{Err: "requested version for signing does not contain a private part"} + } + hashAlgorithm := options.HashAlgorithm marshaling := options.Marshaling saltLength := options.SaltLength @@ -1144,7 +1231,7 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { + if !p.validRSAPSSSaltLength(key.N.BitLen(), algo, saltLength) { return nil, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } sig, err = rsa.SignPSS(rand.Reader, key, algo, input, &rsa.PSSOptions{SaltLength: saltLength}) @@ -1160,6 +1247,17 @@ func (p *Policy) SignWithOptions(ver int, context, input []byte, options *Signin return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} } + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return nil, err + } + + sig, err = p.signWithManagedKey(options, keyEntry, input) + if err != nil { + return nil, err + } + default: return nil, fmt.Errorf("unsupported key type %v", p.Type) } @@ -1287,20 +1385,30 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil case KeyType_ED25519: - var key ed25519.PrivateKey + var pub ed25519.PublicKey if p.Derived { // Derive the key that should be used - var err error - key, err = p.GetKey(context, ver, 32) + key, err := p.GetKey(context, ver, 32) if err != nil { return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} } + pub = ed25519.PrivateKey(key).Public().(ed25519.PublicKey) } else { - key = ed25519.PrivateKey(p.Keys[strconv.Itoa(ver)].Key) + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + raw, err := 
base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return false, err + } + + pub = ed25519.PublicKey(raw) } - return ed25519.Verify(key.Public().(ed25519.PublicKey), input, sigBytes), nil + return ed25519.Verify(pub, input, sigBytes), nil case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: keyEntry, err := p.safeGetKeyEntry(ver) @@ -1308,8 +1416,6 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o return false, err } - key := keyEntry.RSAKey - algo, ok := CryptoHashMap[hashAlgorithm] if !ok { return false, errutil.InternalError{Err: "unsupported hash algorithm"} @@ -1321,30 +1427,62 @@ func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, o switch sigAlgorithm { case "pss": - if !p.validRSAPSSSaltLength(key, algo, saltLength) { + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + if !p.validRSAPSSSaltLength(publicKey.N.BitLen(), algo, saltLength) { return false, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} } - err = rsa.VerifyPSS(&key.PublicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) + err = rsa.VerifyPSS(publicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) case "pkcs1v15": - err = rsa.VerifyPKCS1v15(&key.PublicKey, algo, input, sigBytes) + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + err = rsa.VerifyPKCS1v15(publicKey, algo, input, sigBytes) default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} } return err == nil, nil + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + return p.verifyWithManagedKey(options, keyEntry, input, sigBytes) + default: return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} } } func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte, randReader io.Reader) error { + return p.ImportPublicOrPrivate(ctx, storage, key, true, randReader) +} + +func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Storage, key []byte, isPrivateKey bool, randReader io.Reader) error { now := time.Now() entry := KeyEntry{ CreationTime: now, DeprecatedCreationTime: now.Unix(), } + // Before we insert this entry, check if the latest version is incomplete + // and this entry matches the current version; if so, return without + // updating to the next version. 
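The verification paths above now resolve a usable public key even when the private half was never imported: Ed25519 decodes `FormattedPublicKey`, and RSA falls back to `RSAPublicKey` when `RSAKey` is nil. A self-contained sketch (standard library only, not code from this changeset) of the property this relies on, namely that RSA-PSS verification needs nothing but the public key:

```
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Generate a throwaway key pair; in the patched code the private half
	// may be absent entirely, with only the public key stored.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	digest := sha256.Sum256([]byte("input to sign"))
	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:],
		&rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthAuto})
	if err != nil {
		panic(err)
	}

	// Verification only touches the public key, which is why a key version
	// with IsPrivateKeyMissing() == true can still verify signatures.
	err = rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig,
		&rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthAuto})
	fmt.Println("verified:", err == nil)
}
```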
+ if p.LatestVersion > 0 { + latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] + if latestKey.IsPrivateKeyMissing() && isPrivateKey { + if err := p.ImportPrivateKeyForVersion(ctx, storage, p.LatestVersion, key); err == nil { + return nil + } + } + } + if p.Type != KeyType_HMAC { hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) if err != nil { @@ -1353,6 +1491,10 @@ func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte entry.HMACKey = hmacKey } + if p.Type == KeyType_ED25519 && p.Derived && !isPrivateKey { + return fmt.Errorf("unable to import only public key for derived Ed25519 key: imported key should not be an Ed25519 key pair but is instead an HKDF key") + } + if (p.Type == KeyType_AES128_GCM96 && len(key) != 16) || ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) || (p.Type == KeyType_HMAC && (len(key) < HmacMinKeySize || len(key) > HmacMaxKeySize)) { @@ -1363,85 +1505,49 @@ func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte entry.Key = key if p.Type == KeyType_HMAC { p.KeySize = len(key) + entry.HMACKey = key } } else { - parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - if strings.Contains(err.Error(), "unknown elliptic curve") { - var edErr error - parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) - if edErr != nil { - return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + var parsedKey any + var err error + if isPrivateKey { + parsedKey, err = x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) } - - // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! 
- } else { - return fmt.Errorf("error parsing asymmetric key: %s", err) } - } - - switch parsedPrivateKey.(type) { - case *ecdsa.PrivateKey: - if p.Type != KeyType_ECDSA_P256 && p.Type != KeyType_ECDSA_P384 && p.Type != KeyType_ECDSA_P521 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - - ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) - curve := elliptic.P256() - if p.Type == KeyType_ECDSA_P384 { - curve = elliptic.P384() - } else if p.Type == KeyType_ECDSA_P521 { - curve = elliptic.P521() - } - - if ecdsaKey.Curve != curve { - return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } else { + pemBlock, _ := pem.Decode(key) + if pemBlock == nil { + return fmt.Errorf("error parsing public key: not in PEM format") } - entry.EC_D = ecdsaKey.D - entry.EC_X = ecdsaKey.X - entry.EC_Y = ecdsaKey.Y - derBytes, err := x509.MarshalPKIXPublicKey(ecdsaKey.Public()) + parsedKey, err = x509.ParsePKIXPublicKey(pemBlock.Bytes) if err != nil { - return errwrap.Wrapf("error marshaling public key: {{err}}", err) - } - pemBlock := &pem.Block{ - Type: "PUBLIC KEY", - Bytes: derBytes, - } - pemBytes := pem.EncodeToMemory(pemBlock) - if pemBytes == nil || len(pemBytes) == 0 { - return fmt.Errorf("error PEM-encoding public key") - } - entry.FormattedPublicKey = string(pemBytes) - case ed25519.PrivateKey: - if p.Type != KeyType_ED25519 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - privateKey := parsedPrivateKey.(ed25519.PrivateKey) - - entry.Key = privateKey - publicKey := privateKey.Public().(ed25519.PublicKey) - entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) - case *rsa.PrivateKey: - if p.Type != KeyType_RSA2048 && p.Type != KeyType_RSA3072 && p.Type != KeyType_RSA4096 { - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) - } - - keyBytes := 256 - if p.Type == KeyType_RSA3072 { - keyBytes = 384 - } else if p.Type == KeyType_RSA4096 { - keyBytes = 512 - } - rsaKey := parsedPrivateKey.(*rsa.PrivateKey) - if rsaKey.Size() != keyBytes { - return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + return fmt.Errorf("error parsing public key: %w", err) } + } - entry.RSAKey = rsaKey - default: - return fmt.Errorf("invalid key type: expected %s, got %T", p.Type, parsedPrivateKey) + err = entry.parseFromKey(p.Type, parsedKey) + if err != nil { + return err } } @@ -1520,7 +1626,7 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { if p.Type == KeyType_AES128_GCM96 { numBytes = 16 } else if p.Type == KeyType_HMAC { - numBytes := p.KeySize + numBytes = p.KeySize if numBytes < HmacMinKeySize || numBytes > HmacMaxKeySize { return fmt.Errorf("invalid key size for HMAC key, must be between %d and %d bytes", HmacMinKeySize, HmacMaxKeySize) } @@ -1531,6 +1637,11 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { } entry.Key = newKey + if p.Type == KeyType_HMAC { + // To avoid causing problems, ensure HMACKey = Key. + entry.HMACKey = newKey + } + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: var curve elliptic.Curve switch p.Type { @@ -1564,13 +1675,19 @@ func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { entry.FormattedPublicKey = string(pemBytes) case KeyType_ED25519: + // Go uses a 64-byte private key for Ed25519 keys (private+public, each + // 32-bytes long). 
When we do key derivation, we still generate a 32-byte + random value (and compute the corresponding Ed25519 public key), but + use this entire 64-byte key as if it was an HKDF key. The corresponding + underlying public key is never returned (which is probably good, because + doing so would leak half of our HKDF key...), but means we cannot import + derived-enabled Ed25519 public key components. pub, pri, err := ed25519.GenerateKey(randReader) if err != nil { return err } entry.Key = pri entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) - case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: bitSize := 2048 if p.Type == KeyType_RSA3072 { @@ -1896,9 +2013,15 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value encBytes := 32 hmacBytes := 0 - if p.convergentVersion(ver) > 2 { + convergentVersion := p.convergentVersion(ver) + if convergentVersion > 2 { deriveHMAC = true hmacBytes = 32 + if len(nonce) > 0 { + return "", errutil.UserError{Err: "nonce provided when not allowed"} + } + } else if len(nonce) > 0 && (!p.ConvergentEncryption || convergentVersion != 1) { + return "", errutil.UserError{Err: "nonce provided when not allowed"} } if p.Type == KeyType_AES128_GCM96 { encBytes = 16 @@ -1941,6 +2064,7 @@ if err != nil { return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)} } + case ManagedKeyFactory: default: return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)} } @@ -1955,11 +2079,44 @@ if err != nil { return "", err } - key := keyEntry.RSAKey - ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, &key.PublicKey, plaintext, nil) + var publicKey *rsa.PublicKey + if keyEntry.RSAKey != nil { + publicKey = &keyEntry.RSAKey.PublicKey + } else { + publicKey = keyEntry.RSAPublicKey + } + ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, publicKey, plaintext, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)} + } + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + + var aad []byte + var managedKeyFactory ManagedKeyFactory + for _, f := range factories { + switch factory := f.(type) { + case AssociatedDataFactory: + aad, err = factory.GetAssociatedData() + if err != nil { + return "", err + } + case ManagedKeyFactory: + managedKeyFactory = factory + } + } + + if managedKeyFactory == nil { + return "", errors.New("key type is managed_key, but managed key parameters were not provided") + } + + ciphertext, err = p.encryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, plaintext, nonce, aad) + if err != nil { + return "", err + } default: return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} @@ -1973,3 +2130,470 @@ func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value return encoded, nil } + +func (p *Policy) KeyVersionCanBeUpdated(keyVersion int, isPrivateKey bool) error { + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return err + } + + if !p.Type.ImportPublicKeySupported() { + return errors.New("provided type does not support importing key versions") + } + + isPrivateKeyMissing :=
keyEntry.IsPrivateKeyMissing() + if isPrivateKeyMissing && !isPrivateKey { + return errors.New("cannot add a public key to a key version that already has a public key set") + } + + if !isPrivateKeyMissing { + return errors.New("private key imported, key version cannot be updated") + } + + return nil +} + +func (p *Policy) ImportPrivateKeyForVersion(ctx context.Context, storage logical.Storage, keyVersion int, key []byte) error { + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return err + } + + // Parse key + parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedPrivateKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) + } + } + + switch parsedPrivateKey.(type) { + case *ecdsa.PrivateKey: + ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) + pemBlock, _ := pem.Decode([]byte(keyEntry.FormattedPublicKey)) + if pemBlock == nil { + return fmt.Errorf("failed to parse key entry public key: invalid PEM blob") + } + publicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil || publicKey == nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !publicKey.(*ecdsa.PublicKey).Equal(&ecdsaKey.PublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case *rsa.PrivateKey: + rsaKey := parsedPrivateKey.(*rsa.PrivateKey) + if !rsaKey.PublicKey.Equal(keyEntry.RSAPublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case ed25519.PrivateKey: + ed25519Key := parsedPrivateKey.(ed25519.PrivateKey) + publicKey, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !ed25519.PublicKey(publicKey).Equal(ed25519Key.Public()) { + return fmt.Errorf("cannot import key, key pair does not match") + } + } + + err = keyEntry.parseFromKey(p.Type, parsedPrivateKey) + if err != nil { + return err + } + + p.Keys[strconv.Itoa(keyVersion)] = keyEntry + + return p.Persist(ctx, storage) +} + +func (ke *KeyEntry) parseFromKey(PolKeyType KeyType, parsedKey any) error { + switch parsedKey.(type) { + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + if PolKeyType != KeyType_ECDSA_P256 && PolKeyType != KeyType_ECDSA_P384 && PolKeyType != KeyType_ECDSA_P521 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + curve := elliptic.P256() + if PolKeyType == KeyType_ECDSA_P384 { + curve = elliptic.P384() + } else if PolKeyType == KeyType_ECDSA_P521 { + curve = elliptic.P521() + } + + var derBytes []byte + var err error + ecdsaKey, ok := parsedKey.(*ecdsa.PrivateKey) + if ok { + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_D 
= ecdsaKey.D + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey.Public()) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } else { + ecdsaKey := parsedKey.(*ecdsa.PublicKey) + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } + + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + ke.FormattedPublicKey = string(pemBytes) + case ed25519.PrivateKey, ed25519.PublicKey: + if PolKeyType != KeyType_ED25519 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + privateKey, ok := parsedKey.(ed25519.PrivateKey) + if ok { + ke.Key = privateKey + publicKey := privateKey.Public().(ed25519.PublicKey) + ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + } else { + publicKey := parsedKey.(ed25519.PublicKey) + ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + } + case *rsa.PrivateKey, *rsa.PublicKey: + if PolKeyType != KeyType_RSA2048 && PolKeyType != KeyType_RSA3072 && PolKeyType != KeyType_RSA4096 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + keyBytes := 256 + if PolKeyType == KeyType_RSA3072 { + keyBytes = 384 + } else if PolKeyType == KeyType_RSA4096 { + keyBytes = 512 + } + + rsaKey, ok := parsedKey.(*rsa.PrivateKey) + if ok { + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAKey = rsaKey + ke.RSAPublicKey = rsaKey.Public().(*rsa.PublicKey) + } else { + rsaKey := parsedKey.(*rsa.PublicKey) + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAPublicKey = rsaKey + } + default: + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + return nil +} + +func (p *Policy) WrapKey(ver int, targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + if !p.Type.SigningSupported() { + return "", fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for key wrapping is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is less than the minimum encryption key version"} + } + + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + + return keyEntry.WrapKey(targetKey, targetKeyType, hash) +} + +func (ke *KeyEntry) WrapKey(targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + // Presently this method implements a CKM_RSA_AES_KEY_WRAP-compatible + // wrapping interface and only works on RSA keyEntries as a result. 
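`parseFromKey` above accepts either half of a key pair; callers decode private keys from PKCS#8 DER and public keys from PEM-wrapped PKIX DER before handing the result over. A standalone illustration of that decoding split, shown here before `WrapKey` continues below (an illustrative sketch only; the helper in this patch additionally validates curves and RSA key sizes against the policy's key type):

```
package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// parseKeyMaterial mirrors the dispatch around parseFromKey: private keys
// arrive as PKCS#8 DER, public keys as PEM-wrapped PKIX DER.
func parseKeyMaterial(data []byte, isPrivateKey bool) (any, error) {
	if isPrivateKey {
		return x509.ParsePKCS8PrivateKey(data)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("public key is not in PEM format")
	}
	return x509.ParsePKIXPublicKey(block.Bytes)
}

func main() {
	_, priv, err := ed25519.GenerateKey(nil) // nil reader means crypto/rand
	if err != nil {
		panic(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(priv)
	if err != nil {
		panic(err)
	}

	key, err := parseKeyMaterial(der, true)
	if err != nil {
		panic(err)
	}
	switch key.(type) {
	case ed25519.PrivateKey:
		fmt.Println("parsed an Ed25519 private key")
	case *rsa.PrivateKey, *ecdsa.PrivateKey:
		fmt.Println("parsed an RSA or ECDSA private key")
	}
}
```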
+ if ke.RSAPublicKey == nil { + return "", fmt.Errorf("unsupported key type in use; must be an RSA key") + } + + var preppedTargetKey []byte + switch targetKeyType { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC: + var ok bool + preppedTargetKey, ok = targetKey.([]byte) + if !ok { + return "", fmt.Errorf("failed to wrap target key for import: symmetric key not provided in byte format (%T)", targetKey) + } + default: + var err error + preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey) + if err != nil { + return "", fmt.Errorf("failed to wrap target key for import: %w", err) + } + } + + result, err := wrapTargetPKCS8ForImport(ke.RSAPublicKey, preppedTargetKey, hash) + if err != nil { + return result, fmt.Errorf("failed to wrap target key for import: %w", err) + } + + return result, nil +} + +func wrapTargetPKCS8ForImport(wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hash hash.Hash) (string, error) { + // Generate an ephemeral AES-256 key + ephKey, err := uuid.GenerateRandomBytes(32) + if err != nil { + return "", fmt.Errorf("failed to generate an ephemeral AES wrapping key: %w", err) + } + + // Wrap ephemeral AES key with public wrapping key + ephKeyWrapped, err := rsa.EncryptOAEP(hash, rand.Reader, wrappingKey, ephKey, []byte{} /* label */) + if err != nil { + return "", fmt.Errorf("failed to encrypt ephemeral wrapping key with public key: %w", err) + } + + // Create KWP instance for wrapping target key + kwp, err := subtle.NewKWP(ephKey) + if err != nil { + return "", fmt.Errorf("failed to generate new KWP from AES key: %w", err) + } + + // Wrap target key with KWP + targetKeyWrapped, err := kwp.Wrap(preppedTargetKey) + if err != nil { + return "", fmt.Errorf("failed to wrap target key with KWP: %w", err) + } + + // Combine the wrapped keys into a single blob and base64-encode it + wrappedKeys := append(ephKeyWrapped, targetKeyWrapped...)
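The blob built above follows the CKM_RSA_AES_KEY_WRAP layout: the OAEP-encrypted ephemeral AES-256 key, whose length always equals the RSA modulus size, followed by the KWP-wrapped target key. A sketch of the inverse operation plus a round trip; it assumes SHA-256 OAEP (the hash is a parameter above, so this is one possible instantiation, not code from this changeset):

```
package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"

	"github.com/google/tink/go/kwp/subtle"
)

// unwrapImportedKey reverses wrapTargetPKCS8ForImport for SHA-256 OAEP:
// blob = RSA-OAEP(ephemeral AES-256 key) || KWP(target key), with the OAEP
// portion exactly the size of the RSA modulus.
func unwrapImportedKey(wrappingKey *rsa.PrivateKey, blob []byte) ([]byte, error) {
	cut := wrappingKey.Size()
	if len(blob) < cut {
		return nil, fmt.Errorf("wrapped blob too short")
	}

	// Recover the ephemeral AES key, then unwrap the target with KWP.
	ephKey, err := rsa.DecryptOAEP(sha256.New(), nil, wrappingKey, blob[:cut], []byte{})
	if err != nil {
		return nil, err
	}
	kwp, err := subtle.NewKWP(ephKey)
	if err != nil {
		return nil, err
	}
	return kwp.Unwrap(blob[cut:])
}

func main() {
	wrappingKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Wrap a sample 32-byte target key with the same steps as the helper above.
	target := bytes.Repeat([]byte{0x42}, 32)
	ephKey := make([]byte, 32)
	if _, err := rand.Read(ephKey); err != nil {
		panic(err)
	}
	ephWrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &wrappingKey.PublicKey, ephKey, []byte{})
	if err != nil {
		panic(err)
	}
	kwp, err := subtle.NewKWP(ephKey)
	if err != nil {
		panic(err)
	}
	targetWrapped, err := kwp.Wrap(target)
	if err != nil {
		panic(err)
	}

	got, err := unwrapImportedKey(wrappingKey, append(ephWrapped, targetWrapped...))
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(got, target))
}
```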
+ return base64.StdEncoding.EncodeToString(wrappedKeys), nil +} + +func (p *Policy) CreateCsr(keyVersion int, csrTemplate *x509.CertificateRequest) ([]byte, error) { + if !p.Type.SigningSupported() { + return nil, errutil.UserError{Err: fmt.Sprintf("key type '%s' does not support signing", p.Type)} + } + + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return nil, err + } + + if keyEntry.IsPrivateKeyMissing() { + return nil, errutil.UserError{Err: "private key not imported for key version selected"} + } + + csrTemplate.Signature = nil + csrTemplate.SignatureAlgorithm = x509.UnknownSignatureAlgorithm + + var key crypto.Signer + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + key = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: keyEntry.EC_X, + Y: keyEntry.EC_Y, + }, + D: keyEntry.EC_D, + } + + case KeyType_ED25519: + if p.Derived { + return nil, errutil.UserError{Err: "operation not supported on keys with derivation enabled"} + } + key = ed25519.PrivateKey(keyEntry.Key) + + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + key = keyEntry.RSAKey + + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("selected key type '%s' does not support signing", p.Type.String())} + } + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, key) + if err != nil { + return nil, fmt.Errorf("could not create the certificate request: %w", err) + } + + pemCsr := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + }) + + return pemCsr, nil +} + +func (p *Policy) ValidateLeafCertKeyMatch(keyVersion int, certPublicKeyAlgorithm x509.PublicKeyAlgorithm, certPublicKey any) (bool, error) { + if !p.Type.SigningSupported() { + return false, errutil.UserError{Err: fmt.Sprintf("key type '%s' does not support signing", p.Type)} + } + + var keyTypeMatches bool + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + if certPublicKeyAlgorithm == x509.ECDSA { + keyTypeMatches = true + } + case KeyType_ED25519: + if certPublicKeyAlgorithm == x509.Ed25519 { + keyTypeMatches = true + } + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + if certPublicKeyAlgorithm == x509.RSA { + keyTypeMatches = true + } + } + if !keyTypeMatches { + return false, errutil.UserError{Err: fmt.Sprintf("provided leaf certificate public key algorithm '%s' does not match the transit key type '%s'", + certPublicKeyAlgorithm, p.Type)} + } + + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return false, err + } + + switch certPublicKeyAlgorithm { + case x509.ECDSA: + certPublicKey := certPublicKey.(*ecdsa.PublicKey) + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + publicKey := &ecdsa.PublicKey{ + Curve: curve, + X: keyEntry.EC_X, + Y: keyEntry.EC_Y, + } + + return publicKey.Equal(certPublicKey), nil + + case x509.Ed25519: + if p.Derived { + return false, errutil.UserError{Err: "operation not supported on keys with derivation enabled"} + } + certPublicKey := certPublicKey.(ed25519.PublicKey) + + raw, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return false, err + } + publicKey
:= ed25519.PublicKey(raw) + + return publicKey.Equal(certPublicKey), nil + + case x509.RSA: + certPublicKey := certPublicKey.(*rsa.PublicKey) + publicKey := keyEntry.RSAKey.PublicKey + return publicKey.Equal(certPublicKey), nil + + case x509.UnknownPublicKeyAlgorithm: + return false, errutil.InternalError{Err: "certificate signed with an unknown algorithm"} + } + + return false, nil +} + +func (p *Policy) ValidateAndPersistCertificateChain(ctx context.Context, keyVersion int, certChain []*x509.Certificate, storage logical.Storage) error { + if len(certChain) == 0 { + return errutil.UserError{Err: "expected at least one certificate in the parsed certificate chain"} + } + + if certChain[0].BasicConstraintsValid && certChain[0].IsCA { + return errutil.UserError{Err: "certificate in the first position is not a leaf certificate"} + } + + for _, cert := range certChain[1:] { + if cert.BasicConstraintsValid && !cert.IsCA { + return errutil.UserError{Err: "provided certificate chain contains more than one leaf certificate"} + } + } + + valid, err := p.ValidateLeafCertKeyMatch(keyVersion, certChain[0].PublicKeyAlgorithm, certChain[0].PublicKey) + if err != nil { + prefixedErr := fmt.Errorf("could not validate key match between leaf certificate key and key version in transit: %w", err) + switch err.(type) { + case errutil.UserError: + return errutil.UserError{Err: prefixedErr.Error()} + default: + return prefixedErr + } + } + if !valid { + return fmt.Errorf("leaf certificate public key does not match the key version selected") + } + + keyEntry, err := p.safeGetKeyEntry(keyVersion) + if err != nil { + return err + } + + // Convert the certificate chain to DER format + derCertificates := make([][]byte, len(certChain)) + for i, cert := range certChain { + derCertificates[i] = cert.Raw + } + + keyEntry.CertificateChain = derCertificates + + p.Keys[strconv.Itoa(keyVersion)] = keyEntry + return p.Persist(ctx, storage) +} diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go index 2df73971a2b1..f5e4d35eb81c 100644 --- a/sdk/helper/keysutil/policy_test.go +++ b/sdk/helper/keysutil/policy_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( @@ -843,7 +846,7 @@ func Test_RSA_PSS(t *testing.T) { } cryptoHash := CryptoHashMap[hashType] minSaltLength := p.minRSAPSSSaltLength() - maxSaltLength := p.maxRSAPSSSaltLength(rsaKey, cryptoHash) + maxSaltLength := p.maxRSAPSSSaltLength(rsaKey.N.BitLen(), cryptoHash) hash := cryptoHash.New() hash.Write(input) input = hash.Sum(nil) diff --git a/sdk/helper/keysutil/transit_lru.go b/sdk/helper/keysutil/transit_lru.go index cd1f6dafe693..66ea66dc74e2 100644 --- a/sdk/helper/keysutil/transit_lru.go +++ b/sdk/helper/keysutil/transit_lru.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import lru "github.com/hashicorp/golang-lru" diff --git a/sdk/helper/keysutil/transit_syncmap.go b/sdk/helper/keysutil/transit_syncmap.go index ce9071380a99..fddcf706b2f1 100644 --- a/sdk/helper/keysutil/transit_syncmap.go +++ b/sdk/helper/keysutil/transit_syncmap.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( diff --git a/sdk/helper/keysutil/util.go b/sdk/helper/keysutil/util.go index 063af5914672..94a56d42c573 100644 --- a/sdk/helper/keysutil/util.go +++ b/sdk/helper/keysutil/util.go @@ -1,6 +1,10 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package keysutil import ( + "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "errors" @@ -53,6 +57,9 @@ var ( // Other implementations may use the OID 1.3.101.110 from // https://datatracker.ietf.org/doc/html/rfc8410. oidRFC8410Ed25519 = asn1.ObjectIdentifier{1, 3, 101, 110} + + // See crypto/x509/x509.go in the Go toolchain source distribution. + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} ) func isEd25519OID(oid asn1.ObjectIdentifier) bool { @@ -113,3 +120,32 @@ func ParsePKCS8Ed25519PrivateKey(der []byte) (key interface{}, err error) { return ed25519.NewKeyFromSeed(ed25519Key.PrivateKey), nil } + +// ParsePKCS8RSAPSSPrivateKey parses an unencrypted RSA/PSS private key in PKCS #8, ASN.1 DER form. +// +// This helper only supports RSA/PSS keys (with OID 1.2.840.113549.1.1.10). +// +// It returns a *rsa.PrivateKey. +// +// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY". +func ParsePKCS8RSAPSSPrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err == nil { + switch { + case privKey.Algo.Algorithm.Equal(oidSignatureRSAPSS): + // Fall through; there's no parameters here unlike ECDSA + // containers, so we can go to parsing the inner rsaPrivateKey + // object. + default: + return nil, errors.New("keysutil: failed to parse key as RSA PSS private key") + } + } + + key, err = x509.ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, fmt.Errorf("keysutil: failed to parse inner RSA PSS private key: %w", err) + } + + return key, nil +} diff --git a/sdk/helper/ldaputil/client.go b/sdk/helper/ldaputil/client.go index 8a7ac4822c34..a1901fdcb6dc 100644 --- a/sdk/helper/ldaputil/client.go +++ b/sdk/helper/ldaputil/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( @@ -5,11 +8,13 @@ import ( "crypto/tls" "crypto/x509" "encoding/binary" + "encoding/hex" "fmt" "math" "net" "net/url" "strings" + "sync" "text/template" "time" @@ -28,6 +33,7 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { var retErr *multierror.Error var conn Connection urls := strings.Split(cfg.Url, ",") + for _, uut := range urls { u, err := url.Parse(uut) if err != nil { @@ -40,12 +46,20 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { } var tlsConfig *tls.Config + dialer := net.Dialer{ + Timeout: time.Duration(cfg.ConnectionTimeout) * time.Second, + } + switch u.Scheme { case "ldap": if port == "" { port = "389" } - conn, err = c.LDAP.Dial("tcp", net.JoinHostPort(host, port)) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + + conn, err = c.LDAP.DialURL(fullAddr, opt) if err != nil { break } @@ -68,7 +82,15 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { if err != nil { break } - conn, err = c.LDAP.DialTLS("tcp", net.JoinHostPort(host, port), tlsConfig) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + tls := ldap.DialWithTLSConfig(tlsConfig) + + conn, err = c.LDAP.DialURL(fullAddr, opt, tls) + if err != nil { + break + } default: retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", net.JoinHostPort(host, port))) continue @@ -119,10 +141,11 @@ func (c *Client) makeLdapSearchRequest(cfg *ConfigEntry, conn Connection, userna c.Logger.Debug("discovering user", "userdn", cfg.UserDN, "filter", renderedFilter) } ldapRequest := &ldap.SearchRequest{ - BaseDN: cfg.UserDN, - Scope: ldap.ScopeWholeSubtree, - Filter: renderedFilter, - SizeLimit: 2, // Should be only 1 result. Any number larger (2 or more) means access denied. + BaseDN: cfg.UserDN, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Scope: ldap.ScopeWholeSubtree, + Filter: renderedFilter, + SizeLimit: 2, // Should be only 1 result. Any number larger (2 or more) means access denied. Attributes: []string{ cfg.UserAttr, // Return only needed attributes }, @@ -206,9 +229,17 @@ func (c *Client) RenderUserSearchFilter(cfg *ConfigEntry, username string) (stri } if cfg.UPNDomain != "" { context.UserAttr = "userPrincipalName" - context.Username = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain) + // Intentionally, calling EscapeFilter(...) (vs EscapeValue) since the + // username is being injected into a search filter. + // As an untrusted string, the username must be escaped according to RFC + // 4515, in order to prevent attackers from injecting characters that could modify the filter + context.Username = fmt.Sprintf("%s@%s", ldap.EscapeFilter(username), cfg.UPNDomain) } + // Execute the template. Note that the template context contains escaped input and does + // not provide behavior via functions. Additionally, no function map has been provided + // during template initialization. 
The only template functions available during execution + are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions var renderedFilter bytes.Buffer if err := t.Execute(&renderedFilter, context); err != nil { return "", fmt.Errorf("LDAP search failed due to template parsing error: %w", err) } @@ -274,10 +305,11 @@ func (c *Client) GetUserDN(cfg *ConfigEntry, conn Connection, bindDN, username s c.Logger.Debug("searching upn", "userdn", cfg.UserDN, "filter", filter) } result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: cfg.UserDN, - Scope: ldap.ScopeWholeSubtree, - Filter: filter, - SizeLimit: math.MaxInt32, + BaseDN: cfg.UserDN, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: filter, + SizeLimit: math.MaxInt32, }) if err != nil { return userDN, fmt.Errorf("LDAP search failed for detecting user: %w", err) } @@ -335,9 +367,10 @@ func (c *Client) performLdapFilterGroupsSearch(cfg *ConfigEntry, conn Connection } result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: cfg.GroupDN, - Scope: ldap.ScopeWholeSubtree, - Filter: renderedQuery.String(), + BaseDN: cfg.GroupDN, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: renderedQuery.String(), Attributes: []string{ cfg.GroupAttr, }, @@ -383,6 +416,10 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi ldap.EscapeFilter(username), } + // Execute the template. Note that the template context contains escaped input and does + // not provide behavior via functions. Additionally, no function map has been provided + // during template initialization. The only template functions available during execution + // are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions var renderedQuery bytes.Buffer if err := t.Execute(&renderedQuery, context); err != nil { return nil, fmt.Errorf("LDAP search failed due to template parsing error: %w", err) } @@ -393,14 +430,15 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi result, err := conn.SearchWithPaging(&ldap.SearchRequest{ - BaseDN: cfg.GroupDN, - Scope: ldap.ScopeWholeSubtree, - Filter: renderedQuery.String(), + BaseDN: cfg.GroupDN, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: renderedQuery.String(), Attributes: []string{ cfg.GroupAttr, }, SizeLimit: math.MaxInt32, - }, math.MaxInt32) + }, uint32(cfg.MaximumPageSize)) if err != nil { return nil, fmt.Errorf("LDAP search failed: %w", err) } @@ -441,10 +479,16 @@ func sidBytesToString(b []byte) (string, error) { } func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, userDN string) ([]*ldap.Entry, error) { + var wg sync.WaitGroup + var lock sync.Mutex + taskChan := make(chan string) + maxWorkers := 10 + result, err := conn.Search(&ldap.SearchRequest{ - BaseDN: userDN, - Scope: ldap.ScopeBaseObject, - Filter: "(objectClass=*)", + BaseDN: userDN, + Scope: ldap.ScopeBaseObject, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: "(objectClass=*)", Attributes: []string{ "tokenGroups", }, @@ -460,36 +504,53 @@ func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, userEntry := result.Entries[0] groupAttrValues := userEntry.GetRawAttributeValues("tokenGroups") groupEntries := make([]*ldap.Entry, 0, len(groupAttrValues)) + + for i := 0; i < maxWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for sid := range taskChan { +
groupResult, err := conn.Search(&ldap.SearchRequest{ + BaseDN: fmt.Sprintf("<SID=%s>", sid), + Scope: ldap.ScopeBaseObject, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: "(objectClass=*)", + Attributes: []string{ + "1.1", // RFC no attributes + }, + SizeLimit: 1, + }) + if err != nil { + c.Logger.Warn("unable to read the group sid", "sid", sid) + continue + } + + if len(groupResult.Entries) == 0 { + c.Logger.Warn("unable to find the group", "sid", sid) + continue + } + + lock.Lock() + groupEntries = append(groupEntries, groupResult.Entries[0]) + lock.Unlock() + } + }() + } + for _, sidBytes := range groupAttrValues { sidString, err := sidBytesToString(sidBytes) if err != nil { c.Logger.Warn("unable to read sid", "err", err) continue } - - groupResult, err := conn.Search(&ldap.SearchRequest{ - BaseDN: fmt.Sprintf("<SID=%s>", sidString), - Scope: ldap.ScopeBaseObject, - Filter: "(objectClass=*)", - Attributes: []string{ - "1.1", // RFC no attributes - }, - SizeLimit: 1, - }) - if err != nil { - c.Logger.Warn("unable to read the group sid", "sid", sidString) - continue - } - if len(groupResult.Entries) == 0 { - c.Logger.Warn("unable to find the group", "sid", sidString) - continue - } - - groupEntries = append(groupEntries, groupResult.Entries[0]) + taskChan <- sidString } + close(taskChan) + wg.Wait() + return groupEntries, nil } @@ -519,7 +580,7 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string, if cfg.UseTokenGroups { entries, err = c.performLdapTokenGroupsSearch(cfg, conn, userDN) } else { - if paging, ok := conn.(PagingConnection); ok { + if paging, ok := conn.(PagingConnection); ok && cfg.MaximumPageSize > 0 { entries, err = c.performLdapFilterGroupsSearchPaging(cfg, paging, userDN, username) } else { entries, err = c.performLdapFilterGroupsSearch(cfg, conn, userDN, username) @@ -561,42 +622,59 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string, } // EscapeLDAPValue is exported because a plugin uses it outside this package. +// EscapeLDAPValue will properly escape the input string as an LDAP value. +// RFC 4514 states the following must be escaped: +// - leading space or hash +// - trailing space +// - special characters '"', '+', ',', ';', '<', '>', '\\' +// - non-printable characters (hex-escaped) func EscapeLDAPValue(input string) string { if input == "" { return "" } - // RFC4514 forbids un-escaped: - // - leading space or hash - // - trailing space - // - special characters '"', '+', ',', ';', '<', '>', '\\' - // - null - for i := 0; i < len(input); i++ { escaped := false if input[i] == '\\' && i+1 < len(input)-1 { i++ escaped = true } switch input[i] { case '"', '+', ',', ';', '<', '>', '\\': if !escaped { input = input[0:i] + "\\" + input[i:] i++ } + buf := bytes.Buffer{} + + escFn := func(c byte) { + buf.WriteByte('\\') + buf.WriteByte(c) + } + + inputLen := len(input) + for i := 0; i < inputLen; i++ { + char := input[i] + switch { + case i == 0 && (char == ' ' || char == '#'): + // leading space or hash. + escFn(char) continue + case i == inputLen-1 && char == ' ': + // trailing space.
+ escFn(char) + continue + case specialChar(char): + escFn(char) + continue + case char < ' ' || char > '~': + // anything that's not between the ascii space and tilde must be hex-escaped + buf.WriteByte('\\') + buf.WriteString(hex.EncodeToString([]byte{char})) + continue + default: + // everything remaining doesn't need to be escaped + buf.WriteByte(char) } - if escaped { - input = input[0:i] + "\\" + input[i:] - i++ - } - } - if input[0] == ' ' || input[0] == '#' { - input = "\\" + input } - if input[len(input)-1] == ' ' { - input = input[0:len(input)-1] + "\\ " + return buf.String() +} + +func specialChar(char byte) bool { + switch char { + case '"', '+', ',', ';', '<', '>', '\\': + return true + default: + return false } - return input } /* diff --git a/sdk/helper/ldaputil/client_test.go b/sdk/helper/ldaputil/client_test.go index c9ae9cd4baa5..dcce9c6e0e85 100644 --- a/sdk/helper/ldaputil/client_test.go +++ b/sdk/helper/ldaputil/client_test.go @@ -1,9 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( "testing" "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // TestDialLDAP duplicates a potential panic that was @@ -26,15 +31,20 @@ func TestDialLDAP(t *testing.T) { func TestLDAPEscape(t *testing.T) { testcases := map[string]string{ - "#test": "\\#test", - "test,hello": "test\\,hello", - "test,hel+lo": "test\\,hel\\+lo", - "test\\hello": "test\\\\hello", - " test  ": "\\ test \\ ", - "": "", - "\\test": "\\\\test", - "test\\": "test\\\\", - "test\\ ": "test\\\\\\ ", + "#test": "\\#test", + "test,hello": "test\\,hello", + "test,hel+lo": "test\\,hel\\+lo", + "test\\hello": "test\\\\hello", + " test  ": "\\ test \\ ", + "": "", + `\`: `\\`, + "trailing\000": `trailing\00`, + "mid\000dle": `mid\00dle`, + "\000": `\00`, + "multiple\000\000": `multiple\00\00`, + "backslash-before-null\\\000": `backslash-before-null\\\00`, + "trailing\\": `trailing\\`, + "double-escaping\\>": `double-escaping\\\>`, } for test, answer := range testcases { @@ -85,3 +95,58 @@ func TestSIDBytesToString(t *testing.T) { } } } + +func TestClient_renderUserSearchFilter(t *testing.T) { + t.Parallel() + tests := []struct { + name string + conf *ConfigEntry + username string + want string + errContains string + }{ + { + name: "valid-default", + username: "alice", + conf: &ConfigEntry{ + UserAttr: "cn", + }, + want: "(cn=alice)", + }, + { + name: "escaped-malicious-filter", + username: "foo@example.com)((((((((((((((((((((((((((((((((((((((userPrincipalName=foo", + conf: &ConfigEntry{ + UPNDomain: "example.com", + UserFilter: "(&({{.UserAttr}}={{.Username}})({{.UserAttr}}=admin@example.com))", + }, + want: "(&(userPrincipalName=foo@example.com\\29\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28userPrincipalName=foo@example.com)(userPrincipalName=admin@example.com))", + }, + { + name: "bad-filter-unclosed-action", + username: "alice", + conf: &ConfigEntry{ + UserFilter: "hello{{range", + }, + errContains: "search failed due to template compilation error", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + c := Client{ + Logger: hclog.NewNullLogger(), + LDAP: NewLDAP(), + } + + f, err := c.RenderUserSearchFilter(tc.conf, tc.username) + if tc.errContains != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errContains) + return + } + require.NoError(t, err) + 
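// the rendered filter should be non-empty and match the expected string exactly +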
assert.NotEmpty(t, f) + assert.Equal(t, tc.want, f) + }) + } +} diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go index 43844da22b13..4f18957f8271 100644 --- a/sdk/helper/ldaputil/config.go +++ b/sdk/helper/ldaputil/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( @@ -9,12 +12,23 @@ import ( "strings" "text/template" + capldap "github.com/hashicorp/cap/ldap" "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/errwrap" + + "github.com/go-ldap/ldap/v3" ) +var ldapDerefAliasMap = map[string]int{ + "never": ldap.NeverDerefAliases, + "finding": ldap.DerefFindingBaseObj, + "searching": ldap.DerefInSearching, + "always": ldap.DerefAlways, +} + // ConfigFields returns all the config fields that can potentially be used by the LDAP client. // Not all fields will be used by every integration. func ConfigFields() map[string]*framework.FieldSchema { @@ -226,6 +240,25 @@ Default: ({{.UserAttr}}={{.Username}})`, Description: "Timeout, in seconds, for the connection when making requests against the server before returning back an error.", Default: "90s", }, + + "connection_timeout": { + Type: framework.TypeDurationSecond, + Description: "Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.", + Default: "30s", + }, + + "dereference_aliases": { + Type: framework.TypeString, + Description: "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. Defaults to 'never'.", + Default: "never", + AllowedValues: []interface{}{"never", "finding", "searching", "always"}, + }, + + "max_page_size": { + Type: framework.TypeInt, + Description: "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. Otherwise, the LDAP backend will not use the paged search control.", + Default: 0, + }, } } @@ -392,6 +425,18 @@ func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry cfg.RequestTimeout = d.Get("request_timeout").(int) } + if _, ok := d.Raw["connection_timeout"]; ok || !hadExisting { + cfg.ConnectionTimeout = d.Get("connection_timeout").(int) + } + + if _, ok := d.Raw["dereference_aliases"]; ok || !hadExisting { + cfg.DerefAliases = d.Get("dereference_aliases").(string) + } + + if _, ok := d.Raw["max_page_size"]; ok || !hadExisting { + cfg.MaximumPageSize = d.Get("max_page_size").(int) + } + return cfg, nil } @@ -418,6 +463,9 @@ type ConfigEntry struct { UseTokenGroups bool `json:"use_token_groups"` UsePre111GroupCNBehavior *bool `json:"use_pre111_group_cn_behavior"` RequestTimeout int `json:"request_timeout"` + ConnectionTimeout int `json:"connection_timeout"` // deprecated: use RequestTimeout + DerefAliases string `json:"dereference_aliases"` + MaximumPageSize int `json:"max_page_size"` // These json tags deviate from snake case because there was a past issue // where the tag was being ignored, causing it to be jsonified as "CaseSensitiveNames", etc. 
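Note: as a minimal sketch of how the three new config knobs are consumed (not part of the diff; it assumes placement inside the ldaputil package, and the field values are illustrative assumptions, not defaults):

```
package ldaputil

// Sketch only: illustrative values showing where the new fields are consumed.
func exampleNewConfigFields() {
	cfg := &ConfigEntry{
		DerefAliases:      "searching", // mapped through ldapDerefAliasMap on every SearchRequest
		MaximumPageSize:   100,         // > 0 routes group searches through SearchWithPaging
		ConnectionTimeout: 30,          // seconds; ConvertConfig below uses min(ConnectionTimeout, RequestTimeout)
		RequestTimeout:    90,
	}

	// "searching" resolves to the go-ldap DerefInSearching constant.
	_ = ldapDerefAliasMap[cfg.DerefAliases]
}
```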
@@ -455,7 +503,10 @@ func (c *ConfigEntry) PasswordlessMap() map[string]interface{} { "use_token_groups": c.UseTokenGroups, "anonymous_group_search": c.AnonymousGroupSearch, "request_timeout": c.RequestTimeout, + "connection_timeout": c.ConnectionTimeout, "username_as_alias": c.UsernameAsAlias, + "dereference_aliases": c.DerefAliases, + "max_page_size": c.MaximumPageSize, } if c.CaseSensitiveNames != nil { m["case_sensitive_names"] = *c.CaseSensitiveNames @@ -510,3 +561,55 @@ func (c *ConfigEntry) Validate() error { } return nil } + +func ConvertConfig(cfg *ConfigEntry) *capldap.ClientConfig { + // cap/ldap doesn't have a notion of connection_timeout, and uses a single timeout value for + // both the net.Dialer and ldap connection timeout. + // So take the smaller of the two values and use that as the timeout value. + minTimeout := min(cfg.ConnectionTimeout, cfg.RequestTimeout) + urls := strings.Split(cfg.Url, ",") + config := &capldap.ClientConfig{ + URLs: urls, + UserDN: cfg.UserDN, + AnonymousGroupSearch: cfg.AnonymousGroupSearch, + GroupDN: cfg.GroupDN, + GroupFilter: cfg.GroupFilter, + GroupAttr: cfg.GroupAttr, + UPNDomain: cfg.UPNDomain, + UserFilter: cfg.UserFilter, + UserAttr: cfg.UserAttr, + ClientTLSCert: cfg.ClientTLSCert, + ClientTLSKey: cfg.ClientTLSKey, + InsecureTLS: cfg.InsecureTLS, + StartTLS: cfg.StartTLS, + BindDN: cfg.BindDN, + BindPassword: cfg.BindPassword, + AllowEmptyPasswordBinds: !cfg.DenyNullBind, + DiscoverDN: cfg.DiscoverDN, + TLSMinVersion: cfg.TLSMinVersion, + TLSMaxVersion: cfg.TLSMaxVersion, + UseTokenGroups: cfg.UseTokenGroups, + RequestTimeout: minTimeout, + IncludeUserAttributes: true, + ExcludedUserAttributes: nil, + IncludeUserGroups: true, + LowerUserAttributeKeys: true, + AllowEmptyAnonymousGroupSearch: true, + MaximumPageSize: cfg.MaximumPageSize, + DerefAliases: cfg.DerefAliases, + DeprecatedVaultPre111GroupCNBehavior: cfg.UsePre111GroupCNBehavior, + } + + if cfg.Certificate != "" { + config.Certificates = []string{cfg.Certificate} + } + + return config +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/sdk/helper/ldaputil/config_test.go b/sdk/helper/ldaputil/config_test.go index 32edb5dffaad..b7fd22ccbb2d 100644 --- a/sdk/helper/ldaputil/config_test.go +++ b/sdk/helper/ldaputil/config_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( @@ -71,15 +74,16 @@ func testConfig(t *testing.T) *ConfigEntry { t.Helper() return &ConfigEntry{ - Url: "ldap://138.91.247.105", - UserDN: "example,com", - BindDN: "kitty", - BindPassword: "cats", - TLSMaxVersion: "tls12", - TLSMinVersion: "tls12", - RequestTimeout: 30, - ClientTLSCert: "", - ClientTLSKey: "", + Url: "ldap://138.91.247.105", + UserDN: "example,com", + BindDN: "kitty", + BindPassword: "cats", + TLSMaxVersion: "tls12", + TLSMinVersion: "tls12", + RequestTimeout: 30, + ConnectionTimeout: 15, + ClientTLSCert: "", + ClientTLSKey: "", } } @@ -138,6 +142,7 @@ var jsonConfig = []byte(`{ "tls_max_version": "tls12", "tls_min_version": "tls12", "request_timeout": 30, + "connection_timeout": 15, "ClientTLSCert": "", "ClientTLSKey": "" }`) @@ -168,6 +173,9 @@ var jsonConfigDefault = []byte(` "use_pre111_group_cn_behavior": null, "username_as_alias": false, "request_timeout": 90, + "connection_timeout": 30, + "dereference_aliases": "never", + "max_page_size": 0, "CaseSensitiveNames": false, "ClientTLSCert": "", "ClientTLSKey": "" diff --git a/sdk/helper/ldaputil/connection.go b/sdk/helper/ldaputil/connection.go index 71c83f2f9b3a..2e4ab54ee1ef 100644 --- a/sdk/helper/ldaputil/connection.go +++ b/sdk/helper/ldaputil/connection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( @@ -11,7 +14,7 @@ import ( // but through an interface. type Connection interface { Bind(username, password string) error - Close() + Close() error Add(addRequest *ldap.AddRequest) error Modify(modifyRequest *ldap.ModifyRequest) error Del(delRequest *ldap.DelRequest) error diff --git a/sdk/helper/ldaputil/ldap.go b/sdk/helper/ldaputil/ldap.go index 82ace01773cc..bdf746e5c8cd 100644 --- a/sdk/helper/ldaputil/ldap.go +++ b/sdk/helper/ldaputil/ldap.go @@ -1,8 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package ldaputil import ( - "crypto/tls" - "github.com/go-ldap/ldap/v3" ) @@ -13,16 +14,11 @@ func NewLDAP() LDAP { // LDAP provides ldap functionality, but through an interface // rather than statically. This allows faking it for tests. type LDAP interface { - Dial(network, addr string) (Connection, error) - DialTLS(network, addr string, config *tls.Config) (Connection, error) + DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) } type ldapIfc struct{} -func (l *ldapIfc) Dial(network, addr string) (Connection, error) { - return ldap.Dial(network, addr) -} - -func (l *ldapIfc) DialTLS(network, addr string, config *tls.Config) (Connection, error) { - return ldap.DialTLS(network, addr, config) +func (l *ldapIfc) DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) { + return ldap.DialURL(addr, opts...) } diff --git a/sdk/helper/license/feature.go b/sdk/helper/license/feature.go index c7c000a58a30..b42fcd1fc1a1 100644 --- a/sdk/helper/license/feature.go +++ b/sdk/helper/license/feature.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package license // Features is a bitmask of feature flags diff --git a/sdk/helper/locksutil/locks.go b/sdk/helper/locksutil/locks.go index 35ffcf739d9d..c7538b63b4f7 100644 --- a/sdk/helper/locksutil/locks.go +++ b/sdk/helper/locksutil/locks.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package locksutil import ( diff --git a/sdk/helper/locksutil/locks_test.go b/sdk/helper/locksutil/locks_test.go index 991664463777..954a46349ea7 100644 --- a/sdk/helper/locksutil/locks_test.go +++ b/sdk/helper/locksutil/locks_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package locksutil import "testing" diff --git a/sdk/helper/logging/logging.go b/sdk/helper/logging/logging.go index 25de5a781316..37dcefa47783 100644 --- a/sdk/helper/logging/logging.go +++ b/sdk/helper/logging/logging.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logging import ( diff --git a/sdk/helper/logging/logging_test.go b/sdk/helper/logging/logging_test.go index 91e204b097ed..16075524b0b8 100644 --- a/sdk/helper/logging/logging_test.go +++ b/sdk/helper/logging/logging_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logging import ( diff --git a/sdk/helper/mlock/mlock.go b/sdk/helper/mlock/mlock.go index 1bbf8a0bbbf8..5820d15af3c1 100644 --- a/sdk/helper/mlock/mlock.go +++ b/sdk/helper/mlock/mlock.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package mlock diff --git a/sdk/helper/ocsp/client.go b/sdk/helper/ocsp/client.go index e54fdeface46..8fba050cb203 100644 --- a/sdk/helper/ocsp/client.go +++ b/sdk/helper/ocsp/client.go @@ -24,12 +24,15 @@ import ( "time" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-retryablehttp" lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/sdk/helper/certutil" "golang.org/x/crypto/ocsp" ) +//go:generate enumer -type=FailOpenMode -trimprefix=FailOpen + // FailOpenMode is OCSP fail open mode. FailOpenTrue by default and may // set to ocspModeFailClosed for fail closed mode type FailOpenMode uint32 @@ -75,6 +78,15 @@ const ( cacheExpire = float64(24 * 60 * 60) ) +// ErrOcspIssuerVerification indicates that an error occurred while verifying the issuer of an OCSP response +type ErrOcspIssuerVerification struct { + Err error +} + +func (e *ErrOcspIssuerVerification) Error() string { + return fmt.Sprintf("ocsp response verification error: %v", e.Err) +} + type ocspCachedResponse struct { time float64 producedAt float64 @@ -161,9 +173,27 @@ func (c *Client) getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto return crypto.SHA1 } -// isInValidityRange checks the validity -func isInValidityRange(currTime, nextUpdate time.Time) bool { - return !nextUpdate.IsZero() && !currTime.After(nextUpdate) +// isInValidityRange checks the validity times of the OCSP response, making sure +// that currTime falls within the thisUpdate and nextUpdate bounds +func isInValidityRange(currTime time.Time, ocspRes *ocsp.Response) bool { + thisUpdate := ocspRes.ThisUpdate + + // If the thisUpdate value in the OCSP response wasn't set, or is later than the current time, fail this check + if thisUpdate.IsZero() || thisUpdate.After(currTime) { + return false + } + + nextUpdate := ocspRes.NextUpdate + if nextUpdate.IsZero() { + // We don't have a nextUpdate field set, assume we are okay. 
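+ // (RFC 6960 4.2.2.1: a missing nextUpdate means newer revocation + // information is available all the time, so there is no expiry to enforce here.)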
+ return true + } + + if currTime.After(nextUpdate) || thisUpdate.After(nextUpdate) { + return false + } + + return true } func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) { @@ -208,7 +238,7 @@ func (c *Client) encodeCertIDKey(certIDKeyBase64 string) (*certIDKey, error) { }, nil } -func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) (*ocspStatus, error) { +func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate, config *VerifyConfig) (*ocspStatus, error) { c.ocspResponseCacheLock.RLock() var cacheValue *ocspCachedResponse v, ok := c.ocspResponseCache.Get(*encodedCertID) @@ -217,7 +247,7 @@ func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issue } c.ocspResponseCacheLock.RUnlock() - status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer) + status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer, config) if err != nil { return nil, err } @@ -234,18 +264,25 @@ func (c *Client) deleteOCSPCache(encodedCertID *certIDKey) { c.ocspResponseCacheLock.Unlock() } -func validateOCSP(ocspRes *ocsp.Response) (*ocspStatus, error) { +func validateOCSP(conf *VerifyConfig, ocspRes *ocsp.Response) (*ocspStatus, error) { curTime := time.Now() if ocspRes == nil { return nil, errors.New("OCSP Response is nil") } - if !isInValidityRange(curTime, ocspRes.NextUpdate) { + if !isInValidityRange(curTime, ocspRes) { return &ocspStatus{ code: ocspInvalidValidity, err: fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate), }, nil } + + if conf.OcspThisUpdateMaxAge > 0 && curTime.Sub(ocspRes.ThisUpdate) > conf.OcspThisUpdateMaxAge { + return &ocspStatus{ + code: ocspInvalidValidity, + err: fmt.Errorf("invalid validity: thisUpdate: %v is greater than max age: %s", ocspRes.ThisUpdate, conf.OcspThisUpdateMaxAge), + }, nil + } return returnOCSPStatus(ocspRes), nil } @@ -282,13 +319,10 @@ func (c *Client) retryOCSP( ocspHost *url.URL, headers map[string]string, reqBody []byte, - issuer *x509.Certificate, + subject, issuer *x509.Certificate, -) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, err error) { - origHost := *ocspHost +) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { doRequest := func(request *retryablehttp.Request) (*http.Response, error) { - if err != nil { - return nil, err - } if request != nil { request = request.WithContext(ctx) for k, v := range headers { @@ -303,48 +337,184 @@ func (c *Client) retryOCSP( return res, err } - ocspHost.Path = ocspHost.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) - var res *http.Response - request, err := req("GET", ocspHost.String(), nil) - if err != nil { - return nil, nil, nil, err - } - if res, err = doRequest(request); err != nil { - return nil, nil, nil, err - } else { - defer res.Body.Close() - } - if res.StatusCode == http.StatusMethodNotAllowed { - request, err := req("POST", origHost.String(), bytes.NewBuffer(reqBody)) + for _, method := range []string{"GET", "POST"} { + reqUrl := *ocspHost + var body []byte + + switch method { + case "GET": + reqUrl.Path = reqUrl.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) + case "POST": + body = reqBody + default: + // Programming error: the switch above only produces GET or POST; + // all request/system errors are appended to the multierror instead. 
+ return nil, nil, nil, fmt.Errorf("unknown request method: %v", method) + } + + var res *http.Response + request, err := req(method, reqUrl.String(), bytes.NewBuffer(body)) if err != nil { - return nil, nil, nil, err + err = fmt.Errorf("error creating %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue } - if res, err := doRequest(request); err != nil { - return nil, nil, nil, err + if res, err = doRequest(request); err != nil { + err = fmt.Errorf("error doing %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue } else { defer res.Body.Close() } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("HTTP code is not OK on %v request. %v: %v", method, res.StatusCode, res.Status) + retErr = multierror.Append(retErr, err) + continue + } + + ocspResBytes, err = io.ReadAll(res.Body) + if err != nil { + err = fmt.Errorf("error reading %v request body: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Reading an OCSP response shouldn't be fatal. A misconfigured + // endpoint might return invalid results for e.g., GET but return + // valid results for POST on retry. This could happen if e.g., the + // server responds with JSON. + ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) + if err != nil { + err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + if err := validateOCSPParsedResponse(ocspRes, subject, issuer); err != nil { + err = fmt.Errorf("error validating %v OCSP response: %w", method, err) + + if IsOcspVerificationError(err) { + // We want to immediately give up on a verification error to a response + // and inform the user something isn't correct + return nil, nil, nil, err + } + + retErr = multierror.Append(retErr, err) + // Clear the response out as we can't trust it. + ocspRes = nil + continue + } + + // While we haven't validated the signature on the OCSP response, we + // got what we presume is a definitive answer and simply changing + // methods will likely not help us in that regard. Use this status + // to return without retrying another method, when it looks definitive. + // + // We don't accept ocsp.Unknown here: presumably, we could've hit a CDN + // with static mapping of request->responses, with a default "unknown" + // handler for everything else. By retrying here, we use POST, which + // could hit a live OCSP server with fresher data than the cached CDN. + if ocspRes.Status == ocsp.Good || ocspRes.Status == ocsp.Revoked { + break + } + + // Here, we didn't have a valid response. Even though we didn't get an + // error, we should inform the user that this (valid-looking) response + // wasn't utilized. + err = fmt.Errorf("fetched %v OCSP response of status %v; wanted either good (%v) or revoked (%v)", method, ocspRes.Status, ocsp.Good, ocsp.Revoked) + retErr = multierror.Append(retErr, err) } - if res.StatusCode != http.StatusOK { - return nil, nil, nil, fmt.Errorf("HTTP code is not OK. %v: %v", res.StatusCode, res.Status) + + if ocspRes != nil && ocspResBytes != nil { + // Clear retErr, because we have one parseable-but-maybe-not-quite-correct + // OCSP response. 
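+ // (any non-nil ocspRes here already passed validateOCSPParsedResponse above)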
+ retErr = nil + ocspS = &ocspStatus{ + code: ocspSuccess, + } } - ocspResBytes, err = io.ReadAll(res.Body) - if err != nil { - return nil, nil, nil, err + + return +} + +func IsOcspVerificationError(err error) bool { + errOcspIssuer := &ErrOcspIssuerVerification{} + return errors.As(err, &errOcspIssuer) +} + +func validateOCSPParsedResponse(ocspRes *ocsp.Response, subject, issuer *x509.Certificate) error { + // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse + // because Go's library does the wrong thing. + // + // Here, we lack a full chain, but we know we trust the parent issuer, + // so if the Go library incorrectly discards useful certificates, we + // likely cannot verify this without passing through the full chain + // back to the root. + // + // Instead, take one of two paths: 1. if there is no certificate in + // the ocspRes, verify the OCSP response directly with our trusted + // issuer certificate, or 2. if there is a certificate, either verify + // it directly matches our trusted issuer certificate, or verify it + // is signed by our trusted issuer certificate. + // + // See also: https://github.com/golang/go/issues/59641 + // + // This addresses the !!unsafe!! behavior above. + if ocspRes.Certificate == nil { + if err := ocspRes.CheckSignatureFrom(issuer); err != nil { + return &ErrOcspIssuerVerification{fmt.Errorf("error directly verifying signature: %w", err)} + } + } else { + // Because we have at least one certificate here, we know that + // Go's ocsp library verified the signature from this certificate + // onto the response and it was valid. Now we need to know we trust + // this certificate. There are two ways we can do this: + // + // 1. Via confirming issuer == ocspRes.Certificate, or + // 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer). + if !bytes.Equal(issuer.Raw, ocspRes.Certificate.Raw) { + // 1 must not hold, so 2 holds; verify the signature. + if err := ocspRes.Certificate.CheckSignatureFrom(issuer); err != nil { + return &ErrOcspIssuerVerification{fmt.Errorf("error checking chain of trust against %v: %w", issuer.Subject.String(), err)} + } + + // Verify the OCSP responder certificate is still valid and + // contains the required EKU since it is a delegated OCSP + // responder certificate. 
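+ // (RFC 6960 4.2.2.2: delegated responders must carry the id-kp-OCSPSigning EKU)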
+ if ocspRes.Certificate.NotAfter.Before(time.Now()) { + return &ErrOcspIssuerVerification{fmt.Errorf("error checking delegated OCSP responder: certificate has expired")} + } + haveEKU := false + for _, ku := range ocspRes.Certificate.ExtKeyUsage { + if ku == x509.ExtKeyUsageOCSPSigning { + haveEKU = true + break + } + } + if !haveEKU { + return &ErrOcspIssuerVerification{fmt.Errorf("error checking delegated OCSP responder: certificate lacks the OCSP Signing EKU")} + } + } } - ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) - if err != nil { - return nil, nil, nil, err + + // Verify the response was for our original subject + if ocspRes.SerialNumber == nil || subject.SerialNumber == nil { + return &ErrOcspIssuerVerification{fmt.Errorf("OCSP response or cert did not contain a serial number")} + } + if ocspRes.SerialNumber.Cmp(subject.SerialNumber) != 0 { + return &ErrOcspIssuerVerification{fmt.Errorf( + "OCSP response serial number %s did not match the leaf certificate serial number %s", + certutil.GetHexFormatted(ocspRes.SerialNumber.Bytes(), ":"), + certutil.GetHexFormatted(subject.SerialNumber.Bytes(), ":"))} } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspSuccess, - }, nil + return nil } // GetRevocationStatus checks the certificate revocation status for subject using issuer certificate. func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) { - status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer) + status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer, conf) if err != nil { return nil, err } @@ -385,15 +555,16 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509. timeout := defaultOCSPResponderTimeout ocspClient := retryablehttp.NewClient() + ocspClient.RetryMax = conf.OcspMaxRetries ocspClient.HTTPClient.Timeout = timeout ocspClient.HTTPClient.Transport = newInsecureOcspTransport(conf.ExtraCas) - doRequest := func() error { + doRequest := func(i int) error { if conf.QueryAllServers { defer wg.Done() } ocspRes, _, ocspS, err := c.retryOCSP( - ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, issuer) + ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, subject, issuer) ocspResponses[i] = ocspRes if err != nil { errors[i] = err @@ -404,21 +575,26 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509. return nil } - ret, err := validateOCSP(ocspRes) + ret, err := validateOCSP(conf, ocspRes) if err != nil { errors[i] = err return err } if isValidOCSPStatus(ret.code) { ocspStatuses[i] = ret + } else if ret.err != nil { + // This check needs to occur after the isValidOCSPStatus check, as the unknown + // status also sets an err value within ret. + errors[i] = ret.err + return ret.err } return nil } if conf.QueryAllServers { wg.Add(1) - go doRequest() + go doRequest(i) } else { - err = doRequest() + err = doRequest(i) if err == nil { break } @@ -433,6 +609,9 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509. var firstError error for i := range ocspHosts { if errors[i] != nil { + if IsOcspVerificationError(errors[i]) { + return nil, errors[i] + } if firstError == nil { firstError = errors[i] } @@ -458,15 +637,33 @@ func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509. 
} } + // If querying all servers is enabled and we have an error from one host, we can't trust + // a good status from the others, since we can't confirm they would have returned the + // same response; we do still allow revoked responses through + if conf.QueryAllServers && firstError != nil && (ret != nil && ret.code == ocspStatusGood) { + return nil, fmt.Errorf("encountered an error on a server, "+ + "ignoring good response status as ocsp_query_all_servers is set to true: %w", firstError) + } + // If no server reported the cert revoked, but we did have an error, report it if (ret == nil || ret.code == ocspStatusUnknown) && firstError != nil { return nil, firstError } - // otherwise ret should contain a response for the overall request + // An extra safety in case ret and firstError are both nil + if ret == nil { + return nil, fmt.Errorf("failed to extract a known response code or error from the OCSP server") + } + // otherwise ret should contain a response for the overall request if !isValidOCSPStatus(ret.code) { return ret, nil } + + if ocspRes.NextUpdate.IsZero() { + // We should not cache responses with no NextUpdate value + return ret, nil + } + v := ocspCachedResponse{ status: ret.code, time: float64(time.Now().UTC().Unix()), @@ -487,11 +684,13 @@ func isValidOCSPStatus(status ocspStatusCode) bool { } type VerifyConfig struct { - OcspEnabled bool - ExtraCas []*x509.Certificate - OcspServersOverride []string - OcspFailureMode FailOpenMode - QueryAllServers bool + OcspEnabled bool + ExtraCas []*x509.Certificate + OcspServersOverride []string + OcspFailureMode FailOpenMode + QueryAllServers bool + OcspThisUpdateMaxAge time.Duration + OcspMaxRetries int } // VerifyLeafCertificate verifies just the subject against its direct issuer @@ -577,12 +776,12 @@ func (c *Client) canEarlyExitForOCSP(results []*ocspStatus, chainSize int, conf return nil } -func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) (bool, error) { +func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate, config *VerifyConfig) (bool, error) { n := len(verifiedChains) - 1 for j := 0; j < n; j++ { subject := verifiedChains[j] issuer := verifiedChains[j+1] - status, _, _, err := c.validateWithCache(subject, issuer) + status, _, _, err := c.validateWithCache(subject, issuer, config) if err != nil { return false, err } @@ -593,7 +792,7 @@ func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Cert return true, nil } -func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey, error) { +func (c *Client) validateWithCache(subject, issuer *x509.Certificate, config *VerifyConfig) (*ocspStatus, []byte, *certIDKey, error) { ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) if err != nil { return nil, nil, nil, fmt.Errorf("failed to create OCSP request from the certificates: %v", err) } @@ -602,7 +801,7 @@ func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStat if ocspS.code != ocspSuccess { return nil, nil, nil, fmt.Errorf("failed to extract CertID from OCSP Request: %v", err) } - status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer) + status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer, config) if err != nil { return nil, nil, nil, err } @@ -610,7 +809,7 @@ func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStat } func (c *Client) GetAllRevocationStatus(ctx context.Context, verifiedChains 
[]*x509.Certificate, conf *VerifyConfig) ([]*ocspStatus, error) { - _, err := c.validateWithCacheForAllCertificates(verifiedChains) + _, err := c.validateWithCacheForAllCertificates(verifiedChains, conf) if err != nil { return nil, err } @@ -635,11 +834,11 @@ func (c *Client) verifyPeerCertificateSerial(conf *VerifyConfig) func(_ [][]byte } } -func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse) (*ocspStatus, error) { - return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil) +func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse, conf *VerifyConfig) (*ocspStatus, error) { + return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil, conf) } -func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate) (*ocspStatus, error) { +func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) { subjectName := "Unknown" if subject != nil { subjectName = subject.Subject.CommonName @@ -661,14 +860,29 @@ func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, s }, nil } - return validateOCSP(&ocsp.Response{ + sdkOcspStatus := internalStatusCodeToSDK(cacheValue.status) + + return validateOCSP(conf, &ocsp.Response{ ProducedAt: time.Unix(int64(cacheValue.producedAt), 0).UTC(), ThisUpdate: time.Unix(int64(cacheValue.thisUpdate), 0).UTC(), NextUpdate: time.Unix(int64(cacheValue.nextUpdate), 0).UTC(), - Status: int(cacheValue.status), + Status: sdkOcspStatus, }) } +func internalStatusCodeToSDK(internalStatusCode ocspStatusCode) int { + switch internalStatusCode { + case ocspStatusGood: + return ocsp.Good + case ocspStatusRevoked: + return ocsp.Revoked + case ocspStatusUnknown: + return ocsp.Unknown + default: + return int(internalStatusCode) + } +} + /* // writeOCSPCache writes a OCSP Response cache func (c *Client) writeOCSPCache(ctx context.Context, storage logical.Storage) error { diff --git a/sdk/helper/ocsp/failopenmode_enumer.go b/sdk/helper/ocsp/failopenmode_enumer.go new file mode 100644 index 000000000000..d0cf9f5e9240 --- /dev/null +++ b/sdk/helper/ocsp/failopenmode_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=FailOpenMode -trimprefix=FailOpen"; DO NOT EDIT. + +package ocsp + +import ( + "fmt" +) + +const _FailOpenModeName = "ocspFailOpenNotSetTrueFalse" + +var _FailOpenModeIndex = [...]uint8{0, 18, 22, 27} + +func (i FailOpenMode) String() string { + if i >= FailOpenMode(len(_FailOpenModeIndex)-1) { + return fmt.Sprintf("FailOpenMode(%d)", i) + } + return _FailOpenModeName[_FailOpenModeIndex[i]:_FailOpenModeIndex[i+1]] +} + +var _FailOpenModeValues = []FailOpenMode{0, 1, 2} + +var _FailOpenModeNameToValueMap = map[string]FailOpenMode{ + _FailOpenModeName[0:18]: 0, + _FailOpenModeName[18:22]: 1, + _FailOpenModeName[22:27]: 2, +} + +// FailOpenModeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func FailOpenModeString(s string) (FailOpenMode, error) { + if val, ok := _FailOpenModeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to FailOpenMode values", s) +} + +// FailOpenModeValues returns all values of the enum +func FailOpenModeValues() []FailOpenMode { + return _FailOpenModeValues +} + +// IsAFailOpenMode returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i FailOpenMode) IsAFailOpenMode() bool { + for _, v := range _FailOpenModeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/ocsp/ocsp_test.go b/sdk/helper/ocsp/ocsp_test.go index 2f3f1976d2a8..326a5b233647 100644 --- a/sdk/helper/ocsp/ocsp_test.go +++ b/sdk/helper/ocsp/ocsp_test.go @@ -6,21 +6,29 @@ import ( "bytes" "context" "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" "errors" "fmt" "io" "io/ioutil" + "math/big" "net" "net/http" + "net/http/httptest" "net/url" + "sync/atomic" "testing" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-retryablehttp" lru "github.com/hashicorp/golang-lru" + "github.com/stretchr/testify/require" "golang.org/x/crypto/ocsp" ) @@ -161,6 +169,7 @@ func TestUnitEncodeCertIDGood(t *testing.T) { } func TestUnitCheckOCSPResponseCache(t *testing.T) { + conf := &VerifyConfig{OcspEnabled: true} c := New(testLogFactory, 10) dummyKey0 := certIDKey{ NameHash: "dummy0", @@ -176,7 +185,7 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { c.ocspResponseCache.Add(dummyKey0, &ocspCachedResponse{time: currentTime}) subject := &x509.Certificate{} issuer := &x509.Certificate{} - ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer) + ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer, conf) if err != nil { t.Fatal(err) } @@ -185,7 +194,7 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { } // old timestamp c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(1395054952)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer, conf) if err != nil { t.Fatal(err) } @@ -195,15 +204,385 @@ func TestUnitCheckOCSPResponseCache(t *testing.T) { // invalid validity c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(currentTime - 1000)}) - ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil, conf) if err == nil && isValidOCSPStatus(ost.code) { t.Fatalf("should have failed.") } } +// TestUnitValidOCSPResponse validates various combinations of acceptable OCSP responses +func TestUnitValidOCSPResponse(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + + type tests struct { + name string + ocspRes ocsp.Response + expectedStatus ocspStatusCode + } + + now := time.Now() + ctx := context.Background() + + tt := []tests{ + { + name: "normal", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + expectedStatus: ocspStatusGood, + }, + { + name: "no-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + Status: ocsp.Good, + }, + expectedStatus: ocspStatusGood, + }, + { + name: "revoked-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + Status: ocsp.Revoked, + }, + expectedStatus: ocspStatusRevoked, + }, + { + name: "revoked-update-with-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(1 * time.Hour), + Status: ocsp.Revoked, + }, + expectedStatus: ocspStatusRevoked, + }, + } + for _, tc := range tt { + for _, maxAge := range []time.Duration{time.Duration(0), time.Duration(2 * time.Hour)} { + 
t.Run(tc.name+"-max-age-"+maxAge.String(), func(t *testing.T) { + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := buildOcspResponse(t, rootCa, rootCaKey, tc.ocspRes) + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + OcspFailureMode: FailOpenFalse, + QueryAllServers: false, + OcspThisUpdateMaxAge: maxAge, + } + + status, err := client.GetRevocationStatus(ctx, leafCert, rootCa, config) + require.NoError(t, err, "ocsp response should have been considered valid") + require.NoError(t, status.err, "ocsp status should not contain an error") + require.Equal(t, &ocspStatus{code: tc.expectedStatus}, status) + }) + } + } +} + +// TestUnitBadOCSPResponses verifies that we fail properly on a bunch of different +// OCSP response conditions +func TestUnitBadOCSPResponses(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + rootCaKey2, rootCa2, _ := createCaLeafCerts(t) + + type tests struct { + name string + ocspRes ocsp.Response + maxAge time.Duration + ca *x509.Certificate + caKey *ecdsa.PrivateKey + errContains string + } + + now := time.Now() + ctx := context.Background() + + tt := []tests{ + { + name: "bad-signing-issuer", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + ca: rootCa2, + caKey: rootCaKey2, + errContains: "error directly verifying signature", + }, + { + name: "incorrect-serial-number", + ocspRes: ocsp.Response{ + SerialNumber: big.NewInt(1000), + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + ca: rootCa, + caKey: rootCaKey, + errContains: "did not match the leaf certificate serial number", + }, + { + name: "expired-next-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(-30 * time.Minute), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "this-update-in-future", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(1 * time.Hour), + NextUpdate: now.Add(2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "next-update-before-this-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(-2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "missing-this-update", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + NextUpdate: now.Add(2 * time.Hour), + Status: ocsp.Good, + }, + errContains: "invalid validity", + }, + { + name: "unknown-status", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Unknown, + }, + errContains: "OCSP status unknown", + }, + { + name: "over-max-age", + ocspRes: ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(30 * time.Minute), + Status: ocsp.Good, + }, + maxAge: 10 * time.Minute, + errContains: "is greater than max age", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + 
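// serve tc.ocspRes signed by the default root CA unless the case overrides the signer +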
ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + useCa := rootCa + useCaKey := rootCaKey + if tc.ca != nil { + useCa = tc.ca + } + if tc.caKey != nil { + useCaKey = tc.caKey + } + response := buildOcspResponse(t, useCa, useCaKey, tc.ocspRes) + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + OcspFailureMode: FailOpenFalse, + QueryAllServers: false, + OcspThisUpdateMaxAge: tc.maxAge, + } + + status, err := client.GetRevocationStatus(ctx, leafCert, rootCa, config) + if err == nil && status == nil || (status != nil && status.err == nil) { + t.Fatalf("expected an error, got none") + } + if err != nil { + require.ErrorContains(t, err, tc.errContains, + "Expected error, got response: %v, %v", status, err) + } + if status != nil && status.err != nil { + require.ErrorContains(t, status.err, tc.errContains, + "Expected error, got response: %v, %v", status, err) + } + }) + } +} + +// TestUnitZeroNextUpdateAreNotCached verifies that we do not cache responses +// with no NextUpdate field set, per RFC 6960 4.2.2.1: +// "If nextUpdate is not set, the responder is indicating that newer +// revocation information is available all the time." +func TestUnitZeroNextUpdateAreNotCached(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + numQueries := &atomic.Uint32{} + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + numQueries.Add(1) + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + Status: ocsp.Good, + } + response := buildOcspResponse(t, rootCa, rootCaKey, ocspRes) + _, _ = w.Write(response) + }) + ts := httptest.NewServer(ocspHandler) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + } + + _, err := client.GetRevocationStatus(context.Background(), leafCert, rootCa, config) + require.NoError(t, err, "Failed fetching revocation status") + + _, err = client.GetRevocationStatus(context.Background(), leafCert, rootCa, config) + require.NoError(t, err, "Failed fetching revocation status second time") + + require.Equal(t, uint32(2), numQueries.Load()) +} + +// TestUnitResponsesAreCached verifies that the OCSP responses are properly cached when +// querying for the same leaf certificates +func TestUnitResponsesAreCached(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + numQueries := &atomic.Uint32{} + ocspHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + numQueries.Add(1) + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: leafCert.SerialNumber, + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(1 * time.Hour), + Status: ocsp.Good, + } + response := buildOcspResponse(t, rootCa, rootCaKey, ocspRes) + _, _ = w.Write(response) + }) + ts1 := httptest.NewServer(ocspHandler) + ts2 := httptest.NewServer(ocspHandler) + defer ts1.Close() + defer ts2.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts1.URL, ts2.URL}, + QueryAllServers: 
true, + } + + _, err := client.GetRevocationStatus(context.Background(), leafCert, rootCa, config) + require.NoError(t, err, "Failed fetching revocation status") + // Make sure that we queried both servers and not the cache + require.Equal(t, uint32(2), numQueries.Load()) + + // These queries should be cached and should not influence our counter + _, err = client.GetRevocationStatus(context.Background(), leafCert, rootCa, config) + require.NoError(t, err, "Failed fetching revocation status second time") + + require.Equal(t, uint32(2), numQueries.Load()) +} + +func buildOcspResponse(t *testing.T, ca *x509.Certificate, caKey *ecdsa.PrivateKey, ocspRes ocsp.Response) []byte { + response, err := ocsp.CreateResponse(ca, ca, ocspRes, caKey) + if err != nil { + t.Fatalf("failed generating OCSP response: %v", err) + } + return response +} + +func createCaLeafCerts(t *testing.T) (*ecdsa.PrivateKey, *x509.Certificate, *x509.Certificate) { + rootCaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating root key for CA") + + // Build a self-signed root CA certificate to sign the OCSP responses and the leaf + cr := &x509.Certificate{ + Subject: pkix.Name{CommonName: "Root Cert"}, + SerialNumber: big.NewInt(1), + IsCA: true, + BasicConstraintsValid: true, + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}, + } + rootCaBytes, err := x509.CreateCertificate(rand.Reader, cr, cr, &rootCaKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating root ca") + + rootCa, err := x509.ParseCertificate(rootCaBytes) + require.NoError(t, err, "failed parsing root ca") + + leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf key") + + cr = &x509.Certificate{ + Subject: pkix.Name{CommonName: "Leaf Cert"}, + SerialNumber: big.NewInt(2), + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } + leafCertBytes, err := x509.CreateCertificate(rand.Reader, cr, rootCa, &leafKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating leaf cert") + + leafCert, err := x509.ParseCertificate(leafCertBytes) + require.NoError(t, err, "failed parsing leaf cert") + return rootCaKey, rootCa, leafCert +} + func TestUnitValidateOCSP(t *testing.T) { + conf := &VerifyConfig{OcspEnabled: true} ocspRes := &ocsp.Response{} - ost, err := validateOCSP(ocspRes) + ost, err := validateOCSP(conf, ocspRes) if err == nil && isValidOCSPStatus(ost.code) { t.Fatalf("should have failed.") } @@ -212,7 +591,7 @@ func TestUnitValidateOCSP(t *testing.T) { ocspRes.ThisUpdate = currentTime.Add(-2 * time.Hour) ocspRes.NextUpdate = currentTime.Add(2 * time.Hour) ocspRes.Status = ocsp.Revoked - ost, err = validateOCSP(ocspRes) + ost, err = validateOCSP(conf, ocspRes) if err != nil { t.Fatal(err) } @@ -221,7 +600,7 @@ func TestUnitValidateOCSP(t *testing.T) { ocspRes.Status = ocsp.Good - ost, err = validateOCSP(ocspRes) + ost, err = validateOCSP(conf, ocspRes) if err != nil { t.Fatal(err) } @@ -230,7 +609,7 @@ t.Fatalf("should have success. 
expected: %v, got: %v", ocspStatusGood, ost.code) } ocspRes.Status = ocsp.Unknown - ost, err = validateOCSP(ocspRes) + ost, err = validateOCSP(conf, ocspRes) if err != nil { t.Fatal(err) } @@ -238,7 +617,7 @@ func TestUnitValidateOCSP(t *testing.T) { t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusUnknown, ost.code) } ocspRes.Status = ocsp.ServerFailed - ost, err = validateOCSP(ocspRes) + ost, err = validateOCSP(conf, ocspRes) if err != nil { t.Fatal(err) } @@ -299,7 +678,7 @@ func TestOCSPRetry(t *testing.T) { context.TODO(), client, fakeRequestFunc, dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) + make(map[string]string), []byte{0}, certs[0], certs[len(certs)-1]) if err == nil { fmt.Printf("should fail: %v, %v, %v\n", res, b, st) } @@ -314,7 +693,7 @@ func TestOCSPRetry(t *testing.T) { context.TODO(), client, fakeRequestFunc, dummyOCSPHost, - make(map[string]string), []byte{0}, certs[len(certs)-1]) + make(map[string]string), []byte{0}, certs[0], certs[len(certs)-1]) if err == nil { fmt.Printf("should fail: %v, %v, %v\n", res, b, st) } diff --git a/sdk/helper/parseutil/parseutil.go b/sdk/helper/parseutil/parseutil.go index eda539424f2d..5bea8909de14 100644 --- a/sdk/helper/parseutil/parseutil.go +++ b/sdk/helper/parseutil/parseutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package parseutil diff --git a/sdk/helper/password/password.go b/sdk/helper/password/password.go index 84e6b594d55d..931a72cc8bc3 100644 --- a/sdk/helper/password/password.go +++ b/sdk/helper/password/password.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package password diff --git a/sdk/helper/pathmanager/pathmanager.go b/sdk/helper/pathmanager/pathmanager.go index e0e39445b2a5..0d2d60070f78 100644 --- a/sdk/helper/pathmanager/pathmanager.go +++ b/sdk/helper/pathmanager/pathmanager.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pathmanager import ( diff --git a/sdk/helper/pathmanager/pathmanager_test.go b/sdk/helper/pathmanager/pathmanager_test.go index 7d6207b625e6..515d830324f1 100644 --- a/sdk/helper/pathmanager/pathmanager_test.go +++ b/sdk/helper/pathmanager/pathmanager_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pathmanager import ( diff --git a/sdk/helper/pluginidentityutil/errors.go b/sdk/helper/pluginidentityutil/errors.go new file mode 100644 index 000000000000..92a6ed6f5d26 --- /dev/null +++ b/sdk/helper/pluginidentityutil/errors.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import "errors" + +var ErrPluginWorkloadIdentityUnsupported = errors.New("plugin workload identity not supported in Vault community edition") diff --git a/sdk/helper/pluginidentityutil/fields.go b/sdk/helper/pluginidentityutil/fields.go new file mode 100644 index 000000000000..3d97537ecc94 --- /dev/null +++ b/sdk/helper/pluginidentityutil/fields.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import ( + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" +) + +// PluginIdentityTokenParams contains a set of common parameters that plugins +// can use for setting plugin identity token behavior. +type PluginIdentityTokenParams struct { + // IdentityTokenTTL is the duration that tokens will be valid for + IdentityTokenTTL time.Duration `json:"identity_token_ttl"` + // IdentityTokenAudience identifies the recipient of the token + IdentityTokenAudience string `json:"identity_token_audience"` +} + +// ParsePluginIdentityTokenFields provides common field parsing to embedding structs. +func (p *PluginIdentityTokenParams) ParsePluginIdentityTokenFields(d *framework.FieldData) error { + if tokenTTLRaw, ok := d.GetOk("identity_token_ttl"); ok { + p.IdentityTokenTTL = time.Duration(tokenTTLRaw.(int)) * time.Second + } + + if tokenAudienceRaw, ok := d.GetOk("identity_token_audience"); ok { + p.IdentityTokenAudience = tokenAudienceRaw.(string) + } + + return nil +} + +// PopulatePluginIdentityTokenData adds PluginIdentityTokenParams info into the given map. +func (p *PluginIdentityTokenParams) PopulatePluginIdentityTokenData(m map[string]interface{}) { + m["identity_token_ttl"] = int64(p.IdentityTokenTTL.Seconds()) + m["identity_token_audience"] = p.IdentityTokenAudience +} + +// AddPluginIdentityTokenFields adds plugin identity token fields to the given +// field schema map. +func AddPluginIdentityTokenFields(m map[string]*framework.FieldSchema) { + fields := map[string]*framework.FieldSchema{ + "identity_token_audience": { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + "identity_token_ttl": { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of plugin identity tokens", + Default: 3600, + }, + } + + for name, schema := range fields { + if _, ok := m[name]; ok { + panic(fmt.Sprintf("adding field %q would overwrite existing field", name)) + } + m[name] = schema + } +} diff --git a/sdk/helper/pluginidentityutil/fields_test.go b/sdk/helper/pluginidentityutil/fields_test.go new file mode 100644 index 000000000000..96c844971d25 --- /dev/null +++ b/sdk/helper/pluginidentityutil/fields_test.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pluginidentityutil + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/vault/sdk/framework" +) + +const ( + fieldIDTokenTTL = "identity_token_ttl" + fieldIDTokenAudience = "identity_token_audience" +) + +func identityTokenFieldData(raw map[string]interface{}) *framework.FieldData { + return &framework.FieldData{ + Raw: raw, + Schema: map[string]*framework.FieldSchema{ + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + }, + fieldIDTokenAudience: { + Type: framework.TypeString, + }, + }, + } +} + +func TestParsePluginIdentityTokenFields(t *testing.T) { + testcases := []struct { + name string + d *framework.FieldData + wantErr bool + want map[string]interface{} + }{ + { + name: "all input", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenTTL: 10, + fieldIDTokenAudience: "test-aud", + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(10) * time.Second, + fieldIDTokenAudience: "test-aud", + }, + }, + { + name: "empty ttl", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenAudience: "test-aud", + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(0), + fieldIDTokenAudience: "test-aud", + }, + }, + { + name: "empty audience", + d: identityTokenFieldData(map[string]interface{}{ + fieldIDTokenTTL: 10, + }), + want: map[string]interface{}{ + fieldIDTokenTTL: time.Duration(10) * time.Second, + fieldIDTokenAudience: "", + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + p := new(PluginIdentityTokenParams) + err := p.ParsePluginIdentityTokenFields(tt.d) + if tt.wantErr { + assert.Error(t, err) + return + } + got := map[string]interface{}{ + fieldIDTokenTTL: p.IdentityTokenTTL, + fieldIDTokenAudience: p.IdentityTokenAudience, + } + assert.Equal(t, tt.want, got) + }) + } +} + +func TestPopulatePluginIdentityTokenData(t *testing.T) { + testcases := []struct { + name string + p *PluginIdentityTokenParams + want map[string]interface{} + }{ + { + name: "basic", + p: &PluginIdentityTokenParams{ + IdentityTokenAudience: "test-aud", + IdentityTokenTTL: time.Duration(10) * time.Second, + }, + want: map[string]interface{}{ + fieldIDTokenTTL: int64(10), + fieldIDTokenAudience: "test-aud", + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + got := make(map[string]interface{}) + tt.p.PopulatePluginIdentityTokenData(got) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestAddPluginIdentityTokenFields(t *testing.T) { + testcases := []struct { + name string + input map[string]*framework.FieldSchema + want map[string]*framework.FieldSchema + }{ + { + name: "basic", + input: map[string]*framework.FieldSchema{}, + want: map[string]*framework.FieldSchema{ + fieldIDTokenAudience: { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of plugin identity tokens", + Default: 3600, + }, + }, + }, + { + name: "additional-fields", + input: map[string]*framework.FieldSchema{ + "test": { + Type: framework.TypeString, + Description: "Test description", + Default: "default", + }, + }, + want: map[string]*framework.FieldSchema{ + fieldIDTokenAudience: { + Type: framework.TypeString, + Description: "Audience of plugin identity tokens", + Default: "", + }, + fieldIDTokenTTL: { + Type: framework.TypeDurationSecond, + Description: "Time-to-live of 
plugin identity tokens", + Default: 3600, + }, + "test": { + Type: framework.TypeString, + Description: "Test description", + Default: "default", + }, + }, + }, + } + + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + got := tt.input + AddPluginIdentityTokenFields(got) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/sdk/helper/pluginruntimeutil/config.go b/sdk/helper/pluginruntimeutil/config.go new file mode 100644 index 000000000000..f674c7df3625 --- /dev/null +++ b/sdk/helper/pluginruntimeutil/config.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginruntimeutil + +import "github.com/hashicorp/vault/sdk/helper/consts" + +// PluginRuntimeConfig defines the metadata needed to run a plugin runtime +type PluginRuntimeConfig struct { + Name string `json:"name" structs:"name"` + Type consts.PluginRuntimeType `json:"type" structs:"type"` + OCIRuntime string `json:"oci_runtime" structs:"oci_runtime"` + CgroupParent string `json:"cgroup_parent" structs:"cgroup_parent"` + CPU int64 `json:"cpu" structs:"cpu"` + Memory int64 `json:"memory" structs:"memory"` + Rootless bool `json:"rootless" structs:"rootlesss"` +} diff --git a/sdk/helper/pluginutil/env.go b/sdk/helper/pluginutil/env.go index df1fdbeede93..515baa1f7619 100644 --- a/sdk/helper/pluginutil/env.go +++ b/sdk/helper/pluginutil/env.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( @@ -35,6 +38,12 @@ const ( // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" + + // PluginUseLegacyEnvLayering opts out of new environment variable precedence. + // If set to true, Vault process environment variables take precedence over any + // colliding plugin-specific environment variables. Otherwise, plugin-specific + // environment variables take precedence over Vault process environment variables. + PluginUseLegacyEnvLayering = "VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING" ) // OptionallyEnableMlock determines if mlock should be called, and if so enables diff --git a/sdk/helper/pluginutil/env_test.go b/sdk/helper/pluginutil/env_test.go index 1d04b327524e..21f77faba6e6 100644 --- a/sdk/helper/pluginutil/env_test.go +++ b/sdk/helper/pluginutil/env_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( diff --git a/sdk/helper/pluginutil/identity_token.go b/sdk/helper/pluginutil/identity_token.go new file mode 100644 index 000000000000..7e764bb1e137 --- /dev/null +++ b/sdk/helper/pluginutil/identity_token.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "time" +) + +const redactedTokenString = "ey***" + +type IdentityTokenRequest struct { + // Audience identifies the recipient of the token. The requested + // value will be in the "aud" claim. Required. + Audience string + // TTL is the requested duration that the token will be valid for. + // Optional with a default of 1hr. + TTL time.Duration +} + +type IdentityTokenResponse struct { + // Token is the plugin identity token. + Token IdentityToken + // TTL is the duration that the token is valid for after truncation is applied. + // The TTL may be truncated depending on the lifecycle of its signing key. 
+ TTL time.Duration +} + +type IdentityToken string + +// String returns a redacted token string. Use the Token() method +// to obtain the non-redacted token contents. +func (t IdentityToken) String() string { + return redactedTokenString +} + +// Token returns the non-redacted token contents. +func (t IdentityToken) Token() string { + return string(t) +} diff --git a/sdk/helper/pluginutil/identity_token_test.go b/sdk/helper/pluginutil/identity_token_test.go new file mode 100644 index 000000000000..d0c01c390b30 --- /dev/null +++ b/sdk/helper/pluginutil/identity_token_test.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestIdentityToken_Stringer ensures that plugin identity tokens that +// are printed in formatted strings or errors are redacted and getters +// return expected values. +func TestIdentityToken_Stringer(t *testing.T) { + contents := "header.payload.signature" + tk := IdentityToken(contents) + + // token getters + assert.Equal(t, contents, tk.Token()) + assert.Equal(t, redactedTokenString, tk.String()) + + // formatted strings and errors + assert.NotContains(t, fmt.Sprintf("%v", tk), tk.Token()) + assert.NotContains(t, fmt.Sprintf("%s", tk), tk.Token()) + assert.NotContains(t, fmt.Errorf("%v", tk).Error(), tk.Token()) + assert.NotContains(t, fmt.Errorf("%s", tk).Error(), tk.Token()) +} diff --git a/sdk/helper/pluginutil/multiplexing.go b/sdk/helper/pluginutil/multiplexing.go index 41316ec49df2..8fc86a4c36c9 100644 --- a/sdk/helper/pluginutil/multiplexing.go +++ b/sdk/helper/pluginutil/multiplexing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go index cfd463d6af93..1faed8698f89 100644 --- a/sdk/helper/pluginutil/multiplexing.pb.go +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/helper/pluginutil/multiplexing.proto package pluginutil diff --git a/sdk/helper/pluginutil/multiplexing.proto b/sdk/helper/pluginutil/multiplexing.proto index aa2438b070ff..3b5d19198417 100644 --- a/sdk/helper/pluginutil/multiplexing.proto +++ b/sdk/helper/pluginutil/multiplexing.proto @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package pluginutil.multiplexing; @@ -5,9 +8,9 @@ option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil"; message MultiplexingSupportRequest {} message MultiplexingSupportResponse { - bool supported = 1; + bool supported = 1; } service PluginMultiplexing { - rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); + rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); } diff --git a/sdk/helper/pluginutil/multiplexing_grpc.pb.go b/sdk/helper/pluginutil/multiplexing_grpc.pb.go index aa8d0e47ba84..1fc6cba7c62d 100644 --- a/sdk/helper/pluginutil/multiplexing_grpc.pb.go +++ b/sdk/helper/pluginutil/multiplexing_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sdk/helper/pluginutil/multiplexing.proto package pluginutil @@ -14,6 +21,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + PluginMultiplexing_MultiplexingSupport_FullMethodName = "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport" +) + // PluginMultiplexingClient is the client API for PluginMultiplexing service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -31,7 +42,7 @@ func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexing func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) { out := new(MultiplexingSupportResponse) - err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...) + err := c.cc.Invoke(ctx, PluginMultiplexing_MultiplexingSupport_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -76,7 +87,7 @@ func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", + FullMethod: PluginMultiplexing_MultiplexingSupport_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest)) diff --git a/sdk/helper/pluginutil/multiplexing_test.go b/sdk/helper/pluginutil/multiplexing_test.go index 125a4a120c62..3f589ffa7cd9 100644 --- a/sdk/helper/pluginutil/multiplexing_test.go +++ b/sdk/helper/pluginutil/multiplexing_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go index f344ca979931..1af71d09b75c 100644 --- a/sdk/helper/pluginutil/run_config.go +++ b/sdk/helper/pluginutil/run_config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
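The env layering introduced in run_config.go below depends on os/exec semantics: when cmd.Env contains duplicate keys, the last value in the slice for each key wins (documented behavior as of Go 1.19). A standalone sketch of the two orderings, using an illustrative FOO variable:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	os.Setenv("FOO", "from-vault-process")
	pluginEnv := []string{"FOO=from-plugin-registration"}

	// Default layering: plugin-specific entries come last, so they win.
	cmd := exec.Command("sh", "-c", "echo $FOO")
	cmd.Env = append(os.Environ(), pluginEnv...)
	out, _ := cmd.Output()
	fmt.Print(string(out)) // from-plugin-registration

	// Legacy layering (VAULT_PLUGIN_USE_LEGACY_ENV_LAYERING=true): the
	// Vault process env comes last, so it wins on collisions.
	cmd = exec.Command("sh", "-c", "echo $FOO")
	cmd.Env = append(pluginEnv, os.Environ()...)
	out, _ = cmd.Output()
	fmt.Print(string(out)) // from-vault-process
}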
+// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( @@ -5,11 +8,25 @@ import ( "crypto/sha256" "crypto/tls" "fmt" + "os" "os/exec" + "strconv" + "strings" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-secure-stdlib/plugincontainer" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" +) + +const ( + // Labels for plugin container ownership + labelVaultPID = "com.hashicorp.vault.pid" + labelVaultClusterID = "com.hashicorp.vault.cluster.id" + labelVaultPluginName = "com.hashicorp.vault.plugin.name" + labelVaultPluginVersion = "com.hashicorp.vault.plugin.version" + labelVaultPluginType = "com.hashicorp.vault.plugin.type" ) type PluginClientConfig struct { @@ -27,85 +44,188 @@ type PluginClientConfig struct { type runConfig struct { // Provided by PluginRunner - command string - args []string - sha256 []byte + command string + image string + imageTag string + args []string + sha256 []byte // Initialized with what's in PluginRunner.Env, but can be added to env []string + runtimeConfig *pluginruntimeutil.PluginRuntimeConfig + PluginClientConfig + tmpdir string } -func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { - cmd := exec.Command(rc.command, rc.args...) - cmd.Env = append(cmd.Env, rc.env...) +func (rc runConfig) mlockEnabled() bool { + return rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) +} + +func (rc runConfig) generateCmd(ctx context.Context) (cmd *exec.Cmd, clientTLSConfig *tls.Config, err error) { + cmd = exec.Command(rc.command, rc.args...) + env := rc.env // Add the mlock setting to the ENV of the plugin - if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + if rc.mlockEnabled() { + env = append(env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) } version, err := rc.Wrapper.VaultVersion(ctx) if err != nil { - return nil, err + return nil, nil, err } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) + env = append(env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) if rc.IsMetadataMode { rc.Logger = rc.Logger.With("metadata", "true") } metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) - cmd.Env = append(cmd.Env, metadataEnv) + env = append(env, metadataEnv) automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) - cmd.Env = append(cmd.Env, automtlsEnv) + env = append(env, automtlsEnv) - var clientTLSConfig *tls.Config if !rc.AutoMTLS && !rc.IsMetadataMode { // Get a CA TLS Certificate certBytes, key, err := generateCert() if err != nil { - return nil, err + return nil, nil, err } // Use CA to sign a client cert and return a configured TLS config clientTLSConfig, err = createClientTLSConfig(certBytes, key) if err != nil { - return nil, err + return nil, nil, err } // Use CA to sign a server cert and wrap the values in a response wrapped // token. 
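// The plugin unwraps this token on startup to obtain that server certificate.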
wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) if err != nil { - return nil, err + return nil, nil, err } // Add the response wrap token to the ENV of the plugin - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + env = append(env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) } - secureConfig := &plugin.SecureConfig{ - Checksum: rc.sha256, - Hash: sha256.New(), + if rc.image == "" { + // go-plugin has always overridden user-provided env vars with the OS + // (Vault process) env vars, but we want plugins to be able to override + // the Vault process env. We don't want to make a breaking change in + // go-plugin so always set SkipHostEnv and replicate the legacy behavior + // ourselves if the user opts in. + if legacy, _ := strconv.ParseBool(os.Getenv(PluginUseLegacyEnvLayering)); legacy { + // Env vars are layered as follows, with later entries overriding + // earlier entries if there are duplicate keys: + // 1. Env specified at plugin registration + // 2. Env from Vault SDK + // 3. Env from Vault process (OS) + // 4. Env from go-plugin + cmd.Env = append(env, os.Environ()...) + } else { + // Env vars are layered as follows, with later entries overriding + // earlier entries if there are duplicate keys: + // 1. Env from Vault process (OS) + // 2. Env specified at plugin registration + // 3. Env from Vault SDK + // 4. Env from go-plugin + cmd.Env = append(os.Environ(), env...) + } + } else { + // Containerized plugins do not inherit any env vars from Vault. + cmd.Env = env + } + + return cmd, clientTLSConfig, nil +} + +func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { + cmd, clientTLSConfig, err := rc.generateCmd(ctx) + if err != nil { + return nil, err } clientConfig := &plugin.ClientConfig{ HandshakeConfig: rc.HandshakeConfig, VersionedPlugins: rc.PluginSets, - Cmd: cmd, - SecureConfig: secureConfig, TLSConfig: clientTLSConfig, Logger: rc.Logger, AllowedProtocols: []plugin.Protocol{ plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - AutoMTLS: rc.AutoMTLS, + AutoMTLS: rc.AutoMTLS, + SkipHostEnv: true, + } + if rc.image == "" { + clientConfig.Cmd = cmd + clientConfig.SecureConfig = &plugin.SecureConfig{ + Checksum: rc.sha256, + Hash: sha256.New(), + } + } else { + containerCfg, err := rc.containerConfig(ctx, cmd.Env) + if err != nil { + return nil, err + } + clientConfig.RunnerFunc = containerCfg.NewContainerRunner + clientConfig.UnixSocketConfig = &plugin.UnixSocketConfig{ + Group: strconv.Itoa(containerCfg.GroupAdd), + TempDir: rc.tmpdir, + } + clientConfig.GRPCBrokerMultiplex = true } return clientConfig, nil } +func (rc runConfig) containerConfig(ctx context.Context, env []string) (*plugincontainer.Config, error) { + clusterID, err := rc.Wrapper.ClusterID(ctx) + if err != nil { + return nil, err + } + cfg := &plugincontainer.Config{ + Image: rc.image, + Tag: rc.imageTag, + SHA256: fmt.Sprintf("%x", rc.sha256), + + Env: env, + GroupAdd: os.Getegid(), + Runtime: consts.DefaultContainerPluginOCIRuntime, + CapIPCLock: rc.mlockEnabled(), + Labels: map[string]string{ + labelVaultPID: strconv.Itoa(os.Getpid()), + labelVaultClusterID: clusterID, + labelVaultPluginName: rc.PluginClientConfig.Name, + labelVaultPluginType: rc.PluginClientConfig.PluginType.String(), + labelVaultPluginVersion: rc.PluginClientConfig.Version, + }, + } + + // Use rc.command and rc.args directly instead of cmd.Path and cmd.Args, as + // exec.Command may mutate the provided command.
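+ // (For example, exec.Command resolves a bare command name against PATH when populating cmd.Path.)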
+ if rc.command != "" { + cfg.Entrypoint = []string{rc.command} + } + if len(rc.args) > 0 { + cfg.Args = rc.args + } + if rc.runtimeConfig != nil { + cfg.CgroupParent = rc.runtimeConfig.CgroupParent + cfg.NanoCpus = rc.runtimeConfig.CPU + cfg.Memory = rc.runtimeConfig.Memory + if rc.runtimeConfig.OCIRuntime != "" { + cfg.Runtime = rc.runtimeConfig.OCIRuntime + } + if rc.runtimeConfig.Rootless { + cfg.Rootless = true + } + } + + return cfg, nil +} + func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { clientConfig, err := rc.makeConfig(ctx) if err != nil { @@ -167,11 +287,25 @@ func MLock(mlock bool) RunOpt { } func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { + var image, imageTag string + if r.OCIImage != "" { + image = r.OCIImage + imageTag = strings.TrimPrefix(r.Version, "v") + } rc := runConfig{ - command: r.Command, - args: r.Args, - sha256: r.Sha256, - env: r.Env, + command: r.Command, + image: image, + imageTag: imageTag, + args: r.Args, + sha256: r.Sha256, + env: r.Env, + runtimeConfig: r.RuntimeConfig, + tmpdir: r.Tmpdir, + PluginClientConfig: PluginClientConfig{ + Name: r.Name, + PluginType: r.Type, + Version: r.Version, + }, } for _, opt := range opts { diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go index b817ef9551be..6bb840f462d9 100644 --- a/sdk/helper/pluginutil/run_config_test.go +++ b/sdk/helper/pluginutil/run_config_test.go @@ -1,14 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( "context" + "encoding/hex" "fmt" + "os" "os/exec" + "strconv" "testing" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-secure-stdlib/plugincontainer" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" "github.com/hashicorp/vault/sdk/helper/wrapping" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -25,8 +34,11 @@ func TestMakeConfig(t *testing.T) { mlockEnabled bool mlockEnabledTimes int - expectedConfig *plugin.ClientConfig - expectTLSConfig bool + expectedConfig *plugin.ClientConfig + expectTLSConfig bool + expectRunnerFunc bool + skipSecureConfig bool + useLegacyEnvLayering bool } tests := map[string]testCase{ @@ -55,8 +67,9 @@ func TestMakeConfig(t *testing.T) { responseWrapInfoTimes: 0, - mlockEnabled: false, - mlockEnabledTimes: 1, + mlockEnabled: false, + mlockEnabledTimes: 1, + useLegacyEnvLayering: true, expectedConfig: &plugin.ClientConfig{ HandshakeConfig: plugin.HandshakeConfig{ @@ -72,12 +85,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(append([]string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), - }, + }, os.Environ()...), PluginUseLegacyEnvLayering+"=true"), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -87,8 +100,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: false, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + SkipHostEnv: true, }, expectTLSConfig: false, }, @@ -137,14 +151,14 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%t", 
PluginMlockEnabled, true), fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, "testtoken"), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -154,8 +168,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: false, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + SkipHostEnv: true, }, expectTLSConfig: true, }, @@ -201,12 +216,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -216,8 +231,9 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, }, expectTLSConfig: false, }, @@ -263,12 +279,12 @@ func TestMakeConfig(t *testing.T) { Cmd: commandWithEnv( "echo", []string{"foo", "bar"}, - []string{ + append(os.Environ(), []string{ "initial=true", fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), - }, + }...), ), SecureConfig: &plugin.SecureConfig{ Checksum: []byte("some_sha256"), @@ -278,11 +294,71 @@ func TestMakeConfig(t *testing.T) { plugin.ProtocolNetRPC, plugin.ProtocolGRPC, }, - Logger: hclog.NewNullLogger(), - AutoMTLS: true, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, }, expectTLSConfig: false, }, + "image set": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + image: "some-image", + imageTag: "0.1.0", + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: false, + AutoMTLS: true, + }, + }, + + responseWrapInfoTimes: 0, + + mlockEnabled: false, + mlockEnabledTimes: 2, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: nil, + SecureConfig: nil, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + SkipHostEnv: true, + GRPCBrokerMultiplex: true, + UnixSocketConfig: &plugin.UnixSocketConfig{ + Group: strconv.Itoa(os.Getgid()), + }, + }, + expectTLSConfig: false, + expectRunnerFunc: true, + skipSecureConfig: true, + }, } for name, test := range tests { @@ -299,6 +375,10 @@ func TestMakeConfig(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + if test.useLegacyEnvLayering { + t.Setenv(PluginUseLegacyEnvLayering, "true") + } + config, err := test.rc.makeConfig(ctx) if err != nil { 
t.Fatalf("no error expected, got: %s", err) @@ -306,11 +386,13 @@ func TestMakeConfig(t *testing.T) { // The following fields are generated, so we just need to check for existence, not specific value // The value must be nilled out before performing a DeepEqual check - hsh := config.SecureConfig.Hash - if hsh == nil { - t.Fatalf("Missing SecureConfig.Hash") + if !test.skipSecureConfig { + hsh := config.SecureConfig.Hash + if hsh == nil { + t.Fatalf("Missing SecureConfig.Hash") + } + config.SecureConfig.Hash = nil } - config.SecureConfig.Hash = nil if test.expectTLSConfig && config.TLSConfig == nil { t.Fatalf("TLS config expected, got nil") @@ -320,6 +402,11 @@ func TestMakeConfig(t *testing.T) { } config.TLSConfig = nil + if test.expectRunnerFunc != (config.RunnerFunc != nil) { + t.Fatalf("expected RunnerFunc: %v, actual: %v", test.expectRunnerFunc, config.RunnerFunc != nil) + } + config.RunnerFunc = nil + require.Equal(t, test.expectedConfig, config) }) } @@ -355,3 +442,137 @@ func (m *mockRunnerUtil) MlockEnabled() bool { args := m.Called() return args.Bool(0) } + +func (m *mockRunnerUtil) ClusterID(ctx context.Context) (string, error) { + return "1234", nil +} + +func TestContainerConfig(t *testing.T) { + dummySHA, err := hex.DecodeString("abc123") + if err != nil { + t.Fatal(err) + } + myPID := strconv.Itoa(os.Getpid()) + for name, tc := range map[string]struct { + rc runConfig + expected plugincontainer.Config + }{ + "image set, no runtime": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: dummySHA, + env: []string{"initial=true"}, + image: "some-image", + imageTag: "0.1.0", + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", + }, + }, + expected: plugincontainer.Config{ + Image: "some-image", + Tag: "0.1.0", + SHA256: "abc123", + Entrypoint: []string{"echo"}, + Args: []string{"foo", "bar"}, + Env: []string{ + "initial=true", + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + Labels: map[string]string{ + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", + }, + Runtime: consts.DefaultContainerPluginOCIRuntime, + GroupAdd: os.Getgid(), + }, + }, + "image set, with runtime": { + rc: runConfig{ + sha256: dummySHA, + image: "some-image", + imageTag: "0.1.0", + runtimeConfig: &pluginruntimeutil.PluginRuntimeConfig{ + OCIRuntime: "some-oci-runtime", + CgroupParent: "/cgroup/parent", + CPU: 1000, + Memory: 2000, + }, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + Name: "some-plugin", + PluginType: consts.PluginTypeCredential, + Version: "v0.1.0", + }, + }, + expected: plugincontainer.Config{ + Image: "some-image", + Tag: "0.1.0", + SHA256: "abc123", + Env: []string{ + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, 
"dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + Labels: map[string]string{ + labelVaultPID: myPID, + labelVaultClusterID: "1234", + labelVaultPluginName: "some-plugin", + labelVaultPluginType: "auth", + labelVaultPluginVersion: "v0.1.0", + }, + Runtime: "some-oci-runtime", + GroupAdd: os.Getgid(), + CgroupParent: "/cgroup/parent", + NanoCpus: 1000, + Memory: 2000, + }, + }, + } { + t.Run(name, func(t *testing.T) { + mockWrapper := new(mockRunnerUtil) + mockWrapper.On("ResponseWrapData", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, nil) + mockWrapper.On("MlockEnabled"). + Return(false) + tc.rc.Wrapper = mockWrapper + cmd, _, err := tc.rc.generateCmd(context.Background()) + if err != nil { + t.Fatal(err) + } + cfg, err := tc.rc.containerConfig(context.Background(), cmd.Env) + require.NoError(t, err) + require.Equal(t, tc.expected, *cfg) + }) + } +} diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go index 886efe21f1a0..ebbe110c3474 100644 --- a/sdk/helper/pluginutil/runner.go +++ b/sdk/helper/pluginutil/runner.go @@ -1,17 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( "context" + "errors" + "strings" "time" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/hashicorp/go-version" "github.com/hashicorp/vault/sdk/helper/consts" + prutil "github.com/hashicorp/vault/sdk/helper/pluginruntimeutil" "github.com/hashicorp/vault/sdk/helper/wrapping" "google.golang.org/grpc" ) +// ErrPluginNotFound is returned when a plugin does not have a pinned version. +var ErrPinnedVersionNotFound = errors.New("pinned version not found") + // Looker defines the plugin Lookup function that looks into the plugin catalog // for available plugins and returns a PluginRunner type Looker interface { @@ -28,6 +37,7 @@ type RunnerUtil interface { ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) MlockEnabled() bool VaultVersion(ctx context.Context) (string, error) + ClusterID(ctx context.Context) (string, error) } // LookRunnerUtil defines the functions for both Looker and Wrapper @@ -50,12 +60,49 @@ type PluginRunner struct { Name string `json:"name" structs:"name"` Type consts.PluginType `json:"type" structs:"type"` Version string `json:"version" structs:"version"` + OCIImage string `json:"oci_image" structs:"oci_image"` + Runtime string `json:"runtime" structs:"runtime"` Command string `json:"command" structs:"command"` Args []string `json:"args" structs:"args"` Env []string `json:"env" structs:"env"` Sha256 []byte `json:"sha256" structs:"sha256"` Builtin bool `json:"builtin" structs:"builtin"` BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` + RuntimeConfig *prutil.PluginRuntimeConfig `json:"-" structs:"-"` + Tmpdir string `json:"-" structs:"-"` +} + +// BinaryReference returns either the OCI image reference if it's a container +// plugin or the path to the binary if it's a plain process plugin. +func (p *PluginRunner) BinaryReference() string { + if p.Builtin { + return "" + } + if p.OCIImage == "" { + return p.Command + } + + imageRef := p.OCIImage + if p.Version != "" { + imageRef += ":" + strings.TrimPrefix(p.Version, "v") + } + + return imageRef +} + +// SetPluginInput is only used as input for the plugin catalog's set methods. 
+// We don't use the very similar PluginRunner struct, to avoid confusion about +// which fields are settable; the builtin-related fields, for example, are not. +type SetPluginInput struct { + Name string + Type consts.PluginType + Version string + Command string + OCIImage string + Runtime string + Args []string + Env []string + Sha256 []byte } // Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and @@ -92,6 +139,8 @@ type VersionedPlugin struct { Type string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses. Name string `json:"name"` Version string `json:"version"` + OCIImage string `json:"oci_image,omitempty"` + Runtime string `json:"runtime,omitempty"` SHA256 string `json:"sha256,omitempty"` Builtin bool `json:"builtin"` DeprecationStatus string `json:"deprecation_status,omitempty"` @@ -100,6 +149,12 @@ type VersionedPlugin struct { SemanticVersion *version.Version `json:"-"` } +type PinnedVersion struct { + Name string `json:"name"` + Type consts.PluginType `json:"type"` + Version string `json:"version"` +} + // CtxCancelIfCanceled takes a context cancel func and a context. If the context is // shutdown the cancelfunc is called. This is useful for merging two cancel // functions. diff --git a/sdk/helper/pluginutil/tls.go b/sdk/helper/pluginutil/tls.go index c5fff6d701ed..21b35d910e79 100644 --- a/sdk/helper/pluginutil/tls.go +++ b/sdk/helper/pluginutil/tls.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pluginutil import ( diff --git a/sdk/helper/pointerutil/pointer.go b/sdk/helper/pointerutil/pointer.go index 0f26e7dad660..a3cb55898207 100644 --- a/sdk/helper/pointerutil/pointer.go +++ b/sdk/helper/pointerutil/pointer.go @@ -1,8 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pointerutil import ( "os" "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" ) // StringPtr returns a pointer to a string value @@ -17,7 +22,7 @@ func BoolPtr(b bool) *bool { // TimeDurationPtr returns a pointer to a time duration value func TimeDurationPtr(duration string) *time.Duration { - d, _ := time.ParseDuration(duration) + d, _ := parseutil.ParseDurationSecond(duration) return &d } diff --git a/sdk/helper/policyutil/policyutil.go b/sdk/helper/policyutil/policyutil.go index 8e5541b1868f..a5a8082e13c2 100644 --- a/sdk/helper/policyutil/policyutil.go +++ b/sdk/helper/policyutil/policyutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package policyutil import ( diff --git a/sdk/helper/policyutil/policyutil_test.go b/sdk/helper/policyutil/policyutil_test.go index 4b26483f716a..2280ba93eed8 100644 --- a/sdk/helper/policyutil/policyutil_test.go +++ b/sdk/helper/policyutil/policyutil_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package policyutil import "testing" diff --git a/sdk/helper/roottoken/decode.go b/sdk/helper/roottoken/decode.go index cc9300690a4a..9939b67f72f4 100644 --- a/sdk/helper/roottoken/decode.go +++ b/sdk/helper/roottoken/decode.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package roottoken import ( diff --git a/sdk/helper/roottoken/encode.go b/sdk/helper/roottoken/encode.go index 2537d9397906..dbbc90a2afa3 100644 --- a/sdk/helper/roottoken/encode.go +++ b/sdk/helper/roottoken/encode.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package roottoken import ( diff --git a/sdk/helper/roottoken/encode_test.go b/sdk/helper/roottoken/encode_test.go index 9df26928e294..269bf65b0472 100644 --- a/sdk/helper/roottoken/encode_test.go +++ b/sdk/helper/roottoken/encode_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package roottoken import ( diff --git a/sdk/helper/roottoken/otp.go b/sdk/helper/roottoken/otp.go index 5a12c4f0ae86..4445ec52dc65 100644 --- a/sdk/helper/roottoken/otp.go +++ b/sdk/helper/roottoken/otp.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package roottoken import ( diff --git a/sdk/helper/roottoken/otp_test.go b/sdk/helper/roottoken/otp_test.go index 437e8f3d0f22..53776ec21c8b 100644 --- a/sdk/helper/roottoken/otp_test.go +++ b/sdk/helper/roottoken/otp_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package roottoken import ( diff --git a/sdk/helper/salt/salt.go b/sdk/helper/salt/salt.go index 50e0cad90a60..84cbd03556c2 100644 --- a/sdk/helper/salt/salt.go +++ b/sdk/helper/salt/salt.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package salt import ( diff --git a/sdk/helper/salt/salt_test.go b/sdk/helper/salt/salt_test.go index 99fcb06bd053..3aec9a27b499 100644 --- a/sdk/helper/salt/salt_test.go +++ b/sdk/helper/salt/salt_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package salt import ( diff --git a/sdk/helper/strutil/strutil.go b/sdk/helper/strutil/strutil.go index 09cc9425cb1d..a9e506942af5 100644 --- a/sdk/helper/strutil/strutil.go +++ b/sdk/helper/strutil/strutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package strutil diff --git a/sdk/helper/template/funcs.go b/sdk/helper/template/funcs.go index ee9927fe15cb..6d68cab3a7e4 100644 --- a/sdk/helper/template/funcs.go +++ b/sdk/helper/template/funcs.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package template import ( diff --git a/sdk/helper/template/funcs_test.go b/sdk/helper/template/funcs_test.go index f682a96753f2..4965115960ee 100644 --- a/sdk/helper/template/funcs_test.go +++ b/sdk/helper/template/funcs_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package template import ( diff --git a/sdk/helper/template/template.go b/sdk/helper/template/template.go index 2918825b978e..dea65f3f5ed3 100644 --- a/sdk/helper/template/template.go +++ b/sdk/helper/template/template.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package template import ( diff --git a/sdk/helper/template/template_test.go b/sdk/helper/template/template_test.go index 715dd52519e8..2f66bf36fe03 100644 --- a/sdk/helper/template/template_test.go +++ b/sdk/helper/template/template_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package template import ( diff --git a/sdk/helper/testcluster/consts.go b/sdk/helper/testcluster/consts.go new file mode 100644 index 000000000000..b736b5f88f7f --- /dev/null +++ b/sdk/helper/testcluster/consts.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +const ( + // EnvVaultLicenseCI is the name of an environment variable that contains + // a signed license string used for Vault Enterprise binary-based tests. + // The binary will be run with the env var VAULT_LICENSE set to this value. + EnvVaultLicenseCI = "VAULT_LICENSE_CI" + + // DefaultCAFile is the path to the CA file. This is a docker-specific + // constant. TODO: needs to be moved to a more relevant place + DefaultCAFile = "/vault/config/ca.pem" +) diff --git a/sdk/helper/testcluster/docker/cert.go b/sdk/helper/testcluster/docker/cert.go new file mode 100644 index 000000000000..4704030cb52f --- /dev/null +++ b/sdk/helper/testcluster/docker/cert.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "sync" + + "github.com/hashicorp/errwrap" +) + +// ReloadFunc is the type of function called when a reload is requested +type ReloadFunc func() error + +// CertificateGetter's Reload method satisfies ReloadFunc, and its GetCertificate +// method satisfies the tls.Config.GetCertificate function signature. Currently it does not +// allow changing paths after the fact. +type CertificateGetter struct { + sync.RWMutex + + cert *tls.Certificate + + certFile string + keyFile string + passphrase string +} + +func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGetter { + return &CertificateGetter{ + certFile: certFile, + keyFile: keyFile, + passphrase: passphrase, + } +} + +func (cg *CertificateGetter) Reload() error { + certPEMBlock, err := ioutil.ReadFile(cg.certFile) + if err != nil { + return err + } + keyPEMBlock, err := ioutil.ReadFile(cg.keyFile) + if err != nil { + return err + } + + // Check for encrypted pem block + keyBlock, _ := pem.Decode(keyPEMBlock) + if keyBlock == nil { + return errors.New("decoded PEM is blank") + } + + if x509.IsEncryptedPEMBlock(keyBlock) { + keyBlock.Bytes, err = x509.DecryptPEMBlock(keyBlock, []byte(cg.passphrase)) + if err != nil { + return errwrap.Wrapf("Decrypting PEM block failed {{err}}", err) + } + keyPEMBlock = pem.EncodeToMemory(keyBlock) + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return err + } + + cg.Lock() + defer cg.Unlock() + + cg.cert = &cert + + return nil +} + +func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + cg.RLock() + defer cg.RUnlock() + + if cg.cert == nil { + return nil, fmt.Errorf("nil certificate") + } + + return cg.cert, nil +} diff --git a/sdk/helper/testcluster/docker/environment.go b/sdk/helper/testcluster/docker/environment.go new file mode 100644 index 000000000000..e0d9b72c153b --- /dev/null +++ b/sdk/helper/testcluster/docker/environment.go @@ -0,0 +1,1336 @@ +// Copyright (c) HashiCorp, Inc.
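A short sketch of how CertificateGetter is intended to be used: GetCertificate is installed into a tls.Config so that new TLS handshakes pick up re-read certificate files without restarting the server (the HTTPS server and SIGHUP wiring below are illustrative assumptions, not part of this change):

package main

import (
	"crypto/tls"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/hashicorp/vault/sdk/helper/testcluster/docker"
)

func main() {
	cg := docker.NewCertificateGetter("cert.pem", "key.pem", "")
	if err := cg.Reload(); err != nil { // initial load of the key pair
		log.Fatal(err)
	}
	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{GetCertificate: cg.GetCertificate},
	}
	// Re-read the files on SIGHUP; Reload swaps the cached certificate
	// under the write lock, so handshakes already in flight are unaffected.
	go func() {
		hup := make(chan os.Signal, 1)
		signal.Notify(hup, syscall.SIGHUP)
		for range hup {
			if err := cg.Reload(); err != nil {
				log.Println("certificate reload failed:", err)
			}
		}
	}()
	// Empty file arguments are fine because TLSConfig.GetCertificate is set.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}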
+// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "bufio" + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/volume" + docker "github.com/docker/docker/client" + "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + dockhelper "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + uberAtomic "go.uber.org/atomic" + "golang.org/x/net/http2" +) + +var ( + _ testcluster.VaultCluster = &DockerCluster{} + _ testcluster.VaultClusterNode = &DockerClusterNode{} +) + +const MaxClusterNameLength = 52 + +// DockerCluster is used to manage the lifecycle of the test Vault cluster +type DockerCluster struct { + ClusterName string + + ClusterNodes []*DockerClusterNode + + // Certificate fields + *testcluster.CA + RootCAs *x509.CertPool + + barrierKeys [][]byte + recoveryKeys [][]byte + tmpDir string + + // rootToken is the initial root token created when the Vault cluster is + // created. + rootToken string + DockerAPI *docker.Client + ID string + Logger log.Logger + builtTags map[string]struct{} + + storage testcluster.ClusterStorage +} + +func (dc *DockerCluster) NamedLogger(s string) log.Logger { + return dc.Logger.Named(s) +} + +func (dc *DockerCluster) ClusterID() string { + return dc.ID +} + +func (dc *DockerCluster) Nodes() []testcluster.VaultClusterNode { + ret := make([]testcluster.VaultClusterNode, len(dc.ClusterNodes)) + for i := range dc.ClusterNodes { + ret[i] = dc.ClusterNodes[i] + } + return ret +} + +func (dc *DockerCluster) GetBarrierKeys() [][]byte { + return dc.barrierKeys +} + +func testKeyCopy(key []byte) []byte { + result := make([]byte, len(key)) + copy(result, key) + return result +} + +func (dc *DockerCluster) GetRecoveryKeys() [][]byte { + ret := make([][]byte, len(dc.recoveryKeys)) + for i, k := range dc.recoveryKeys { + ret[i] = testKeyCopy(k) + } + return ret +} + +func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte { + return dc.GetBarrierKeys() +} + +func (dc *DockerCluster) SetBarrierKeys(keys [][]byte) { + dc.barrierKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.barrierKeys[i] = testKeyCopy(k) + } +} + +func (dc *DockerCluster) SetRecoveryKeys(keys [][]byte) { + dc.recoveryKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.recoveryKeys[i] = testKeyCopy(k) + } +} + +func (dc *DockerCluster) GetCACertPEMFile() string { + return dc.CACertPEMFile +} + +func (dc *DockerCluster) Cleanup() { + dc.cleanup() +} + +func (dc *DockerCluster) cleanup() error { + var result *multierror.Error + for _, node := range dc.ClusterNodes { + if err := node.cleanup(); err != nil { + result = multierror.Append(result, err) + } + } + + return result.ErrorOrNil() +} + +// GetRootToken returns the root token of the cluster, if set +func (dc *DockerCluster) GetRootToken() string { + return dc.rootToken +} + +func (dc *DockerCluster) SetRootToken(s string) { +
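// Trace-log the token so a developer can attach to the running test cluster by exporting the printed variables. +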
dc.Logger.Trace("cluster root token changed", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", s)) + dc.rootToken = s +} + +func (n *DockerClusterNode) Name() string { + return n.Cluster.ClusterName + "-" + n.NodeID +} + +func (dc *DockerCluster) setupNode0(ctx context.Context) error { + client := dc.ClusterNodes[0].client + + var resp *api.InitResponse + var err error + for ctx.Err() == nil { + resp, err = client.Sys().Init(&api.InitRequest{ + SecretShares: 3, + SecretThreshold: 3, + }) + if err == nil && resp != nil { + break + } + time.Sleep(500 * time.Millisecond) + } + if err != nil { + return err + } + if resp == nil { + return fmt.Errorf("nil response to init request") + } + + for _, k := range resp.Keys { + raw, err := hex.DecodeString(k) + if err != nil { + return err + } + dc.barrierKeys = append(dc.barrierKeys, raw) + } + + for _, k := range resp.RecoveryKeys { + raw, err := hex.DecodeString(k) + if err != nil { + return err + } + dc.recoveryKeys = append(dc.recoveryKeys, raw) + } + + dc.rootToken = resp.RootToken + client.SetToken(dc.rootToken) + dc.ClusterNodes[0].client = client + + err = testcluster.UnsealNode(ctx, dc, 0) + if err != nil { + return err + } + + err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error { + if !leader.IsSelf { + return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true) + } + + return nil + }) + + status, err := client.Sys().SealStatusWithContext(ctx) + if err != nil { + return err + } + dc.ID = status.ClusterID + return err +} + +func (dc *DockerCluster) clusterReady(ctx context.Context) error { + for i, node := range dc.ClusterNodes { + expectLeader := i == 0 + err := ensureLeaderMatches(ctx, node.client, func(leader *api.LeaderResponse) error { + if expectLeader != leader.IsSelf { + return fmt.Errorf("node %d leader=%v, expected=%v", i, leader.IsSelf, expectLeader) + } + + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) error { + var err error + var ca testcluster.CA + + if opts != nil && opts.CAKey != nil { + ca.CAKey = opts.CAKey + } else { + ca.CAKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + } + + var caBytes []byte + if opts != nil && len(opts.CACert) > 0 { + caBytes = opts.CACert + } else { + serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() + CACertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + SerialNumber: big.NewInt(serialNumber), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err = x509.CreateCertificate(rand.Reader, CACertTemplate, CACertTemplate, ca.CAKey.Public(), ca.CAKey) + if err != nil { + return err + } + } + CACert, err := x509.ParseCertificate(caBytes) + if err != nil { + return err + } + ca.CACert = CACert + ca.CACertBytes = caBytes + + CACertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + ca.CACertPEM = pem.EncodeToMemory(CACertPEMBlock) + + ca.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem") + err = os.WriteFile(ca.CACertPEMFile, ca.CACertPEM, 0o755) + if err != nil { + return err + } + + marshaledCAKey, err := x509.MarshalECPrivateKey(ca.CAKey) + if err != nil { + return err + } + CAKeyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: 
marshaledCAKey, + } + ca.CAKeyPEM = pem.EncodeToMemory(CAKeyPEMBlock) + + dc.CA = &ca + + return nil +} + +func (n *DockerClusterNode) setupCert(ip string) error { + var err error + + n.ServerKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + + serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: n.Name(), + }, + DNSNames: []string{"localhost", n.Name()}, + IPAddresses: []net.IP{net.IPv6loopback, net.ParseIP("127.0.0.1"), net.ParseIP(ip)}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(serialNumber), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + n.ServerCertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, n.Cluster.CACert, n.ServerKey.Public(), n.Cluster.CAKey) + if err != nil { + return err + } + n.ServerCert, err = x509.ParseCertificate(n.ServerCertBytes) + if err != nil { + return err + } + n.ServerCertPEM = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: n.ServerCertBytes, + }) + + marshaledKey, err := x509.MarshalECPrivateKey(n.ServerKey) + if err != nil { + return err + } + n.ServerKeyPEM = pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + }) + + n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") + err = os.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) + if err != nil { + return err + } + + n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") + err = os.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) + if err != nil { + return err + } + + tlsCert, err := tls.X509KeyPair(n.ServerCertPEM, n.ServerKeyPEM) + if err != nil { + return err + } + + certGetter := NewCertificateGetter(n.ServerCertPEMFile, n.ServerKeyPEMFile, "") + if err := certGetter.Reload(); err != nil { + return err + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + RootCAs: n.Cluster.RootCAs, + ClientCAs: n.Cluster.RootCAs, + ClientAuth: tls.RequestClientCert, + NextProtos: []string{"h2", "http/1.1"}, + GetCertificate: certGetter.GetCertificate, + } + + n.tlsConfig = tlsConfig + + err = os.WriteFile(filepath.Join(n.WorkDir, "ca.pem"), n.Cluster.CACertPEM, 0o755) + if err != nil { + return err + } + return nil +} + +func NewTestDockerCluster(t *testing.T, opts *DockerClusterOptions) *DockerCluster { + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.ClusterName == "" { + opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) + } + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + t.Cleanup(cancel) + + dc, err := NewDockerCluster(ctx, opts) + if err != nil { + t.Fatal(err) + } + dc.Logger.Trace("cluster started", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", dc.GetRootToken())) + return dc +} + +func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerCluster, error) { + api, err := dockhelper.NewDockerAPI() + if err != nil { + return nil, err + } + + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.Logger == nil { + opts.Logger = 
log.NewNullLogger() + } + if opts.VaultLicense == "" { + opts.VaultLicense = os.Getenv(testcluster.EnvVaultLicenseCI) + } + + dc := &DockerCluster{ + DockerAPI: api, + ClusterName: opts.ClusterName, + Logger: opts.Logger, + builtTags: map[string]struct{}{}, + CA: opts.CA, + storage: opts.Storage, + } + + if err := dc.setupDockerCluster(ctx, opts); err != nil { + dc.Cleanup() + return nil, err + } + + return dc, nil +} + +// DockerClusterNode represents a single instance of Vault in a cluster +type DockerClusterNode struct { + NodeID string + HostPort string + client *api.Client + ServerCert *x509.Certificate + ServerCertBytes []byte + ServerCertPEM []byte + ServerCertPEMFile string + ServerKey *ecdsa.PrivateKey + ServerKeyPEM []byte + ServerKeyPEMFile string + tlsConfig *tls.Config + WorkDir string + Cluster *DockerCluster + Container *types.ContainerJSON + DockerAPI *docker.Client + runner *dockhelper.Runner + Logger log.Logger + cleanupContainer func() + RealAPIAddr string + ContainerNetworkName string + ContainerIPAddress string + ImageRepo string + ImageTag string + DataVolumeName string + cleanupVolume func() + AllClients []*api.Client +} + +func (n *DockerClusterNode) TLSConfig() *tls.Config { + return n.tlsConfig.Clone() +} + +func (n *DockerClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + cfg := n.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? + panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(n.Cluster.rootToken) + return client +} + +func (n *DockerClusterNode) APIClientN(listenerNumber int) (*api.Client, error) { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + if listenerNumber >= len(n.AllClients) { + return nil, fmt.Errorf("invalid listener number %d", listenerNumber) + } + cfg := n.AllClients[listenerNumber].CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? 
+ panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(n.Cluster.rootToken) + return client, nil +} + +// NewAPIClient creates and configures a Vault API client to communicate with +// the running Vault Cluster for this DockerClusterNode +func (n *DockerClusterNode) apiConfig() (*api.Config, error) { + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = n.TLSConfig() + if err := http2.ConfigureTransport(transport); err != nil { + return nil, err + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + return nil, config.Error + } + + protocol := "https" + if n.tlsConfig == nil { + protocol = "http" + } + config.Address = fmt.Sprintf("%s://%s", protocol, n.HostPort) + + config.HttpClient = client + config.MaxRetries = 0 + return config, nil +} + +func (n *DockerClusterNode) newAPIClient() (*api.Client, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + client.SetToken(n.Cluster.GetRootToken()) + return client, nil +} + +func (n *DockerClusterNode) newAPIClientForAddress(address string) (*api.Client, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + config.Address = fmt.Sprintf("https://%s", address) + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + client.SetToken(n.Cluster.GetRootToken()) + return client, nil +} + +// Cleanup kills the container of the node and deletes its data volume +func (n *DockerClusterNode) Cleanup() { + n.cleanup() +} + +// Stop kills the container of the node +func (n *DockerClusterNode) Stop() { + n.cleanupContainer() +} + +func (n *DockerClusterNode) cleanup() error { + if n.Container == nil || n.Container.ID == "" { + return nil + } + n.cleanupContainer() + n.cleanupVolume() + return nil +} + +func (n *DockerClusterNode) createDefaultListenerConfig() map[string]interface{} { + return map[string]interface{}{"tcp": map[string]interface{}{ + "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200), + "tls_cert_file": "/vault/config/cert.pem", + "tls_key_file": "/vault/config/key.pem", + "telemetry": map[string]interface{}{ + "unauthenticated_metrics_access": true, + }, + }} +} + +func (n *DockerClusterNode) createTLSDisabledListenerConfig() map[string]interface{} { + return map[string]interface{}{"tcp": map[string]interface{}{ + "address": fmt.Sprintf("%s:%d", "0.0.0.0", 8200), + "telemetry": map[string]interface{}{ + "unauthenticated_metrics_access": true, + }, + "tls_disable": true, + }} +} + +func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error { + if n.DataVolumeName == "" { + vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{}) + if err != nil { + return err + } + n.DataVolumeName = vol.Name + n.cleanupVolume = func() { + _ = n.DockerAPI.VolumeRemove(ctx, vol.Name, false) + } + } + vaultCfg := map[string]interface{}{} + var listenerConfig []map[string]interface{} + + var defaultListenerConfig map[string]interface{} + if opts.DisableTLS { + defaultListenerConfig = n.createTLSDisabledListenerConfig() + } else { + defaultListenerConfig = n.createDefaultListenerConfig() + } + + listenerConfig = append(listenerConfig, defaultListenerConfig) + 
ports := []string{"8200/tcp", "8201/tcp"} + + if opts.VaultNodeConfig != nil && opts.VaultNodeConfig.AdditionalListeners != nil { + for _, config := range opts.VaultNodeConfig.AdditionalListeners { + cfg := n.createDefaultListenerConfig() + listener := cfg["tcp"].(map[string]interface{}) + listener["address"] = fmt.Sprintf("%s:%d", "0.0.0.0", config.Port) + listener["chroot_namespace"] = config.ChrootNamespace + listener["redact_addresses"] = config.RedactAddresses + listener["redact_cluster_name"] = config.RedactClusterName + listener["redact_version"] = config.RedactVersion + listenerConfig = append(listenerConfig, cfg) + portStr := fmt.Sprintf("%d/tcp", config.Port) + if strutil.StrListContains(ports, portStr) { + return fmt.Errorf("duplicate port %d specified", config.Port) + } + ports = append(ports, portStr) + } + } + vaultCfg["listener"] = listenerConfig + vaultCfg["telemetry"] = map[string]interface{}{ + "disable_hostname": true, + } + + // Set up storage. Default is raft. + storageType := "raft" + storageOpts := map[string]interface{}{ + // TODO add options from vnc + "path": "/vault/file", + "node_id": n.NodeID, + } + + if opts.Storage != nil { + storageType = opts.Storage.Type() + storageOpts = opts.Storage.Opts() + } + + if opts.VaultNodeConfig != nil { + for k, v := range opts.VaultNodeConfig.StorageOptions { + if _, ok := storageOpts[k].(string); !ok { + storageOpts[k] = v + } + } + } + vaultCfg["storage"] = map[string]interface{}{ + storageType: storageOpts, + } + + // disable_mlock is required for working in the Docker environment with + // custom plugins + vaultCfg["disable_mlock"] = true + + protocol := "https" + if opts.DisableTLS { + protocol = "http" + } + vaultCfg["api_addr"] = fmt.Sprintf(`%s://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200`, protocol) + vaultCfg["cluster_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8201` + + vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath + + systemJSON, err := json.Marshal(vaultCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644) + if err != nil { + return err + } + + if opts.VaultNodeConfig != nil { + localCfg := *opts.VaultNodeConfig + if opts.VaultNodeConfig.LicensePath != "" { + b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath) + if err != nil || len(b) == 0 { + return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err) + } + localCfg.LicensePath = "/vault/config/license" + dest := filepath.Join(n.WorkDir, "license") + err = os.WriteFile(dest, b, 0o644) + if err != nil { + return fmt.Errorf("error writing license to %q: %w", dest, err) + } + + } + userJSON, err := json.Marshal(localCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644) + if err != nil { + return err + } + } + + if !opts.DisableTLS { + // Create a temporary cert so vault will start up + err = n.setupCert("127.0.0.1") + if err != nil { + return err + } + } + + caDir := filepath.Join(n.Cluster.tmpDir, "ca") + + // setup plugin bin copy if needed + copyFromTo := map[string]string{ + n.WorkDir: "/vault/config", + caDir: "/usr/local/share/ca-certificates/", + } + + var wg sync.WaitGroup + wg.Add(1) + var seenLogs uberAtomic.Bool + logConsumer := func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + n.Logger.Trace(s) + } + logStdout := 
&LogConsumerWriter{logConsumer} + logStderr := &LogConsumerWriter{func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + testcluster.JSONLogNoTimestamp(n.Logger, s) + }} + + postStartFunc := func(containerID string, realIP string) error { + err := n.setupCert(realIP) + if err != nil { + return err + } + + // If we signal Vault before it installs its sighup handler, it'll die. + wg.Wait() + n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) + return n.runner.RefreshFiles(ctx, containerID) + } + + if opts.DisableTLS { + postStartFunc = func(containerID string, realIP string) error { + // If we signal Vault before it installs its sighup handler, it'll die. + wg.Wait() + n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) + return n.runner.RefreshFiles(ctx, containerID) + } + } + + r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{ + ImageRepo: n.ImageRepo, + ImageTag: n.ImageTag, + // We don't need to run update-ca-certificates in the container, because + // we're providing the CA in the raft join call, and otherwise Vault + // servers don't talk to one another on the API port. + Cmd: append([]string{"server"}, opts.Args...), + Env: []string{ + // For now we're using disable_mlock, because this is for testing + // anyway, and because it prevents us using external plugins. + "SKIP_SETCAP=true", + "VAULT_LOG_FORMAT=json", + "VAULT_LICENSE=" + opts.VaultLicense, + }, + Ports: ports, + ContainerName: n.Name(), + NetworkName: opts.NetworkName, + CopyFromTo: copyFromTo, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + PreDelete: true, + DoNotAutoRemove: true, + PostStart: postStartFunc, + Capabilities: []string{"NET_ADMIN"}, + OmitLogTimestamps: true, + VolumeNameToMountPoint: map[string]string{ + n.DataVolumeName: "/vault/file", + }, + }) + if err != nil { + return err + } + n.runner = r + + probe := opts.StartProbe + if probe == nil { + probe = func(c *api.Client) error { + _, err = c.Sys().SealStatus() + return err + } + } + svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + config.Address = fmt.Sprintf("%s://%s:%d", protocol, host, port) + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + err = probe(client) + if err != nil { + return nil, err + } + + return dockhelper.NewServiceHostPort(host, port), nil + }) + if err != nil { + return err + } + + n.HostPort = svc.Config.Address() + n.Container = svc.Container + netName := opts.NetworkName + if netName == "" { + if len(svc.Container.NetworkSettings.Networks) > 1 { + return fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", svc.Container.NetworkSettings.Networks) + } + for netName = range svc.Container.NetworkSettings.Networks { + // Networks above is a map; we just need to find the first and + // only key of this map (network name). The range handles this + // for us, but we need a loop construction in order to use range. 
+ } + } + n.ContainerNetworkName = netName + n.ContainerIPAddress = svc.Container.NetworkSettings.Networks[netName].IPAddress + n.RealAPIAddr = protocol + "://" + n.ContainerIPAddress + ":8200" + n.cleanupContainer = svc.Cleanup + + client, err := n.newAPIClient() + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.client = client + + n.AllClients = append(n.AllClients, client) + + // Skip the first two addresses: the first is the API address we already + // created a client for above, and the second is the cluster address, for + // which we do not want a client. + for _, addr := range svc.StartResult.Addrs[2:] { + client, err := n.newAPIClientForAddress(addr) + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.AllClients = append(n.AllClients, client) + } + return nil +} + +func (n *DockerClusterNode) Pause(ctx context.Context) error { + return n.DockerAPI.ContainerPause(ctx, n.Container.ID) +} + +func (n *DockerClusterNode) Restart(ctx context.Context) error { + timeout := 5 + err := n.DockerAPI.ContainerRestart(ctx, n.Container.ID, container.StopOptions{Timeout: &timeout}) + if err != nil { + return err + } + + resp, err := n.DockerAPI.ContainerInspect(ctx, n.Container.ID) + if err != nil { + return fmt.Errorf("error inspecting container after restart: %s", err) + } + + var port int + if len(resp.NetworkSettings.Ports) > 0 { + for key, binding := range resp.NetworkSettings.Ports { + if len(binding) < 1 { + continue + } + + if key == "8200/tcp" { + port, err = strconv.Atoi(binding[0].HostPort) + } + } + } + + if port == 0 { + return fmt.Errorf("failed to find container port after restart") + } + + hostPieces := strings.Split(n.HostPort, ":") + if len(hostPieces) < 2 { + return errors.New("could not parse node hostname") + } + + n.HostPort = fmt.Sprintf("%s:%d", hostPieces[0], port) + + client, err := n.newAPIClient() + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.client = client + + return nil +} + +func (n *DockerClusterNode) AddNetworkDelay(ctx context.Context, delay time.Duration, targetIP string) error { + ip := net.ParseIP(targetIP) + if ip == nil { + return fmt.Errorf("targetIP %q is not an IP address", targetIP) + } + // Let's attempt to get a unique handle for the filter rule; we'll assume that + // every targetIP has a unique last octet, which is true currently for how + // we're doing docker networking. + lastOctet := ip.To4()[3] + + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo isolating node %s", targetIP), + "apk add iproute2", + // If we're running this script a second time on the same node, + // the add dev will fail; since we only want to run the netem + // command once, we'll do so in the case where the add dev doesn't fail. + "tc qdisc add dev eth0 root handle 1: prio && " + + fmt.Sprintf("tc qdisc add dev eth0 parent 1:1 handle 2: netem delay %dms", delay/time.Millisecond), + // Here we create a u32 filter as per https://man7.org/linux/man-pages/man8/tc-u32.8.html + // Its parent is 1:0 (which I guess is the root?)
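+ // (The "1:" handle names the prio qdisc we just installed at the root; + // the netem qdisc hangs off its first band, and the "flowid 2:1" below + // is meant to steer matched packets into that delayed path.)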
+ // Its handle must be unique, so we base it on targetIP + fmt.Sprintf("tc filter add dev eth0 parent 1:0 protocol ip pref 55 handle ::%x u32 match ip dst %s flowid 2:1", lastOctet, targetIP), + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from tc: %d", exitCode) + } + return nil +} + +// PartitionFromCluster will cause the node to be disconnected at the network +// level from the rest of the docker cluster. It does so in a way that the node +// will not see TCP RSTs and all packets it sends will be "black holed". It +// attempts to keep packets to and from the host intact which allows docker +// daemon to continue streaming logs and any test code to continue making +// requests from the host to the partitioned node. +func (n *DockerClusterNode) PartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + "echo partitioning container from network", + "apk add iproute2", + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // First delete the rules in case this is called twice otherwise we'll add + // multiple copies and only remove one in Unpartition (yay iptables). + // Ignore the error if it didn't exist. + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! -d \"$GW\" -j DROP | true", + // Add rules to drop all packets in and out of the docker network + // connection. + "iptables -I INPUT -i eth0 ! -s \"$GW\" -j DROP", + "iptables -I OUTPUT -o eth0 ! -d \"$GW\" -j DROP", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +// UnpartitionFromCluster reverses a previous call to PartitionFromCluster and +// restores full connectivity. Currently assumes the default "bridge" network. +func (n *DockerClusterNode) UnpartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + "echo un-partitioning container from network", + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // Remove the rules, ignore if they are not present or iptables wasn't + // installed yet (i.e. no-one called PartitionFromCluster yet). + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! -d \"$GW\" -j DROP | true", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +type LogConsumerWriter struct { + consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future.
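+ // Grow the buffer up front: bufio.Scanner starts with a small 4KB buffer + // and Vault's JSON log lines can easily exceed that.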
+ scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.consumer(scanner.Text()) + } + return len(p), nil +} + +// DockerClusterOptions has options for setting up the docker cluster +type DockerClusterOptions struct { + testcluster.ClusterOptions + CAKey *ecdsa.PrivateKey + NetworkName string + ImageRepo string + ImageTag string + CA *testcluster.CA + VaultBinary string + Args []string + StartProbe func(*api.Client) error + Storage testcluster.ClusterStorage + DisableTLS bool +} + +func ensureLeaderMatches(ctx context.Context, client *api.Client, ready func(response *api.LeaderResponse) error) error { + var leader *api.LeaderResponse + var err error + for ctx.Err() == nil { + leader, err = client.Sys().Leader() + switch { + case err != nil: + case leader == nil: + err = fmt.Errorf("nil response to leader check") + default: + err = ready(leader) + if err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("error checking leader: %v", err) +} + +const DefaultNumCores = 3 + +// setupDockerCluster creates a managed cluster of docker containers running +// Vault +func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClusterOptions) error { + if opts.TmpDir != "" { + if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { + return err + } + } + dc.tmpDir = opts.TmpDir + } else { + tempDir, err := ioutil.TempDir("", "vault-test-cluster-") + if err != nil { + return err + } + dc.tmpDir = tempDir + } + caDir := filepath.Join(dc.tmpDir, "ca") + if err := os.MkdirAll(caDir, 0o755); err != nil { + return err + } + + var numCores int + if opts.NumCores == 0 { + numCores = DefaultNumCores + } else { + numCores = opts.NumCores + } + + if !opts.DisableTLS { + if dc.CA == nil { + if err := dc.setupCA(opts); err != nil { + return err + } + } + dc.RootCAs = x509.NewCertPool() + dc.RootCAs.AddCert(dc.CA.CACert) + } + + if dc.storage != nil { + if err := dc.storage.Start(ctx, &opts.ClusterOptions); err != nil { + return err + } + } + + for i := 0; i < numCores; i++ { + if err := dc.addNode(ctx, opts); err != nil { + return err + } + if opts.SkipInit { + continue + } + if i == 0 { + if err := dc.setupNode0(ctx); err != nil { + return err + } + } else { + if err := dc.joinNode(ctx, i, 0); err != nil { + return err + } + } + } + + return nil +} + +func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions) error { + leaderIdx, err := testcluster.LeaderNode(ctx, dc) + if err != nil { + return err + } + if err := dc.addNode(ctx, opts); err != nil { + return err + } + + return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx) +} + +func (dc *DockerCluster) addNode(ctx context.Context, opts *DockerClusterOptions) error { + tag, err := dc.setupImage(ctx, opts) + if err != nil { + return err + } + i := len(dc.ClusterNodes) + nodeID := fmt.Sprintf("core-%d", i) + node := &DockerClusterNode{ + DockerAPI: dc.DockerAPI, + NodeID: nodeID, + Cluster: dc, + WorkDir: filepath.Join(dc.tmpDir, nodeID), + Logger: dc.Logger.Named(nodeID), + ImageRepo: opts.ImageRepo, + ImageTag: tag, + } + dc.ClusterNodes = append(dc.ClusterNodes, node) + if err := os.MkdirAll(node.WorkDir, 0o755); err != nil { + return err + } + if err := node.Start(ctx, opts); err != nil { + return err + } + return nil +} + +func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int) error { + if dc.storage != nil && dc.storage.Type() != "raft" { + 
// Storage is not raft so nothing to do but unseal. + return testcluster.UnsealNode(ctx, dc, nodeIdx) + } + + leader := dc.ClusterNodes[leaderIdx] + + if nodeIdx >= len(dc.ClusterNodes) { + return fmt.Errorf("invalid node %d", nodeIdx) + } + node := dc.ClusterNodes[nodeIdx] + client := node.APIClient() + + var resp *api.RaftJoinResponse + resp, err := client.Sys().RaftJoinWithContext(ctx, &api.RaftJoinRequest{ + // When running locally on a bridge network, the containers must use their + // actual (private) IP to talk to one another. Our code must instead use + // the portmapped address since we're not on their network in that case. + LeaderAPIAddr: leader.RealAPIAddr, + LeaderCACert: string(dc.CACertPEM), + LeaderClientCert: string(node.ServerCertPEM), + LeaderClientKey: string(node.ServerKeyPEM), + }) + if err != nil { + return fmt.Errorf("failed to join cluster: %w", err) + } + if resp == nil || !resp.Joined { + return fmt.Errorf("nil or negative response from raft join request: %v", resp) + } + + return testcluster.UnsealNode(ctx, dc, nodeIdx) +} + +func (dc *DockerCluster) setupImage(ctx context.Context, opts *DockerClusterOptions) (string, error) { + if opts == nil { + opts = &DockerClusterOptions{} + } + sourceTag := opts.ImageTag + if sourceTag == "" { + sourceTag = "latest" + } + + if opts.VaultBinary == "" { + return sourceTag, nil + } + + suffix := "testing" + if sha := os.Getenv("COMMIT_SHA"); sha != "" { + suffix = sha + } + tag := sourceTag + "-" + suffix + if _, ok := dc.builtTags[tag]; ok { + return tag, nil + } + + f, err := os.Open(opts.VaultBinary) + if err != nil { + return "", err + } + defer f.Close() + data, err := io.ReadAll(f) + if err != nil { + return "", err + } + bCtx := dockhelper.NewBuildContext() + bCtx["vault"] = &dockhelper.FileContents{ + Data: data, + Mode: 0o755, + } + + containerFile := fmt.Sprintf(` +FROM %s:%s +COPY vault /bin/vault +`, opts.ImageRepo, sourceTag) + + _, err = dockhelper.BuildImage(ctx, dc.DockerAPI, containerFile, bCtx, + dockhelper.BuildRemove(true), dockhelper.BuildForceRemove(true), + dockhelper.BuildPullParent(true), + dockhelper.BuildTags([]string{opts.ImageRepo + ":" + tag})) + if err != nil { + return "", err + } + dc.builtTags[tag] = struct{}{} + return tag, nil +} + +func (dc *DockerCluster) GetActiveClusterNode() *DockerClusterNode { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + node, err := testcluster.WaitForActiveNode(ctx, dc) + if err != nil { + panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) + } + + return dc.ClusterNodes[node] +} + +/* Notes on testing the non-bridge network case: +- you need the test itself to be running in a container so that it can use + the network; create the network using + docker network create testvault +- this means that you need to mount the docker socket in that test container, + but on macos there's stuff that prevents that from working; to hack that, + on the host run + sudo ln -s "$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock" /var/run/docker.sock.raw +- run the test container like + docker run --rm -it --network testvault \ + -v /var/run/docker.sock.raw:/var/run/docker.sock \ + -v $(pwd):/home/circleci/go/src/github.com/hashicorp/vault/ \ + -w /home/circleci/go/src/github.com/hashicorp/vault/ \ + "docker.mirror.hashicorp.services/cimg/go:1.19.2" /bin/bash +- in the container you may need to chown/chmod /var/run/docker.sock; use `docker ps` + to test if it's working + +*/ diff --git 
a/sdk/helper/testcluster/docker/replication.go b/sdk/helper/testcluster/docker/replication.go new file mode 100644 index 000000000000..c313e7af4d8d --- /dev/null +++ b/sdk/helper/testcluster/docker/replication.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/testcluster" +) + +func DefaultOptions(t *testing.T) *DockerClusterOptions { + return &DockerClusterOptions{ + ImageRepo: "hashicorp/vault", + ImageTag: "latest", + VaultBinary: os.Getenv("VAULT_BINARY"), + ClusterOptions: testcluster.ClusterOptions{ + NumCores: 3, + ClusterName: strings.ReplaceAll(t.Name(), "/", "-"), + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + }, + } +} + +func NewReplicationSetDocker(t *testing.T, opts *DockerClusterOptions) (*testcluster.ReplicationSet, error) { + binary := os.Getenv("VAULT_BINARY") + if binary == "" { + t.Skip("only running docker test when $VAULT_BINARY present") + } + + r := &testcluster.ReplicationSet{ + Clusters: map[string]testcluster.VaultCluster{}, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()), + } + + // The cluster name is also used as the container name prefix. Container + // names should not exceed 64 chars, and suffixes such as "-A-core0" are + // appended to them, so we cap the cluster name length. + if len(opts.ClusterName) > MaxClusterNameLength { + return nil, fmt.Errorf("cluster name length exceeded the maximum allowed length of %v", MaxClusterNameLength) + } + + r.Builder = func(ctx context.Context, name string, baseLogger hclog.Logger) (testcluster.VaultCluster, error) { + myOpts := *opts + myOpts.Logger = baseLogger.Named(name) + if myOpts.ClusterName == "" { + myOpts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + myOpts.ClusterName += "-" + strings.ReplaceAll(name, "/", "-") + myOpts.CA = r.CA + return NewTestDockerCluster(t, &myOpts), nil + } + + a, err := r.Builder(context.TODO(), "A", r.Logger) + if err != nil { + return nil, err + } + r.Clusters["A"] = a + r.CA = a.(*DockerCluster).CA + + return r, nil +} diff --git a/sdk/helper/testcluster/exec.go b/sdk/helper/testcluster/exec.go new file mode 100644 index 000000000000..d91a3de034ac --- /dev/null +++ b/sdk/helper/testcluster/exec.go @@ -0,0 +1,324 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +type ExecDevCluster struct { + ID string + ClusterName string + ClusterNodes []*execDevClusterNode + CACertPEMFile string + barrierKeys [][]byte + recoveryKeys [][]byte + tmpDir string + clientAuthRequired bool + rootToken string + stop func() + stopCh chan struct{} + Logger log.Logger +} + +func (dc *ExecDevCluster) SetRootToken(token string) { + dc.rootToken = token +} + +func (dc *ExecDevCluster) NamedLogger(s string) log.Logger { + return dc.Logger.Named(s) +} + +var _ VaultCluster = &ExecDevCluster{} + +type ExecDevClusterOptions struct { + ClusterOptions + BinaryPath string + // this is -dev-listen-address, defaults to "127.0.0.1:8200" + BaseListenAddress string +} + +func NewTestExecDevCluster(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster { + if opts == nil { + opts = &ExecDevClusterOptions{} + } + if opts.ClusterName == "" { + opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) // .Named("container") + } + if opts.VaultLicense == "" { + opts.VaultLicense = os.Getenv(EnvVaultLicenseCI) + } + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + t.Cleanup(cancel) + + dc, err := NewExecDevCluster(ctx, opts) + if err != nil { + t.Fatal(err) + } + return dc +} + +func NewExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (*ExecDevCluster, error) { + // Check opts before dereferencing it below. + if opts == nil { + opts = &ExecDevClusterOptions{} + } + dc := &ExecDevCluster{ + ClusterName: opts.ClusterName, + stopCh: make(chan struct{}), + } + + if opts.NumCores == 0 { + opts.NumCores = 3 + } + if err := dc.setupExecDevCluster(ctx, opts); err != nil { + dc.Cleanup() + return nil, err + } + + return dc, nil +} + +func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (retErr error) { + if opts == nil { + opts = &ExecDevClusterOptions{} + } + if opts.Logger == nil { + opts.Logger = log.NewNullLogger() + } + dc.Logger = opts.Logger + + if opts.TmpDir != "" { + if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { + return err + } + } + dc.tmpDir = opts.TmpDir + } else { + tempDir, err := os.MkdirTemp("", "vault-test-cluster-") + if err != nil { + return err + } + dc.tmpDir = tempDir + } + + // This context is used to stop the subprocess + execCtx, cancel := context.WithCancel(context.Background()) + dc.stop = func() { + cancel() + close(dc.stopCh) + } + defer func() { + if retErr != nil { + cancel() + } + }() + + bin := opts.BinaryPath + if bin == "" { + bin = "vault" + } + + clusterJsonPath := filepath.Join(dc.tmpDir, "cluster.json") + args := []string{"server", "-dev", "-dev-cluster-json", clusterJsonPath} + switch { + case opts.NumCores == 3: + args = append(args, "-dev-three-node") + case opts.NumCores == 1: + args = append(args, "-dev-tls") + default: + return fmt.Errorf("NumCores=1 and NumCores=3 are the only supported options right now") + } + if opts.BaseListenAddress != "" { + args = append(args, "-dev-listen-address", opts.BaseListenAddress) + } + cmd := exec.CommandContext(execCtx, bin, args...)
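+ // exec.CommandContext ties the subprocess to execCtx: cancelling execCtx + // (which is what dc.stop does) kills the vault child process.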
+ cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "VAULT_LICENSE="+opts.VaultLicense) + cmd.Env = append(cmd.Env, "VAULT_LOG_FORMAT=json") + cmd.Env = append(cmd.Env, "VAULT_DEV_TEMP_DIR="+dc.tmpDir) + if opts.Logger != nil { + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + go func() { + outlog := opts.Logger.Named("stdout") + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + outlog.Trace(scanner.Text()) + } + }() + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + go func() { + errlog := opts.Logger.Named("stderr") + scanner := bufio.NewScanner(stderr) + // The default buffer is 4k, and Vault can emit bigger log lines + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + JSONLogNoTimestamp(errlog, scanner.Text()) + } + }() + } + + if err := cmd.Start(); err != nil { + return err + } + + for ctx.Err() == nil { + if b, err := os.ReadFile(clusterJsonPath); err == nil && len(b) > 0 { + var clusterJson ClusterJson + if err := jsonutil.DecodeJSON(b, &clusterJson); err != nil { + // The file may not be fully written yet; wait and retry rather + // than spinning. + time.Sleep(500 * time.Millisecond) + continue + } + dc.CACertPEMFile = clusterJson.CACertPath + dc.rootToken = clusterJson.RootToken + for i, node := range clusterJson.Nodes { + config := api.DefaultConfig() + config.Address = node.APIAddress + err := config.ConfigureTLS(&api.TLSConfig{ + CACert: clusterJson.CACertPath, + }) + if err != nil { + return err + } + client, err := api.NewClient(config) + if err != nil { + return err + } + client.SetToken(dc.rootToken) + _, err = client.Sys().ListMounts() + if err != nil { + return err + } + + dc.ClusterNodes = append(dc.ClusterNodes, &execDevClusterNode{ + name: fmt.Sprintf("core-%d", i), + client: client, + }) + } + return nil + } + time.Sleep(500 * time.Millisecond) + } + return ctx.Err() +} + +type execDevClusterNode struct { + name string + client *api.Client +} + +var _ VaultClusterNode = &execDevClusterNode{} + +func (e *execDevClusterNode) Name() string { + return e.name +} + +func (e *execDevClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + cfg := e.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were set up, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? 
+ panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(e.client.Token()) + return client +} + +func (e *execDevClusterNode) TLSConfig() *tls.Config { + return e.client.CloneConfig().TLSConfig() +} + +func (dc *ExecDevCluster) ClusterID() string { + return dc.ID +} + +func (dc *ExecDevCluster) Nodes() []VaultClusterNode { + ret := make([]VaultClusterNode, len(dc.ClusterNodes)) + for i := range dc.ClusterNodes { + ret[i] = dc.ClusterNodes[i] + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierKeys() [][]byte { + return dc.barrierKeys +} + +func copyKey(key []byte) []byte { + result := make([]byte, len(key)) + copy(result, key) + return result +} + +func (dc *ExecDevCluster) GetRecoveryKeys() [][]byte { + ret := make([][]byte, len(dc.recoveryKeys)) + for i, k := range dc.recoveryKeys { + ret[i] = copyKey(k) + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierOrRecoveryKeys() [][]byte { + return dc.GetBarrierKeys() +} + +func (dc *ExecDevCluster) SetBarrierKeys(keys [][]byte) { + dc.barrierKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.barrierKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) SetRecoveryKeys(keys [][]byte) { + dc.recoveryKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.recoveryKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) GetCACertPEMFile() string { + return dc.CACertPEMFile +} + +func (dc *ExecDevCluster) Cleanup() { + dc.stop() +} + +// GetRootToken returns the root token of the cluster, if set +func (dc *ExecDevCluster) GetRootToken() string { + return dc.rootToken +} diff --git a/sdk/helper/testcluster/generaterootkind_enumer.go b/sdk/helper/testcluster/generaterootkind_enumer.go new file mode 100644 index 000000000000..367c1a5df400 --- /dev/null +++ b/sdk/helper/testcluster/generaterootkind_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=GenerateRootKind -trimprefix=GenerateRoot"; DO NOT EDIT. + +package testcluster + +import ( + "fmt" +) + +const _GenerateRootKindName = "RegularDRGenerateRecovery" + +var _GenerateRootKindIndex = [...]uint8{0, 7, 9, 25} + +func (i GenerateRootKind) String() string { + if i < 0 || i >= GenerateRootKind(len(_GenerateRootKindIndex)-1) { + return fmt.Sprintf("GenerateRootKind(%d)", i) + } + return _GenerateRootKindName[_GenerateRootKindIndex[i]:_GenerateRootKindIndex[i+1]] +} + +var _GenerateRootKindValues = []GenerateRootKind{0, 1, 2} + +var _GenerateRootKindNameToValueMap = map[string]GenerateRootKind{ + _GenerateRootKindName[0:7]: 0, + _GenerateRootKindName[7:9]: 1, + _GenerateRootKindName[9:25]: 2, +} + +// GenerateRootKindString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func GenerateRootKindString(s string) (GenerateRootKind, error) { + if val, ok := _GenerateRootKindNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to GenerateRootKind values", s) +} + +// GenerateRootKindValues returns all values of the enum +func GenerateRootKindValues() []GenerateRootKind { + return _GenerateRootKindValues +} + +// IsAGenerateRootKind returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i GenerateRootKind) IsAGenerateRootKind() bool { + for _, v := range _GenerateRootKindValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/helper/testcluster/logging.go b/sdk/helper/testcluster/logging.go new file mode 100644 index 000000000000..dda759c7f84f --- /dev/null +++ b/sdk/helper/testcluster/logging.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "encoding/json" + "strings" + + "github.com/hashicorp/go-hclog" +) + +func JSONLogNoTimestamp(outlog hclog.Logger, text string) { + d := json.NewDecoder(strings.NewReader(text)) + m := map[string]interface{}{} + if err := d.Decode(&m); err != nil { + outlog.Error("failed to decode json output from dev vault", "error", err, "input", text) + return + } + + delete(m, "@timestamp") + message := m["@message"].(string) + delete(m, "@message") + level := m["@level"].(string) + delete(m, "@level") + if module, ok := m["@module"]; ok { + delete(m, "@module") + outlog = outlog.Named(module.(string)) + } + + var pairs []interface{} + for k, v := range m { + pairs = append(pairs, k, v) + } + + outlog.Log(hclog.LevelFromString(level), message, pairs...) +} diff --git a/sdk/helper/testcluster/replication.go b/sdk/helper/testcluster/replication.go new file mode 100644 index 000000000000..6f99581574c9 --- /dev/null +++ b/sdk/helper/testcluster/replication.go @@ -0,0 +1,908 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +func GetPerformanceToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/performance/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func EnablePerfPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().WriteWithContext(ctx, "sys/replication/performance/primary/enable", nil) + if err != nil { + return err + } + + err = WaitForPerfReplicationState(ctx, pri, consts.ReplicationPerformancePrimary) + if err != nil { + return err + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func WaitForPerfReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationPerformanceMode == state.GetPerformanceString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnablePerformanceSecondaryNoWait(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary bool) error { + postData := map[string]interface{}{ + "token": perfToken, + "ca_file": DefaultCAFile, + } + 
path := "sys/replication/performance/secondary/enable" + if updatePrimary { + path = "sys/replication/performance/secondary/update-primary" + } + err := WaitForActiveNodeAndPerfStandbys(ctx, sec) + if err != nil { + return err + } + _, err = sec.Nodes()[0].APIClient().Logical().Write(path, postData) + if err != nil { + return err + } + + return WaitForPerfReplicationState(ctx, sec, consts.ReplicationPerformanceSecondary) +} + +func EnablePerformanceSecondary(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary, skipPoisonPill bool) (string, error) { + if err := EnablePerformanceSecondaryNoWait(ctx, perfToken, pri, sec, updatePrimary); err != nil { + return "", err + } + if err := WaitForMatchingMerkleRoots(ctx, "sys/replication/performance/", pri, sec); err != nil { + return "", err + } + root, err := WaitForPerformanceSecondary(ctx, pri, sec, skipPoisonPill) + if err != nil { + return "", err + } + if err := WaitForPerfReplicationWorking(ctx, pri, sec); err != nil { + return "", err + } + return root, nil +} + +func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec VaultCluster) error { + getRoot := func(mode string, cli *api.Client) (string, error) { + status, err := cli.Logical().Read(endpoint + "status") + if err != nil { + return "", err + } + if status == nil || status.Data == nil || status.Data["mode"] == nil { + return "", fmt.Errorf("got nil secret or data") + } + if status.Data["mode"].(string) != mode { + return "", fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) + } + return status.Data["merkle_root"].(string), nil + } + + secClient := sec.Nodes()[0].APIClient() + priClient := pri.Nodes()[0].APIClient() + for i := 0; i < 30; i++ { + secRoot, err := getRoot("secondary", secClient) + if err != nil { + return err + } + priRoot, err := getRoot("primary", priClient) + if err != nil { + return err + } + + if reflect.DeepEqual(priRoot, secRoot) { + return nil + } + time.Sleep(time.Second) + } + + return fmt.Errorf("roots did not become equal") +} + +func WaitForPerformanceWAL(ctx context.Context, pri, sec VaultCluster) error { + endpoint := "sys/replication/performance/" + if err := WaitForMatchingMerkleRoots(ctx, endpoint, pri, sec); err != nil { + return nil + } + getWAL := func(mode, walKey string, cli *api.Client) (int64, error) { + status, err := cli.Logical().Read(endpoint + "status") + if err != nil { + return 0, err + } + if status == nil || status.Data == nil || status.Data["mode"] == nil { + return 0, fmt.Errorf("got nil secret or data") + } + if status.Data["mode"].(string) != mode { + return 0, fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string)) + } + return status.Data[walKey].(json.Number).Int64() + } + + secClient := sec.Nodes()[0].APIClient() + priClient := pri.Nodes()[0].APIClient() + for ctx.Err() == nil { + secLastRemoteWAL, err := getWAL("secondary", "last_remote_wal", secClient) + if err != nil { + return err + } + priLastPerfWAL, err := getWAL("primary", "last_performance_wal", priClient) + if err != nil { + return err + } + + if secLastRemoteWAL >= priLastPerfWAL { + return nil + } + time.Sleep(time.Second) + } + + return fmt.Errorf("performance WALs on the secondary did not catch up with the primary, context err: %w", ctx.Err()) +} + +func WaitForPerformanceSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) (string, error) { + if len(pri.GetRecoveryKeys()) > 0 { + sec.SetBarrierKeys(pri.GetRecoveryKeys()) + 
sec.SetRecoveryKeys(pri.GetRecoveryKeys()) + } else { + sec.SetBarrierKeys(pri.GetBarrierKeys()) + sec.SetRecoveryKeys(pri.GetBarrierKeys()) + } + + if len(sec.Nodes()) > 1 { + if skipPoisonPill { + // As part of prepareSecondary on the active node the keyring is + // deleted from storage. Its absence can cause standbys to seal + // themselves. But it's not reliable, so we'll seal them + // ourselves to force the issue. + for i := range sec.Nodes()[1:] { + if err := SealNode(ctx, sec, i+1); err != nil { + return "", err + } + } + } else { + // We want to make sure we unseal all the nodes so we first need to wait + // until two of the nodes seal due to the poison pill being written + if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { + return "", err + } + } + } + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return "", err + } + if err := UnsealAllNodes(ctx, sec); err != nil { + return "", err + } + + perfSecondaryRootToken, err := GenerateRoot(sec, GenerateRootRegular) + if err != nil { + return "", err + } + sec.SetRootToken(perfSecondaryRootToken) + if err := WaitForActiveNodeAndPerfStandbys(ctx, sec); err != nil { + return "", err + } + + return perfSecondaryRootToken, nil +} + +func WaitForPerfReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { + priActiveIdx, err := WaitForActiveNode(ctx, pri) + if err != nil { + return err + } + secActiveIdx, err := WaitForActiveNode(ctx, sec) + if err != nil { + return err + } + + priClient, secClient := pri.Nodes()[priActiveIdx].APIClient(), sec.Nodes()[secActiveIdx].APIClient() + mountPoint, err := uuid.GenerateUUID() + if err != nil { + return err + } + err = priClient.Sys().Mount(mountPoint, &api.MountInput{ + Type: "kv", + Local: false, + }) + if err != nil { + return fmt.Errorf("unable to mount KV engine on primary: %w", err) + } + + path := mountPoint + "/foo" + _, err = priClient.Logical().Write(path, map[string]interface{}{ + "bar": 1, + }) + if err != nil { + return fmt.Errorf("unable to write KV on primary, path=%s: %w", path, err) + } + + for ctx.Err() == nil { + var secret *api.Secret + secret, err = secClient.Logical().Read(path) + if err == nil && secret != nil { + err = priClient.Sys().Unmount(mountPoint) + if err != nil { + return fmt.Errorf("unable to unmount KV engine on primary: %w", err) + } + return nil + } + time.Sleep(100 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return fmt.Errorf("unable to read replicated KV on secondary, path=%s, err=%v", path, err) +} + +func SetupTwoClusterPerfReplication(ctx context.Context, pri, sec VaultCluster) error { + if err := EnablePerfPrimary(ctx, pri); err != nil { + return err + } + perfToken, err := GetPerformanceToken(pri, sec.ClusterID(), "") + if err != nil { + return err + } + + _, err = EnablePerformanceSecondary(ctx, perfToken, pri, sec, false, false) + return err +} + +// PassiveWaitForActiveNodeAndPerfStandbys should be used instead of +// WaitForActiveNodeAndPerfStandbys when you don't want to do any writes +// as a side-effect. It returns the active node, the perf standby nodes, and +// an error.
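+// +// A minimal usage sketch (names here are illustrative): +// +//	active, standbys, err := PassiveWaitForActiveNodeAndPerfStandbys(ctx, cluster) +//	if err != nil { +//		t.Fatal(err) +//	} +//	client := active.APIClient() // only the active node is safe for writes +//	_ = standbys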
+func PassiveWaitForActiveNodeAndPerfStandbys(ctx context.Context, pri VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + leaderNode, standbys, err := GetActiveAndStandbys(ctx, pri) + if err != nil { + return nil, nil, fmt.Errorf("failed to derive standby nodes, %w", err) + } + + for i, node := range standbys { + client := node.APIClient() + // Make sure we get perf standby nodes + if err = EnsureCoreIsPerfStandby(ctx, client); err != nil { + return nil, nil, fmt.Errorf("standby node %d is not a perfStandby, %w", i, err) + } + } + + return leaderNode, standbys, nil +} + +func GetActiveAndStandbys(ctx context.Context, cluster VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + var leaderIndex int + var err error + if leaderIndex, err = WaitForActiveNode(ctx, cluster); err != nil { + return nil, nil, err + } + + var leaderNode VaultClusterNode + var nodes []VaultClusterNode + for i, node := range cluster.Nodes() { + if i == leaderIndex { + leaderNode = node + continue + } + nodes = append(nodes, node) + } + + return leaderNode, nodes, nil +} + +func EnsureCoreIsPerfStandby(ctx context.Context, client *api.Client) error { + var err error + var health *api.HealthResponse + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.PerformanceStandby { + return nil + } + time.Sleep(time.Millisecond * 500) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func WaitForDRReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationDRMode == state.GetDRString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnableDrPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().Write("sys/replication/dr/primary/enable", nil) + if err != nil { + return err + } + + err = WaitForDRReplicationState(ctx, pri, consts.ReplicationDRPrimary) + if err != nil { + return err + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func GenerateDRActivationToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/dr/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func WaitForDRSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) error { + if len(pri.GetRecoveryKeys()) > 0 { + sec.SetBarrierKeys(pri.GetRecoveryKeys()) + sec.SetRecoveryKeys(pri.GetRecoveryKeys()) + } else { + sec.SetBarrierKeys(pri.GetBarrierKeys()) + sec.SetRecoveryKeys(pri.GetBarrierKeys()) + } + + if len(sec.Nodes()) > 1 { + if skipPoisonPill { + // As part of prepareSecondary on the active node the keyring is + // deleted from storage. Its absence can cause standbys to seal + // themselves. But it's not reliable, so we'll seal them + // ourselves to force the issue. 
+ for i := range sec.Nodes()[1:] { + if err := SealNode(ctx, sec, i+1); err != nil { + return err + } + } + } else { + // We want to make sure we unseal all the nodes so we first need to wait + // until two of the nodes seal due to the poison pill being written + if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { + return err + } + } + } + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + // unseal nodes + for i := range sec.Nodes() { + if err := UnsealNode(ctx, sec, i); err != nil { + // Sometimes when we get here it's already unsealed on its own + // and then this fails for DR secondaries so check again + // The error is "path disabled in replication DR secondary mode". + if healthErr := NodeHealthy(ctx, sec, i); healthErr != nil { + // return the original error + return err + } + } + } + + sec.SetRootToken(pri.GetRootToken()) + + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + return nil +} + +func EnableDRSecondaryNoWait(ctx context.Context, sec VaultCluster, drToken string) error { + postData := map[string]interface{}{ + "token": drToken, + "ca_file": DefaultCAFile, + } + + _, err := sec.Nodes()[0].APIClient().Logical().Write("sys/replication/dr/secondary/enable", postData) + if err != nil { + return err + } + + return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary) +} + +func WaitForReplicationStatus(ctx context.Context, client *api.Client, dr bool, accept func(map[string]interface{}) error) error { + url := "sys/replication/performance/status" + if dr { + url = "sys/replication/dr/status" + } + + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read(url) + if err == nil && secret != nil && secret.Data != nil { + if err = accept(secret.Data); err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + + return fmt.Errorf("unable to get acceptable replication status: error=%v secret=%#v", err, secret) +} + +func WaitForDRReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { + priClient := pri.Nodes()[0].APIClient() + secClient := sec.Nodes()[0].APIClient() + + // Make sure we've entered stream-wals mode + err := WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { + state := secret["state"] + if state == string("stream-wals") { + return nil + } + return fmt.Errorf("expected stream-wals replication state, got %v", state) + }) + if err != nil { + return err + } + + // Now write some data and make sure that we see last_remote_wal nonzero, i.e. + // at least one WAL has been streamed. + secret, err := priClient.Auth().Token().Create(&api.TokenCreateRequest{}) + if err != nil { + return err + } + + // Revoke the token since some tests won't be happy to see it. 
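+ // (Both the create and the revoke are storage writes on the primary, so + // they still generate WALs for the secondary to stream even though the + // token itself is gone.)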
+ err = priClient.Auth().Token().RevokeTree(secret.Auth.ClientToken) + if err != nil { + return err + } + + err = WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) error { + state := secret["state"] + if state != string("stream-wals") { + return fmt.Errorf("expected stream-wals replication state, got %v", state) + } + + if secret["last_remote_wal"] != nil { + lastRemoteWal, _ := secret["last_remote_wal"].(json.Number).Int64() + if lastRemoteWal <= 0 { + return fmt.Errorf("expected last_remote_wal to be greater than zero") + } + return nil + } + + return fmt.Errorf("replication seems to be still catching up, maybe need to wait more") + }) + if err != nil { + return err + } + return nil +} + +func EnableDrSecondary(ctx context.Context, pri, sec VaultCluster, drToken string) error { + err := EnableDRSecondaryNoWait(ctx, sec, drToken) + if err != nil { + return err + } + + if err = WaitForMatchingMerkleRoots(ctx, "sys/replication/dr/", pri, sec); err != nil { + return err + } + + err = WaitForDRSecondary(ctx, pri, sec, false) + if err != nil { + return err + } + + if err = WaitForDRReplicationWorking(ctx, pri, sec); err != nil { + return err + } + return nil +} + +func SetupTwoClusterDRReplication(ctx context.Context, pri, sec VaultCluster) error { + if err := EnableDrPrimary(ctx, pri); err != nil { + return err + } + + drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "") + if err != nil { + return err + } + err = EnableDrSecondary(ctx, pri, sec, drToken) + if err != nil { + return err + } + return nil +} + +func DemoteDRPrimary(client *api.Client) error { + _, err := client.Logical().Write("sys/replication/dr/primary/demote", map[string]interface{}{}) + return err +} + +func createBatchToken(client *api.Client, path string) (string, error) { + // TODO: should these be more random in case more than one batch token needs to be created? + suffix := strings.ReplaceAll(path, "/", "") + policyName := "path-batch-policy-" + suffix + roleName := "path-batch-role-" + suffix + + rules := fmt.Sprintf(`path "%s" { capabilities = [ "read", "update" ] }`, path) + + // create policy + _, err := client.Logical().Write("sys/policy/"+policyName, map[string]interface{}{ + "policy": rules, + }) + if err != nil { + return "", err + } + + // create a role + _, err = client.Logical().Write("auth/token/roles/"+roleName, map[string]interface{}{ + "allowed_policies": policyName, + "orphan": true, + "renewable": false, + "token_type": "batch", + }) + if err != nil { + return "", err + } + + // create batch token + secret, err := client.Logical().Write("auth/token/create/"+roleName, nil) + if err != nil { + return "", err + } + + return secret.Auth.ClientToken, nil +} + +// PromoteDRSecondaryWithBatchToken creates a batch token for DR promotion and +// demotes the primary cluster before promoting the secondary. The primary +// cluster needs to be functional to generate the batch token +func PromoteDRSecondaryWithBatchToken(ctx context.Context, pri, sec VaultCluster) error { + client := pri.Nodes()[0].APIClient() + drToken, err := createBatchToken(client, "sys/replication/dr/secondary/promote") + if err != nil { + return err + } + + err = DemoteDRPrimary(client) + if err != nil { + return err + } + + return promoteDRSecondaryInternal(ctx, sec, drToken) +} + +// PromoteDRSecondary generates a DR operation token on the secondary using +// unseal/recovery keys, so it can be used even when the primary cluster is +// out of service.
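+// +// A sketch of a typical failover, using the ReplicationSet naming convention +// below (names illustrative, error handling elided): +// +//	// primary "A" is unavailable; promote its DR secondary "B" +//	_ = PromoteDRSecondary(ctx, set.Clusters["B"])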
+func PromoteDRSecondary(ctx context.Context, sec VaultCluster) error { + // Generate a DR operation token using the secondary's unseal/recovery keys. + drToken, err := GenerateRoot(sec, GenerateRootDR) + if err != nil { + return err + } + return promoteDRSecondaryInternal(ctx, sec, drToken) +} + +func promoteDRSecondaryInternal(ctx context.Context, sec VaultCluster, drToken string) error { + secClient := sec.Nodes()[0].APIClient() + + // Allow retries of 503s, e.g.: replication is still catching up, + // try again later or provide the "force" argument + oldMaxRetries := secClient.MaxRetries() + secClient.SetMaxRetries(10) + defer secClient.SetMaxRetries(oldMaxRetries) + resp, err := secClient.Logical().Write("sys/replication/dr/secondary/promote", map[string]interface{}{ + "dr_operation_token": drToken, + }) + if err != nil { + return err + } + if resp == nil { + return fmt.Errorf("nil status response during DR promotion") + } + + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRPrimary) +} + +func checkClusterAddr(ctx context.Context, pri, sec VaultCluster) error { + priClient := pri.Nodes()[0].APIClient() + priLeader, err := priClient.Sys().LeaderWithContext(ctx) + if err != nil { + return err + } + secClient := sec.Nodes()[0].APIClient() + endpoint := "sys/replication/dr/" + status, err := secClient.Logical().Read(endpoint + "status") + if err != nil { + return err + } + if status == nil || status.Data == nil { + return fmt.Errorf("got nil secret or data") + } + + var priAddrs []string + err = mapstructure.Decode(status.Data["known_primary_cluster_addrs"], &priAddrs) + if err != nil { + return err + } + if !strutil.StrListContains(priAddrs, priLeader.LeaderClusterAddress) { + return fmt.Errorf("failed to find the expected primary cluster address %v in known_primary_cluster_addrs", priLeader.LeaderClusterAddress) + } + + return nil +} + +func UpdatePrimary(ctx context.Context, pri, sec VaultCluster) error { + // generate DR operation token to do update primary on vC to point to + // the new perfSec primary vD + rootToken, err := GenerateRoot(sec, GenerateRootDR) + if err != nil { + return err + } + + // secondary activation token + drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "") + if err != nil { + return err + } + + // update-primary on vC (new perfSec Dr secondary) to point to + // the new perfSec Dr primary + secClient := sec.Nodes()[0].APIClient() + resp, err := secClient.Logical().Write("sys/replication/dr/secondary/update-primary", map[string]interface{}{ + "dr_operation_token": rootToken, + "token": drToken, + "ca_file": DefaultCAFile, + }) + if err != nil { + return err + } + if resp == nil { + return fmt.Errorf("nil status response during update primary") + } + + if _, err = WaitForActiveNode(ctx, sec); err != nil { + return err + } + + if err = WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary); err != nil { + return err + } + + if err = checkClusterAddr(ctx, pri, sec); err != nil { + return err + } + + return nil +} + +func SetupFourClusterReplication(ctx context.Context, pri, sec, pridr, secdr VaultCluster) error { + err := SetupTwoClusterPerfReplication(ctx, pri, sec) + if err != nil { + return err + } + err = SetupTwoClusterDRReplication(ctx, pri, pridr) + if err != nil { + return err + } + err = SetupTwoClusterDRReplication(ctx, sec, secdr) + if err != nil { + return err + } + return nil +} + +type ReplicationSet struct { + 
// By convention, we recommend the following naming scheme for + // clusters in this map: + // A: perf primary + // B: primary's DR + // C: first perf secondary of A + // D: C's DR + // E: second perf secondary of A + // F: E's DR + // ... etc. + // + // We use generic names rather than role-specific names because + // that's less confusing when promotions take place that result in role + // changes. In other words, if D gets promoted to replace C as a perf + // secondary, and C gets demoted and updated to become D's DR secondary, + // they should maintain their initial names of D and C throughout. + Clusters map[string]VaultCluster + Builder ClusterBuilder + Logger hclog.Logger + CA *CA +} + +type ClusterBuilder func(ctx context.Context, name string, logger hclog.Logger) (VaultCluster, error) + +func NewReplicationSet(b ClusterBuilder) (*ReplicationSet, error) { + return &ReplicationSet{ + Clusters: map[string]VaultCluster{}, + Builder: b, + Logger: hclog.NewNullLogger(), + }, nil +} + +func (r *ReplicationSet) StandardPerfReplication(ctx context.Context) error { + for _, name := range []string{"A", "C"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + // Derive the timeout from the caller's context so cancellation is honored. + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + err := SetupTwoClusterPerfReplication(ctx, r.Clusters["A"], r.Clusters["C"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) StandardDRReplication(ctx context.Context) error { + for _, name := range []string{"A", "B"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + err := SetupTwoClusterDRReplication(ctx, r.Clusters["A"], r.Clusters["B"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) GetFourReplicationCluster(ctx context.Context) error { + for _, name := range []string{"A", "B", "C", "D"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + err := SetupFourClusterReplication(ctx, r.Clusters["A"], r.Clusters["C"], r.Clusters["B"], r.Clusters["D"]) + if err != nil { + return err + } + return nil +} + +func (r *ReplicationSet) Cleanup() { + for _, cluster := range r.Clusters { + cluster.Cleanup() + } +} + +func WaitForPerfReplicationConnectionStatus(ctx context.Context, client *api.Client) error { + type Primary struct { + APIAddress string `mapstructure:"api_address"` + ConnectionStatus string `mapstructure:"connection_status"` + ClusterAddress string `mapstructure:"cluster_address"` + LastHeartbeat string `mapstructure:"last_heartbeat"` + } + type Status struct { + Primaries []Primary `mapstructure:"primaries"` + } + return WaitForPerfReplicationStatus(ctx, client, func(m map[string]interface{}) error { + var status Status + err := mapstructure.Decode(m, &status) + if err != nil { + return err + } + if len(status.Primaries) == 0 { + return fmt.Errorf("primaries is zero") + } + for _, v := range status.Primaries { + if v.ConnectionStatus == "connected" { + return nil + } + } + return fmt.Errorf("no primaries connected") + }) +} + +func WaitForPerfReplicationStatus(ctx 
context.Context, client *api.Client, accept func(map[string]interface{}) error) error { + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read("sys/replication/performance/status") + if err == nil && secret != nil && secret.Data != nil { + if err = accept(secret.Data); err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("unable to get acceptable replication status within allotted time: error=%v secret=%#v", err, secret) +} diff --git a/sdk/helper/testcluster/types.go b/sdk/helper/testcluster/types.go new file mode 100644 index 000000000000..0c04c224c1de --- /dev/null +++ b/sdk/helper/testcluster/types.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "crypto/ecdsa" + "crypto/tls" + "crypto/x509" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" +) + +type VaultClusterNode interface { + APIClient() *api.Client + TLSConfig() *tls.Config +} + +type VaultCluster interface { + Nodes() []VaultClusterNode + GetBarrierKeys() [][]byte + GetRecoveryKeys() [][]byte + GetBarrierOrRecoveryKeys() [][]byte + SetBarrierKeys([][]byte) + SetRecoveryKeys([][]byte) + GetCACertPEMFile() string + Cleanup() + ClusterID() string + NamedLogger(string) hclog.Logger + SetRootToken(token string) + GetRootToken() string +} + +type VaultNodeConfig struct { + // Not configurable because cluster creator wants to control these: + // PluginDirectory string `hcl:"plugin_directory"` + // APIAddr string `hcl:"api_addr"` + // ClusterAddr string `hcl:"cluster_addr"` + // Storage *Storage `hcl:"-"` + // HAStorage *Storage `hcl:"-"` + // DisableMlock bool `hcl:"disable_mlock"` + // ClusterName string `hcl:"cluster_name"` + + // Not configurable yet: + // Listeners []*Listener `hcl:"-"` + // Seals []*KMS `hcl:"-"` + // Entropy *Entropy `hcl:"-"` + // Telemetry *Telemetry `hcl:"telemetry"` + // HCPLinkConf *HCPLinkConfig `hcl:"cloud"` + // PidFile string `hcl:"pid_file"` + // ServiceRegistrationType string + // ServiceRegistrationOptions map[string]string + + StorageOptions map[string]string + AdditionalListeners []VaultNodeListenerConfig + + DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"` + LogFormat string `json:"log_format"` + LogLevel string `json:"log_level"` + CacheSize int `json:"cache_size"` + DisableCache bool `json:"disable_cache"` + DisablePrintableCheck bool `json:"disable_printable_check"` + EnableUI bool `json:"ui"` + MaxLeaseTTL time.Duration `json:"max_lease_ttl"` + DefaultLeaseTTL time.Duration `json:"default_lease_ttl"` + ClusterCipherSuites string `json:"cluster_cipher_suites"` + PluginFileUid int `json:"plugin_file_uid"` + PluginFilePermissions int `json:"plugin_file_permissions"` + EnableRawEndpoint bool `json:"raw_storage_endpoint"` + DisableClustering bool `json:"disable_clustering"` + DisablePerformanceStandby bool `json:"disable_performance_standby"` + DisableSealWrap bool `json:"disable_sealwrap"` + DisableIndexing bool `json:"disable_indexing"` + DisableSentinelTrace bool `json:"disable_sentinel"` + EnableResponseHeaderHostname bool `json:"enable_response_header_hostname"` + LogRequestsLevel string `json:"log_requests_level"` + EnableResponseHeaderRaftNodeID bool `json:"enable_response_header_raft_node_id"` + LicensePath string `json:"license_path"` +} + +type ClusterNode struct { + APIAddress string `json:"api_address"` +} + +type ClusterJson struct { + Nodes 
[]ClusterNode `json:"nodes"`
+	CACertPath string        `json:"ca_cert_path"`
+	RootToken  string        `json:"root_token"`
+}
+
+type ClusterOptions struct {
+	ClusterName                 string
+	KeepStandbysSealed          bool
+	SkipInit                    bool
+	CACert                      []byte
+	NumCores                    int
+	TmpDir                      string
+	Logger                      hclog.Logger
+	VaultNodeConfig             *VaultNodeConfig
+	VaultLicense                string
+	AdministrativeNamespacePath string
+}
+
+type VaultNodeListenerConfig struct {
+	Port              int
+	ChrootNamespace   string
+	RedactAddresses   bool
+	RedactClusterName bool
+	RedactVersion     bool
+}
+
+type CA struct {
+	CACert        *x509.Certificate
+	CACertBytes   []byte
+	CACertPEM     []byte
+	CACertPEMFile string
+	CAKey         *ecdsa.PrivateKey
+	CAKeyPEM      []byte
+}
+
+type ClusterStorage interface {
+	Start(context.Context, *ClusterOptions) error
+	Cleanup() error
+	Opts() map[string]interface{}
+	Type() string
+}
diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go
new file mode 100644
index 000000000000..a628b4d6c1e6
--- /dev/null
+++ b/sdk/helper/testcluster/util.go
@@ -0,0 +1,463 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package testcluster
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/sdk/helper/xor"
+)
+
+// Note that OSS standbys will not accept seal requests, and ent perf standbys
+// may fail them as well if they haven't yet been able to get "elected" as perf standbys.
+func SealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	err := client.Sys().SealWithContext(ctx)
+	if err != nil {
+		return err
+	}
+
+	return NodeSealed(ctx, cluster, nodeIdx)
+}
+
+func SealAllNodes(ctx context.Context, cluster VaultCluster) error {
+	for i := range cluster.Nodes() {
+		if err := SealNode(ctx, cluster, i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	for _, key := range cluster.GetBarrierOrRecoveryKeys() {
+		_, err := client.Sys().UnsealWithContext(ctx, hex.EncodeToString(key))
+		if err != nil {
+			return err
+		}
+	}
+
+	return NodeHealthy(ctx, cluster, nodeIdx)
+}
+
+func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error {
+	for i := range cluster.Nodes() {
+		if err := UnsealNode(ctx, cluster, i); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func NodeSealed(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	var health *api.HealthResponse
+	var err error
+	for ctx.Err() == nil {
+		health, err = client.Sys().HealthWithContext(ctx)
+		switch {
+		case err != nil:
+		case !health.Sealed:
+			err = fmt.Errorf("unsealed: %#v", health)
+		default:
+			return nil
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	return fmt.Errorf("node %d is not sealed: %v", nodeIdx, err)
+}
+
+func WaitForNCoresSealed(ctx context.Context, cluster VaultCluster, n int) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
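+	// Fan out one goroutine per node: each polls NodeSealed until it succeeds
+	// or the context expires, then reports its result on errs exactly once.
+	// The channel is buffered so that goroutines still running when we return
+	// early (as soon as n nodes report sealed) don't block forever on send.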
+
+	errs := make(chan error, len(cluster.Nodes()))
+	for i := range cluster.Nodes() {
+		go func(i int) {
+			var err error
+			for ctx.Err() == nil {
+				err = NodeSealed(ctx, cluster, i)
+				if err == nil {
+					errs <- nil
+					return
+				}
+				time.Sleep(100 * time.Millisecond)
+			}
+			if err == nil {
+				err = ctx.Err()
+			}
+			errs <- err
+		}(i)
+	}
+
+	var merr *multierror.Error
+	var sealed int
+	for range cluster.Nodes() {
+		err := <-errs
+		if err != nil {
+			merr = multierror.Append(merr, err)
+		} else {
+			sealed++
+			if sealed == n {
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("%d cores were not sealed, errs: %v", n, merr.ErrorOrNil())
+}
+
+func NodeHealthy(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	var health *api.HealthResponse
+	var err error
+	for ctx.Err() == nil {
+		health, err = client.Sys().HealthWithContext(ctx)
+		switch {
+		case err != nil:
+		case health == nil:
+			err = fmt.Errorf("nil response to health check")
+		case health.Sealed:
+			err = fmt.Errorf("sealed: %#v", health)
+		default:
+			return nil
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	return fmt.Errorf("node %d is unhealthy: %v", nodeIdx, err)
+}
+
+func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) {
+	// Be robust to multiple nodes thinking they are active. This is possible in
+	// certain network partition situations where the old leader has not
+	// discovered it has lost leadership yet. In tests this is only likely to come
+	// up when we are specifically provoking it, but it's possible it could happen
+	// at any point if leadership flaps or connectivity suffers transient errors,
+	// etc., so be robust against it. The best solution would be to have some sort
+	// of epoch like the raft term that is guaranteed to be monotonically
+	// increasing through elections; however, we don't have that abstraction for
+	// all HABackends in general. The best we have is the ActiveTime. In a
+	// distributed systems textbook this would be bad to rely on due to clock
+	// sync issues, etc., but for our tests it's likely fine because even if we are
+	// running separate Vault containers, they are all using the same hardware
+	// clock in the system.
+	leaderActiveTimes := make(map[int]time.Time)
+	for i, node := range cluster.Nodes() {
+		client := node.APIClient()
+		ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
+		resp, err := client.Sys().LeaderWithContext(ctx)
+		cancel()
+		if err != nil || resp == nil || !resp.IsSelf {
+			continue
+		}
+		leaderActiveTimes[i] = resp.ActiveTime
+	}
+	if len(leaderActiveTimes) == 0 {
+		return -1, fmt.Errorf("no leader found")
+	}
+	// At least one node thinks it is active. If multiple, pick the one with the
+	// most recent ActiveTime. Note if there is only one then this just returns
+	// it.
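+	// newestActiveTime starts as the zero time.Time, so any real ActiveTime
+	// beats it; exact ties are broken arbitrarily by map iteration order.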
+ var newestLeaderIdx int + var newestActiveTime time.Time + for i, at := range leaderActiveTimes { + if at.After(newestActiveTime) { + newestActiveTime = at + newestLeaderIdx = i + } + } + return newestLeaderIdx, nil +} + +func WaitForActiveNode(ctx context.Context, cluster VaultCluster) (int, error) { + for ctx.Err() == nil { + if idx, _ := LeaderNode(ctx, cluster); idx != -1 { + return idx, nil + } + time.Sleep(500 * time.Millisecond) + } + return -1, ctx.Err() +} + +func WaitForStandbyNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + var err error + for ctx.Err() == nil { + var resp *api.LeaderResponse + + resp, err = client.Sys().LeaderWithContext(ctx) + switch { + case err != nil: + case resp.IsSelf: + return fmt.Errorf("waiting for standby but node is leader") + case resp.LeaderAddress == "": + err = fmt.Errorf("node doesn't know leader address") + default: + return nil + } + + time.Sleep(100 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func WaitForActiveNodeAndStandbys(ctx context.Context, cluster VaultCluster) (int, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + leaderIdx, err := WaitForActiveNode(ctx, cluster) + if err != nil { + return 0, err + } + + if len(cluster.Nodes()) == 1 { + return 0, nil + } + + errs := make(chan error) + for i := range cluster.Nodes() { + if i == leaderIdx { + continue + } + go func(i int) { + errs <- WaitForStandbyNode(ctx, cluster, i) + }(i) + } + + var merr *multierror.Error + expectedStandbys := len(cluster.Nodes()) - 1 + for i := 0; i < expectedStandbys; i++ { + merr = multierror.Append(merr, <-errs) + } + + return leaderIdx, merr.ErrorOrNil() +} + +func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster) error { + logger := cluster.NamedLogger("WaitForActiveNodeAndPerfStandbys") + // This WaitForActiveNode was added because after a Raft cluster is sealed + // and then unsealed, when it comes up it may have a different leader than + // Core0, making this helper fail. + // A sleep before calling WaitForActiveNodeAndPerfStandbys seems to sort + // things out, but so apparently does this. We should be able to eliminate + // this call to WaitForActiveNode by reworking the logic in this method. 
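+	//
+	// The probe below mounts a throwaway local KV engine on the active node,
+	// writes to it in a loop, and waits for each standby's
+	// PerfStandbyLastRemoteWAL to advance past the first value observed for it,
+	// which demonstrates that WAL streaming is live on every perf standby.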
+ leaderIdx, err := WaitForActiveNode(ctx, cluster) + if err != nil { + return err + } + + if len(cluster.Nodes()) == 1 { + return nil + } + + expectedStandbys := len(cluster.Nodes()) - 1 + + mountPoint, err := uuid.GenerateUUID() + if err != nil { + return err + } + leaderClient := cluster.Nodes()[leaderIdx].APIClient() + + for ctx.Err() == nil { + err = leaderClient.Sys().MountWithContext(ctx, mountPoint, &api.MountInput{ + Type: "kv", + Local: true, + }) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } + if err != nil { + return fmt.Errorf("unable to mount KV engine: %v", err) + } + path := mountPoint + "/waitforactivenodeandperfstandbys" + var standbys, actives int64 + errchan := make(chan error, len(cluster.Nodes())) + for i := range cluster.Nodes() { + go func(coreNo int) { + node := cluster.Nodes()[coreNo] + client := node.APIClient() + val := 1 + var err error + defer func() { + errchan <- err + }() + + var lastWAL uint64 + for ctx.Err() == nil { + _, err = leaderClient.Logical().WriteWithContext(ctx, path, map[string]interface{}{ + "bar": val, + }) + val++ + time.Sleep(250 * time.Millisecond) + if err != nil { + continue + } + var leader *api.LeaderResponse + leader, err = client.Sys().LeaderWithContext(ctx) + if err != nil { + logger.Trace("waiting for core", "core", coreNo, "err", err) + continue + } + switch { + case leader.IsSelf: + logger.Trace("waiting for core", "core", coreNo, "isLeader", true) + atomic.AddInt64(&actives, 1) + return + case leader.PerfStandby && leader.PerfStandbyLastRemoteWAL > 0: + switch { + case lastWAL == 0: + lastWAL = leader.PerfStandbyLastRemoteWAL + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + case lastWAL < leader.PerfStandbyLastRemoteWAL: + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + atomic.AddInt64(&standbys, 1) + return + } + default: + logger.Trace("waiting for core", "core", coreNo, + "ha_enabled", leader.HAEnabled, + "is_self", leader.IsSelf, + "perf_standby", leader.PerfStandby, + "perf_standby_remote_wal", leader.PerfStandbyLastRemoteWAL) + } + } + }(i) + } + + errs := make([]error, 0, len(cluster.Nodes())) + for range cluster.Nodes() { + errs = append(errs, <-errchan) + } + if actives != 1 || int(standbys) != expectedStandbys { + return fmt.Errorf("expected 1 active core and %d standbys, got %d active and %d standbys, errs: %v", + expectedStandbys, actives, standbys, errs) + } + + for ctx.Err() == nil { + err = leaderClient.Sys().UnmountWithContext(ctx, mountPoint) + if err == nil { + break + } + time.Sleep(time.Second) + } + if err != nil { + return fmt.Errorf("unable to unmount KV engine on primary") + } + return nil +} + +func Clients(vc VaultCluster) []*api.Client { + var ret []*api.Client + for _, n := range vc.Nodes() { + ret = append(ret, n.APIClient()) + } + return ret +} + +//go:generate enumer -type=GenerateRootKind -trimprefix=GenerateRoot +type GenerateRootKind int + +const ( + GenerateRootRegular GenerateRootKind = iota + GenerateRootDR + GenerateRecovery +) + +func GenerateRoot(cluster VaultCluster, kind GenerateRootKind) (string, error) { + // If recovery keys supported, use those to perform root token generation instead + keys := cluster.GetBarrierOrRecoveryKeys() + + client := cluster.Nodes()[0].APIClient() + + var err error + var status *api.GenerateRootStatusResponse + switch kind { + case GenerateRootRegular: + status, err = 
client.Sys().GenerateRootInit("", "") + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenInit("", "") + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "") + } + if err != nil { + return "", err + } + + if status.Required > len(keys) { + return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys)) + } + + otp := status.OTP + + for i, key := range keys { + if i >= status.Required { + break + } + + strKey := base64.StdEncoding.EncodeToString(key) + switch kind { + case GenerateRootRegular: + status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce) + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce) + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce) + } + if err != nil { + return "", err + } + } + if !status.Complete { + return "", fmt.Errorf("generate root operation did not end successfully") + } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken) + if err != nil { + return "", err + } + tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + return "", err + } + return string(tokenBytes), nil +} diff --git a/sdk/helper/testhelpers/output.go b/sdk/helper/testhelpers/output.go new file mode 100644 index 000000000000..c18b1bb6deb1 --- /dev/null +++ b/sdk/helper/testhelpers/output.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testhelpers + +import ( + "crypto/sha256" + "fmt" + "reflect" + + "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/mapstructure" +) + +// ToMap renders an input value of any type as a map. This is intended for +// logging human-readable data dumps in test logs, so it uses the `json` +// tags on struct fields: this makes it easy to exclude `"-"` values that +// are typically not interesting, respect omitempty, etc. +// +// We also replace any []byte fields with a hash of their value. +// This is usually sufficient for test log purposes, and is a lot more readable +// than a big array of individual byte values like Go would normally stringify a +// byte slice. +func ToMap(in any) (map[string]any, error) { + temp := make(map[string]any) + cfg := &mapstructure.DecoderConfig{ + TagName: "json", + IgnoreUntaggedFields: true, + Result: &temp, + } + md, err := mapstructure.NewDecoder(cfg) + if err != nil { + return nil, err + } + err = md.Decode(in) + if err != nil { + return nil, err + } + + // mapstructure doesn't call the DecodeHook for each field when doing + // struct->map conversions, but it does for map->map, so call it a second + // time to convert each []byte field. + out := make(map[string]any) + md2, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &out, + DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice || from.Elem().Kind() != reflect.Uint8 { + return data, nil + } + b := data.([]byte) + return fmt.Sprintf("%x", sha256.Sum256(b)), nil + }, + }) + if err != nil { + return nil, err + } + err = md2.Decode(temp) + if err != nil { + return nil, err + } + + return out, nil +} + +// ToString renders its input using ToMap, and returns a string containing the +// result or an error if that fails. 
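+//
+// A minimal usage sketch (resp here is an illustrative *logical.Response):
+//
+//	t.Logf("read response: %s", ToString(resp.Data))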
+func ToString(in any) string {
+	m, err := ToMap(in)
+	if err != nil {
+		return err.Error()
+	}
+	return fmt.Sprintf("%v", m)
+}
+
+// StringOrDie renders its input using ToMap, and returns a string containing the
+// result. If rendering yields an error, calls t.Fatal.
+func StringOrDie(t testing.T, in any) string {
+	t.Helper()
+	m, err := ToMap(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return fmt.Sprintf("%v", m)
+}
diff --git a/sdk/helper/testhelpers/output_test.go b/sdk/helper/testhelpers/output_test.go
new file mode 100644
index 000000000000..ada51a1fe119
--- /dev/null
+++ b/sdk/helper/testhelpers/output_test.go
@@ -0,0 +1,48 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package testhelpers
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+)
+
+func TestToMap(t *testing.T) {
+	type s struct {
+		A string            `json:"a"`
+		B []byte            `json:"b"`
+		C map[string]string `json:"c"`
+		D string            `json:"-"`
+	}
+	type args struct {
+		in s
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name:    "basic",
+			args:    args{s{A: "a", B: []byte("bytes"), C: map[string]string{"k": "v"}, D: "d"}},
+			want:    "map[a:a b:277089d91c0bdf4f2e6862ba7e4a07605119431f5d13f726dd352b06f1b206a9 c:map[k:v]]",
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			m, err := ToMap(&tt.args.in)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ToMap() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			got := fmt.Sprintf("%s", m)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("ToMap() got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/sdk/helper/testhelpers/schema/response_validation.go b/sdk/helper/testhelpers/schema/response_validation.go
new file mode 100644
index 000000000000..8085b042b6dd
--- /dev/null
+++ b/sdk/helper/testhelpers/schema/response_validation.go
@@ -0,0 +1,214 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package schema
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// ValidateResponse is a test helper that validates whether the given response
+// object conforms to the response schema (schema.Fields). It cycles through
+// the data map and validates conversions in the schema. In "strict" mode, this
+// function will also ensure that the data map has all schema-required fields
+// and does not have any fields outside of the schema.
+func ValidateResponse(t *testing.T, schema *framework.Response, response *logical.Response, strict bool) {
+	t.Helper()
+
+	if response != nil {
+		ValidateResponseData(t, schema, response.Data, strict)
+	} else {
+		ValidateResponseData(t, schema, nil, strict)
+	}
+}
+
+// ValidateResponseData is a test helper that validates whether the given
+// response data map conforms to the response schema (schema.Fields). It cycles
+// through the data map and validates conversions in the schema. In "strict"
+// mode, this function will also ensure that the data map has all schema-required
+// fields and does not have any fields outside of the schema.
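+//
+// A minimal usage sketch (the schema and field here are illustrative):
+//
+//	schema := &framework.Response{Fields: map[string]*framework.FieldSchema{
+//		"ttl": {Type: framework.TypeDurationSecond, Required: true},
+//	}}
+//	ValidateResponseData(t, schema, resp.Data, true)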
+func ValidateResponseData(t *testing.T, schema *framework.Response, data map[string]interface{}, strict bool) {
+	t.Helper()
+
+	if err := validateResponseDataImpl(
+		schema,
+		data,
+		strict,
+	); err != nil {
+		t.Fatalf("validation error: %v; response data: %#v", err, data)
+	}
+}
+
+// validateResponseDataImpl is extracted so that it can be tested
+func validateResponseDataImpl(schema *framework.Response, data map[string]interface{}, strict bool) error {
+	// nothing to validate
+	if schema == nil {
+		return nil
+	}
+
+	// Certain responses may come through with non-2xx status codes. While
+	// these are not always errors (e.g. 3xx redirection codes), we don't
+	// consider them for the purposes of schema validation
+	if status, exists := data[logical.HTTPStatusCode]; exists {
+		s, ok := status.(int)
+		if ok && (s < 200 || s > 299) {
+			return nil
+		}
+	}
+
+	// Marshal the data to JSON and back to convert the map's values into
+	// JSON strings expected by Validate() and ValidateStrict(). This is
+	// not efficient and is done for testing purposes only.
+	jsonBytes, err := json.Marshal(data)
+	if err != nil {
+		return fmt.Errorf("failed to convert input to json: %w", err)
+	}
+
+	var dataWithStringValues map[string]interface{}
+	if err := json.Unmarshal(
+		jsonBytes,
+		&dataWithStringValues,
+	); err != nil {
+		return fmt.Errorf("failed to unmarshal data: %w", err)
+	}
+
+	// these are special fields that will not show up in the final response and
+	// should be ignored
+	for _, field := range []string{
+		logical.HTTPContentType,
+		logical.HTTPRawBody,
+		logical.HTTPStatusCode,
+		logical.HTTPRawBodyAlreadyJSONDecoded,
+		logical.HTTPCacheControlHeader,
+		logical.HTTPPragmaHeader,
+		logical.HTTPWWWAuthenticateHeader,
+	} {
+		delete(dataWithStringValues, field)
+
+		if _, ok := schema.Fields[field]; ok {
+			return fmt.Errorf("encountered a reserved field in response schema: %s", field)
+		}
+	}
+
+	// Validate
+	fd := framework.FieldData{
+		Raw:    dataWithStringValues,
+		Schema: schema.Fields,
+	}
+
+	if strict {
+		return fd.ValidateStrict()
+	}
+
+	return fd.Validate()
+}
+
+// FindResponseSchema is a test helper to extract response schema from the
+// given framework path / operation.
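+//
+// A typical call site in a backend test might be (names illustrative):
+//
+//	schema := FindResponseSchema(t, b.Paths, 0, logical.ReadOperation)
+//	ValidateResponse(t, schema, resp, true)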
+func FindResponseSchema(t *testing.T, paths []*framework.Path, pathIdx int, operation logical.Operation) *framework.Response { + t.Helper() + + if pathIdx >= len(paths) { + t.Fatalf("path index %d is out of range", pathIdx) + } + + schemaPath := paths[pathIdx] + + return GetResponseSchema(t, schemaPath, operation) +} + +func GetResponseSchema(t *testing.T, path *framework.Path, operation logical.Operation) *framework.Response { + t.Helper() + + schemaOperation, ok := path.Operations[operation] + if !ok { + t.Fatalf( + "could not find response schema: %s: %q operation does not exist", + path.Pattern, + operation, + ) + } + + var schemaResponses []framework.Response + + for _, status := range []int{ + http.StatusOK, // 200 + http.StatusAccepted, // 202 + http.StatusNoContent, // 204 + } { + schemaResponses, ok = schemaOperation.Properties().Responses[status] + if ok { + break + } + } + + if len(schemaResponses) == 0 { + // ListOperations have a default response schema that is implicit unless overridden + if operation == logical.ListOperation { + return &framework.Response{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + }, + }, + } + } + + t.Fatalf( + "could not find response schema: %s: %q operation: no responses found", + path.Pattern, + operation, + ) + } + + return &schemaResponses[0] +} + +// ResponseValidatingCallback can be used in setting up a [vault.TestCluster] +// that validates every response against the openapi specifications. +// +// [vault.TestCluster]: https://pkg.go.dev/github.com/hashicorp/vault/vault#TestCluster +func ResponseValidatingCallback(t *testing.T) func(logical.Backend, *logical.Request, *logical.Response) { + type PathRouter interface { + Route(string) *framework.Path + } + + return func(b logical.Backend, req *logical.Request, resp *logical.Response) { + t.Helper() + + if b == nil { + t.Fatalf("non-nil backend required") + } + + backend, ok := b.(PathRouter) + if !ok { + t.Fatalf("could not cast %T to have `Route(string) *framework.Path`", b) + } + + // The full request path includes the backend but when passing to the + // backend, we have to trim the mount point: + // `sys/mounts/secret` -> `mounts/secret` + // `auth/token/create` -> `create` + requestPath := strings.TrimPrefix(req.Path, req.MountPoint) + + route := backend.Route(requestPath) + if route == nil { + t.Fatalf("backend %T could not find a route for %s", b, req.Path) + } + + ValidateResponse( + t, + GetResponseSchema(t, route, req.Operation), + resp, + true, + ) + } +} diff --git a/sdk/helper/testhelpers/schema/response_validation_test.go b/sdk/helper/testhelpers/schema/response_validation_test.go new file mode 100644 index 000000000000..4f4aa8b1cc3c --- /dev/null +++ b/sdk/helper/testhelpers/schema/response_validation_test.go @@ -0,0 +1,359 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package schema
+
+import (
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/framework"
+)
+
+func TestValidateResponse(t *testing.T) {
+	cases := map[string]struct {
+		schema        *framework.Response
+		response      map[string]interface{}
+		strict        bool
+		errorExpected bool
+	}{
+		"nil schema, nil response, strict": {
+			schema:        nil,
+			response:      nil,
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema, nil response, not strict": {
+			schema:        nil,
+			response:      nil,
+			strict:        false,
+			errorExpected: false,
+		},

+		"nil schema, good response, strict": {
+			schema: nil,
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema, good response, not strict": {
+			schema: nil,
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"nil schema fields, good response, strict": {
+			schema: &framework.Response{},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema fields, good response, not strict": {
+			schema: &framework.Response{},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"string schema field, string response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type: framework.TypeString,
+					},
+				},
+			},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"string schema field, string response, not strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type: framework.TypeString,
+					},
+				},
+			},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"string schema not required field, empty response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: false,
+					},
+				},
+			},
+			response:      map[string]interface{}{},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"string schema required field, empty response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: true,
+					},
+				},
+			},
+			response:      map[string]interface{}{},
+			strict:        true,
+			errorExpected: true,
+		},
+
+		"string schema required field, empty response, not strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: true,
+					},
+				},
+			},
+			response:      map[string]interface{}{},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"string schema required field, nil response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: true,
+					},
+				},
+			},
+			response:      nil,
+			strict:        true,
+			errorExpected: true,
+		},
+
+		"string schema required field, nil response, not strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: true,
+					},
+				},
+			},
+			response:      nil,
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"empty schema, string response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{},
+			},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: true,
+		},
+
+		"empty schema, string response, not
strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "foo": "bar", + }, + strict: false, + errorExpected: false, + }, + + "time schema, string response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": "2024-12-11T09:08:07Z", + }, + strict: true, + errorExpected: false, + }, + + "time schema, string response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": "2024-12-11T09:08:07Z", + }, + strict: false, + errorExpected: false, + }, + + "time schema, time response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), + }, + strict: true, + errorExpected: false, + }, + + "time schema, time response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), + }, + strict: false, + errorExpected: false, + }, + + "empty schema, response has http_raw_body, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: true, + errorExpected: false, + }, + + "empty schema, response has http_raw_body, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: false, + errorExpected: false, + }, + + "string schema field, response has non-200 http_status_code, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: true, + errorExpected: false, + }, + + "string schema field, response has non-200 http_status_code, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: false, + errorExpected: false, + }, + + "schema has http_raw_body, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "http_raw_body": { + Type: framework.TypeString, + Required: false, + }, + }, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: true, + errorExpected: true, + }, + + "schema has http_raw_body, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "http_raw_body": { + Type: framework.TypeString, + Required: false, + }, + }, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: false, + errorExpected: true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := validateResponseDataImpl( + tc.schema, + tc.response, + tc.strict, + ) + if err == nil && tc.errorExpected == true { + 
t.Fatalf("expected an error, got nil") + } + if err != nil && tc.errorExpected == false { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/sdk/helper/tlsutil/tlsutil.go b/sdk/helper/tlsutil/tlsutil.go index e1e9b9484bf4..d91af3679e2e 100644 --- a/sdk/helper/tlsutil/tlsutil.go +++ b/sdk/helper/tlsutil/tlsutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // DEPRECATED: this has been moved to go-secure-stdlib and will be removed package tlsutil diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go index 776b40501ed4..4319bd182369 100644 --- a/sdk/helper/tokenutil/tokenutil.go +++ b/sdk/helper/tokenutil/tokenutil.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package tokenutil import ( @@ -75,8 +78,9 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Bound CIDRs", - Group: "Tokens", + Name: "Generated Token's Bound CIDRs", + Group: "Tokens", + Description: "A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.", }, }, @@ -120,8 +124,9 @@ func TokenFields() map[string]*framework.FieldSchema { Type: framework.TypeCommaStringSlice, Description: "Comma-separated list of policies", DisplayAttrs: &framework.DisplayAttributes{ - Name: "Generated Token's Policies", - Group: "Tokens", + Name: "Generated Token's Policies", + Group: "Tokens", + Description: "A list of policies that will apply to the generated token for this user.", }, }, diff --git a/sdk/helper/useragent/useragent.go b/sdk/helper/useragent/useragent.go index 33b2a23b89dc..53569e910a87 100644 --- a/sdk/helper/useragent/useragent.go +++ b/sdk/helper/useragent/useragent.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package useragent import ( @@ -6,7 +9,6 @@ import ( "strings" "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/version" ) var ( @@ -15,34 +17,29 @@ var ( // rt is the runtime - variable for tests. rt = runtime.Version() - - // versionFunc is the func that returns the current version. This is a - // function to take into account the different build processes and distinguish - // between enterprise and oss builds. - versionFunc = func() string { - return version.GetVersion().VersionNumber() - } ) // String returns the consistent user-agent string for Vault. +// Deprecated: use PluginString instead. // -// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1) +// Example output: // -// Given comments will be appended to the semicolon-delimited comment section. +// Vault (+https://www.vaultproject.io/; go1.19.5) // -// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1; comment-0; comment-1) +// Given comments will be appended to the semicolon-delimited comment section: +// +// Vault (+https://www.vaultproject.io/; go1.19.5; comment-0; comment-1) // -// Deprecated: use PluginString instead. // At one point the user-agent string returned contained the Vault -// version hardcoded into the vault/sdk/version/ package. This works for builtin +// version hardcoded into the vault/sdk/version/ package. 
This worked for builtin // plugins that are compiled into the `vault` binary, in that it correctly described -// the version of that Vault binary. It does not work for external plugins: for them, +// the version of that Vault binary. It did not work for external plugins: for them, // the version will be based on the version stored in the sdk based on the -// contents of the external plugin's go.mod. Now that we're no longer updating -// the version in vault/sdk/version/, it is even less meaningful than ever. +// contents of the external plugin's go.mod. We've kept the String method around +// to avoid breaking builds, but you should be using PluginString. func String(comments ...string) string { c := append([]string{"+" + projectURL, rt}, comments...) - return fmt.Sprintf("Vault/%s (%s)", versionFunc(), strings.Join(c, "; ")) + return fmt.Sprintf("Vault (%s)", strings.Join(c, "; ")) } // PluginString is usable by plugins to return a user-agent string reflecting diff --git a/sdk/helper/useragent/useragent_test.go b/sdk/helper/useragent/useragent_test.go index f0d014f6c570..4677bb62face 100644 --- a/sdk/helper/useragent/useragent_test.go +++ b/sdk/helper/useragent/useragent_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package useragent import ( @@ -9,7 +12,6 @@ import ( func TestUserAgent(t *testing.T) { projectURL = "https://vault-test.com" rt = "go5.0" - versionFunc = func() string { return "1.2.3" } type args struct { comments []string @@ -22,21 +24,21 @@ func TestUserAgent(t *testing.T) { { name: "User agent", args: args{}, - want: "Vault/1.2.3 (+https://vault-test.com; go5.0)", + want: "Vault (+https://vault-test.com; go5.0)", }, { name: "User agent with additional comment", args: args{ comments: []string{"pid-abcdefg"}, }, - want: "Vault/1.2.3 (+https://vault-test.com; go5.0; pid-abcdefg)", + want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg)", }, { name: "User agent with additional comments", args: args{ comments: []string{"pid-abcdefg", "cloud-provider"}, }, - want: "Vault/1.2.3 (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)", + want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)", }, } for _, tt := range tests { diff --git a/sdk/helper/wrapping/wrapinfo.go b/sdk/helper/wrapping/wrapinfo.go index 8d8e63340f95..03a703013008 100644 --- a/sdk/helper/wrapping/wrapinfo.go +++ b/sdk/helper/wrapping/wrapinfo.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package wrapping import "time" diff --git a/sdk/helper/xor/xor.go b/sdk/helper/xor/xor.go index a1f1e90bc156..098a67317855 100644 --- a/sdk/helper/xor/xor.go +++ b/sdk/helper/xor/xor.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package xor import ( diff --git a/sdk/helper/xor/xor_test.go b/sdk/helper/xor/xor_test.go index f50f525ce639..143345d9a5bd 100644 --- a/sdk/helper/xor/xor_test.go +++ b/sdk/helper/xor/xor_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package xor import ( diff --git a/sdk/logical/acme_billing.go b/sdk/logical/acme_billing.go new file mode 100644 index 000000000000..6e4f6ef398b8 --- /dev/null +++ b/sdk/logical/acme_billing.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import "context" + +type ACMEBillingSystemView interface { + CreateActivityCountEventForIdentifiers(ctx context.Context, identifiers []string) error +} diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go index 8ba70f37e01a..ecd1f5865a40 100644 --- a/sdk/logical/audit.go +++ b/sdk/logical/audit.go @@ -1,5 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// LogInput is used as the input to the audit system on which audit entries are based. type LogInput struct { Type string Auth *Auth @@ -17,3 +27,133 @@ type MarshalOptions struct { type OptMarshaler interface { MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) } + +// LogInputBexpr is used for evaluating boolean expressions with go-bexpr. +type LogInputBexpr struct { + MountPoint string `bexpr:"mount_point"` + MountType string `bexpr:"mount_type"` + Namespace string `bexpr:"namespace"` + Operation string `bexpr:"operation"` + Path string `bexpr:"path"` +} + +// BexprDatum returns values from a LogInput formatted for use in evaluating go-bexpr boolean expressions. +// The namespace should be supplied from the current request's context. +func (l *LogInput) BexprDatum(namespace string) *LogInputBexpr { + var mountPoint string + var mountType string + var operation string + var path string + + if l.Request != nil { + mountPoint = l.Request.MountPoint + mountType = l.Request.MountType + operation = string(l.Request.Operation) + path = l.Request.Path + } + + return &LogInputBexpr{ + MountPoint: mountPoint, + MountType: mountType, + Namespace: namespace, + Operation: operation, + Path: path, + } +} + +// Clone will attempt to create a deep copy (almost) of the LogInput. +// If the LogInput type or any of the subtypes referenced by LogInput fields are +// changed, then the Clone methods will need to be updated. +// NOTE: Does not deep clone the LogInput.OuterError field as it represents an +// error interface. +// NOTE: LogInput.Request.Connection (at the time of writing) is also not deep-copied +// and remains a pointer, see Request.Clone for more information. +func (l *LogInput) Clone() (*LogInput, error) { + // Clone Auth + auth, err := cloneAuth(l.Auth) + if err != nil { + return nil, err + } + + // Clone Request + var req *Request + if l.Request != nil { + req, err = l.Request.Clone() + if err != nil { + return nil, err + } + } + + // Clone Response + resp, err := cloneResponse(l.Response) + if err != nil { + return nil, err + } + + // Copy HMAC keys + reqDataKeys := make([]string, len(l.NonHMACReqDataKeys)) + copy(reqDataKeys, l.NonHMACReqDataKeys) + respDataKeys := make([]string, len(l.NonHMACRespDataKeys)) + copy(respDataKeys, l.NonHMACRespDataKeys) + + // OuterErr is just linked in a non-deep way as it's an interface, and we + // don't know for sure which type this might actually be. + // At the time of writing this code, OuterErr isn't modified by anything, + // so we shouldn't get any race issues. + cloned := &LogInput{ + Type: l.Type, + Auth: auth, + Request: req, + Response: resp, + OuterErr: l.OuterErr, + NonHMACReqDataKeys: reqDataKeys, + NonHMACRespDataKeys: respDataKeys, + } + + return cloned, nil +} + +// clone will deep-copy the supplied struct. +// However, it cannot copy unexported fields or evaluate methods. 
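+// The type parameter spares callers a type assertion at each call site: the
+// single assertion on copystructure's interface{} result happens here.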
+func clone[V any](s V) (V, error) { + var result V + + data, err := copystructure.Copy(s) + if err != nil { + return result, err + } + + result = data.(V) + + return result, err +} + +// cloneAuth deep copies an Auth struct. +func cloneAuth(auth *Auth) (*Auth, error) { + // If auth is nil, there's nothing to clone. + if auth == nil { + return nil, nil + } + + auth, err := clone[*Auth](auth) + if err != nil { + return nil, fmt.Errorf("unable to clone auth: %w", err) + } + + return auth, nil +} + +// cloneResponse deep copies a Response struct. +func cloneResponse(response *Response) (*Response, error) { + // If response is nil, there's nothing to clone. + if response == nil { + return nil, nil + } + + resp, err := clone[*Response](response) + if err != nil { + return nil, fmt.Errorf("unable to clone response: %w", err) + } + + return resp, nil +} diff --git a/sdk/logical/audit_test.go b/sdk/logical/audit_test.go new file mode 100644 index 000000000000..07623daab9e8 --- /dev/null +++ b/sdk/logical/audit_test.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestLogInput_BexprDatum ensures that we can transform a LogInput +// into a LogInputBexpr to be used in audit filtering. +func TestLogInput_BexprDatum(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + Request *Request + Namespace string + ExpectedPath string + ExpectedMountPoint string + ExpectedMountType string + ExpectedNamespace string + ExpectedOperation string + }{ + "nil-no-namespace": { + Request: nil, + Namespace: "", + ExpectedPath: "", + ExpectedMountPoint: "", + ExpectedMountType: "", + ExpectedNamespace: "", + ExpectedOperation: "", + }, + "nil-namespace": { + Request: nil, + Namespace: "juan", + ExpectedPath: "", + ExpectedMountPoint: "", + ExpectedMountType: "", + ExpectedNamespace: "juan", + ExpectedOperation: "", + }, + "happy-path": { + Request: &Request{ + MountPoint: "IAmAMountPoint", + MountType: "IAmAMountType", + Operation: CreateOperation, + Path: "IAmAPath", + }, + Namespace: "juan", + ExpectedPath: "IAmAPath", + ExpectedMountPoint: "IAmAMountPoint", + ExpectedMountType: "IAmAMountType", + ExpectedNamespace: "juan", + ExpectedOperation: "create", + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + l := &LogInput{Request: tc.Request} + + d := l.BexprDatum(tc.Namespace) + + require.Equal(t, tc.ExpectedPath, d.Path) + require.Equal(t, tc.ExpectedMountPoint, d.MountPoint) + require.Equal(t, tc.ExpectedMountType, d.MountType) + require.Equal(t, tc.ExpectedNamespace, d.Namespace) + require.Equal(t, tc.ExpectedOperation, d.Operation) + }) + } +} diff --git a/sdk/logical/auth.go b/sdk/logical/auth.go index 62707e81959a..83d9daca12ad 100644 --- a/sdk/logical/auth.go +++ b/sdk/logical/auth.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package logical import ( @@ -123,7 +126,8 @@ type PolicyResults struct { } type PolicyInfo struct { - Name string `json:"name"` - NamespaceId string `json:"namespace_id"` - Type string `json:"type"` + Name string `json:"name"` + NamespaceId string `json:"namespace_id"` + NamespacePath string `json:"namespace_path"` + Type string `json:"type"` } diff --git a/sdk/logical/clienttokensource_enumer.go b/sdk/logical/clienttokensource_enumer.go new file mode 100644 index 000000000000..e930a3a0ddd9 --- /dev/null +++ b/sdk/logical/clienttokensource_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=ClientTokenSource -trimprefix=ClientTokenFrom -transform=snake"; DO NOT EDIT. + +package logical + +import ( + "fmt" +) + +const _ClientTokenSourceName = "no_client_tokenvault_headerauthz_headerinternal_auth" + +var _ClientTokenSourceIndex = [...]uint8{0, 15, 27, 39, 52} + +func (i ClientTokenSource) String() string { + if i >= ClientTokenSource(len(_ClientTokenSourceIndex)-1) { + return fmt.Sprintf("ClientTokenSource(%d)", i) + } + return _ClientTokenSourceName[_ClientTokenSourceIndex[i]:_ClientTokenSourceIndex[i+1]] +} + +var _ClientTokenSourceValues = []ClientTokenSource{0, 1, 2, 3} + +var _ClientTokenSourceNameToValueMap = map[string]ClientTokenSource{ + _ClientTokenSourceName[0:15]: 0, + _ClientTokenSourceName[15:27]: 1, + _ClientTokenSourceName[27:39]: 2, + _ClientTokenSourceName[39:52]: 3, +} + +// ClientTokenSourceString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func ClientTokenSourceString(s string) (ClientTokenSource, error) { + if val, ok := _ClientTokenSourceNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ClientTokenSource values", s) +} + +// ClientTokenSourceValues returns all values of the enum +func ClientTokenSourceValues() []ClientTokenSource { + return _ClientTokenSourceValues +} + +// IsAClientTokenSource returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ClientTokenSource) IsAClientTokenSource() bool { + for _, v := range _ClientTokenSourceValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/logical/connection.go b/sdk/logical/connection.go index 5be863077079..e590e6f59acc 100644 --- a/sdk/logical/connection.go +++ b/sdk/logical/connection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/controlgroup.go b/sdk/logical/controlgroup.go index 2ed1b07688d9..e166f00d1f88 100644 --- a/sdk/logical/controlgroup.go +++ b/sdk/logical/controlgroup.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/error.go b/sdk/logical/error.go index 68c8e1373202..4d78651648f9 100644 --- a/sdk/logical/error.go +++ b/sdk/logical/error.go @@ -1,6 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package logical -import "errors" +import ( + "context" + "errors" +) var ( // ErrUnsupportedOperation is returned if the operation is not supported @@ -22,7 +28,7 @@ var ( // The status code returned does not change because of this error ErrInvalidCredentials = errors.New("invalid credentials") - // ErrMultiAuthzPending is returned if the the request needs more + // ErrMultiAuthzPending is returned if the request needs more // authorizations ErrMultiAuthzPending = errors.New("request needs further approval") @@ -56,8 +62,53 @@ var ( // Error indicating that the requested path used to serve a purpose in older // versions, but the functionality has now been removed ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed") + + // ErrNotFound is an error used to indicate that a particular resource was + // not found. + ErrNotFound = errors.New("not found") ) +type DelegatedAuthErrorHandler func(ctx context.Context, initiatingRequest, authRequest *Request, authResponse *Response, err error) (*Response, error) + +var _ error = &RequestDelegatedAuthError{} + +// RequestDelegatedAuthError Special error indicating the backend wants to delegate authentication elsewhere +type RequestDelegatedAuthError struct { + mountAccessor string + path string + data map[string]interface{} + errHandler DelegatedAuthErrorHandler +} + +func NewDelegatedAuthenticationRequest(mountAccessor, path string, data map[string]interface{}, errHandler DelegatedAuthErrorHandler) *RequestDelegatedAuthError { + return &RequestDelegatedAuthError{ + mountAccessor: mountAccessor, + path: path, + data: data, + errHandler: errHandler, + } +} + +func (d *RequestDelegatedAuthError) Error() string { + return "authentication delegation requested" +} + +func (d *RequestDelegatedAuthError) MountAccessor() string { + return d.mountAccessor +} + +func (d *RequestDelegatedAuthError) Path() string { + return d.path +} + +func (d *RequestDelegatedAuthError) Data() map[string]interface{} { + return d.data +} + +func (d *RequestDelegatedAuthError) AuthErrorHandler() DelegatedAuthErrorHandler { + return d.errHandler +} + type HTTPCodedError interface { Error() string Code() int diff --git a/sdk/logical/event.pb.go b/sdk/logical/event.pb.go new file mode 100644 index 000000000000..96633525cf55 --- /dev/null +++ b/sdk/logical/event.pb.go @@ -0,0 +1,413 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.1 +// protoc (unknown) +// source: sdk/logical/event.proto + +package logical + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// EventPluginInfo contains data related to the plugin that generated an event. +type EventPluginInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of plugin this event originated from, i.e., "auth" or "secrets. 
+ MountClass string `protobuf:"bytes,1,opt,name=mount_class,json=mountClass,proto3" json:"mount_class,omitempty"` + // Unique ID of the mount entry, e.g., "kv_957bb7d8" + MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // Mount path of the plugin this event originated from, e.g., "secret/" + MountPath string `protobuf:"bytes,3,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + // Plugin name that this event originated from, e.g., "kv" + Plugin string `protobuf:"bytes,4,opt,name=plugin,proto3" json:"plugin,omitempty"` + // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin" + PluginVersion string `protobuf:"bytes,5,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` + // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty. + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *EventPluginInfo) Reset() { + *x = EventPluginInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventPluginInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventPluginInfo) ProtoMessage() {} + +func (x *EventPluginInfo) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventPluginInfo.ProtoReflect.Descriptor instead. +func (*EventPluginInfo) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{0} +} + +func (x *EventPluginInfo) GetMountClass() string { + if x != nil { + return x.MountClass + } + return "" +} + +func (x *EventPluginInfo) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *EventPluginInfo) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (x *EventPluginInfo) GetPlugin() string { + if x != nil { + return x.Plugin + } + return "" +} + +func (x *EventPluginInfo) GetPluginVersion() string { + if x != nil { + return x.PluginVersion + } + return "" +} + +func (x *EventPluginInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// EventData contains event data in a CloudEvents container. +type EventData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID identifies the event. It is required. The combination of + // CloudEvents Source (i.e., Vault cluster) + ID must be unique. + // Events with the same Source + ID can be assumed to be duplicates + // by consumers. + // Be careful when setting this manually that the ID contains enough + // entropy to be unique, or possibly that it is idempotent, such + // as a hash of other fields with sufficient uniqueness. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Arbitrary non-secret data. Optional. + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Any IDs that the event relates to, i.e., UUIDs, paths. 
+ EntityIds []string `protobuf:"bytes,3,rep,name=entity_ids,json=entityIds,proto3" json:"entity_ids,omitempty"` + // Human-readable note. + Note string `protobuf:"bytes,4,opt,name=note,proto3" json:"note,omitempty"` +} + +func (x *EventData) Reset() { + *x = EventData{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventData) ProtoMessage() {} + +func (x *EventData) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventData.ProtoReflect.Descriptor instead. +func (*EventData) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{1} +} + +func (x *EventData) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *EventData) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *EventData) GetEntityIds() []string { + if x != nil { + return x.EntityIds + } + return nil +} + +func (x *EventData) GetNote() string { + if x != nil { + return x.Note + } + return "" +} + +// EventReceived is used to consume events and includes additional metadata regarding +// the event type and plugin information. +type EventReceived struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event *EventData `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + // namespace path + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + EventType string `protobuf:"bytes,3,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + PluginInfo *EventPluginInfo `protobuf:"bytes,4,opt,name=plugin_info,json=pluginInfo,proto3" json:"plugin_info,omitempty"` +} + +func (x *EventReceived) Reset() { + *x = EventReceived{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventReceived) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventReceived) ProtoMessage() {} + +func (x *EventReceived) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventReceived.ProtoReflect.Descriptor instead. 
+func (*EventReceived) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{2} +} + +func (x *EventReceived) GetEvent() *EventData { + if x != nil { + return x.Event + } + return nil +} + +func (x *EventReceived) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *EventReceived) GetEventType() string { + if x != nil { + return x.EventType + } + return "" +} + +func (x *EventReceived) GetPluginInfo() *EventPluginInfo { + if x != nil { + return x.PluginInfo + } + return nil +} + +var File_sdk_logical_event_proto protoreflect.FileDescriptor + +var file_sdk_logical_event_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xd1, 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x0d, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 
0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x28, + 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_event_proto_rawDescOnce sync.Once + file_sdk_logical_event_proto_rawDescData = file_sdk_logical_event_proto_rawDesc +) + +func file_sdk_logical_event_proto_rawDescGZIP() []byte { + file_sdk_logical_event_proto_rawDescOnce.Do(func() { + file_sdk_logical_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_event_proto_rawDescData) + }) + return file_sdk_logical_event_proto_rawDescData +} + +var file_sdk_logical_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sdk_logical_event_proto_goTypes = []interface{}{ + (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo + (*EventData)(nil), // 1: logical.EventData + (*EventReceived)(nil), // 2: logical.EventReceived + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_sdk_logical_event_proto_depIdxs = []int32{ + 3, // 0: logical.EventData.metadata:type_name -> google.protobuf.Struct + 1, // 1: logical.EventReceived.event:type_name -> logical.EventData + 0, // 2: logical.EventReceived.plugin_info:type_name -> logical.EventPluginInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_sdk_logical_event_proto_init() } +func file_sdk_logical_event_proto_init() { + if File_sdk_logical_event_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventPluginInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventReceived); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_event_proto_rawDesc, + NumEnums: 0, + 
NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sdk_logical_event_proto_goTypes,
+		DependencyIndexes: file_sdk_logical_event_proto_depIdxs,
+		MessageInfos:      file_sdk_logical_event_proto_msgTypes,
+	}.Build()
+	File_sdk_logical_event_proto = out.File
+	file_sdk_logical_event_proto_rawDesc = nil
+	file_sdk_logical_event_proto_goTypes = nil
+	file_sdk_logical_event_proto_depIdxs = nil
+}
diff --git a/sdk/logical/event.proto b/sdk/logical/event.proto
new file mode 100644
index 000000000000..8892412f5f5e
--- /dev/null
+++ b/sdk/logical/event.proto
@@ -0,0 +1,54 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+package logical;
+
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/hashicorp/vault/sdk/logical";
+
+// EventPluginInfo contains data related to the plugin that generated an event.
+message EventPluginInfo {
+  // The type of plugin this event originated from, i.e., "auth" or "secrets".
+  string mount_class = 1;
+  // Unique ID of the mount entry, e.g., "kv_957bb7d8"
+  string mount_accessor = 2;
+  // Mount path of the plugin this event originated from, e.g., "secret/"
+  string mount_path = 3;
+  // Plugin name that this event originated from, e.g., "kv"
+  string plugin = 4;
+  // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin"
+  string plugin_version = 5;
+  // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty.
+  string version = 6;
+}
+
+// EventData contains event data in a CloudEvents container.
+message EventData {
+  // ID identifies the event. It is required. The combination of
+  // CloudEvents Source (i.e., Vault cluster) + ID must be unique.
+  // Events with the same Source + ID can be assumed to be duplicates
+  // by consumers.
+  // Be careful when setting this manually that the ID contains enough
+  // entropy to be unique, or possibly that it is idempotent, such
+  // as a hash of other fields with sufficient uniqueness.
+  string id = 1;
+  // Arbitrary non-secret data. Optional.
+  google.protobuf.Struct metadata = 2;
+  // Any IDs that the event relates to, i.e., UUIDs, paths.
+  repeated string entity_ids = 3;
+  // Human-readable note.
+  string note = 4;
+}
+
+// EventReceived is used to consume events and includes additional metadata regarding
+// the event type and plugin information.
+message EventReceived {
+  EventData event = 1;
+  // namespace path
+  string namespace = 2;
+  string event_type = 3;
+  EventPluginInfo plugin_info = 4;
+}
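The ID contract above invites an idempotent scheme when events are produced on retryable code paths. A small self-contained sketch (editor's addition; the helper and its inputs are assumptions) of deriving a deterministic event ID from the fields that make the event unique:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deterministicEventID is a hypothetical helper: hashing the identifying
// fields means a retried send of the same logical event reuses one ID, so
// consumers can deduplicate on Source + ID.
func deterministicEventID(dataPath, operation string) string {
	sum := sha256.Sum256([]byte(dataPath + "\x00" + operation))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(deterministicEventID("data/mysecret", "write"))
}
```

diff --git a/sdk/logical/events.go b/sdk/logical/events.go
new file mode 100644
index 000000000000..5bd9717f7ff1
--- /dev/null
+++ b/sdk/logical/events.go
@@ -0,0 +1,105 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-uuid"
+	"google.golang.org/protobuf/types/known/structpb"
+)
+
+// common event metadata keys
+const (
+	// EventMetadataDataPath is used in event metadata to show the API path that can be used to fetch any underlying
+	// data. For example, the KV plugin would set this to `data/mysecret`. The event system will automatically prepend
+	// the plugin mount to this path, if present, so it would become `secret/data/mysecret`, for example.
+	// If this is an auth plugin event, this will additionally be prepended with `auth/`.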
+	EventMetadataDataPath = "data_path"
+	// EventMetadataOperation is used in event metadata to express what operation was performed that generated the
+	// event, e.g., `read` or `write`.
+	EventMetadataOperation = "operation"
+	// EventMetadataModified is used in event metadata when the event attests that the underlying data has been modified
+	// and might need to be re-fetched (at the EventMetadataDataPath).
+	EventMetadataModified = "modified"
+
+	extraMetadataArgument = "EXTRA_VALUE_AT_END"
+)
+
+// ID is an alias to GetId() for CloudEvents compatibility.
+func (x *EventReceived) ID() string {
+	return x.Event.GetId()
+}
+
+// NewEvent returns an event with a new, random event ID.
+func NewEvent() (*EventData, error) {
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return nil, err
+	}
+	return &EventData{
+		Id: id,
+	}, nil
+}
+
+// EventType represents a topic, and is a wrapper around eventlogger.EventType.
+type EventType string
+
+// EventSender sends events to the common event bus.
+type EventSender interface {
+	SendEvent(ctx context.Context, eventType EventType, event *EventData) error
+}
+
+// SendEvent is a convenience method for plugins to send events to an EventSender, converting the
+// metadataPairs to the EventData structure.
+func SendEvent(ctx context.Context, sender EventSender, eventType string, metadataPairs ...string) error {
+	ev, err := NewEvent()
+	if err != nil {
+		return err
+	}
+	ev.Metadata = &structpb.Struct{Fields: make(map[string]*structpb.Value, (len(metadataPairs)+1)/2)}
+	for i := 0; i < len(metadataPairs)-1; i += 2 {
+		ev.Metadata.Fields[metadataPairs[i]] = structpb.NewStringValue(metadataPairs[i+1])
+	}
+	if len(metadataPairs)%2 != 0 {
+		ev.Metadata.Fields[extraMetadataArgument] = structpb.NewStringValue(metadataPairs[len(metadataPairs)-1])
+	}
+	return sender.SendEvent(ctx, EventType(eventType), ev)
+}
+
+// EventReceivedBexpr is used for evaluating boolean expressions with go-bexpr.
+type EventReceivedBexpr struct {
+	EventType         string `bexpr:"event_type"`
+	Operation         string `bexpr:"operation"`
+	SourcePluginMount string `bexpr:"source_plugin_mount"`
+	DataPath          string `bexpr:"data_path"`
+	Namespace         string `bexpr:"namespace"`
+}
+
+// BexprDatum returns a copy of EventReceived formatted for use in evaluating go-bexpr boolean expressions.
+func (x *EventReceived) BexprDatum() any {
+	operation := ""
+	dataPath := ""
+
+	if x.Event != nil {
+		if x.Event.Metadata != nil {
+			operationValue := x.Event.Metadata.Fields[EventMetadataOperation]
+			if operationValue != nil {
+				operation = operationValue.GetStringValue()
+			}
+			dataPathValue := x.Event.Metadata.Fields[EventMetadataDataPath]
+			if dataPathValue != nil {
+				dataPath = dataPathValue.GetStringValue()
+			}
+		}
+	}
+
+	return &EventReceivedBexpr{
+		EventType: x.EventType,
+		Operation: operation,
+		// Use the generated nil-safe getter, since PluginInfo may be unset.
+		SourcePluginMount: x.PluginInfo.GetMountPath(),
+		DataPath:          dataPath,
+		Namespace:         x.Namespace,
+	}
+}
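A sketch of the metadata-pair convention in action, exercised through the MockEventSender defined in events_mock.go just below (editor's addition; assumes a test file in this package, and the event type string is an assumption): an odd trailing argument is preserved under extraMetadataArgument rather than dropped.

```go
func TestSendEventSketch(t *testing.T) {
	sender := NewMockEventSender()
	// Two complete pairs plus one dangling value.
	err := SendEvent(context.Background(), sender, "kv-v2/data-write",
		EventMetadataDataPath, "data/mysecret",
		EventMetadataOperation, "write",
		"dangling")
	if err != nil {
		t.Fatal(err)
	}
	got := sender.Events[0]
	if got.Type != EventType("kv-v2/data-write") {
		t.Fatalf("unexpected event type %q", got.Type)
	}
	if m := got.Event.Metadata.AsMap(); m[extraMetadataArgument] != "dangling" {
		t.Fatalf("expected dangling value under %q, got %v", extraMetadataArgument, m)
	}
}
```

diff --git a/sdk/logical/events_mock.go b/sdk/logical/events_mock.go
new file mode 100644
index 000000000000..72741163e98a
--- /dev/null
+++ b/sdk/logical/events_mock.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"sync"
+)
+
+// MockEventSender is a simple implementation of logical.EventSender that stores whatever events it receives,
+// meant to be used in testing. It is thread-safe.
+type MockEventSender struct {
+	sync.Mutex
+	Events  []MockEvent
+	Stopped bool
+}
+
+// MockEvent is a container for an event type + event pair.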
+type MockEvent struct {
+	Type  EventType
+	Event *EventData
+}
+
+// SendEvent implements the logical.EventSender interface.
+func (m *MockEventSender) SendEvent(_ context.Context, eventType EventType, event *EventData) error {
+	m.Lock()
+	defer m.Unlock()
+	if !m.Stopped {
+		m.Events = append(m.Events, MockEvent{Type: eventType, Event: event})
+	}
+	return nil
+}
+
+func (m *MockEventSender) Stop() {
+	m.Lock()
+	defer m.Unlock()
+	m.Stopped = true
+}
+
+var _ EventSender = (*MockEventSender)(nil)
+
+// NewMockEventSender returns a new MockEventSender ready to be used.
+func NewMockEventSender() *MockEventSender {
+	return &MockEventSender{}
+}
diff --git a/sdk/logical/events_test.go b/sdk/logical/events_test.go
new file mode 100644
index 000000000000..a018b0d312a9
--- /dev/null
+++ b/sdk/logical/events_test.go
@@ -0,0 +1,56 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type fakeSender struct {
+	captured *EventData
+}
+
+func (f *fakeSender) SendEvent(ctx context.Context, eventType EventType, event *EventData) error {
+	f.captured = event
+	return nil
+}
+
+// TestSendEventWithOddParametersAddsExtraMetadata tests that an extra parameter is added to the metadata
+// with a special key to note that it was extra.
+func TestSendEventWithOddParametersAddsExtraMetadata(t *testing.T) {
+	sender := &fakeSender{}
+	// 0 or 2 arguments are okay
+	err := SendEvent(context.Background(), sender, "foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m := sender.captured.Metadata.AsMap()
+	assert.NotContains(t, m, extraMetadataArgument)
+	err = SendEvent(context.Background(), sender, "foo", "bar", "baz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m = sender.captured.Metadata.AsMap()
+	assert.NotContains(t, m, extraMetadataArgument)
+
+	// 1 or 3 arguments should result in extraMetadataArgument appearing in the metadata
+	err = SendEvent(context.Background(), sender, "foo", "extra")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m = sender.captured.Metadata.AsMap()
+	assert.Contains(t, m, extraMetadataArgument)
+	assert.Equal(t, "extra", m[extraMetadataArgument])
+
+	err = SendEvent(context.Background(), sender, "foo", "bar", "baz", "extra")
+	if err != nil {
+		t.Fatal(err)
+	}
+	m = sender.captured.Metadata.AsMap()
+	assert.Contains(t, m, extraMetadataArgument)
+	assert.Equal(t, "extra", m[extraMetadataArgument])
+}
diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go
index 42c722afe193..5a4a90c87de2 100644
--- a/sdk/logical/identity.pb.go
+++ b/sdk/logical/identity.pb.go
@@ -1,7 +1,10 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/logical/identity.proto package logical @@ -318,6 +321,7 @@ type MFAMethodID struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } func (x *MFAMethodID) Reset() { @@ -373,6 +377,13 @@ func (x *MFAMethodID) GetUsesPasscode() bool { return false } +func (x *MFAMethodID) GetName() string { + if x != nil { + return x.Name + } + return "" +} + type MFAConstraintAny struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -537,34 +548,35 @@ var file_sdk_logical_identity_proto_rawDesc = []byte{ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x56, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x01, 0x22, 0x6a, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, - 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x4d, 0x46, 0x41, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x12, 0x26, 0x0a, - 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, - 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, - 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x54, - 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, - 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, - 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 
0x72, - 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, - 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, + 0x10, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, + 0x79, 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x49, 0x44, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, + 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, + 0x6d, 0x66, 0x61, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, + 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, + 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/sdk/logical/identity.proto b/sdk/logical/identity.proto index ea2e373b18c6..8bac5559011e 100644 --- a/sdk/logical/identity.proto +++ b/sdk/logical/identity.proto @@ -1,91 +1,95 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
 
-option go_package = "github.com/hashicorp/vault/sdk/logical";
+syntax = "proto3";
 
 package logical;
 
+option go_package = "github.com/hashicorp/vault/sdk/logical";
+
 message Entity {
-  // ID is the unique identifier for the entity
-  string ID = 1;
+  // ID is the unique identifier for the entity
+  string ID = 1;
+
+  // Name is the human-friendly unique identifier for the entity
+  string name = 2;
 
-  // Name is the human-friendly unique identifier for the entity
-  string name = 2;
+  // Aliases contains the alias mappings for the given entity
+  repeated Alias aliases = 3;
 
-  // Aliases contains thhe alias mappings for the given entity
-  repeated Alias aliases = 3;
+  // Metadata represents the custom data tied to this entity
+  map<string, string> metadata = 4;
 
-  // Metadata represents the custom data tied to this entity
-  map<string, string> metadata = 4;
-
-  // Disabled is true if the entity is disabled.
-  bool disabled = 5;
+  // Disabled is true if the entity is disabled.
+  bool disabled = 5;
 
-  // NamespaceID is the identifier of the namespace to which this entity
-  // belongs to.
-  string namespace_id = 6;
+  // NamespaceID is the identifier of the namespace to which this entity
+  // belongs.
+  string namespace_id = 6;
 }
 
 message Alias {
-  // MountType is the backend mount's type to which this identity belongs
-  string mount_type = 1;
-
-  // MountAccessor is the identifier of the mount entry to which this
-  // identity belongs
-  string mount_accessor = 2;
-
-  // Name is the identifier of this identity in its authentication source
-  string name = 3;
-
-  // Metadata represents the custom data tied to this alias. Fields added
-  // to it should have a low rate of change (or no change) because each
-  // change incurs a storage write, so quickly-changing fields can have
-  // a significant performance impact at scale. See the SDK's
-  // "aliasmetadata" package for a helper that eases and standardizes
-  // using this safely.
-  map<string, string> metadata = 4;
-
-  // ID is the unique identifier for the alias
-  string ID = 5;
-
-  // NamespaceID is the identifier of the namespace to which this alias
-  // belongs.
-  string namespace_id = 6;
-
-  // Custom Metadata represents the custom data tied to this alias
-  map<string, string> custom_metadata = 7;
-
-  // Local indicates if the alias only belongs to the cluster where it was
-  // created. If true, the alias will be stored in a location that are ignored
-  // by the performance replication subsystem.
-  bool local = 8;
+  // MountType is the backend mount's type to which this identity belongs
+  string mount_type = 1;
+
+  // MountAccessor is the identifier of the mount entry to which this
+  // identity belongs
+  string mount_accessor = 2;
+
+  // Name is the identifier of this identity in its authentication source
+  string name = 3;
+
+  // Metadata represents the custom data tied to this alias. Fields added
+  // to it should have a low rate of change (or no change) because each
+  // change incurs a storage write, so quickly-changing fields can have
+  // a significant performance impact at scale. See the SDK's
+  // "aliasmetadata" package for a helper that eases and standardizes
+  // using this safely.
+  map<string, string> metadata = 4;
+
+  // ID is the unique identifier for the alias
+  string ID = 5;
+
+  // NamespaceID is the identifier of the namespace to which this alias
+  // belongs.
+  string namespace_id = 6;
+
+  // Custom Metadata represents the custom data tied to this alias
+  map<string, string> custom_metadata = 7;
+
+  // Local indicates if the alias only belongs to the cluster where it was
+  // created. If true, the alias will be stored in a location that is ignored
+  // by the performance replication subsystem.
+  bool local = 8;
 }
 
 message Group {
-  // ID is the unique identifier for the group
-  string ID = 1;
+  // ID is the unique identifier for the group
+  string ID = 1;
 
-  // Name is the human-friendly unique identifier for the group
-  string name = 2;
+  // Name is the human-friendly unique identifier for the group
+  string name = 2;
 
-  // Metadata represents the custom data tied to this group
-  map<string, string> metadata = 3;
+  // Metadata represents the custom data tied to this group
+  map<string, string> metadata = 3;
 
-  // NamespaceID is the identifier of the namespace to which this group
-  // belongs to.
-  string namespace_id = 4;
+  // NamespaceID is the identifier of the namespace to which this group
+  // belongs.
+  string namespace_id = 4;
 }
 
 message MFAMethodID {
-  string type = 1;
-  string id = 2;
-  bool uses_passcode = 3;
+  string type = 1;
+  string id = 2;
+  bool uses_passcode = 3;
+  string name = 4;
 }
 
 message MFAConstraintAny {
-  repeated MFAMethodID any = 1;
+  repeated MFAMethodID any = 1;
 }
 
 message MFARequirement {
-  string mfa_request_id = 1;
-  map<string, MFAConstraintAny> mfa_constraints = 2;
+  string mfa_request_id = 1;
+  map<string, MFAConstraintAny> mfa_constraints = 2;
 }
diff --git a/sdk/logical/keyusage_enumer.go b/sdk/logical/keyusage_enumer.go
new file mode 100644
index 000000000000..83998c4a2a57
--- /dev/null
+++ b/sdk/logical/keyusage_enumer.go
@@ -0,0 +1,55 @@
+// Code generated by "enumer -type=KeyUsage -trimprefix=KeyUsage -transform=snake"; DO NOT EDIT.
+
+package logical
+
+import (
+	"fmt"
+)
+
+const _KeyUsageName = "encryptdecryptsignverifywrapunwrapgenerate_random"
+
+var _KeyUsageIndex = [...]uint8{0, 7, 14, 18, 24, 28, 34, 49}
+
+func (i KeyUsage) String() string {
+	i -= 1
+	if i < 0 || i >= KeyUsage(len(_KeyUsageIndex)-1) {
+		return fmt.Sprintf("KeyUsage(%d)", i+1)
+	}
+	return _KeyUsageName[_KeyUsageIndex[i]:_KeyUsageIndex[i+1]]
+}
+
+var _KeyUsageValues = []KeyUsage{1, 2, 3, 4, 5, 6, 7}
+
+var _KeyUsageNameToValueMap = map[string]KeyUsage{
+	_KeyUsageName[0:7]:   1,
+	_KeyUsageName[7:14]:  2,
+	_KeyUsageName[14:18]: 3,
+	_KeyUsageName[18:24]: 4,
+	_KeyUsageName[24:28]: 5,
+	_KeyUsageName[28:34]: 6,
+	_KeyUsageName[34:49]: 7,
+}
+
+// KeyUsageString retrieves an enum value from the enum constants string name.
+// Throws an error if the param is not part of the enum.
+func KeyUsageString(s string) (KeyUsage, error) {
+	if val, ok := _KeyUsageNameToValueMap[s]; ok {
+		return val, nil
+	}
+	return 0, fmt.Errorf("%s does not belong to KeyUsage values", s)
+}
+
+// KeyUsageValues returns all values of the enum
+func KeyUsageValues() []KeyUsage {
+	return _KeyUsageValues
+}
+
+// IsAKeyUsage returns "true" if the value is listed in the enum definition. "false" otherwise
+func (i KeyUsage) IsAKeyUsage() bool {
+	for _, v := range _KeyUsageValues {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/sdk/logical/lease.go b/sdk/logical/lease.go
index 97bbe4f6582b..e00fb52d64b1 100644
--- a/sdk/logical/lease.go
+++ b/sdk/logical/lease.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
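The enumer-generated code above gives KeyUsage a snake_case string form and a reverse lookup. A small runnable sketch of that round trip (editor's addition; assumes this package is imported as logical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// Reverse lookup from the generated name table.
	u, err := logical.KeyUsageString("generate_random")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())      // "generate_random"
	fmt.Println(u.IsAKeyUsage()) // true
}
```

diff --git a/sdk/logical/lease_test.go b/sdk/logical/lease_test.go
index 050b7db8e92b..aee2bbdbcb3b 100644
--- a/sdk/logical/lease_test.go
+++ b/sdk/logical/lease_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.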
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go
index 601148952f0f..be527ba9e9c7 100644
--- a/sdk/logical/logical.go
+++ b/sdk/logical/logical.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
@@ -105,6 +108,9 @@ type BackendConfig struct {
 	// Config is the opaque user configuration provided when mounting
 	Config map[string]string
+
+	// EventsSender provides a mechanism to interact with Vault events.
+	EventsSender EventSender
 }
 
 // Factory is the factory function to create a logical backend.
@@ -131,6 +137,43 @@ type Paths struct {
 	// should be seal wrapped with extra encryption. It is exact matching
 	// unless it ends with '/' in which case it will be treated as a prefix.
 	SealWrapStorage []string
+
+	// WriteForwardedStorage are storage paths that, when running on a PR
+	// Secondary cluster, cause a GRPC call up to the PR Primary cluster's
+	// active node to handle storage.Put(...) and storage.Delete(...) events.
+	// These paths MUST include a {{clusterId}} literal, which the write layer
+	// will resolve to this cluster's UUID ("replication set" identifier).
+	// storage.List(...) and storage.Get(...) operations occur from the
+	// locally replicated data set, but can use path template expansion to be
+	// identifier agnostic.
+	//
+	// These paths require careful consideration by developers before use. In
+	// particular, writes on secondary clusters will not appear (when a
+	// corresponding read is issued immediately after a write) until the
+	// replication from primary->secondary has occurred. This replication
+	// triggers an InvalidateKey(...) call on the secondary, which can be
+	// used to detect the write has finished syncing. However, this will
+	// likely occur after the request has finished, so it is important not
+	// to block on this occurring.
+	//
+	// On standby nodes, like all storage write operations, this will trigger
+	// an ErrReadOnly return.
+	WriteForwardedStorage []string
+
+	// Binary paths are those whose request bodies should not be assumed to
+	// be JSON encoded, and for which the backend will decode values for auditing
+	Binary []string
+
+	// Limited paths are storage paths that require special-cased request
+	// limiting.
+	//
+	// This was initially added to separate limiting of "write" requests
+	// (limits.WriteLimiter) from limiting for CPU-bound pki/issue requests
+	// (limits.SpecialPathLimiter). Other plugins might also choose to mark
+	// paths if they don't follow a typical resource usage pattern.
+	//
+	// For more details, consult limits/registry.go.
+	Limited []string
 }
 
 type Auditor interface {
@@ -138,11 +181,6 @@
 	AuditResponse(ctx context.Context, input *LogInput) error
 }
 
-// Externaler allows us to check if a backend is running externally (i.e., over GRPC)
-type Externaler interface {
-	IsExternal() bool
-}
-
 type PluginVersion struct {
 	Version string
 }
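A sketch of what declaring a write-forwarded storage path might look like in a backend's Paths configuration (editor's addition; the path prefix is hypothetical). The PBPWFClusterSentinel constant defined later in storage.go carries the required {{clusterId}} literal:

```go
package kvdemo

import "github.com/hashicorp/vault/sdk/logical"

// backendPathsSketch is hypothetical: it shows the shape of a Paths value
// declaring one write-forwarded storage prefix.
func backendPathsSketch() *logical.Paths {
	return &logical.Paths{
		// Writes under this prefix on a PR secondary are forwarded to the
		// primary's active node; the sentinel is resolved to this cluster's
		// replication set UUID.
		WriteForwardedStorage: []string{
			"cross-cluster/" + logical.PBPWFClusterSentinel + "/",
		},
	}
}
```

diff --git a/sdk/logical/logical_storage.go b/sdk/logical/logical_storage.go
index 16b85cd797e0..b4fbc2b72fdb 100644
--- a/sdk/logical/logical_storage.go
+++ b/sdk/logical/logical_storage.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.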
+// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/managed_key.go b/sdk/logical/managed_key.go index 6f642ad570fd..b7bfb2f13d78 100644 --- a/sdk/logical/managed_key.go +++ b/sdk/logical/managed_key.go @@ -1,12 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( "context" "crypto" - "crypto/cipher" "io" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" ) +//go:generate enumer -type=KeyUsage -trimprefix=KeyUsage -transform=snake type KeyUsage int const ( @@ -16,6 +21,7 @@ const ( KeyUsageVerify KeyUsageWrap KeyUsageUnwrap + KeyUsageGenerateRandom ) type ManagedKey interface { @@ -101,7 +107,8 @@ type ManagedSigningKey interface { type ManagedEncryptingKey interface { ManagedKey - GetAEAD(iv []byte) (cipher.AEAD, error) + Encrypt(ctx context.Context, plaintext []byte, options ...wrapping.Option) ([]byte, error) + Decrypt(ctx context.Context, ciphertext []byte, options ...wrapping.Option) ([]byte, error) } type ManagedMACKey interface { @@ -115,5 +122,5 @@ type ManagedKeyRandomSource interface { ManagedKey // GetRandomBytes returns a number (specified by the count parameter) of random bytes sourced from the target managed key. - GetRandomBytes(ctx context.Context, count int) ([]byte, error) + GetRandomBytes(count int) ([]byte, error) } diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go index f3a9ec52c37c..3c9fc96e6774 100644 --- a/sdk/logical/plugin.pb.go +++ b/sdk/logical/plugin.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/logical/plugin.proto package logical diff --git a/sdk/logical/plugin.proto b/sdk/logical/plugin.proto index f2df6c75d97c..5e19274ee2cb 100644 --- a/sdk/logical/plugin.proto +++ b/sdk/logical/plugin.proto @@ -1,16 +1,19 @@ -syntax = "proto3"; +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 -option go_package = "github.com/hashicorp/vault/sdk/logical"; +syntax = "proto3"; package logical; +option go_package = "github.com/hashicorp/vault/sdk/logical"; + message PluginEnvironment { - // VaultVersion is the version of the Vault server - string vault_version = 1; - - // VaultVersionPrerelease is the prerelease information of the Vault server - string vault_version_prerelease = 2; - - // VaultVersionMetadata is the version metadata of the Vault server - string vault_version_metadata = 3; + // VaultVersion is the version of the Vault server + string vault_version = 1; + + // VaultVersionPrerelease is the prerelease information of the Vault server + string vault_version_prerelease = 2; + + // VaultVersionMetadata is the version metadata of the Vault server + string vault_version_metadata = 3; } diff --git a/sdk/logical/request.go b/sdk/logical/request.go index d774fd176b4a..33bd850d4961 100644 --- a/sdk/logical/request.go +++ b/sdk/logical/request.go @@ -1,8 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
 	"context"
 	"fmt"
+	"io"
 	"net/http"
 	"strings"
 	"time"
@@ -47,12 +51,14 @@ func (r *RequestWrapInfo) SentinelKeys() []string {
 	}
 }
 
+//go:generate enumer -type=ClientTokenSource -trimprefix=ClientTokenFrom -transform=snake
 type ClientTokenSource uint32
 
 const (
 	NoClientToken ClientTokenSource = iota
 	ClientTokenFromVaultHeader
 	ClientTokenFromAuthzHeader
+	ClientTokenFromInternalAuth
 )
 
 type WALState struct {
@@ -153,6 +159,22 @@ type Request struct {
 	// backends can be tied to the mount it belongs to.
 	MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""`
 
+	// mountRunningVersion is used internally to propagate the semantic version
+	// of the mounted plugin as reported by its vault.MountEntry to audit logging
+	mountRunningVersion string
+
+	// mountRunningSha256 is used internally to propagate the encoded sha256
+	// of the mounted plugin as reported by its vault.MountEntry to audit logging
+	mountRunningSha256 string
+
+	// mountIsExternalPlugin is used internally to propagate whether
+	// the backend of the mounted plugin is running externally (i.e., over GRPC)
+	// to audit logging
+	mountIsExternalPlugin bool
+
+	// mountClass is used internally to propagate the mount class of the mounted plugin to audit logging
+	mountClass string
+
 	// WrapInfo contains requested response wrapping parameters
 	WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""`
 
@@ -173,6 +195,10 @@ type Request struct {
 	// accessible.
 	Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"`
 
+	// PathLimited indicates that the request path is marked for special-case
+	// request limiting.
+	PathLimited bool `json:"path_limited" structs:"path_limited" mapstructure:"path_limited"`
+
 	// MFACreds holds the parsed MFA information supplied over the API as part of
 	// X-Vault-MFA header
 	MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""`
@@ -224,15 +250,41 @@ type Request struct {
 	// InboundSSCToken is the token that arrives on an inbound request, supplied
 	// by the vault user.
 	InboundSSCToken string
+
+	// ForwardedFrom contains the host the request was forwarded from, when the
+	// request has been forwarded.
+	ForwardedFrom string `json:"forwarded_from,omitempty"`
+
+	// ChrootNamespace is the name of the chroot namespace for the listener that
+	// the request was made against.
+	ChrootNamespace string `json:"chroot_namespace,omitempty"`
+
+	// RequestLimiterDisabled indicates whether the Request Limiter was disabled
+	// for the request context.
+	RequestLimiterDisabled bool `json:"request_limiter_disabled,omitempty"`
 }
 
-// Clone returns a deep copy of the request by using copystructure
+// Clone returns a (nearly) deep copy of the request.
+// It also sets the unexported fields, which were previously only accessible
+// outside the package via receiver methods.
+// NOTE: Request.Connection is NOT deep-copied, due to issues with the results
+// of copystructure on serial numbers within the x509.Certificate objects.
 func (r *Request) Clone() (*Request, error) {
 	cpy, err := copystructure.Copy(r)
 	if err != nil {
 		return nil, err
 	}
-	return cpy.(*Request), nil
+
+	req := cpy.(*Request)
+
+	// Add the unexported values that were only retrievable via receivers.
+	// copystructure isn't able to do this, which is why we're doing it manually.
+ req.mountClass = r.MountClass() + req.mountRunningVersion = r.MountRunningVersion() + req.mountRunningSha256 = r.MountRunningSha256() + req.mountIsExternalPlugin = r.MountIsExternalPlugin() + // This needs to be overwritten as the internal connection state is not cloned properly + // mainly the big.Int serial numbers within the x509.Certificate objects get mangled. + req.Connection = r.Connection + + return req, nil } // Get returns a data field and guards for nil Data @@ -280,6 +332,38 @@ func (r *Request) SentinelKeys() []string { } } +func (r *Request) MountRunningVersion() string { + return r.mountRunningVersion +} + +func (r *Request) SetMountRunningVersion(mountRunningVersion string) { + r.mountRunningVersion = mountRunningVersion +} + +func (r *Request) MountRunningSha256() string { + return r.mountRunningSha256 +} + +func (r *Request) SetMountRunningSha256(mountRunningSha256 string) { + r.mountRunningSha256 = mountRunningSha256 +} + +func (r *Request) MountIsExternalPlugin() bool { + return r.mountIsExternalPlugin +} + +func (r *Request) SetMountIsExternalPlugin(mountIsExternalPlugin bool) { + r.mountIsExternalPlugin = mountIsExternalPlugin +} + +func (r *Request) MountClass() string { + return r.mountClass +} + +func (r *Request) SetMountClass(mountClass string) { + r.mountClass = mountClass +} + func (r *Request) LastRemoteWAL() uint64 { return r.lastRemoteWAL } @@ -366,6 +450,7 @@ const ( HelpOperation = "help" AliasLookaheadOperation = "alias-lookahead" ResolveRoleOperation = "resolve-role" + HeaderOperation = "header" // The operations below are called globally, the path is less relevant. RevokeOperation Operation = "revoke" @@ -392,3 +477,109 @@ type CtxKeyInFlightRequestID struct{} func (c CtxKeyInFlightRequestID) String() string { return "in-flight-request-ID" } + +type CtxKeyRequestRole struct{} + +func (c CtxKeyRequestRole) String() string { + return "request-role" +} + +// ctxKeyDisableReplicationStatusEndpoints is a custom type used as a key in +// context.Context to store the value `true` when the +// disable_replication_status_endpoints configuration parameter is set to true +// for the listener through which a request was received. +type ctxKeyDisableReplicationStatusEndpoints struct{} + +// String returns a string representation of the receiver type. +func (c ctxKeyDisableReplicationStatusEndpoints) String() string { + return "disable-replication-status-endpoints" +} + +// ContextDisableReplicationStatusEndpointsValue examines the provided +// context.Context for the disable replication status endpoints value and +// returns it as a bool value if it's found along with the ok return value set +// to true; otherwise the ok return value is false. +func ContextDisableReplicationStatusEndpointsValue(ctx context.Context) (value, ok bool) { + value, ok = ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}).(bool) + + return +} + +// CreateContextDisableReplicationStatusEndpoints creates a new context.Context +// based on the provided parent that also includes the provided value for the +// ctxKeyDisableReplicationStatusEndpoints key. +func CreateContextDisableReplicationStatusEndpoints(parent context.Context, value bool) context.Context { + return context.WithValue(parent, ctxKeyDisableReplicationStatusEndpoints{}, value) +} + +// CtxKeyOriginalRequestPath is a custom type used as a key in context.Context +// to store the original request path. +type ctxKeyOriginalRequestPath struct{} + +// String returns a string representation of the receiver type. 
+func (c ctxKeyOriginalRequestPath) String() string {
+	return "original_request_path"
+}
+
+// ContextOriginalRequestPathValue examines the provided context.Context for the
+// original request path value and returns it as a string value if it's found
+// along with the ok value set to true; otherwise the ok return value is false.
+func ContextOriginalRequestPathValue(ctx context.Context) (value string, ok bool) {
+	value, ok = ctx.Value(ctxKeyOriginalRequestPath{}).(string)
+
+	return
+}
+
+// CreateContextOriginalRequestPath creates a new context.Context based on the
+// provided parent that also includes the provided original request path value
+// for the ctxKeyOriginalRequestPath key.
+func CreateContextOriginalRequestPath(parent context.Context, value string) context.Context {
+	return context.WithValue(parent, ctxKeyOriginalRequestPath{}, value)
+}
+
+type ctxKeyOriginalBody struct{}
+
+func ContextOriginalBodyValue(ctx context.Context) (io.ReadCloser, bool) {
+	value, ok := ctx.Value(ctxKeyOriginalBody{}).(io.ReadCloser)
+	return value, ok
+}
+
+func CreateContextOriginalBody(parent context.Context, body io.ReadCloser) context.Context {
+	return context.WithValue(parent, ctxKeyOriginalBody{}, body)
+}
+
+type CtxKeyDisableRequestLimiter struct{}
+
+func (c CtxKeyDisableRequestLimiter) String() string {
+	return "disable_request_limiter"
+}
+
+// ctxKeyRedactionSettings is a custom type used as a key in context.Context to
+// store the redaction settings for the listener that received the
+// request.
+type ctxKeyRedactionSettings struct{}
+
+// String returns a string representation of the receiver type.
+func (c ctxKeyRedactionSettings) String() string {
+	return "redaction-settings"
+}
+
+// CtxRedactionSettingsValue examines the provided context.Context for the
+// redaction settings value and returns them as a tuple of bool values if they
+// are found along with the ok return value set to true; otherwise the ok return
+// value is false.
+func CtxRedactionSettingsValue(ctx context.Context) (redactVersion, redactAddresses, redactClusterName, ok bool) {
+	value, ok := ctx.Value(ctxKeyRedactionSettings{}).([]bool)
+	if !ok {
+		return false, false, false, false
+	}
+
+	return value[0], value[1], value[2], true
+}
+
+// CreateContextRedactionSettings creates a new context.Context based on the
+// provided parent that also includes the provided redaction settings values for
+// the ctxKeyRedactionSettings key.
+func CreateContextRedactionSettings(parent context.Context, redactVersion, redactAddresses, redactClusterName bool) context.Context {
+	return context.WithValue(parent, ctxKeyRedactionSettings{}, []bool{redactVersion, redactAddresses, redactClusterName})
+}
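A short round-trip sketch of the redaction-settings helpers above (editor's addition): pack three flags into a request context, then unpack them where the response is rendered.

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := logical.CreateContextRedactionSettings(context.Background(), true, false, true)
	// The values come back in the order they were packed.
	redactVersion, redactAddresses, redactClusterName, ok := logical.CtxRedactionSettingsValue(ctx)
	fmt.Println(redactVersion, redactAddresses, redactClusterName, ok) // true false true true
}
```

diff --git a/sdk/logical/request_test.go b/sdk/logical/request_test.go
new file mode 100644
index 000000000000..69663be4e416
--- /dev/null
+++ b/sdk/logical/request_test.go
@@ -0,0 +1,143 @@
+// Copyright (c) HashiCorp, Inc.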
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestContextDisableReplicationStatusEndpointsValue(t *testing.T) { + testcases := []struct { + name string + ctx context.Context + expectedValue bool + expectedOk bool + }{ + { + name: "without-value", + ctx: context.Background(), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-nil", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, nil), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-incompatible-value", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, "true"), + expectedValue: false, + expectedOk: false, + }, + { + name: "with-bool-true", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, true), + expectedValue: true, + expectedOk: true, + }, + { + name: "with-bool-false", + ctx: context.WithValue(context.Background(), ctxKeyDisableReplicationStatusEndpoints{}, false), + expectedValue: false, + expectedOk: true, + }, + } + + for _, testcase := range testcases { + value, ok := ContextDisableReplicationStatusEndpointsValue(testcase.ctx) + assert.Equal(t, testcase.expectedValue, value, testcase.name) + assert.Equal(t, testcase.expectedOk, ok, testcase.name) + } +} + +func TestCreateContextDisableReplicationStatusEndpoints(t *testing.T) { + ctx := CreateContextDisableReplicationStatusEndpoints(context.Background(), true) + + value := ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, bool(false), value) + assert.Equal(t, true, value.(bool)) + + ctx = CreateContextDisableReplicationStatusEndpoints(context.Background(), false) + + value = ctx.Value(ctxKeyDisableReplicationStatusEndpoints{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, bool(false), value) + assert.Equal(t, false, value.(bool)) +} + +func TestContextOriginalRequestPathValue(t *testing.T) { + testcases := []struct { + name string + ctx context.Context + expectedValue string + expectedOk bool + }{ + { + name: "without-value", + ctx: context.Background(), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-nil", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, nil), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-incompatible-value", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, 6666), + expectedValue: "", + expectedOk: false, + }, + { + name: "with-string-value", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, "test"), + expectedValue: "test", + expectedOk: true, + }, + { + name: "with-empty-string", + ctx: context.WithValue(context.Background(), ctxKeyOriginalRequestPath{}, ""), + expectedValue: "", + expectedOk: true, + }, + } + + for _, testcase := range testcases { + value, ok := ContextOriginalRequestPathValue(testcase.ctx) + assert.Equal(t, testcase.expectedValue, value, testcase.name) + assert.Equal(t, testcase.expectedOk, ok, testcase.name) + } +} + +func TestCreateContextOriginalRequestPath(t *testing.T) { + ctx := CreateContextOriginalRequestPath(context.Background(), "test") + + value := ctx.Value(ctxKeyOriginalRequestPath{}) + + assert.NotNil(t, ctx) + assert.NotNil(t, value) + assert.IsType(t, string(""), value) + assert.Equal(t, "test", value.(string)) + + ctx = 
CreateContextOriginalRequestPath(context.Background(), "")
+
+	value = ctx.Value(ctxKeyOriginalRequestPath{})
+
+	assert.NotNil(t, ctx)
+	assert.NotNil(t, value)
+	assert.IsType(t, string(""), value)
+	assert.Equal(t, "", value.(string))
+}
diff --git a/sdk/logical/response.go b/sdk/logical/response.go
index 0f8a2210ecab..721618c76c17 100644
--- a/sdk/logical/response.go
+++ b/sdk/logical/response.go
@@ -1,9 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
+	"bufio"
 	"encoding/json"
 	"errors"
 	"fmt"
+	"net"
 	"net/http"
 	"strconv"
 	"sync/atomic"
@@ -80,6 +85,10 @@ type Response struct {
 	// Headers will contain the http headers from the plugin that it wishes to
 	// have as part of the output
 	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
+
+	// MountType, if non-empty, provides some information about what kind
+	// of mount this secret came from.
+	MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type"`
 }
 
 // AddWarning adds a warning into the response's warning list
@@ -242,6 +251,13 @@ func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*Custom
 	}
 }
 
+func (w *StatusHeaderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	if h, ok := w.wrapped.(http.Hijacker); ok {
+		return h.Hijack()
+	}
+	return nil, nil, fmt.Errorf("could not hijack because wrapped connection is %T and it does not implement http.Hijacker", w.wrapped)
+}
+
 func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter {
 	return w.wrapped
 }
diff --git a/sdk/logical/response_util.go b/sdk/logical/response_util.go
index 4a9f61d563f6..2aebe3a43be4 100644
--- a/sdk/logical/response_util.go
+++ b/sdk/logical/response_util.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
@@ -17,7 +20,7 @@ import (
 func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
 	if err == nil && (resp == nil || !resp.IsError()) {
 		switch {
-		case req.Operation == ReadOperation:
+		case req.Operation == ReadOperation || req.Operation == HeaderOperation:
 			if resp == nil {
 				return http.StatusNotFound, nil
 			}
@@ -73,10 +76,21 @@ func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
 		var allErrors error
 		var codedErr *ReplicationCodedError
 		errwrap.Walk(err, func(inErr error) {
+			// The Walk function does not only visit leaf errors; it executes
+			// the callback on the entire error first. So, if the error is of
+			// type multierror.Error, we skip storing it here to avoid adding
+			// duplicate errors when walking down the leaf errors.
+			if _, ok := inErr.(*multierror.Error); ok {
+				return
+			}
 			newErr, ok := inErr.(*ReplicationCodedError)
 			if ok {
 				codedErr = newErr
 			} else {
+				// If the error is of type fmt.wrapError, which is typically
+				// created by calling fmt.Errorf("... %w", err), allErrors will
+				// contain duplicated error messages.
 				allErrors = multierror.Append(allErrors, inErr)
 			}
 		})
@@ -124,6 +138,8 @@
 			statusCode = http.StatusBadRequest
 		case errwrap.Contains(err, ErrInvalidCredentials.Error()):
 			statusCode = http.StatusBadRequest
+		case errors.Is(err, ErrNotFound):
+			statusCode = http.StatusNotFound
 		}
 	}
 
@@ -191,7 +207,7 @@
 	type ErrorAndDataResponse struct {
 		Errors []string    `json:"errors"`
-		Data   interface{} `json:"data""`
+		Data   interface{} `json:"data"`
 	}
 	resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)}
 	if err != nil {
diff --git a/sdk/logical/response_util_test.go b/sdk/logical/response_util_test.go
index 00d70a5c4a2e..eafaa2fc7610 100644
--- a/sdk/logical/response_util_test.go
+++ b/sdk/logical/response_util_test.go
@@ -1,3 +1,6 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
 package logical
 
 import (
@@ -39,6 +42,14 @@ func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) {
 			respErr:        nil,
 			expectedStatus: 404,
 		},
+		{
+			title: "Header not found",
+			req: &Request{
+				Operation: HeaderOperation,
+			},
+			respErr:        nil,
+			expectedStatus: 404,
+		},
 		{
 			title: "List with response and no keys",
 			req: &Request{
type Storage interface { List(context.Context, string) ([]string, error) @@ -89,6 +97,40 @@ func ScanView(ctx context.Context, view ClearableView, cb func(path string)) err return nil } +// AbortableScanView is used to scan all the keys in a view iteratively, +// but aborts the scan early if cb returns false +func AbortableScanView(ctx context.Context, view ClearableView, cb func(path string) (cont bool)) error { + frontier := []string{""} + for len(frontier) > 0 { + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // List the contents + contents, err := view.List(ctx, current) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err) + } + + // Handle the contents in the directory + for _, c := range contents { + // Exit if the context has been canceled + if ctx.Err() != nil { + return ctx.Err() + } + fullPath := current + c + if strings.HasSuffix(c, "/") { + frontier = append(frontier, fullPath) + } else { + if !cb(fullPath) { + return nil + } + } + } + } + return nil +} + // CollectKeys is used to collect all the keys in a view func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) { return CollectKeysWithPrefix(ctx, view, "") diff --git a/sdk/logical/storage_inmem.go b/sdk/logical/storage_inmem.go index 65368a070fe4..62ec58290a4b 100644 --- a/sdk/logical/storage_inmem.go +++ b/sdk/logical/storage_inmem.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/storage_inmem_test.go b/sdk/logical/storage_inmem_test.go index 8e0964fd4af8..2ed776b20c38 100644 --- a/sdk/logical/storage_inmem_test.go +++ b/sdk/logical/storage_inmem_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/storage_test.go b/sdk/logical/storage_test.go index 3b96b4dbef34..1d6014dd9769 100644 --- a/sdk/logical/storage_test.go +++ b/sdk/logical/storage_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/storage_view.go b/sdk/logical/storage_view.go index 2cd07715c2ae..df40dca4fc53 100644 --- a/sdk/logical/storage_view.go +++ b/sdk/logical/storage_view.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go index fc7f30a7ff0c..cecbc261e14e 100644 --- a/sdk/logical/system_view.go +++ b/sdk/logical/system_view.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( @@ -89,6 +92,14 @@ type SystemView interface { // GeneratePasswordFromPolicy generates a password from the policy referenced. // If the policy does not exist, this will return an error. GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) + + // ClusterID returns the replication ClusterID, for use with path-based + // write forwarding (WriteForwardedPaths). This value will be templated + // in for the {{clusterId}} sentinel. + ClusterID(ctx context.Context) (string, error) + + // GenerateIdentityToken returns an identity token for the requesting plugin. 
+ GenerateIdentityToken(ctx context.Context, req *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) } type PasswordPolicy interface { @@ -96,29 +107,51 @@ type PasswordPolicy interface { Generate(context.Context, io.Reader) (string, error) } +type WellKnownSystemView interface { + // RequestWellKnownRedirect registers a redirect from .well-known/src + // to dest, where dest is a sub-path of the mount. An error + // is returned if that source path is already taken + RequestWellKnownRedirect(ctx context.Context, src, dest string) error + + // DeregisterWellKnownRedirect unregisters a specific redirect. Returns + // true if that redirect source was found + DeregisterWellKnownRedirect(ctx context.Context, src string) bool +} + type ExtendedSystemView interface { + WellKnownSystemView + Auditor() Auditor ForwardGenericRequest(context.Context, *Request) (*Response, error) + + // APILockShouldBlockRequest returns whether a namespace for the requested + // mount is locked and should be blocked + APILockShouldBlockRequest() (bool, error) + + // GetPinnedPluginVersion returns the pinned version for the given plugin, if any. + GetPinnedPluginVersion(ctx context.Context, pluginType consts.PluginType, pluginName string) (*pluginutil.PinnedVersion, error) } type PasswordGenerator func() (password string, err error) type StaticSystemView struct { - DefaultLeaseTTLVal time.Duration - MaxLeaseTTLVal time.Duration - SudoPrivilegeVal bool - TaintedVal bool - CachingDisabledVal bool - Primary bool - EnableMlock bool - LocalMountVal bool - ReplicationStateVal consts.ReplicationState - EntityVal *Entity - GroupsVal []*Group - Features license.Features - PluginEnvironment *PluginEnvironment - PasswordPolicies map[string]PasswordGenerator - VersionString string + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + GroupsVal []*Group + Features license.Features + PluginEnvironment *PluginEnvironment + PasswordPolicies map[string]PasswordGenerator + VersionString string + ClusterUUID string + APILockShouldBlockRequestVal bool } type noopAuditor struct{} @@ -240,3 +273,15 @@ func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { delete(d.PasswordPolicies, name) return existed } + +func (d StaticSystemView) ClusterID(ctx context.Context) (string, error) { + return d.ClusterUUID, nil +} + +func (d StaticSystemView) GenerateIdentityToken(_ context.Context, _ *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + return nil, errors.New("GenerateIdentityToken is not implemented in StaticSystemView") +} + +func (d StaticSystemView) APILockShouldBlockRequest() (bool, error) { + return d.APILockShouldBlockRequestVal, nil +} diff --git a/sdk/logical/testing.go b/sdk/logical/testing.go index 8cb41e2e7c58..a173c7c5f7b2 100644 --- a/sdk/logical/testing.go +++ b/sdk/logical/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/token.go b/sdk/logical/token.go index ebebd4ad9ca7..12114548ee56 100644 --- a/sdk/logical/token.go +++ b/sdk/logical/token.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package logical import ( @@ -8,9 +11,10 @@ import ( "strings" "time" - sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-sockaddr" ) +//go:generate enumer -type=TokenType -trimprefix=TokenType -transform=kebab type TokenType uint8 const ( @@ -69,23 +73,6 @@ func (t *TokenType) UnmarshalJSON(b []byte) error { return nil } -func (t TokenType) String() string { - switch t { - case TokenTypeDefault: - return "default" - case TokenTypeService: - return "service" - case TokenTypeBatch: - return "batch" - case TokenTypeDefaultService: - return "default-service" - case TokenTypeDefaultBatch: - return "default-batch" - default: - panic("unreachable") - } -} - // TokenEntry is used to represent a given token type TokenEntry struct { Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` diff --git a/sdk/logical/token_test.go b/sdk/logical/token_test.go index e44c707a5165..641d688b9dd1 100644 --- a/sdk/logical/token_test.go +++ b/sdk/logical/token_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( diff --git a/sdk/logical/tokentype_enumer.go b/sdk/logical/tokentype_enumer.go new file mode 100644 index 000000000000..9b350a74d355 --- /dev/null +++ b/sdk/logical/tokentype_enumer.go @@ -0,0 +1,52 @@ +// Code generated by "enumer -type=TokenType -trimprefix=TokenType -transform=kebab"; DO NOT EDIT. + +package logical + +import ( + "fmt" +) + +const _TokenTypeName = "defaultservicebatchdefault-servicedefault-batch" + +var _TokenTypeIndex = [...]uint8{0, 7, 14, 19, 34, 47} + +func (i TokenType) String() string { + if i >= TokenType(len(_TokenTypeIndex)-1) { + return fmt.Sprintf("TokenType(%d)", i) + } + return _TokenTypeName[_TokenTypeIndex[i]:_TokenTypeIndex[i+1]] +} + +var _TokenTypeValues = []TokenType{0, 1, 2, 3, 4} + +var _TokenTypeNameToValueMap = map[string]TokenType{ + _TokenTypeName[0:7]: 0, + _TokenTypeName[7:14]: 1, + _TokenTypeName[14:19]: 2, + _TokenTypeName[19:34]: 3, + _TokenTypeName[34:47]: 4, +} + +// TokenTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func TokenTypeString(s string) (TokenType, error) { + if val, ok := _TokenTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to TokenType values", s) +} + +// TokenTypeValues returns all values of the enum +func TokenTypeValues() []TokenType { + return _TokenTypeValues +} + +// IsATokenType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i TokenType) IsATokenType() bool { + for _, v := range _TokenTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/sdk/logical/translate_response.go b/sdk/logical/translate_response.go index de5ea8fdbe21..ca832ebd691b 100644 --- a/sdk/logical/translate_response.go +++ b/sdk/logical/translate_response.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package logical import ( @@ -13,9 +16,10 @@ import ( // values we don't. 
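The generated enumer code above replaces the hand-written String method deleted from token.go. A quick sketch of the round-trip API it provides (a hedged illustration, assuming the TokenType constants declared in token.go; the names follow the -trimprefix=TokenType -transform=kebab flags):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/vault/sdk/logical"
    )

    func main() {
    	// String() returns the kebab-case names baked into _TokenTypeName.
    	fmt.Println(logical.TokenTypeDefaultBatch.String()) // default-batch

    	// TokenTypeString is the inverse lookup; unknown names return an error.
    	tt, err := logical.TokenTypeString("service")
    	fmt.Println(tt == logical.TokenTypeService, err) // true <nil>

    	// Unlike the deleted String method, out-of-range values no longer
    	// panic; they fall back to a numeric form.
    	fmt.Println(logical.TokenType(9).String()) // TokenType(9)
    }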
func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { httpResp := &HTTPResponse{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + MountType: input.MountType, } if input.Secret != nil { @@ -49,9 +53,10 @@ func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { logicalResp := &Response{ - Data: input.Data, - Warnings: input.Warnings, - Headers: input.Headers, + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + MountType: input.MountType, } if input.LeaseID != "" { @@ -96,6 +101,7 @@ type HTTPResponse struct { Warnings []string `json:"warnings"` Headers map[string][]string `json:"-"` Auth *HTTPAuth `json:"auth"` + MountType string `json:"mount_type"` } type HTTPAuth struct { diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go index fb3ce8121b97..c21789b7aa2f 100644 --- a/sdk/logical/version.pb.go +++ b/sdk/logical/version.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/logical/version.proto package logical diff --git a/sdk/logical/version.proto b/sdk/logical/version.proto index 345051ae9de9..704e212056c4 100644 --- a/sdk/logical/version.proto +++ b/sdk/logical/version.proto @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package logical; @@ -14,4 +17,4 @@ message VersionReply { service PluginVersion { // Version returns version information for the plugin. rpc Version(Empty) returns (VersionReply); -} \ No newline at end of file +} diff --git a/sdk/logical/version_grpc.pb.go b/sdk/logical/version_grpc.pb.go index a69e97059978..bdb3561449a6 100644 --- a/sdk/logical/version_grpc.pb.go +++ b/sdk/logical/version_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sdk/logical/version.proto package logical @@ -14,6 +21,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + PluginVersion_Version_FullMethodName = "/logical.PluginVersion/Version" +) + // PluginVersionClient is the client API for PluginVersion service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -32,7 +43,7 @@ func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { out := new(VersionReply) - err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) + err := c.cc.Invoke(ctx, PluginVersion_Version_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -78,7 +89,7 @@ func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/logical.PluginVersion/Version", + FullMethod: PluginVersion_Version_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go index af40f5385957..3816609e2bcd 100644 --- a/sdk/physical/cache.go +++ b/sdk/physical/cache.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -29,6 +32,16 @@ var cacheExceptionsPaths = []string{ "sys/expire/", "core/poison-pill", "core/raft/tls", + + // Add barrierSealConfigPath and recoverySealConfigPlaintextPath to the cache + // exceptions to avoid unseal errors. See VAULT-17227 + "core/seal-config", + "core/recovery-config", + + // We need to make sure the persisted license is read from storage + // so that changes to the autoloaded license on the active node + // are observed on the perfStandby nodes + "core/autoloaded-license", } // CacheRefreshContext returns a context with an added value denoting if the @@ -73,6 +86,7 @@ var ( _ ToggleablePurgemonster = (*TransactionalCache)(nil) _ Backend = (*Cache)(nil) _ Transactional = (*TransactionalCache)(nil) + _ TransactionalLimits = (*TransactionalCache)(nil) ) // NewCache returns a physical cache of the given size. @@ -258,3 +272,14 @@ func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) return nil } + +// TransactionLimits implements physical.TransactionalLimits +func (c *TransactionalCache) TransactionLimits() (int, int) { + if tl, ok := c.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} diff --git a/sdk/physical/cache_test.go b/sdk/physical/cache_test.go new file mode 100644 index 000000000000..7e9bf3232a04 --- /dev/null +++ b/sdk/physical/cache_test.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestTransactionalCache_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. 
+ wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + logger := hclog.NewNullLogger() + + be := NewTransactionalCache(tt.be, 1024, logger, nil) + + // Call the TransactionLimits method + maxEntries, maxBytes := be.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/encoding.go b/sdk/physical/encoding.go index dbde84cc6dc4..af581207f9cb 100644 --- a/sdk/physical/encoding.go +++ b/sdk/physical/encoding.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -95,6 +98,17 @@ func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []* return e.Transactional.Transaction(ctx, txns) } +// TransactionLimits implements physical.TransactionalLimits +func (e *TransactionalStorageEncoding) TransactionLimits() (int, int) { + if tl, ok := e.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} + func (e *StorageEncoding) Purge(ctx context.Context) { if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok { purgeable.Purge(ctx) diff --git a/sdk/physical/encoding_test.go b/sdk/physical/encoding_test.go new file mode 100644 index 000000000000..e4d9cceaa417 --- /dev/null +++ b/sdk/physical/encoding_test.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTransactionalStorageEncoding_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. + wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + be := NewStorageEncoding(tt.be).(TransactionalLimits) + + // Call the TransactionLimits method + maxEntries, maxBytes := be.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/entry.go b/sdk/physical/entry.go index 389fe6c81c14..1d907425dc08 100644 --- a/sdk/physical/entry.go +++ b/sdk/physical/entry.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( diff --git a/sdk/physical/error.go b/sdk/physical/error.go index b547e4e4288d..aa7418fd7893 100644 --- a/sdk/physical/error.go +++ b/sdk/physical/error.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -108,3 +111,14 @@ func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*Tx } return e.Transactional.Transaction(ctx, txns) } + +// TransactionLimits implements physical.TransactionalLimits +func (e *TransactionalErrorInjector) TransactionLimits() (int, int) { + if tl, ok := e.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} diff --git a/sdk/physical/error_test.go b/sdk/physical/error_test.go new file mode 100644 index 000000000000..779cd1bc1c98 --- /dev/null +++ b/sdk/physical/error_test.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestTransactionalErrorInjector_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. + wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + logger := hclog.NewNullLogger() + + injector := NewTransactionalErrorInjector(tt.be, 0, logger) + + maxEntries, maxBytes := injector.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/file/file.go b/sdk/physical/file/file.go index e5e64e6efa41..95f420c69c3f 100644 --- a/sdk/physical/file/file.go +++ b/sdk/physical/file/file.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package file import ( @@ -242,17 +245,21 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er // JSON encode the entry and write it fullPath := filepath.Join(path, key) - tempPath := fullPath + ".temp" - f, err := os.OpenFile( - tempPath, - os.O_CREATE|os.O_TRUNC|os.O_WRONLY, - 0o600) + f, err := os.CreateTemp(path, key) if err != nil { if f != nil { f.Close() } return err } + + if err = os.Chmod(f.Name(), 0o600); err != nil { + if f != nil { + f.Close() + } + return err + } + if f == nil { return errors.New("could not successfully get a file handle") } @@ -263,7 +270,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er }) f.Close() if encErr == nil { - err = os.Rename(tempPath, fullPath) + err = os.Rename(f.Name(), fullPath) if err != nil { return err } @@ -275,7 +282,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er // See if we ended up with a zero-byte file and if so delete it, might be a // case of disk being full but the file info is in metadata that is // reserved. 
- fi, err := os.Stat(tempPath) + fi, err := os.Stat(f.Name()) if err != nil { return encErr } @@ -283,7 +290,7 @@ func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) er return encErr } if fi.Size() == 0 { - os.Remove(tempPath) + os.Remove(f.Name()) } return encErr } diff --git a/sdk/physical/file/file_test.go b/sdk/physical/file/file_test.go index 724b8a012a66..7fc6398c8dff 100644 --- a/sdk/physical/file/file_test.go +++ b/sdk/physical/file/file_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package file import ( @@ -237,3 +240,54 @@ func TestFileBackend(t *testing.T) { physical.ExerciseBackend_ListPrefix(t, b) } + +func TestFileBackendCreateTempKey(t *testing.T) { + dir := t.TempDir() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewFileBackend(map[string]string{ + "path": dir, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + temp := &physical.Entry{Key: "example.temp", Value: []byte("tempfoo")} + err = b.Put(context.Background(), temp) + if err != nil { + t.Fatalf("err: %v", err) + } + + nonTemp := &physical.Entry{Key: "example", Value: []byte("foobar")} + err = b.Put(context.Background(), nonTemp) + if err != nil { + t.Fatalf("err: %v", err) + } + + vals, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + if len(vals) != 2 || vals[0] == vals[1] { + t.Fatalf("bad: %v", vals) + } + for _, val := range vals { + if val != "example.temp" && val != "example" { + t.Fatalf("bad val: %v", val) + } + } + out, err := b.Get(context.Background(), "example") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, nonTemp) { + t.Fatalf("bad: %v expected: %v", out, nonTemp) + } + out, err = b.Get(context.Background(), "example.temp") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, temp) { + t.Fatalf("bad: %v expected: %v", out, temp) + } +} diff --git a/sdk/physical/inmem/cache_test.go b/sdk/physical/inmem/cache_test.go index e6e6dabfe376..3014fc1768da 100644 --- a/sdk/physical/inmem/cache_test.go +++ b/sdk/physical/inmem/cache_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go index be16b4caa12f..5f237e0616d7 100644 --- a/sdk/physical/inmem/inmem.go +++ b/sdk/physical/inmem/inmem.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package inmem import ( @@ -9,20 +12,23 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/armon/go-radix" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/physical" + uberAtomic "go.uber.org/atomic" ) // Verify interfaces are satisfied var ( - _ physical.Backend = (*InmemBackend)(nil) - _ physical.HABackend = (*InmemHABackend)(nil) - _ physical.HABackend = (*TransactionalInmemHABackend)(nil) - _ physical.Lock = (*InmemLock)(nil) - _ physical.Transactional = (*TransactionalInmemBackend)(nil) - _ physical.Transactional = (*TransactionalInmemHABackend)(nil) + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) + _ physical.TransactionalLimits = (*TransactionalInmemBackend)(nil) ) var ( @@ -48,10 +54,21 @@ type InmemBackend struct { failGetInTxn *uint32 logOps bool maxValueSize int + writeLatency time.Duration } type TransactionalInmemBackend struct { InmemBackend + + // Using Uber atomic because our SemGrep rules don't like the old pointer + // trick we used above any more even though it's fine. The newer sync/atomic + // types are almost the same, but lack a way to initialize them cleanly in New* + // functions, so we are sticking with what SemGrep likes for now. + maxBatchEntries *uberAtomic.Int32 + maxBatchSize *uberAtomic.Int32 + + largestBatchLen *uberAtomic.Uint64 + largestBatchSize *uberAtomic.Uint64 } // NewInmem constructs a new in-memory backend @@ -106,9 +123,25 @@ func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical. logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", maxValueSize: maxValueSize, }, + + maxBatchEntries: uberAtomic.NewInt32(64), + maxBatchSize: uberAtomic.NewInt32(128 * 1024), + largestBatchLen: uberAtomic.NewUint64(0), + largestBatchSize: uberAtomic.NewUint64(0), }, nil } +// SetWriteLatency adds a sleep to each Put/Delete operation (and each op in a +// transaction for a TransactionalInmemBackend). It's not so much to simulate +// real disk latency as to make the Go runtime schedule things more like +// a real disk, where concurrent write operations are more likely to interleave +// as each one blocks on disk IO. Set to 0 to disable again (the default). +func (i *InmemBackend) SetWriteLatency(latency time.Duration) { + i.Lock() + defer i.Unlock() + i.writeLatency = latency +} + // Put is used to insert or update an entry func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { i.permitPool.Acquire() @@ -139,6 +172,9 @@ func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) e } i.root.Insert(entry.Key, entry.Value) + if i.writeLatency > 0 { + time.Sleep(i.writeLatency) + } return nil } @@ -225,6 +261,9 @@ func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { } i.root.Delete(key) + if i.writeLatency > 0 { + time.Sleep(i.writeLatency) + } return nil } @@ -300,11 +339,39 @@ func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*phy defer t.Unlock() failGetInTxn := atomic.LoadUint32(t.failGetInTxn) + size := uint64(0) for _, t := range txns { + // We use 2x key length to match the logic in WALBackend.persistWALs; + // presumably this is attempting to account for some amount of encoding + // overhead. 
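    // For example (illustrative arithmetic, not part of the change itself):
    // an entry whose Key is "foo/bar" (7 bytes) and whose Value is 10 bytes
    // counts as 2*7 + 10 = 24 bytes toward the running batch size below.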
+ size += uint64(2*len(t.Entry.Key) + len(t.Entry.Value)) if t.Operation == physical.GetOperation && failGetInTxn != 0 { return GetInTxnDisabledError } } + if size > t.largestBatchSize.Load() { + t.largestBatchSize.Store(size) + } + if len(txns) > int(t.largestBatchLen.Load()) { + t.largestBatchLen.Store(uint64(len(txns))) + } + return physical.GenericTransactionHandler(ctx, t, txns) } + +func (t *TransactionalInmemBackend) SetMaxBatchEntries(entries int) { + t.maxBatchEntries.Store(int32(entries)) +} + +func (t *TransactionalInmemBackend) SetMaxBatchSize(entries int) { + t.maxBatchSize.Store(int32(entries)) +} + +func (t *TransactionalInmemBackend) TransactionLimits() (int, int) { + return int(t.maxBatchEntries.Load()), int(t.maxBatchSize.Load()) +} + +func (t *TransactionalInmemBackend) BatchStats() (maxEntries uint64, maxSize uint64) { + return t.largestBatchLen.Load(), t.largestBatchSize.Load() +} diff --git a/sdk/physical/inmem/inmem_ha.go b/sdk/physical/inmem/inmem_ha.go index 64fcb3a66dce..1db26ca7461f 100644 --- a/sdk/physical/inmem/inmem_ha.go +++ b/sdk/physical/inmem/inmem_ha.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/inmem/inmem_ha_test.go b/sdk/physical/inmem/inmem_ha_test.go index 850d63a230be..bb427a385e99 100644 --- a/sdk/physical/inmem/inmem_ha_test.go +++ b/sdk/physical/inmem/inmem_ha_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/inmem/inmem_test.go b/sdk/physical/inmem/inmem_test.go index 678061326a21..56c029a43303 100644 --- a/sdk/physical/inmem/inmem_test.go +++ b/sdk/physical/inmem/inmem_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/inmem/physical_view_test.go b/sdk/physical/inmem/physical_view_test.go index ea4a3ce24f18..24b47d7ae7ec 100644 --- a/sdk/physical/inmem/physical_view_test.go +++ b/sdk/physical/inmem/physical_view_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/inmem/transactions_test.go b/sdk/physical/inmem/transactions_test.go index 7ed3d5949241..71a4829f9664 100644 --- a/sdk/physical/inmem/transactions_test.go +++ b/sdk/physical/inmem/transactions_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package inmem import ( diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go index 18b2c4c1451b..56d045e30264 100644 --- a/sdk/physical/latency.go +++ b/sdk/physical/latency.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -56,6 +59,9 @@ func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log } // NewTransactionalLatencyInjector creates a new transactional LatencyInjector. +// jitter is the maximum percentage by which the latency will vary. +// For example, if you specify latency = 50ms and jitter = 20, then for any +// given operation the latency will be 50ms +- 10ms (20% of 50ms), i.e. between 40ms and 60ms. 
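As a worked check of that jitter arithmetic (a hedged sketch; jitterBounds is a hypothetical helper, not part of this change):

    package main

    import (
    	"fmt"
    	"time"
    )

    // jitterBounds computes the latency window the comment above describes:
    // the configured latency plus or minus jitter percent of that latency.
    func jitterBounds(latency time.Duration, jitter int) (lo, hi time.Duration) {
    	delta := latency * time.Duration(jitter) / 100
    	return latency - delta, latency + delta
    }

    func main() {
    	lo, hi := jitterBounds(50*time.Millisecond, 20)
    	fmt.Println(lo, hi) // 40ms 60ms
    }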
func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector { return &TransactionalLatencyInjector{ LatencyInjector: NewLatencyInjector(b, latency, jitter, logger), @@ -111,3 +117,14 @@ func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []* l.addLatency() return l.Transactional.Transaction(ctx, txns) } + +// TransactionLimits implements physical.TransactionalLimits +func (l *TransactionalLatencyInjector) TransactionLimits() (int, int) { + if tl, ok := l.Transactional.(TransactionalLimits); ok { + return tl.TransactionLimits() + } + // We don't have any specific limits of our own so return zeros to signal that + // the caller should use whatever reasonable defaults it would if it used a + // non-TransactionalLimits backend. + return 0, 0 +} diff --git a/sdk/physical/latency_test.go b/sdk/physical/latency_test.go new file mode 100644 index 000000000000..2585a04e0f12 --- /dev/null +++ b/sdk/physical/latency_test.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestTransactionalLatencyInjector_TransactionLimits(t *testing.T) { + tc := []struct { + name string + be Backend + wantEntries int + wantSize int + }{ + { + name: "non-transactionlimits backend", + be: &TestTransactionalNonLimitBackend{}, + + // Should return zeros to let the implementor choose defaults. + wantEntries: 0, + wantSize: 0, + }, + { + name: "transactionlimits backend", + be: &TestTransactionalLimitBackend{ + MaxEntries: 123, + MaxSize: 345, + }, + + // Should return underlying limits + wantEntries: 123, + wantSize: 345, + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + logger := hclog.NewNullLogger() + + injector := NewTransactionalLatencyInjector(tt.be, 0, 0, logger) + + maxEntries, maxBytes := injector.TransactionLimits() + + require.Equal(t, tt.wantEntries, maxEntries) + require.Equal(t, tt.wantSize, maxBytes) + }) + } +} diff --git a/sdk/physical/physical.go b/sdk/physical/physical.go index 808abd50fcd8..8a6e4883df77 100644 --- a/sdk/physical/physical.go +++ b/sdk/physical/physical.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -57,6 +60,69 @@ type HABackend interface { HAEnabled() bool } +// FencingHABackend is an HABackend which provides the additional guarantee that +// each Lock it returns from LockWith is also a FencingLock. A FencingLock +// provides a mechanism to retrieve a fencing token that the backend can include +// with future writes to ensure that it is still the current lock +// holder at the time the write commits. Without this, timing might allow a lock +// holder not to notice that it's no longer the active node for long enough for it to +// write data to storage even while a new active node is writing, causing +// corruption. For the Consul backend, the fencing token is the session ID, which is +// submitted with a `check-session` operation on each write to ensure the write +// only completes if the session still holds the lock. For the Raft backend +// this isn't needed because our in-process Raft library is unable to write if +// it's not the leader anyway. +// +// If you implement this, Vault will call RegisterActiveNodeLock with the Lock +// instance returned by LockWith after it successfully locks it. 
This keeps the +// backend oblivious to the specific key we use for active node locks and allows +// potential usage of locks for other purposes in the future. +// +// Note that all implementations must support writing to storage before +// RegisterActiveNodeLock is called to support initialization of a new cluster. +// They must also skip fencing writes if the write's Context contains a special +// value. This is necessary to allow Vault to clear and re-initialize secondary +// clusters even though there is already an active node with a specific lock +// session, since we clear the cluster while Vault is sealed and clearing the +// data might remove the lock in some storages (e.g. Consul). As noted above, +// it's not generally safe to allow unfenced writes after a lock, so instead we +// special case just a few types of writes that only happen rarely while the +// cluster is sealed. See the IsUnfencedWrite helper function. +type FencingHABackend interface { + HABackend + + RegisterActiveNodeLock(l Lock) error +} + +// unfencedWriteContextKeyType is a special type to identify context values to +// disable fencing. It's a separate type per the best practice in the Context.Value +// docs to avoid collisions even if the key might match. +type unfencedWriteContextKeyType string + +const ( + // unfencedWriteContextKey is the context key through which we pass the option to bypass + // fencing to a FencingHABackend. Note that this is not an ideal use + // of context values and violates the "do not use it for optional arguments" + // guidance but has been agreed as a pragmatic option for this case rather + // than needing to specialize every physical.Backend to understand this + // option. + unfencedWriteContextKey unfencedWriteContextKeyType = "vault-disable-fencing" +) + +// UnfencedWriteCtx adds metadata to a ctx such that any writes performed +// directly on a FencingHABackend using that context will _not_ add a fencing +// token. +func UnfencedWriteCtx(ctx context.Context) context.Context { + return context.WithValue(ctx, unfencedWriteContextKey, true) +} + +// IsUnfencedWrite returns whether or not the context passed has the unfenced +// flag value set. +func IsUnfencedWrite(ctx context.Context) bool { + isUnfenced, ok := ctx.Value(unfencedWriteContextKey).(bool) + return ok && isUnfenced +} + // ToggleablePurgemonster is an interface for backends that can toggle on or // off special functionality and/or support purging. This is only used for the // cache, don't use it for other things. @@ -83,7 +149,7 @@ type Lock interface { // Unlock is used to release the lock Unlock() error - // Returns the value of the lock and if it is held + // Returns the value of the lock and if it is held by _any_ node Value() (bool, string, error) } diff --git a/sdk/physical/physical_access.go b/sdk/physical/physical_access.go index 7497313afca2..048ee8385667 100644 --- a/sdk/physical/physical_access.go +++ b/sdk/physical/physical_access.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( diff --git a/sdk/physical/physical_view.go b/sdk/physical/physical_view.go index 189ac93172a5..0369e13778a0 100644 --- a/sdk/physical/physical_view.go +++ b/sdk/physical/physical_view.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package physical import ( diff --git a/sdk/physical/testing.go b/sdk/physical/testing.go index 6e0ddfcc0eae..9b7d339284dd 100644 --- a/sdk/physical/testing.go +++ b/sdk/physical/testing.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -6,13 +9,16 @@ import ( "sort" "testing" "time" + + "github.com/stretchr/testify/require" ) func ExerciseBackend(t testing.TB, b Backend) { t.Helper() + ctx := context.Background() // Should be empty - keys, err := b.List(context.Background(), "") + keys, err := b.List(ctx, "") if err != nil { t.Fatalf("initial list failed: %v", err) } @@ -21,13 +27,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should work if it does not exist - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("idempotent delete: %v", err) } // Get should not fail, but be nil - out, err := b.Get(context.Background(), "foo") + out, err := b.Get(ctx, "foo") if err != nil { t.Fatalf("initial get failed: %v", err) } @@ -37,13 +43,13 @@ func ExerciseBackend(t testing.TB, b Backend) { // Make an entry e := &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("put failed: %v", err) } // Get should work - out, err = b.Get(context.Background(), "foo") + out, err = b.Get(ctx, "foo") if err != nil { t.Fatalf("get failed: %v", err) } @@ -52,7 +58,7 @@ func ExerciseBackend(t testing.TB, b Backend) { } // List should not be empty - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list failed: %v", err) } @@ -61,13 +67,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should work - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("delete: %v", err) } // Should be empty - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list after delete: %v", err) } @@ -76,7 +82,7 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Get should fail - out, err = b.Get(context.Background(), "foo") + out, err = b.Get(ctx, "foo") if err != nil { t.Fatalf("get after delete: %v", err) } @@ -86,25 +92,25 @@ func ExerciseBackend(t testing.TB, b Backend) { // Multiple Puts should work; GH-189 e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("multi put 1 failed: %v", err) } e = &Entry{Key: "foo", Value: []byte("test")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("multi put 2 failed: %v", err) } // Make a nested entry e = &Entry{Key: "foo/bar", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("nested put failed: %v", err) } // Get should work - out, err = b.Get(context.Background(), "foo/bar") + out, err = b.Get(ctx, "foo/bar") if err != nil { t.Fatalf("get failed: %v", err) } @@ -112,7 +118,7 @@ func ExerciseBackend(t testing.TB, b Backend) { t.Errorf("bad: %v expected: %v", out, e) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list multi failed: %v", err) } @@ -122,13 +128,13 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete with children should work - err = b.Delete(context.Background(), "foo") + err = b.Delete(ctx, "foo") if err != nil { t.Fatalf("delete after 
multi: %v", err) } // Get should return the child - out, err = b.Get(context.Background(), "foo/bar") + out, err = b.Get(ctx, "foo/bar") if err != nil { t.Fatalf("get after multi delete: %v", err) } @@ -138,17 +144,17 @@ func ExerciseBackend(t testing.TB, b Backend) { // Removal of nested secret should not leave artifacts e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } - err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") + err = b.Delete(ctx, "foo/nested1/nested2/nested3") if err != nil { t.Fatalf("failed to remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "foo/") + keys, err = b.List(ctx, "foo/") if err != nil { t.Fatalf("err: %v", err) } @@ -158,18 +164,18 @@ func ExerciseBackend(t testing.TB, b Backend) { // Make a second nested entry to test prefix removal e = &Entry{Key: "foo/zip", Value: []byte("zap")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("failed to create second nested: %v", err) } // Delete should not remove the prefix - err = b.Delete(context.Background(), "foo/bar") + err = b.Delete(ctx, "foo/bar") if err != nil { t.Fatalf("failed to delete nested prefix: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("list nested prefix: %v", err) } @@ -178,12 +184,12 @@ func ExerciseBackend(t testing.TB, b Backend) { } // Delete should remove the prefix - err = b.Delete(context.Background(), "foo/zip") + err = b.Delete(ctx, "foo/zip") if err != nil { t.Fatalf("failed to delete second prefix: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing after second delete failed: %v", err) } @@ -193,29 +199,29 @@ func ExerciseBackend(t testing.TB, b Backend) { // When the root path is empty, adding and removing deep nested values should not break listing e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} - err = b.Put(context.Background(), e) + err = b.Put(ctx, e) if err != nil { t.Fatalf("deep nest: %v", err) } - err = b.Delete(context.Background(), "foo/nested1/nested2/value2") + err = b.Delete(ctx, "foo/nested1/nested2/value2") if err != nil { t.Fatalf("failed to remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing of root failed after deletion: %v", err) } if len(keys) == 0 { t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) - keys, err = b.List(context.Background(), "foo/nested1") + keys, err = b.List(ctx, "foo/nested1") if err != nil { t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) } @@ -226,12 +232,12 @@ func ExerciseBackend(t testing.TB, b Backend) { } // cleanup left over listing bug test value - err = b.Delete(context.Background(), "foo/nested1/nested2/value1") + err = b.Delete(ctx, "foo/nested1/nested2/value1") if err != nil { t.Fatalf("failed to remove deep nest: %v", err) } - keys, err = b.List(context.Background(), "") + keys, err = b.List(ctx, "") if err != nil { t.Fatalf("listing of root failed after delete of deep nest: %v", err) } @@ -242,32 +248,33 @@ func ExerciseBackend(t 
testing.TB, b Backend) { func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { t.Helper() + ctx := context.Background() e1 := &Entry{Key: "foo", Value: []byte("test")} e2 := &Entry{Key: "foo/bar", Value: []byte("test")} e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} defer func() { - b.Delete(context.Background(), "foo") - b.Delete(context.Background(), "foo/bar") - b.Delete(context.Background(), "foo/bar/baz") + _ = b.Delete(ctx, "foo") + _ = b.Delete(ctx, "foo/bar") + _ = b.Delete(ctx, "foo/bar/baz") }() - err := b.Put(context.Background(), e1) + err := b.Put(ctx, e1) if err != nil { t.Fatalf("failed to put entry 1: %v", err) } - err = b.Put(context.Background(), e2) + err = b.Put(ctx, e2) if err != nil { t.Fatalf("failed to put entry 2: %v", err) } - err = b.Put(context.Background(), e3) + err = b.Put(ctx, e3) if err != nil { t.Fatalf("failed to put entry 3: %v", err) } // Scan the root - keys, err := b.List(context.Background(), "") + keys, err := b.List(ctx, "") if err != nil { t.Fatalf("list root: %v", err) } @@ -277,7 +284,7 @@ func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { } // Scan foo/ - keys, err = b.List(context.Background(), "foo/") + keys, err = b.List(ctx, "foo/") if err != nil { t.Fatalf("list level 1: %v", err) } @@ -287,7 +294,7 @@ func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { } // Scan foo/bar/ - keys, err = b.List(context.Background(), "foo/bar/") + keys, err = b.List(ctx, "foo/bar/") if err != nil { t.Fatalf("list level 2: %v", err) } @@ -327,12 +334,25 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { t.Errorf("expected value bar: %v", err) } + // If the backend supports fencing, check that we can register the lock + if fba, ok := b.(FencingHABackend); ok { + require.NoError(t, fba.RegisterActiveNodeLock(lock)) + } + // Second acquisition should fail lock2, err := b2.LockWith("foo", "baz") if err != nil { t.Fatalf("lock 2: %v", err) } + // Checking the lock from b2 should discover that the lock is held, since "held" + // implies only that there is _some_ leader, not that b2 is the leader (this was + // not clear before, so we make it explicit with this assertion). 
+ held2, val2, err := lock2.Value() + require.NoError(t, err) + require.Equal(t, "bar", val2) + require.True(t, held2) + // Cancel attempt in 50 msec stopCh := make(chan struct{}) time.AfterFunc(50*time.Millisecond, func() { @@ -360,6 +380,11 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { t.Errorf("should get leaderCh") } + // If the backend supports fencing, check that we can register the lock + if fba2, ok := b2.(FencingHABackend); ok { + require.NoError(t, fba2.RegisterActiveNodeLock(lock)) + } + // Check the value held, val, err = lock2.Value() if err != nil { @@ -378,6 +403,8 @@ func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { func ExerciseTransactionalBackend(t testing.TB, b Backend) { t.Helper() + ctx := context.Background() + tb, ok := b.(Transactional) if !ok { t.Fatal("Not a transactional backend") @@ -385,11 +412,11 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) { txns := SetupTestingTransactions(t, b) - if err := tb.Transaction(context.Background(), txns); err != nil { + if err := tb.Transaction(ctx, txns); err != nil { t.Fatal(err) } - keys, err := b.List(context.Background(), "") + keys, err := b.List(ctx, "") if err != nil { t.Fatal(err) } @@ -402,7 +429,7 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) { t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) } - entry, err := b.Get(context.Background(), "foo") + entry, err := b.Get(ctx, "foo") if err != nil { t.Fatal(err) } @@ -416,7 +443,7 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) { t.Fatal("updates did not apply correctly") } - entry, err = b.Get(context.Background(), "zip") + entry, err = b.Get(ctx, "zip") if err != nil { t.Fatal(err) } @@ -433,25 +460,27 @@ func ExerciseTransactionalBackend(t testing.TB, b Backend) { func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { t.Helper() + ctx := context.Background() + // Add a few keys so that we test rollback with deletion - if err := b.Put(context.Background(), &Entry{ + if err := b.Put(ctx, &Entry{ Key: "foo", Value: []byte("bar"), }); err != nil { t.Fatal(err) } - if err := b.Put(context.Background(), &Entry{ + if err := b.Put(ctx, &Entry{ Key: "zip", Value: []byte("zap"), }); err != nil { t.Fatal(err) } - if err := b.Put(context.Background(), &Entry{ + if err := b.Put(ctx, &Entry{ Key: "deleteme", }); err != nil { t.Fatal(err) } - if err := b.Put(context.Background(), &Entry{ + if err := b.Put(ctx, &Entry{ Key: "deleteme2", }); err != nil { t.Fatal(err) @@ -495,3 +524,43 @@ func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { return txns } + +// Several tests across packages have to test logic with a few variations of +// transactional backends. Here are some reusable implementations suitable for +// testing limits support; a usage sketch follows. 
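A hedged sketch of how such a middleware test can lean on these fakes, mirroring the table-driven tests added for the cache, encoder, and injectors above (wrapMiddleware is a stand-in for whichever wrapping layer is under test, not a real constructor):

    func TestMyMiddleware_TransactionLimits(t *testing.T) {
    	// Limits from a TransactionalLimits backend should pass through...
    	be := &TestTransactionalLimitBackend{MaxEntries: 123, MaxSize: 345}
    	mw := wrapMiddleware(be) // hypothetical; returns a TransactionalLimits
    	maxEntries, maxSize := mw.TransactionLimits()
    	require.Equal(t, 123, maxEntries)
    	require.Equal(t, 345, maxSize)

    	// ...while wrapping a plain Transactional backend should yield zeros,
    	// i.e. "use whatever defaults you would use anyway".
    	mw = wrapMiddleware(&TestTransactionalNonLimitBackend{})
    	maxEntries, maxSize = mw.TransactionLimits()
    	require.Zero(t, maxEntries)
    	require.Zero(t, maxSize)
    }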
+ +type TestTransactionalNonLimitBackend struct{} + +var _ Transactional = (*TestTransactionalNonLimitBackend)(nil) + +func (b *TestTransactionalNonLimitBackend) Put(ctx context.Context, entry *Entry) error { + return nil +} + +func (b *TestTransactionalNonLimitBackend) Get(ctx context.Context, key string) (*Entry, error) { + return nil, nil +} + +func (b *TestTransactionalNonLimitBackend) Delete(ctx context.Context, key string) error { + return nil +} + +func (b *TestTransactionalNonLimitBackend) List(ctx context.Context, prefix string) ([]string, error) { + return nil, nil +} + +func (b *TestTransactionalNonLimitBackend) Transaction(ctx context.Context, txns []*TxnEntry) error { + return nil +} + +type TestTransactionalLimitBackend struct { + TestTransactionalNonLimitBackend + + MaxEntries, MaxSize int +} + +var _ TransactionalLimits = (*TestTransactionalLimitBackend)(nil) + +func (b *TestTransactionalLimitBackend) TransactionLimits() (int, int) { + return b.MaxEntries, b.MaxSize +} diff --git a/sdk/physical/transactions.go b/sdk/physical/transactions.go index a943c6bd95ef..de91689ffc57 100644 --- a/sdk/physical/transactions.go +++ b/sdk/physical/transactions.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package physical import ( @@ -31,6 +34,35 @@ type TransactionalBackend interface { Transactional } +// TransactionalLimits SHOULD be implemented by all TransactionalBackend +// implementations. It is separate for backwards compatibility reasons since +// this is in a public SDK module. If a TransactionalBackend does not implement +// this, the historic default limits of 63 entries and 128kb (based on Consul's +// limits) are used by replication internals when encoding batches of +// transactions. +type TransactionalLimits interface { + TransactionalBackend + + // TransactionLimits must return the limits of how large each transaction may + // be. The limits returned indicate how many individual operation entries are + // supported in total and an overall size limit on the contents of each + // transaction if applicable. Vault will deduct any meta-operations it needs + // to add from the maxEntries given. maxSize will be compared against the sum + // of the key and value sizes for all operations in a transaction. The backend + // should provide a reasonable margin of safety for any overhead it may have + // while encoding; for example, Consul's encoded transaction in JSON must fit + // in the configured max transaction size so it must leave adequate room for + // JSON encoding overhead on top of the raw key and value sizes. + // + // If zero is returned for either value, the replication internals will use + // historic reasonable defaults. This allows middleware implementations such + // as cache layers to either pass through to the underlying backend if it + // implements this interface, or to return zeros to indicate that the + // implementer should apply whatever defaults it would use if the middleware + // were not present. + TransactionLimits() (maxEntries int, maxSize int) +} + type PseudoTransactional interface { // An internal function should do no locking or permit pool acquisition. // Depending on the backend and if it natively supports transactions, these diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go index 46e3710fdbac..2da1378eaa91 100644 --- a/sdk/plugin/backend.go +++ b/sdk/plugin/backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/grpc_backend.go b/sdk/plugin/grpc_backend.go index a65eeebeb432..f0114b946579 100644 --- a/sdk/plugin/grpc_backend.go +++ b/sdk/plugin/grpc_backend.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go index 91503f176e70..4e92ad13ec58 100644 --- a/sdk/plugin/grpc_backend_client.go +++ b/sdk/plugin/grpc_backend_client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -22,7 +25,7 @@ var ( ) // Validate backendGRPCPluginClient satisfies the logical.Backend interface -var _ logical.Backend = &backendGRPCPluginClient{} +var _ logical.Backend = (*backendGRPCPluginClient)(nil) // backendPluginClient implements logical.Backend and is the // go-plugin client. @@ -124,10 +127,11 @@ func (b *backendGRPCPluginClient) SpecialPaths() *logical.Paths { } return &logical.Paths{ - Root: reply.Paths.Root, - Unauthenticated: reply.Paths.Unauthenticated, - LocalStorage: reply.Paths.LocalStorage, - SealWrapStorage: reply.Paths.SealWrapStorage, + Root: reply.Paths.Root, + Unauthenticated: reply.Paths.Unauthenticated, + LocalStorage: reply.Paths.LocalStorage, + SealWrapStorage: reply.Paths.SealWrapStorage, + WriteForwardedStorage: reply.Paths.WriteForwardedStorage, } } @@ -179,17 +183,21 @@ func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) { defer close(quitCh) defer cancel() - b.client.Cleanup(ctx, &pb.Empty{}) - - // This will block until Setup has run the function to create a new server - // in b.server. If we stop here before it has a chance to actually start - // listening, when it starts listening it will immediately error out and - // exit, which is fine. Overall this ensures that we do not miss stopping - // the server if it ends up being created after Cleanup is called. - <-b.cleanupCh + // Only wait on graceful cleanup if we can establish communication with the + // plugin, otherwise b.cleanupCh may never get closed. + if _, err := b.client.Cleanup(ctx, &pb.Empty{}); status.Code(err) != codes.Unavailable { + // This will block until Setup has run the function to create a new server + // in b.server. If we stop here before it has a chance to actually start + // listening, when it starts listening it will immediately error out and + // exit, which is fine. Overall this ensures that we do not miss stopping + // the server if it ends up being created after Cleanup is called. + select { + case <-b.cleanupCh: + } + } server := b.server.Load() - if server != nil { - server.(*grpc.Server).GracefulStop() + if grpcServer, ok := server.(*grpc.Server); ok && grpcServer != nil { + grpcServer.GracefulStop() } } @@ -227,6 +235,10 @@ func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.Bac impl: sysViewImpl, } + events := &GRPCEventsServer{ + impl: config.EventsSender, + } + // Register the server in this closure. serverFunc := func(opts []grpc.ServerOption) *grpc.Server { opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) @@ -235,6 +247,7 @@ func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.Bac s := grpc.NewServer(opts...) 
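    // Register the brokered servers the plugin calls back into over this
    // connection: the system view, storage, and the events sender backing
    // logical.EventSender.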
pb.RegisterSystemViewServer(s, sysView) pb.RegisterStorageServer(s, storage) + pb.RegisterEventsServer(s, events) b.server.Store(s) close(b.cleanupCh) return s diff --git a/sdk/plugin/grpc_backend_server.go b/sdk/plugin/grpc_backend_server.go index 361f3171f673..cdc19b604c25 100644 --- a/sdk/plugin/grpc_backend_server.go +++ b/sdk/plugin/grpc_backend_server.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -6,8 +9,9 @@ import ( "fmt" "sync" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" + + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/pluginutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" @@ -92,13 +96,15 @@ func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) storage := newGRPCStorageClient(brokeredClient) sysView := newGRPCSystemView(brokeredClient) + events := newGRPCEventsClient(brokeredClient) config := &logical.BackendConfig{ - StorageView: storage, - Logger: b.logger, - System: sysView, - Config: args.Config, - BackendUUID: args.BackendUUID, + StorageView: storage, + Logger: b.logger, + System: sysView, + Config: args.Config, + BackendUUID: args.BackendUUID, + EventsSender: events, } // Call the underlying backend factory after shims have been created @@ -186,10 +192,13 @@ func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Emp return &pb.SpecialPathsReply{ Paths: &pb.Paths{ - Root: paths.Root, - Unauthenticated: paths.Unauthenticated, - LocalStorage: paths.LocalStorage, - SealWrapStorage: paths.SealWrapStorage, + Root: paths.Root, + Unauthenticated: paths.Unauthenticated, + LocalStorage: paths.LocalStorage, + SealWrapStorage: paths.SealWrapStorage, + WriteForwardedStorage: paths.WriteForwardedStorage, + Binary: paths.Binary, + Limited: paths.Limited, }, }, nil } diff --git a/sdk/plugin/grpc_backend_test.go b/sdk/plugin/grpc_backend_test.go index 2f665beb044a..880f09930feb 100644 --- a/sdk/plugin/grpc_backend_test.go +++ b/sdk/plugin/grpc_backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -174,7 +177,7 @@ func testGRPCBackend(t *testing.T) (logical.Backend, func()) { }), }, } - client, _ := gplugin.TestPluginGRPCConn(t, pluginMap) + client, _ := gplugin.TestPluginGRPCConn(t, false, pluginMap) cleanup := func() { client.Close() } diff --git a/sdk/plugin/grpc_events.go b/sdk/plugin/grpc_events.go new file mode 100644 index 000000000000..3a4d50cc9387 --- /dev/null +++ b/sdk/plugin/grpc_events.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
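The Setup shims above wire a logical.EventSender into plugin backends (GRPCEventsClient on the plugin side, GRPCEventsServer on the host side, whose definitions follow). As a hedged usage sketch, with an invented backend and event type, a backend holding the sender from logical.BackendConfig.EventsSender can emit events like this:

```
package mybackend

import (
	"context"

	"github.com/hashicorp/vault/sdk/logical"
)

type backend struct {
	// events is captured from logical.BackendConfig.EventsSender at setup
	// time; it may be nil on hosts that do not wire up events.
	events logical.EventSender
}

// notifyRotated emits a hypothetical custom event through the plugin's
// events connection; it is a no-op when no sender was provided.
func (b *backend) notifyRotated(ctx context.Context) error {
	if b.events == nil {
		return nil
	}
	return b.events.SendEvent(ctx, logical.EventType("example/key-rotated"), &logical.EventData{
		Id: "hypothetical-event-id",
	})
}
```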
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" +) + +func newGRPCEventsClient(conn *grpc.ClientConn) *GRPCEventsClient { + return &GRPCEventsClient{ + client: pb.NewEventsClient(conn), + } +} + +type GRPCEventsClient struct { + client pb.EventsClient +} + +var _ logical.EventSender = (*GRPCEventsClient)(nil) + +func (s *GRPCEventsClient) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { + _, err := s.client.SendEvent(ctx, &pb.SendEventRequest{ + EventType: string(eventType), + Event: event, + }) + return err +} + +type GRPCEventsServer struct { + pb.UnimplementedEventsServer + impl logical.EventSender +} + +func (s *GRPCEventsServer) SendEvent(ctx context.Context, req *pb.SendEventRequest) (*pb.Empty, error) { + if s.impl == nil { + return &pb.Empty{}, nil + } + + err := s.impl.SendEvent(ctx, logical.EventType(req.EventType), req.Event) + if err != nil { + return nil, err + } + return &pb.Empty{}, nil +} diff --git a/sdk/plugin/grpc_storage.go b/sdk/plugin/grpc_storage.go index 6a04b3a97687..5c2f0de3f4f0 100644 --- a/sdk/plugin/grpc_storage.go +++ b/sdk/plugin/grpc_storage.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/grpc_system.go b/sdk/plugin/grpc_system.go index f6cbe6001c1d..405d03163a19 100644 --- a/sdk/plugin/grpc_system.go +++ b/sdk/plugin/grpc_system.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -199,6 +202,30 @@ func (s *gRPCSystemViewClient) GeneratePasswordFromPolicy(ctx context.Context, p return resp.Password, nil } +func (s gRPCSystemViewClient) ClusterID(ctx context.Context) (string, error) { + reply, err := s.client.ClusterInfo(ctx, &pb.Empty{}) + if err != nil { + return "", err + } + + return reply.ClusterID, nil +} + +func (s *gRPCSystemViewClient) GenerateIdentityToken(ctx context.Context, req *pluginutil.IdentityTokenRequest) (*pluginutil.IdentityTokenResponse, error) { + resp, err := s.client.GenerateIdentityToken(ctx, &pb.GenerateIdentityTokenRequest{ + Audience: req.Audience, + TTL: int64(req.TTL.Seconds()), + }) + if err != nil { + return nil, err + } + + return &pluginutil.IdentityTokenResponse{ + Token: pluginutil.IdentityToken(resp.Token), + TTL: time.Duration(resp.TTL) * time.Second, + }, nil +} + type gRPCSystemViewServer struct { pb.UnimplementedSystemViewServer @@ -367,3 +394,38 @@ func (s *gRPCSystemViewServer) GeneratePasswordFromPolicy(ctx context.Context, r } return resp, nil } + +func (s *gRPCSystemViewServer) ClusterInfo(ctx context.Context, _ *pb.Empty) (*pb.ClusterInfoReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + + clusterId, err := s.impl.ClusterID(ctx) + if err != nil { + return &pb.ClusterInfoReply{}, status.Errorf(codes.Internal, "failed to fetch cluster id") + } + + return &pb.ClusterInfoReply{ + ClusterID: clusterId, + }, nil +} + +func (s *gRPCSystemViewServer) GenerateIdentityToken(ctx context.Context, req *pb.GenerateIdentityTokenRequest) (*pb.GenerateIdentityTokenResponse, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + + res, err := s.impl.GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{ + Audience: req.GetAudience(), + TTL: time.Duration(req.GetTTL()) * time.Second, + }) + if err != nil { + return 
&pb.GenerateIdentityTokenResponse{}, status.Errorf(codes.Internal, + "failed to generate plugin identity token") + } + + return &pb.GenerateIdentityTokenResponse{ + Token: res.Token.Token(), + TTL: int64(res.TTL.Seconds()), + }, nil +} diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go index 7a282608ab39..19a5ecbaaeea 100644 --- a/sdk/plugin/grpc_system_test.go +++ b/sdk/plugin/grpc_system_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/logger.go b/sdk/plugin/logger.go index ecf6ed01f158..1ef4694e9b8f 100644 --- a/sdk/plugin/logger.go +++ b/sdk/plugin/logger.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import hclog "github.com/hashicorp/go-hclog" diff --git a/sdk/plugin/logger_test.go b/sdk/plugin/logger_test.go index a2b8a80155cd..c47a70b1c579 100644 --- a/sdk/plugin/logger_test.go +++ b/sdk/plugin/logger_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/middleware.go b/sdk/plugin/middleware.go index 546584ccc736..4411c788297b 100644 --- a/sdk/plugin/middleware.go +++ b/sdk/plugin/middleware.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/plugin/mock/backend.go b/sdk/plugin/mock/backend.go index a75b639ef653..b34191b938a3 100644 --- a/sdk/plugin/mock/backend.go +++ b/sdk/plugin/mock/backend.go @@ -1,14 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( "context" + "fmt" "os" + "testing" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) -const MockPluginVersionEnv = "TESTING_MOCK_VAULT_PLUGIN_VERSION" +const ( + MockPluginVersionEnv = "TESTING_MOCK_VAULT_PLUGIN_VERSION" + MockPluginDefaultInternalValue = "bar" +) // New returns a new backend as an interface. This func // is only necessary for builtin backend plugins. @@ -50,6 +59,7 @@ func Backend() *backend { pathInternal(&b), pathSpecial(&b), pathRaw(&b), + pathEnv(&b), }, ), PathsSpecial: &logical.Paths{ @@ -61,7 +71,7 @@ func Backend() *backend { Invalidate: b.invalidate, BackendType: logical.TypeLogical, } - b.internal = "bar" + b.internal = MockPluginDefaultInternalValue b.RunningVersion = "v0.0.0+mock" if version := os.Getenv(MockPluginVersionEnv); version != "" { b.RunningVersion = version @@ -72,7 +82,7 @@ func Backend() *backend { type backend struct { *framework.Backend - // internal is used to test invalidate + // internal is used to test invalidate and reloads. internal string } @@ -82,3 +92,39 @@ func (b *backend) invalidate(ctx context.Context, key string) { b.internal = "" } } + +// WriteInternalValue is a helper to set an in-memory value in the plugin, +// allowing tests to later assert that the plugin either has or hasn't been +// restarted. +func WriteInternalValue(t *testing.T, client *api.Client, mountPath, value string) { + t.Helper() + resp, err := client.Logical().Write(fmt.Sprintf("%s/internal", mountPath), map[string]interface{}{ + "value": value, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp != nil { + t.Fatalf("bad: %v", resp) + } +} + +// ExpectInternalValue checks the internal in-memory value. 
+func ExpectInternalValue(t *testing.T, client *api.Client, mountPath, expected string) { + t.Helper() + expectInternalValue(t, client, mountPath, expected) +} + +func expectInternalValue(t *testing.T, client *api.Client, mountPath, expected string) { + t.Helper() + resp, err := client.Logical().Read(fmt.Sprintf("%s/internal", mountPath)) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("bad: response should not be nil") + } + if resp.Data["value"].(string) != expected { + t.Fatalf("expected %q but got %q", expected, resp.Data["value"].(string)) + } +} diff --git a/sdk/plugin/mock/backend_test.go b/sdk/plugin/mock/backend_test.go index 15860906d4a7..640eec11643d 100644 --- a/sdk/plugin/mock/backend_test.go +++ b/sdk/plugin/mock/backend_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( diff --git a/sdk/plugin/mock/path_env.go b/sdk/plugin/mock/path_env.go new file mode 100644 index 000000000000..18b4b71ccc32 --- /dev/null +++ b/sdk/plugin/mock/path_env.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + "os" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathEnv is used to interrogate plugin env vars. +func pathEnv(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "env/" + framework.GenericNameRegex("key"), + Fields: map[string]*framework.FieldSchema{ + "key": { + Type: framework.TypeString, + Required: true, + Description: "The name of the environment variable to read.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathEnvRead, + }, + } +} + +func (b *backend) pathEnvRead(_ context.Context, _ *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "key": os.Getenv(data.Get("key").(string)), + }, + }, nil +} diff --git a/sdk/plugin/mock/path_errors.go b/sdk/plugin/mock/path_errors.go index 05ef474a7eaf..9c765c67ad32 100644 --- a/sdk/plugin/mock/path_errors.go +++ b/sdk/plugin/mock/path_errors.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( @@ -33,7 +36,6 @@ func errorPaths(b *backend) []*framework.Path { "err_type": {Type: framework.TypeInt}, }, Callbacks: map[logical.Operation]framework.OperationFunc{ - logical.CreateOperation: b.pathErrorRPCRead, logical.UpdateOperation: b.pathErrorRPCRead, }, }, diff --git a/sdk/plugin/mock/path_internal.go b/sdk/plugin/mock/path_internal.go index 26ede270fac2..30c2926f5cfa 100644 --- a/sdk/plugin/mock/path_internal.go +++ b/sdk/plugin/mock/path_internal.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( diff --git a/sdk/plugin/mock/path_kv.go b/sdk/plugin/mock/path_kv.go index 1946b5762496..fd8080572cd4 100644 --- a/sdk/plugin/mock/path_kv.go +++ b/sdk/plugin/mock/path_kv.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( diff --git a/sdk/plugin/mock/path_raw.go b/sdk/plugin/mock/path_raw.go index 55cb7c937408..2a4b77fb731a 100644 --- a/sdk/plugin/mock/path_raw.go +++ b/sdk/plugin/mock/path_raw.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
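A hedged sketch of how the mock additions above compose in a test (the helper name, mount path, and sentinel value are invented): the internal value only survives as long as the plugin process, and env/<key> reads a variable from the plugin's environment.

```
package mock_test

import (
	"fmt"
	"testing"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/plugin/mock"
)

// assertPluginAlive is a hypothetical test helper. A plugin restart would
// reset the in-memory internal value, so a successful round-trip shows the
// same plugin process is still serving the mount.
func assertPluginAlive(t *testing.T, client *api.Client, mountPath string) {
	t.Helper()

	mock.WriteInternalValue(t, client, mountPath, "sentinel")
	mock.ExpectInternalValue(t, client, mountPath, "sentinel")

	// The env/<key> path reads from the plugin's environment; the value
	// comes back under the "key" field of the response data.
	resp, err := client.Logical().Read(fmt.Sprintf("%s/env/%s", mountPath, mock.MockPluginVersionEnv))
	if err != nil {
		t.Fatal(err)
	}
	if resp == nil {
		t.Fatal("expected a response from the env path")
	}
	t.Logf("plugin sees %s=%v", mock.MockPluginVersionEnv, resp.Data["key"])
}
```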
+// SPDX-License-Identifier: MPL-2.0 + package mock import ( diff --git a/sdk/plugin/mock/path_special.go b/sdk/plugin/mock/path_special.go index 22afa41c6dae..4223f91053dc 100644 --- a/sdk/plugin/mock/path_special.go +++ b/sdk/plugin/mock/path_special.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package mock import ( diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go index fd6971d8f1aa..1a171af39c6b 100644 --- a/sdk/plugin/pb/backend.pb.go +++ b/sdk/plugin/pb/backend.pb.go @@ -1,7 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.9 +// protoc-gen-go v1.34.1 +// protoc (unknown) // source: sdk/plugin/pb/backend.proto package pb @@ -199,6 +202,20 @@ type Paths struct { // should be seal wrapped with extra encryption. It is exact matching // unless it ends with '/' in which case it will be treated as a prefix. SealWrapStorage []string `protobuf:"bytes,4,rep,name=seal_wrap_storage,json=sealWrapStorage,proto3" json:"seal_wrap_storage,omitempty"` + // WriteForwardedStorage are storage paths that, when running on a PR + // Secondary cluster, cause a GRPC call up to the PR Primary cluster's + // active node to handle storage.Put(...) and storage.Delete(...) events. + // + // See extended note in /sdk/logical/logical.go. + WriteForwardedStorage []string `protobuf:"bytes,5,rep,name=write_forwarded_storage,json=writeForwardedStorage,proto3" json:"write_forwarded_storage,omitempty"` + // Binary are paths whose request bodies are binary, not JSON + // + // See note in /sdk/logical/logical.go. + Binary []string `protobuf:"bytes,6,rep,name=binary,proto3" json:"binary,omitempty"` + // Limited paths are storage paths that require special-case request limiting. + // + // See note in /sdk/logical/logical.go. 
+ Limited []string `protobuf:"bytes,7,rep,name=limited,proto3" json:"limited,omitempty"` } func (x *Paths) Reset() { @@ -261,6 +278,27 @@ func (x *Paths) GetSealWrapStorage() []string { return nil } +func (x *Paths) GetWriteForwardedStorage() []string { + if x != nil { + return x.WriteForwardedStorage + } + return nil +} + +func (x *Paths) GetBinary() []string { + if x != nil { + return x.Binary + } + return nil +} + +func (x *Paths) GetLimited() []string { + if x != nil { + return x.Limited + } + return nil +} + type Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -757,6 +795,7 @@ type TokenEntry struct { InternalMeta map[string]string `protobuf:"bytes,19,rep,name=internal_meta,json=internalMeta,proto3" json:"internal_meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` InlinePolicy string `protobuf:"bytes,20,opt,name=inline_policy,json=inlinePolicy,proto3" json:"inline_policy,omitempty"` NoIdentityPolicies bool `protobuf:"varint,21,opt,name=no_identity_policies,json=noIdentityPolicies,proto3" json:"no_identity_policies,omitempty"` + ExternalID string `protobuf:"bytes,22,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` } func (x *TokenEntry) Reset() { @@ -938,6 +977,13 @@ func (x *TokenEntry) GetNoIdentityPolicies() bool { return false } +func (x *TokenEntry) GetExternalID() string { + if x != nil { + return x.ExternalID + } + return "" +} + type LeaseOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1115,6 +1161,9 @@ type Response struct { // be used in the audit broker to ensure we are auditing only the allowed // headers. Headers map[string]*Header `protobuf:"bytes,7,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. 
+ MountType string `protobuf:"bytes,8,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` } func (x *Response) Reset() { @@ -1198,6 +1247,13 @@ func (x *Response) GetHeaders() map[string]*Header { return nil } +func (x *Response) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + type ResponseWrapInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3127,6 +3183,179 @@ func (x *GeneratePasswordFromPolicyReply) GetPassword() string { return "" } +type ClusterInfoReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + ClusterID string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Err string `protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *ClusterInfoReply) Reset() { + *x = ClusterInfoReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterInfoReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterInfoReply) ProtoMessage() {} + +func (x *ClusterInfoReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterInfoReply.ProtoReflect.Descriptor instead. +func (*ClusterInfoReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{46} +} + +func (x *ClusterInfoReply) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterInfoReply) GetClusterID() string { + if x != nil { + return x.ClusterID + } + return "" +} + +func (x *ClusterInfoReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type GenerateIdentityTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Audience string `protobuf:"bytes,1,opt,name=audience,proto3" json:"audience,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *GenerateIdentityTokenRequest) Reset() { + *x = GenerateIdentityTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateIdentityTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateIdentityTokenRequest) ProtoMessage() {} + +func (x *GenerateIdentityTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateIdentityTokenRequest.ProtoReflect.Descriptor instead. 
+func (*GenerateIdentityTokenRequest) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} +} + +func (x *GenerateIdentityTokenRequest) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +func (x *GenerateIdentityTokenRequest) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +type GenerateIdentityTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *GenerateIdentityTokenResponse) Reset() { + *x = GenerateIdentityTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateIdentityTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateIdentityTokenResponse) ProtoMessage() {} + +func (x *GenerateIdentityTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateIdentityTokenResponse.ProtoReflect.Descriptor instead. +func (*GenerateIdentityTokenResponse) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} +} + +func (x *GenerateIdentityTokenResponse) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *GenerateIdentityTokenResponse) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + type Connection struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3144,7 +3373,7 @@ type Connection struct { func (x *Connection) Reset() { *x = Connection{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3157,7 +3386,7 @@ func (x *Connection) String() string { func (*Connection) ProtoMessage() {} func (x *Connection) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3170,7 +3399,7 @@ func (x *Connection) ProtoReflect() protoreflect.Message { // Deprecated: Use Connection.ProtoReflect.Descriptor instead. 
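With the ClusterInfoReply and GenerateIdentityToken request/response messages above, the SystemView shims give plugins access to cluster metadata and workload identity tokens. A hedged usage sketch (the helper and audience are invented; as the shims show, TTLs cross the wire as whole seconds):

```
package mybackend

import (
	"context"
	"time"

	"github.com/hashicorp/vault/sdk/helper/pluginutil"
	"github.com/hashicorp/vault/sdk/logical"
)

// fetchIdentity asks the host for its cluster ID and a plugin identity
// token via the SystemView the backend was configured with.
func fetchIdentity(ctx context.Context, sys logical.SystemView) (string, *pluginutil.IdentityTokenResponse, error) {
	clusterID, err := sys.ClusterID(ctx)
	if err != nil {
		return "", nil, err
	}

	tok, err := sys.GenerateIdentityToken(ctx, &pluginutil.IdentityTokenRequest{
		Audience: "https://example.test", // hypothetical audience
		TTL:      time.Hour,              // truncated to seconds on the wire
	})
	if err != nil {
		return "", nil, err
	}
	return clusterID, tok, nil
}
```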
func (*Connection) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{46} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} } func (x *Connection) GetRemoteAddr() string { @@ -3216,7 +3445,7 @@ type ConnectionState struct { func (x *ConnectionState) Reset() { *x = ConnectionState{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3229,7 +3458,7 @@ func (x *ConnectionState) String() string { func (*ConnectionState) ProtoMessage() {} func (x *ConnectionState) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3242,7 +3471,7 @@ func (x *ConnectionState) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectionState.ProtoReflect.Descriptor instead. func (*ConnectionState) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{50} } func (x *ConnectionState) GetVersion() uint32 { @@ -3340,7 +3569,7 @@ type Certificate struct { func (x *Certificate) Reset() { *x = Certificate{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3353,7 +3582,7 @@ func (x *Certificate) String() string { func (*Certificate) ProtoMessage() {} func (x *Certificate) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3366,7 +3595,7 @@ func (x *Certificate) ProtoReflect() protoreflect.Message { // Deprecated: Use Certificate.ProtoReflect.Descriptor instead. func (*Certificate) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{51} } func (x *Certificate) GetAsn1Data() []byte { @@ -3387,7 +3616,7 @@ type CertificateChain struct { func (x *CertificateChain) Reset() { *x = CertificateChain{} if protoimpl.UnsafeEnabled { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3400,7 +3629,7 @@ func (x *CertificateChain) String() string { func (*CertificateChain) ProtoMessage() {} func (x *CertificateChain) ProtoReflect() protoreflect.Message { - mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3413,7 +3642,7 @@ func (x *CertificateChain) ProtoReflect() protoreflect.Message { // Deprecated: Use CertificateChain.ProtoReflect.Descriptor instead. 
func (*CertificateChain) Descriptor() ([]byte, []int) { - return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{52} } func (x *CertificateChain) GetCertificates() []*Certificate { @@ -3423,6 +3652,61 @@ func (x *CertificateChain) GetCertificates() []*Certificate { return nil } +type SendEventRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventType string `protobuf:"bytes,1,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + Event *logical.EventData `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *SendEventRequest) Reset() { + *x = SendEventRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendEventRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendEventRequest) ProtoMessage() {} + +func (x *SendEventRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendEventRequest.ProtoReflect.Descriptor instead. +func (*SendEventRequest) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{53} +} + +func (x *SendEventRequest) GetEventType() string { + if x != nil { + return x.EventType + } + return "" +} + +func (x *SendEventRequest) GetEvent() *logical.EventData { + if x != nil { + return x.Event + } + return nil +} + var File_sdk_plugin_pb_backend_proto protoreflect.FileDescriptor var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ @@ -3430,519 +3714,565 @@ var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, - 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x22, 0x20, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, 0x72, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, - 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, - 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, - 0x22, 0x96, 0x01, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 
0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, - 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, - 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, - 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, - 0x11, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, - 0x61, 0x70, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xbf, 0x06, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, - 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, - 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x32, 0x0a, - 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, - 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 
0x01, 0x28, 0x09, - 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, - 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x75, - 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x55, - 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, - 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, - 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x05, 0x0a, 0x04, - 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, - 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, - 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 
0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, - 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, - 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, - 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6e, - 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa9, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x0e, 
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, - 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, - 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x55, - 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, - 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, - 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0f, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, - 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 
0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, - 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, 0x6c, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x61, - 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x61, 0x78, 0x54, - 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0d, - 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x49, 0x64, 0x22, 0xc8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 
0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, - 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x31, - 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, - 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x74, 0x6f, 0x1a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x0a, 0x06, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x0a, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, 0x72, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, + 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, + 0x72, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0f, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 
0x65, 0x12, 0x36, 0x0a, 0x17, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x15, 0x77, 0x72, 0x69, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x65, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61, + 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x64, 0x22, 0xbf, 0x06, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, + 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, + 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, + 0x12, 0x32, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, + 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x0e, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, + 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, + 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, - 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, - 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, - 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, - 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x16, - 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, - 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, - 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x60, - 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, - 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, - 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, - 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, 0x65, 0x63, 0x69, - 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x05, - 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, - 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, 0x60, 0x0a, - 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 
0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, - 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x75, - 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, - 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, - 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x0c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, - 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, - 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x10, 0x53, 0x74, - 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x12, - 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, - 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, - 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, - 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, 0x0a, 0x12, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, + 0x05, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, + 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x6e, 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, + 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, + 0x0a, 0x11, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, + 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, + 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, + 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, + 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, + 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, + 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, + 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, + 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, + 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 
0x69, 0x6e, + 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, + 0x77, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, + 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x64, 0x22, 0xe7, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, + 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, + 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, + 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc8, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, + 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, - 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, 0x0a, 0x14, 0x43, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, - 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x4e, - 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, - 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, - 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x10, 0x0a, 0x03, - 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, 0x54, 0x22, 0x5c, - 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2d, 0x0a, 0x11, - 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, 0x0a, 0x0f, 0x4c, - 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, - 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 
0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, - 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, - 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, - 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x11, 0x70, + 0x4c, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, + 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, + 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x60, 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, + 0x65, 0x72, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x41, 0x72, 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1f, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, + 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, + 0x22, 0x60, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 
0x74, 0x65, + 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, + 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, + 0x41, 0x72, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, + 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, + 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, + 0x61, 0x70, 
0x22, 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, + 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, + 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, + 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, + 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x03, 0x54, 0x54, 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, + 0x0a, 0x14, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x22, 0x4e, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x41, 
0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, + 0x10, 0x0a, 0x03, 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, + 0x54, 0x22, 0x5c, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, + 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x2d, 0x0a, 0x11, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, + 0x0a, 0x0f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, + 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x45, 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 
0x0b, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, - 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, - 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, - 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, 0x6f, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, 0x67, 0x6f, - 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, - 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x11, - 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 
0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x10, 0x70, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, - 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, - 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x75, - 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x6c, 0x73, - 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, 0x44, 0x61, - 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, - 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x32, 0xa5, 0x03, 0x0a, 0x07, - 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, - 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, - 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, - 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, - 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, - 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 
0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x31, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, - 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, - 0x31, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, - 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xb1, 0x05, 0x0a, 0x0a, - 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, - 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, - 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, - 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, - 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 
0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, - 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, - 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, - 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, - 0x62, 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, - 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, - 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, - 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, - 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, - 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 
0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, - 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, - 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x66, 0x0a, 0x10, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x4c, 0x0a, 0x1c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x22, 0x47, 0x0a, 0x1d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, + 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, + 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x5f, 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, + 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x0e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, + 0x12, 0x42, 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, + 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, + 0x6c, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 
0x31, + 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, + 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x10, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x28, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x32, 0xa5, 0x03, 0x0a, 0x07, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, + 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, + 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x1d, + 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, + 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, + 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, + 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, + 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 
0x1a, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, + 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x62, + 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, + 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xbf, 0x06, 0x0a, 0x0a, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, + 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, + 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, + 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, + 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, + 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 
0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, 0x62, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, + 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, + 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, + 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x09, 0x2e, + 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x5c, + 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x62, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x36, 0x0a, 0x06, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x2f, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3957,7 +4287,7 @@ func file_sdk_plugin_pb_backend_proto_rawDescGZIP() []byte { return file_sdk_plugin_pb_backend_proto_rawDescData } -var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 56) +var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 60) var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ (*Empty)(nil), // 0: pb.Empty (*Header)(nil), // 1: pb.Header @@ -4005,41 +4335,46 @@ var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ (*PluginEnvReply)(nil), // 43: pb.PluginEnvReply (*GeneratePasswordFromPolicyRequest)(nil), // 44: pb.GeneratePasswordFromPolicyRequest (*GeneratePasswordFromPolicyReply)(nil), // 45: pb.GeneratePasswordFromPolicyReply - (*Connection)(nil), // 46: pb.Connection - (*ConnectionState)(nil), // 47: pb.ConnectionState - (*Certificate)(nil), // 48: pb.Certificate - (*CertificateChain)(nil), // 49: pb.CertificateChain - nil, // 50: pb.Request.HeadersEntry - nil, // 51: pb.Auth.MetadataEntry - nil, // 52: pb.TokenEntry.MetaEntry - nil, // 53: pb.TokenEntry.InternalMetaEntry - nil, // 54: pb.Response.HeadersEntry - nil, // 55: pb.SetupArgs.ConfigEntry - (*logical.Alias)(nil), // 56: logical.Alias - (*timestamppb.Timestamp)(nil), // 57: google.protobuf.Timestamp - (*logical.Entity)(nil), // 58: logical.Entity - (*logical.Group)(nil), // 59: logical.Group - (*logical.PluginEnvironment)(nil), // 60: logical.PluginEnvironment + (*ClusterInfoReply)(nil), // 46: pb.ClusterInfoReply + (*GenerateIdentityTokenRequest)(nil), // 47: pb.GenerateIdentityTokenRequest + (*GenerateIdentityTokenResponse)(nil), // 48: pb.GenerateIdentityTokenResponse + (*Connection)(nil), // 49: pb.Connection + (*ConnectionState)(nil), // 50: pb.ConnectionState + (*Certificate)(nil), // 51: pb.Certificate + (*CertificateChain)(nil), // 52: pb.CertificateChain + (*SendEventRequest)(nil), // 53: pb.SendEventRequest + nil, // 54: pb.Request.HeadersEntry + nil, // 55: pb.Auth.MetadataEntry + nil, // 56: pb.TokenEntry.MetaEntry + nil, // 57: pb.TokenEntry.InternalMetaEntry + nil, // 58: pb.Response.HeadersEntry + nil, // 59: pb.SetupArgs.ConfigEntry + (*logical.Alias)(nil), // 60: logical.Alias + (*timestamppb.Timestamp)(nil), // 61: google.protobuf.Timestamp + (*logical.Entity)(nil), // 62: logical.Entity + (*logical.Group)(nil), // 63: logical.Group + (*logical.PluginEnvironment)(nil), // 64: logical.PluginEnvironment + (*logical.EventData)(nil), // 65: logical.EventData } var file_sdk_plugin_pb_backend_proto_depIDxs = []int32{ 8, // 0: pb.Request.secret:type_name -> pb.Secret 5, // 1: pb.Request.auth:type_name -> pb.Auth - 50, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry + 54, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry 11, // 3: pb.Request.wrap_info:type_name -> pb.RequestWrapInfo - 46, // 4: pb.Request.connection:type_name -> pb.Connection + 49, // 4: pb.Request.connection:type_name -> pb.Connection 7, // 5: pb.Auth.lease_options:type_name -> pb.LeaseOptions - 51, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry - 56, // 7: pb.Auth.alias:type_name -> logical.Alias - 56, // 8: pb.Auth.group_aliases:type_name -> logical.Alias - 52, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry - 53, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry - 57, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp + 55, // 6: pb.Auth.metadata:type_name -> 
pb.Auth.MetadataEntry + 60, // 7: pb.Auth.alias:type_name -> logical.Alias + 60, // 8: pb.Auth.group_aliases:type_name -> logical.Alias + 56, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry + 57, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry + 61, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp 7, // 12: pb.Secret.lease_options:type_name -> pb.LeaseOptions 8, // 13: pb.Response.secret:type_name -> pb.Secret 5, // 14: pb.Response.auth:type_name -> pb.Auth 10, // 15: pb.Response.wrap_info:type_name -> pb.ResponseWrapInfo - 54, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry - 57, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp + 58, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry + 61, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp 4, // 18: pb.HandleRequestArgs.request:type_name -> pb.Request 9, // 19: pb.HandleRequestReply.response:type_name -> pb.Response 2, // 20: pb.HandleRequestReply.err:type_name -> pb.ProtoError @@ -4047,72 +4382,79 @@ var file_sdk_plugin_pb_backend_proto_depIDxs = []int32{ 3, // 22: pb.SpecialPathsReply.paths:type_name -> pb.Paths 4, // 23: pb.HandleExistenceCheckArgs.request:type_name -> pb.Request 2, // 24: pb.HandleExistenceCheckReply.err:type_name -> pb.ProtoError - 55, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry + 59, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry 23, // 26: pb.StorageGetReply.entry:type_name -> pb.StorageEntry 23, // 27: pb.StoragePutArgs.entry:type_name -> pb.StorageEntry 10, // 28: pb.ResponseWrapDataReply.wrap_info:type_name -> pb.ResponseWrapInfo - 58, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity - 59, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group - 60, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment - 47, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState - 49, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain - 49, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain - 48, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate - 1, // 36: pb.Request.HeadersEntry.value:type_name -> pb.Header - 1, // 37: pb.Response.HeadersEntry.value:type_name -> pb.Header - 12, // 38: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs - 0, // 39: pb.Backend.SpecialPaths:input_type -> pb.Empty - 17, // 40: pb.Backend.HandleExistenceCheck:input_type -> pb.HandleExistenceCheckArgs - 0, // 41: pb.Backend.Cleanup:input_type -> pb.Empty - 22, // 42: pb.Backend.InvalidateKey:input_type -> pb.InvalidateKeyArgs - 19, // 43: pb.Backend.Setup:input_type -> pb.SetupArgs - 14, // 44: pb.Backend.Initialize:input_type -> pb.InitializeArgs - 0, // 45: pb.Backend.Type:input_type -> pb.Empty - 24, // 46: pb.Storage.List:input_type -> pb.StorageListArgs - 26, // 47: pb.Storage.Get:input_type -> pb.StorageGetArgs - 28, // 48: pb.Storage.Put:input_type -> pb.StoragePutArgs - 30, // 49: pb.Storage.Delete:input_type -> pb.StorageDeleteArgs - 0, // 50: pb.SystemView.DefaultLeaseTTL:input_type -> pb.Empty - 0, // 51: pb.SystemView.MaxLeaseTTL:input_type -> pb.Empty - 0, // 52: pb.SystemView.Tainted:input_type -> pb.Empty - 0, // 53: pb.SystemView.CachingDisabled:input_type -> pb.Empty - 0, // 54: pb.SystemView.ReplicationState:input_type -> pb.Empty - 36, // 55: pb.SystemView.ResponseWrapData:input_type -> 
pb.ResponseWrapDataArgs - 0, // 56: pb.SystemView.MlockEnabled:input_type -> pb.Empty - 0, // 57: pb.SystemView.LocalMount:input_type -> pb.Empty - 40, // 58: pb.SystemView.EntityInfo:input_type -> pb.EntityInfoArgs - 0, // 59: pb.SystemView.PluginEnv:input_type -> pb.Empty - 40, // 60: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs - 44, // 61: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest - 13, // 62: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply - 16, // 63: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply - 18, // 64: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply - 0, // 65: pb.Backend.Cleanup:output_type -> pb.Empty - 0, // 66: pb.Backend.InvalidateKey:output_type -> pb.Empty - 20, // 67: pb.Backend.Setup:output_type -> pb.SetupReply - 15, // 68: pb.Backend.Initialize:output_type -> pb.InitializeReply - 21, // 69: pb.Backend.Type:output_type -> pb.TypeReply - 25, // 70: pb.Storage.List:output_type -> pb.StorageListReply - 27, // 71: pb.Storage.Get:output_type -> pb.StorageGetReply - 29, // 72: pb.Storage.Put:output_type -> pb.StoragePutReply - 31, // 73: pb.Storage.Delete:output_type -> pb.StorageDeleteReply - 32, // 74: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply - 32, // 75: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply - 33, // 76: pb.SystemView.Tainted:output_type -> pb.TaintedReply - 34, // 77: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply - 35, // 78: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply - 37, // 79: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply - 38, // 80: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply - 39, // 81: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply - 41, // 82: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply - 43, // 83: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply - 42, // 84: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply - 45, // 85: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply - 62, // [62:86] is the sub-list for method output_type - 38, // [38:62] is the sub-list for method input_type - 38, // [38:38] is the sub-list for extension type_name - 38, // [38:38] is the sub-list for extension extendee - 0, // [0:38] is the sub-list for field type_name + 62, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity + 63, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group + 64, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment + 50, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState + 52, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain + 52, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain + 51, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate + 65, // 36: pb.SendEventRequest.event:type_name -> logical.EventData + 1, // 37: pb.Request.HeadersEntry.value:type_name -> pb.Header + 1, // 38: pb.Response.HeadersEntry.value:type_name -> pb.Header + 12, // 39: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs + 0, // 40: pb.Backend.SpecialPaths:input_type -> pb.Empty + 17, // 41: pb.Backend.HandleExistenceCheck:input_type -> pb.HandleExistenceCheckArgs + 0, // 42: pb.Backend.Cleanup:input_type -> pb.Empty + 22, // 43: pb.Backend.InvalidateKey:input_type -> pb.InvalidateKeyArgs + 19, // 44: 
pb.Backend.Setup:input_type -> pb.SetupArgs + 14, // 45: pb.Backend.Initialize:input_type -> pb.InitializeArgs + 0, // 46: pb.Backend.Type:input_type -> pb.Empty + 24, // 47: pb.Storage.List:input_type -> pb.StorageListArgs + 26, // 48: pb.Storage.Get:input_type -> pb.StorageGetArgs + 28, // 49: pb.Storage.Put:input_type -> pb.StoragePutArgs + 30, // 50: pb.Storage.Delete:input_type -> pb.StorageDeleteArgs + 0, // 51: pb.SystemView.DefaultLeaseTTL:input_type -> pb.Empty + 0, // 52: pb.SystemView.MaxLeaseTTL:input_type -> pb.Empty + 0, // 53: pb.SystemView.Tainted:input_type -> pb.Empty + 0, // 54: pb.SystemView.CachingDisabled:input_type -> pb.Empty + 0, // 55: pb.SystemView.ReplicationState:input_type -> pb.Empty + 36, // 56: pb.SystemView.ResponseWrapData:input_type -> pb.ResponseWrapDataArgs + 0, // 57: pb.SystemView.MlockEnabled:input_type -> pb.Empty + 0, // 58: pb.SystemView.LocalMount:input_type -> pb.Empty + 40, // 59: pb.SystemView.EntityInfo:input_type -> pb.EntityInfoArgs + 0, // 60: pb.SystemView.PluginEnv:input_type -> pb.Empty + 40, // 61: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs + 44, // 62: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest + 0, // 63: pb.SystemView.ClusterInfo:input_type -> pb.Empty + 47, // 64: pb.SystemView.GenerateIdentityToken:input_type -> pb.GenerateIdentityTokenRequest + 53, // 65: pb.Events.SendEvent:input_type -> pb.SendEventRequest + 13, // 66: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply + 16, // 67: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply + 18, // 68: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply + 0, // 69: pb.Backend.Cleanup:output_type -> pb.Empty + 0, // 70: pb.Backend.InvalidateKey:output_type -> pb.Empty + 20, // 71: pb.Backend.Setup:output_type -> pb.SetupReply + 15, // 72: pb.Backend.Initialize:output_type -> pb.InitializeReply + 21, // 73: pb.Backend.Type:output_type -> pb.TypeReply + 25, // 74: pb.Storage.List:output_type -> pb.StorageListReply + 27, // 75: pb.Storage.Get:output_type -> pb.StorageGetReply + 29, // 76: pb.Storage.Put:output_type -> pb.StoragePutReply + 31, // 77: pb.Storage.Delete:output_type -> pb.StorageDeleteReply + 32, // 78: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply + 32, // 79: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply + 33, // 80: pb.SystemView.Tainted:output_type -> pb.TaintedReply + 34, // 81: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply + 35, // 82: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply + 37, // 83: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply + 38, // 84: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply + 39, // 85: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply + 41, // 86: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply + 43, // 87: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply + 42, // 88: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply + 45, // 89: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply + 46, // 90: pb.SystemView.ClusterInfo:output_type -> pb.ClusterInfoReply + 48, // 91: pb.SystemView.GenerateIdentityToken:output_type -> pb.GenerateIdentityTokenResponse + 0, // 92: pb.Events.SendEvent:output_type -> pb.Empty + 66, // [66:93] is the sub-list for method output_type + 39, // [39:66] is the sub-list for method input_type + 39, // [39:39] is the sub-list 
for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name } func init() { file_sdk_plugin_pb_backend_proto_init() } @@ -4674,7 +5016,7 @@ func file_sdk_plugin_pb_backend_proto_init() { } } file_sdk_plugin_pb_backend_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Connection); i { + switch v := v.(*ClusterInfoReply); i { case 0: return &v.state case 1: @@ -4686,7 +5028,7 @@ func file_sdk_plugin_pb_backend_proto_init() { } } file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnectionState); i { + switch v := v.(*GenerateIdentityTokenRequest); i { case 0: return &v.state case 1: @@ -4698,7 +5040,7 @@ func file_sdk_plugin_pb_backend_proto_init() { } } file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Certificate); i { + switch v := v.(*GenerateIdentityTokenResponse); i { case 0: return &v.state case 1: @@ -4710,6 +5052,42 @@ func file_sdk_plugin_pb_backend_proto_init() { } } file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Connection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CertificateChain); i { case 0: return &v.state @@ -4721,6 +5099,18 @@ func file_sdk_plugin_pb_backend_proto_init() { return nil } } + file_sdk_plugin_pb_backend_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendEventRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -4728,9 +5118,9 @@ func file_sdk_plugin_pb_backend_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sdk_plugin_pb_backend_proto_rawDesc, NumEnums: 0, - NumMessages: 56, + NumMessages: 60, NumExtensions: 0, - NumServices: 3, + NumServices: 4, }, GoTypes: file_sdk_plugin_pb_backend_proto_goTypes, DependencyIndexes: file_sdk_plugin_pb_backend_proto_depIDxs, diff --git a/sdk/plugin/pb/backend.proto b/sdk/plugin/pb/backend.proto index 19a56be639e4..cf0f47d8dbe7 100644 --- a/sdk/plugin/pb/backend.proto +++ b/sdk/plugin/pb/backend.proto @@ -1,659 +1,714 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + syntax = "proto3"; package pb; -option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; - import "google/protobuf/timestamp.proto"; +import "sdk/logical/event.proto"; import "sdk/logical/identity.proto"; import "sdk/logical/plugin.proto"; +option go_package = "github.com/hashicorp/vault/sdk/plugin/pb"; + message Empty {} message Header { - repeated string header = 1; + repeated string header = 1; } message ProtoError { - // Error type can be one of: - // ErrTypeUnknown uint32 = iota - // ErrTypeUserError - // ErrTypeInternalError - // ErrTypeCodedError - // ErrTypeStatusBadRequest - // ErrTypeUnsupportedOperation - // ErrTypeUnsupportedPath - // ErrTypeInvalidRequest - // ErrTypePermissionDenied - // ErrTypeMultiAuthzPending - // ErrTypeUnrecoverable - uint32 err_type = 1; - string err_msg = 2; - int64 err_code = 3; + // Error type can be one of: + // ErrTypeUnknown uint32 = iota + // ErrTypeUserError + // ErrTypeInternalError + // ErrTypeCodedError + // ErrTypeStatusBadRequest + // ErrTypeUnsupportedOperation + // ErrTypeUnsupportedPath + // ErrTypeInvalidRequest + // ErrTypePermissionDenied + // ErrTypeMultiAuthzPending + // ErrTypeUnrecoverable + uint32 err_type = 1; + string err_msg = 2; + int64 err_code = 3; } // Paths is the structure of special paths that is used for SpecialPaths. message Paths { - // Root are the paths that require a root token to access - repeated string root = 1; + // Root are the paths that require a root token to access + repeated string root = 1; + + // Unauthenticated are the paths that can be accessed without any auth. + repeated string unauthenticated = 2; - // Unauthenticated are the paths that can be accessed without any auth. - repeated string unauthenticated = 2; + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + repeated string local_storage = 3; - // LocalStorage are paths (prefixes) that are local to this instance; this - // indicates that these paths should not be replicated - repeated string local_storage = 3; + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + repeated string seal_wrap_storage = 4; - // SealWrapStorage are storage paths that, when using a capable seal, - // should be seal wrapped with extra encryption. It is exact matching - // unless it ends with '/' in which case it will be treated as a prefix. - repeated string seal_wrap_storage = 4; + // WriteForwardedStorage are storage paths that, when running on a PR + // Secondary cluster, cause a GRPC call up to the PR Primary cluster's + // active node to handle storage.Put(...) and storage.Delete(...) events. + // + // See extended note in /sdk/logical/logical.go. + repeated string write_forwarded_storage = 5; + + // Binary are paths whose request bodies are binary, not JSON + // + // See note in /sdk/logical/logical.go. + repeated string binary = 6; + + // Limited paths are storage paths that require special-case request limiting. + // + // See note in /sdk/logical/logical.go. 
+ repeated string limited = 7; } message Request { - // Id is the uuid associated with each request - string id = 1; + // Id is the uuid associated with each request + string id = 1; - // If set, the name given to the replication secondary where this request - // originated - string ReplicationCluster = 2; + // If set, the name given to the replication secondary where this request + // originated + string ReplicationCluster = 2; - // Operation is the requested operation type - string operation = 3; + // Operation is the requested operation type + string operation = 3; - // Path is the part of the request path not consumed by the - // routing. As an example, if the original request path is "prod/aws/foo" - // and the AWS logical backend is mounted at "prod/aws/", then the - // final path is "foo" since the mount prefix is trimmed. - string path = 4; + // Path is the part of the request path not consumed by the + // routing. As an example, if the original request path is "prod/aws/foo" + // and the AWS logical backend is mounted at "prod/aws/", then the + // final path is "foo" since the mount prefix is trimmed. + string path = 4; - // Request data is a JSON object that must have keys with string type. - string data = 5; + // Request data is a JSON object that must have keys with string type. + string data = 5; - // Secret will be non-nil only for Revoke and Renew operations - // to represent the secret that was returned prior. - Secret secret = 6; + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret secret = 6; - // Auth will be non-nil only for Renew operations - // to represent the auth that was returned prior. - Auth auth = 7; + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth auth = 7; - // Headers will contain the http headers from the request. This value will - // be used in the audit broker to ensure we are auditing only the allowed - // headers. - map<string, Header> headers = 8; + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed - // headers. + map<string, Header> headers = 8; - // ClientToken is provided to the core so that the identity - // can be verified and ACLs applied. This value is passed - // through to the logical backends but after being salted and - // hashed. - string client_token = 9; + // ClientToken is provided to the core so that the identity + // can be verified and ACLs applied. This value is passed + // through to the logical backends but after being salted and + // hashed. + string client_token = 9; - // ClientTokenAccessor is provided to the core so that the it can get - // logged as part of request audit logging. - string client_token_accessor = 10; + // ClientTokenAccessor is provided to the core so that it can get + // logged as part of request audit logging. + string client_token_accessor = 10; - // DisplayName is provided to the logical backend to help associate - // dynamic secrets with the source entity. This is not a sensitive - // name, but is useful for operators. - string display_name = 11; + // DisplayName is provided to the logical backend to help associate + // dynamic secrets with the source entity. This is not a sensitive + // name, but is useful for operators. + string display_name = 11; - // MountPoint is provided so that a logical backend can generate - // paths relative to itself.
The `Path` is effectively the client - // request path with the MountPoint trimmed off. - string mount_point = 12; + // MountPoint is provided so that a logical backend can generate + // paths relative to itself. The `Path` is effectively the client + // request path with the MountPoint trimmed off. + string mount_point = 12; - // MountType is provided so that a logical backend can make decisions - // based on the specific mount type (e.g., if a mount type has different - // aliases, generating different defaults depending on the alias) - string mount_type = 13; + // MountType is provided so that a logical backend can make decisions + // based on the specific mount type (e.g., if a mount type has different + // aliases, generating different defaults depending on the alias) + string mount_type = 13; - // MountAccessor is provided so that identities returned by the authentication - // backends can be tied to the mount it belongs to. - string mount_accessor = 14; + // MountAccessor is provided so that identities returned by the authentication + // backends can be tied to the mount they belong to. + string mount_accessor = 14; - // WrapInfo contains requested response wrapping parameters - RequestWrapInfo wrap_info = 15; + // WrapInfo contains requested response wrapping parameters + RequestWrapInfo wrap_info = 15; - // ClientTokenRemainingUses represents the allowed number of uses left on the - // token supplied - int64 client_token_remaining_uses = 16; + // ClientTokenRemainingUses represents the allowed number of uses left on the + // token supplied + int64 client_token_remaining_uses = 16; - // EntityID is the identity of the caller extracted out of the token used - // to make this request - string entity_id = 17; + // EntityID is the identity of the caller extracted out of the token used + // to make this request + string entity_id = 17; - // PolicyOverride indicates that the requestor wishes to override - // soft-mandatory Sentinel policies - bool policy_override = 18; + // PolicyOverride indicates that the requestor wishes to override + // soft-mandatory Sentinel policies + bool policy_override = 18; - // Whether the request is unauthenticated, as in, had no client token - // attached. Useful in some situations where the client token is not made - // accessible. - bool unauthenticated = 19; + // Whether the request is unauthenticated, as in, had no client token + // attached. Useful in some situations where the client token is not made + // accessible. + bool unauthenticated = 19; - // Connection will be non-nil only for credential providers to - // inspect the connection information and potentially use it for - // authentication/protection. - Connection connection = 20; + // Connection will be non-nil only for credential providers to + // inspect the connection information and potentially use it for + // authentication/protection. + Connection connection = 20; } message Auth { - LeaseOptions lease_options = 1; - - // InternalData is a JSON object that is stored with the auth struct. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - string internal_data = 2; - - // DisplayName is a non-security sensitive identifier that is - // applicable to this Auth. It is used for logging and prefixing - // of dynamic secrets. For example, DisplayName may be "armon" for - // the github credential backend. If the client token is used to - // generate a SQL credential, the user may be "github-armon-uuid".
- // This is to help identify the source without using audit tables. - string display_name = 3; - - // Policies is the list of policies that the authenticated user - // is associated with. - repeated string policies = 4; - - // Metadata is used to attach arbitrary string-type metadata to - // an authenticated user. This metadata will be outputted into the - // audit log. - map<string, string> metadata = 5; - - // ClientToken is the token that is generated for the authentication. - // This will be filled in by Vault core when an auth structure is - // returned. Setting this manually will have no effect. - string client_token = 6; - - // Accessor is the identifier for the ClientToken. This can be used - // to perform management functionalities (especially revocation) when - // ClientToken in the audit logs are obfuscated. Accessor can be used - // to revoke a ClientToken and to lookup the capabilities of the ClientToken, - // both without actually knowing the ClientToken. - string accessor = 7; - - // Period indicates that the token generated using this Auth object - // should never expire. The token should be renewed within the duration - // specified by this period. - int64 period = 8; - - // Number of allowed uses of the issued token - int64 num_uses = 9; - - // EntityID is the identifier of the entity in identity store to which the - // identity of the authenticating client belongs to. - string entity_id = 10; - - // Alias is the information about the authenticated client returned by - // the auth backend - logical.Alias alias = 11; - - // GroupAliases are the informational mappings of external groups which an - // authenticated user belongs to. This is used to check if there are - // mappings groups for the group aliases in identity store. For all the - // matching groups, the entity ID of the user will be added. - repeated logical.Alias group_aliases = 12; - - // If set, restricts usage of the certificates to client IPs falling within - // the range of the specified CIDR(s). - repeated string bound_cidrs = 13; - - // TokenPolicies and IdentityPolicies break down the list in Policies to - // help determine where a policy was sourced - repeated string token_policies = 14; - repeated string identity_policies = 15; - - // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum - // TTL is a hard limit and cannot be exceeded, also counts for periodic tokens. - int64 explicit_max_ttl = 16; - - // TokenType is the type of token being requested - uint32 token_type = 17; - - // Whether the default policy should be added automatically by core - bool no_default_policy = 18; + LeaseOptions lease_options = 1; + + // InternalData is a JSON object that is stored with the auth struct. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + string internal_data = 2; + + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + string display_name = 3; + + // Policies is the list of policies that the authenticated user + // is associated with. + repeated string policies = 4; + + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user.
This metadata will be outputted into the + // audit log. + map<string, string> metadata = 5; + + // ClientToken is the token that is generated for the authentication. + // This will be filled in by Vault core when an auth structure is + // returned. Setting this manually will have no effect. + string client_token = 6; + + // Accessor is the identifier for the ClientToken. This can be used + // to perform management functionalities (especially revocation) when + // ClientToken in the audit logs are obfuscated. Accessor can be used + // to revoke a ClientToken and to lookup the capabilities of the ClientToken, + // both without actually knowing the ClientToken. + string accessor = 7; + + // Period indicates that the token generated using this Auth object + // should never expire. The token should be renewed within the duration + // specified by this period. + int64 period = 8; + + // Number of allowed uses of the issued token + int64 num_uses = 9; + + // EntityID is the identifier of the entity in identity store to which the + // identity of the authenticating client belongs. + string entity_id = 10; + + // Alias is the information about the authenticated client returned by + // the auth backend + logical.Alias alias = 11; + + // GroupAliases are the informational mappings of external groups which an + // authenticated user belongs to. This is used to check if there are + // mapping groups for the group aliases in identity store. For all the + // matching groups, the entity ID of the user will be added. + repeated logical.Alias group_aliases = 12; + + // If set, restricts usage of the certificates to client IPs falling within + // the range of the specified CIDR(s). + repeated string bound_cidrs = 13; + + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + repeated string token_policies = 14; + repeated string identity_policies = 15; + + // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum + // TTL is a hard limit and cannot be exceeded; this also applies to periodic tokens.
+ int64 explicit_max_ttl = 16; + + // TokenType is the type of token being requested + uint32 token_type = 17; + + // Whether the default policy should be added automatically by core + bool no_default_policy = 18; } message TokenEntry { - string id = 1; - string accessor = 2; - string parent = 3; - repeated string policies = 4; - string path = 5; - map<string, string> meta = 6; - string display_name = 7; - int64 num_uses = 8; - int64 creation_time = 9; - int64 ttl = 10; - int64 explicit_max_ttl = 11; - string role = 12; - int64 period = 13; - string entity_id = 14; - repeated string bound_cidrs = 15; - string namespace_id = 16; - string cubbyhole_id = 17; - uint32 type = 18; - map<string, string> internal_meta = 19; - string inline_policy = 20; - bool no_identity_policies = 21; + string id = 1; + string accessor = 2; + string parent = 3; + repeated string policies = 4; + string path = 5; + map<string, string> meta = 6; + string display_name = 7; + int64 num_uses = 8; + int64 creation_time = 9; + int64 ttl = 10; + int64 explicit_max_ttl = 11; + string role = 12; + int64 period = 13; + string entity_id = 14; + repeated string bound_cidrs = 15; + string namespace_id = 16; + string cubbyhole_id = 17; + uint32 type = 18; + map<string, string> internal_meta = 19; + string inline_policy = 20; + bool no_identity_policies = 21; + string external_id = 22; } message LeaseOptions { - int64 TTL = 1; + int64 TTL = 1; - bool renewable = 2; + bool renewable = 2; - int64 increment = 3; + int64 increment = 3; - google.protobuf.Timestamp issue_time = 4; + google.protobuf.Timestamp issue_time = 4; - int64 MaxTTL = 5; + int64 MaxTTL = 5; } message Secret { - LeaseOptions lease_options = 1; + LeaseOptions lease_options = 1; - // InternalData is a JSON object that is stored with the secret. - // This will be sent back during a Renew/Revoke for storing internal data - // used for those operations. - string internal_data = 2; + // InternalData is a JSON object that is stored with the secret. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + string internal_data = 2; - // LeaseID is the ID returned to the user to manage this secret. - // This is generated by Vault core. Any set value will be ignored. - // For requests, this will always be blank. - string lease_id = 3; + // LeaseID is the ID returned to the user to manage this secret. + // This is generated by Vault core. Any set value will be ignored. + // For requests, this will always be blank. + string lease_id = 3; } message Response { - // Secret, if not nil, denotes that this response represents a secret. - Secret secret = 1; + // Secret, if not nil, denotes that this response represents a secret. + Secret secret = 1; + + // Auth, if not nil, contains the authentication information for + // this response. This is only checked and means something for + // credential backends. + Auth auth = 2; - // Auth, if not nil, contains the authentication information for - // this response. This is only checked and means something for - // credential backends. - Auth auth = 2; + // Response data is a JSON object that must have string keys. For + // secrets, this data is sent down to the user as-is. To store internal + // data that you don't want the user to see, store it in + // Secret.InternalData. + string data = 3; - // Response data is a JSON object that must have string keys. For - // secrets, this data is sent down to the user as-is. To store internal - // data that you don't want the user to see, store it in - // Secret.InternalData.
- string data = 3; + // Redirect is an HTTP URL to redirect to for further authentication. + // This is only valid for credential backends. This will be blanked + // for any logical backend and ignored. + string redirect = 4; - // Redirect is an HTTP URL to redirect to for further authentication. - // This is only valid for credential backends. This will be blanked - // for any logical backend and ignored. - string redirect = 4; + // Warnings allow operations or backends to return warnings in response + // to user actions without failing the action outright. + repeated string warnings = 5; - // Warnings allow operations or backends to return warnings in response - // to user actions without failing the action outright. - repeated string warnings = 5; + // Information for wrapping the response in a cubbyhole + ResponseWrapInfo wrap_info = 6; - // Information for wrapping the response in a cubbyhole - ResponseWrapInfo wrap_info = 6; + // Headers will contain the http headers from the response. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + map<string, Header> headers = 7; - // Headers will contain the http headers from the response. This value will - // be used in the audit broker to ensure we are auditing only the allowed - // headers. - map<string, Header> headers = 7; + // MountType, if non-empty, provides some information about what kind + // of mount this secret came from. + string mount_type = 8; } message ResponseWrapInfo { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - int64 TTL = 1; + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + int64 TTL = 1; - // The token containing the wrapped response - string token = 2; + // The token containing the wrapped response + string token = 2; - // The token accessor for the wrapped response token - string accessor = 3; + // The token accessor for the wrapped response token + string accessor = 3; - // The creation time. This can be used with the TTL to figure out an - // expected expiration. - google.protobuf.Timestamp creation_time = 4; + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + google.protobuf.Timestamp creation_time = 4; - // If the contained response is the output of a token creation call, the - // created token's accessor will be accessible here - string wrapped_accessor = 5; + // If the contained response is the output of a token creation call, the + // created token's accessor will be accessible here + string wrapped_accessor = 5; - // WrappedEntityID is the entity identifier of the caller who initiated the - // wrapping request - string wrapped_entity_id = 6; + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + string wrapped_entity_id = 6; - // The format to use. This doesn't get returned, it's only internal. - string format = 7; + // The format to use. This doesn't get returned, it's only internal. + string format = 7; - // CreationPath is the original request path that was used to create - // the wrapped response.
+ string creation_path = 8; - // Controls seal wrapping behavior downstream for specific use cases - bool seal_wrap = 9; + // Controls seal wrapping behavior downstream for specific use cases + bool seal_wrap = 9; } message RequestWrapInfo { - // Setting to non-zero specifies that the response should be wrapped. - // Specifies the desired TTL of the wrapping token. - int64 TTL = 1; + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + int64 TTL = 1; - // The format to use for the wrapped response; if not specified it's a bare - // token - string format = 2; + // The format to use for the wrapped response; if not specified it's a bare + // token + string format = 2; - // A flag to conforming backends that data for a given request should be - // seal wrapped - bool seal_wrap = 3; + // A flag to conforming backends that data for a given request should be + // seal wrapped + bool seal_wrap = 3; } // HandleRequestArgs is the args for HandleRequest method. message HandleRequestArgs { - uint32 storage_id = 1; - Request request = 2; + uint32 storage_id = 1; + Request request = 2; } // HandleRequestReply is the reply for HandleRequest method. message HandleRequestReply { - Response response = 1; - ProtoError err = 2; + Response response = 1; + ProtoError err = 2; } // InitializeArgs is the args for Initialize method. -message InitializeArgs { -} +message InitializeArgs {} // InitializeReply is the reply for Initialize method. message InitializeReply { - ProtoError err = 1; + ProtoError err = 1; } // SpecialPathsReply is the reply for SpecialPaths method. message SpecialPathsReply { - Paths paths = 1; + Paths paths = 1; } // HandleExistenceCheckArgs is the args for HandleExistenceCheck method. message HandleExistenceCheckArgs { - uint32 storage_id = 1; - Request request = 2; + uint32 storage_id = 1; + Request request = 2; } // HandleExistenceCheckReply is the reply for HandleExistenceCheck method. message HandleExistenceCheckReply { - bool check_found = 1; - bool exists = 2; - ProtoError err = 3; + bool check_found = 1; + bool exists = 2; + ProtoError err = 3; } // SetupArgs is the args for Setup method. message SetupArgs { - uint32 broker_id = 1; - map<string, string> Config = 2; - string backendUUID = 3; + uint32 broker_id = 1; + map<string, string> Config = 2; + string backendUUID = 3; } // SetupReply is the reply for Setup method. message SetupReply { - string err = 1; + string err = 1; } // TypeReply is the reply for the Type method. message TypeReply { - uint32 type = 1; + uint32 type = 1; } message InvalidateKeyArgs { - string key = 1; + string key = 1; } // Backend is the interface that plugins must satisfy. The plugin should // implement the server for this service. Requests will first run the // HandleExistenceCheck rpc then run the HandleRequests rpc. service Backend { - // HandleRequest is used to handle a request and generate a response. - // The plugins must check the operation type and handle appropriately. - rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply); - - // SpecialPaths is a list of paths that are special in some way. - // See PathType for the types of special paths. The key is the type - // of the special path, and the value is a list of paths for this type. - // This is not a regular expression but is an exact match. If the path - // ends in '*' then it is a prefix-based match. The '*' can only appear - // at the end.
- rpc SpecialPaths(Empty) returns (SpecialPathsReply); - - // HandleExistenceCheck is used to handle a request and generate a response - // indicating whether the given path exists or not; this is used to - // understand whether the request must have a Create or Update capability - // ACL applied. The first bool indicates whether an existence check - // function was found for the backend; the second indicates whether, if an - // existence check function was found, the item exists or not. - rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply); - - // Cleanup is invoked during an unmount of a backend to allow it to - // handle any cleanup like connection closing or releasing of file handles. - // Cleanup is called right before Vault closes the plugin process. - rpc Cleanup(Empty) returns (Empty); - - // InvalidateKey may be invoked when an object is modified that belongs - // to the backend. The backend can use this to clear any caches or reset - // internal state as needed. - rpc InvalidateKey(InvalidateKeyArgs) returns (Empty); - - // Setup is used to set up the backend based on the provided backend - // configuration. The plugin's setup implementation should use the provided - // broker_id to create a connection back to Vault for use with the Storage - // and SystemView clients. - rpc Setup(SetupArgs) returns (SetupReply); - - // Initialize is invoked just after mounting a backend to allow it to - // handle any initialization tasks that need to be performed. - rpc Initialize(InitializeArgs) returns (InitializeReply); - - // Type returns the BackendType for the particular backend - rpc Type(Empty) returns (TypeReply); + // HandleRequest is used to handle a request and generate a response. + // The plugins must check the operation type and handle appropriately. + rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply); + + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + rpc SpecialPaths(Empty) returns (SpecialPathsReply); + + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply); + + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + rpc Cleanup(Empty) returns (Empty); + + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + rpc InvalidateKey(InvalidateKeyArgs) returns (Empty); + + // Setup is used to set up the backend based on the provided backend + // configuration. 
The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + rpc Setup(SetupArgs) returns (SetupReply); + + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. + rpc Initialize(InitializeArgs) returns (InitializeReply); + + // Type returns the BackendType for the particular backend + rpc Type(Empty) returns (TypeReply); } message StorageEntry { - string key = 1; - bytes value = 2; - bool seal_wrap = 3; + string key = 1; + bytes value = 2; + bool seal_wrap = 3; } message StorageListArgs { - string prefix = 1; + string prefix = 1; } message StorageListReply { - repeated string keys = 1; - string err = 2; + repeated string keys = 1; + string err = 2; } message StorageGetArgs { - string key = 1; + string key = 1; } message StorageGetReply { - StorageEntry entry = 1; - string err = 2; + StorageEntry entry = 1; + string err = 2; } message StoragePutArgs { - StorageEntry entry = 1; + StorageEntry entry = 1; } message StoragePutReply { - string err = 1; + string err = 1; } message StorageDeleteArgs { - string key = 1; + string key = 1; } message StorageDeleteReply { - string err = 1; + string err = 1; } // Storage is the way that plugins are able read/write data. Plugins should // implement the client for this service. service Storage { - rpc List(StorageListArgs) returns (StorageListReply); - rpc Get(StorageGetArgs) returns (StorageGetReply); - rpc Put(StoragePutArgs) returns (StoragePutReply); - rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply); + rpc List(StorageListArgs) returns (StorageListReply); + rpc Get(StorageGetArgs) returns (StorageGetReply); + rpc Put(StoragePutArgs) returns (StoragePutReply); + rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply); } message TTLReply { - int64 TTL = 1; + int64 TTL = 1; } message TaintedReply { - bool tainted = 1; + bool tainted = 1; } message CachingDisabledReply { - bool disabled = 1; + bool disabled = 1; } message ReplicationStateReply { - int32 state = 1; + int32 state = 1; } message ResponseWrapDataArgs { - string data = 1; - int64 TTL = 2; - bool JWT = 3; + string data = 1; + int64 TTL = 2; + bool JWT = 3; } message ResponseWrapDataReply { - ResponseWrapInfo wrap_info = 1; - string err = 2; + ResponseWrapInfo wrap_info = 1; + string err = 2; } message MlockEnabledReply { - bool enabled = 1; + bool enabled = 1; } message LocalMountReply { - bool local = 1; + bool local = 1; } message EntityInfoArgs { - string entity_id = 1; + string entity_id = 1; } message EntityInfoReply { - logical.Entity entity = 1; - string err = 2; + logical.Entity entity = 1; + string err = 2; } message GroupsForEntityReply { - repeated logical.Group groups = 1; - string err = 2; + repeated logical.Group groups = 1; + string err = 2; } message PluginEnvReply { - logical.PluginEnvironment plugin_environment = 1; - string err = 2; + logical.PluginEnvironment plugin_environment = 1; + string err = 2; } message GeneratePasswordFromPolicyRequest { - string policy_name = 1; + string policy_name = 1; } message GeneratePasswordFromPolicyReply { - string password = 1; + string password = 1; +} + +message ClusterInfoReply { + string cluster_name = 1; + string cluster_id = 2; + string err = 3; +} + +message GenerateIdentityTokenRequest { + string audience = 1; + int64 ttl = 2; +} + +message GenerateIdentityTokenResponse { + string token = 1; + int64 ttl = 2; } // SystemView exposes 
system configuration information in a safe way for plugins // to consume. Plugins should implement the client for this service. service SystemView { - // DefaultLeaseTTL returns the default lease TTL set in Vault configuration - rpc DefaultLeaseTTL(Empty) returns (TTLReply); + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + rpc DefaultLeaseTTL(Empty) returns (TTLReply); - // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend - // authors should take care not to issue credentials that last longer than - // this value, as Vault will revoke them - rpc MaxLeaseTTL(Empty) returns (TTLReply); + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + rpc MaxLeaseTTL(Empty) returns (TTLReply); - // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the - // process of being unmounted. This should only be used in special - // circumstances; a primary use-case is as a guard in revocation functions. - // If revocation of a backend's leases fails it can keep the unmounting - // process from being successful. If the reason for this failure is not - // relevant when the mount is tainted (for instance, saving a CRL to disk - // when the stored CRL will be removed during the unmounting process - // anyways), we can ignore the errors to allow unmounting to complete. - rpc Tainted(Empty) returns (TaintedReply); + // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + rpc Tainted(Empty) returns (TaintedReply); - // CachingDisabled returns true if caching is disabled. If true, no caches - // should be used, despite known slowdowns. - rpc CachingDisabled(Empty) returns (CachingDisabledReply); + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + rpc CachingDisabled(Empty) returns (CachingDisabledReply); - // ReplicationState indicates the state of cluster replication - rpc ReplicationState(Empty) returns (ReplicationStateReply); + // ReplicationState indicates the state of cluster replication + rpc ReplicationState(Empty) returns (ReplicationStateReply); - // ResponseWrapData wraps the given data in a cubbyhole and returns the - // token used to unwrap. - rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply); + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply); - // MlockEnabled returns the configuration setting for enabling mlock on - // plugins. - rpc MlockEnabled(Empty) returns (MlockEnabledReply); + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. 
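The MaxLeaseTTL comment above carries a real obligation for backend authors: credentials that outlive the system max will be revoked. A short sketch of the usual guard, written against `logical.SystemView` (the Go interface this proto service backs; using it here rather than raw gRPC is an assumption about the typical call path):

```go
package example

import (
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

// capTTL enforces the guidance in the MaxLeaseTTL comment: never issue a
// credential that outlives the system-wide maximum, since Vault will revoke
// anything longer-lived.
func capTTL(sys logical.SystemView, requested time.Duration) time.Duration {
	if max := sys.MaxLeaseTTL(); requested > max {
		return max
	}
	return requested
}
```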
+ rpc MlockEnabled(Empty) returns (MlockEnabledReply); - // LocalMount, when run from a system view attached to a request, indicates - // whether the request is affecting a local mount or not - rpc LocalMount(Empty) returns (LocalMountReply); + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + rpc LocalMount(Empty) returns (LocalMountReply); - // EntityInfo returns the basic entity information for the given entity id - rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply); + // EntityInfo returns the basic entity information for the given entity id + rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply); - // PluginEnv returns Vault environment information used by plugins - rpc PluginEnv(Empty) returns (PluginEnvReply); + // PluginEnv returns Vault environment information used by plugins + rpc PluginEnv(Empty) returns (PluginEnvReply); - // GroupsForEntity returns the group membership information for the given - // entity id - rpc GroupsForEntity(EntityInfoArgs) returns (GroupsForEntityReply); + // GroupsForEntity returns the group membership information for the given + // entity id + rpc GroupsForEntity(EntityInfoArgs) returns (GroupsForEntityReply); - // GeneratePasswordFromPolicy generates a password from an existing password policy - rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply); + // GeneratePasswordFromPolicy generates a password from an existing password policy + rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply); + + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. + rpc ClusterInfo(Empty) returns (ClusterInfoReply); + + // GenerateIdentityToken returns an identity token for the requesting plugin. + rpc GenerateIdentityToken(GenerateIdentityTokenRequest) returns (GenerateIdentityTokenResponse); } message Connection { - // RemoteAddr is the network address that sent the request. - string remote_addr = 1; + // RemoteAddr is the network address that sent the request. + string remote_addr = 1; - // RemotePort is the network port that sent the request. - int32 remote_port = 3; + // RemotePort is the network port that sent the request. 
+ int32 remote_port = 3; - // ConnectionState is the marshalled tls.ConnectionState from the original - // request - ConnectionState connection_state = 2; + // ConnectionState is the marshalled tls.ConnectionState from the original + // request + ConnectionState connection_state = 2; } message ConnectionState { - uint32 version = 1; - bool handshake_complete = 2; - bool did_resume = 3; - uint32 cipher_suite = 4; - string negotiated_protocol = 5; - bool negotiated_protocol_is_mutual = 6; - string server_name = 7; - CertificateChain peer_certificates = 8; + uint32 version = 1; + bool handshake_complete = 2; + bool did_resume = 3; + uint32 cipher_suite = 4; + string negotiated_protocol = 5; + bool negotiated_protocol_is_mutual = 6; + string server_name = 7; + CertificateChain peer_certificates = 8; - repeated CertificateChain verified_chains = 9; - repeated bytes signed_certificate_timestamps = 10; + repeated CertificateChain verified_chains = 9; + repeated bytes signed_certificate_timestamps = 10; - bytes ocsp_response = 11; - bytes tls_unique = 12; + bytes ocsp_response = 11; + bytes tls_unique = 12; } message Certificate { - bytes asn1_data = 1; + bytes asn1_data = 1; } message CertificateChain { - repeated Certificate certificates = 1; + repeated Certificate certificates = 1; } +message SendEventRequest { + string event_type = 1; + logical.EventData event = 2; +} + +service Events { + rpc SendEvent(SendEventRequest) returns (Empty); +} diff --git a/sdk/plugin/pb/backend_grpc.pb.go b/sdk/plugin/pb/backend_grpc.pb.go index 9be6bacdc5d6..b0e96cd4a748 100644 --- a/sdk/plugin/pb/backend_grpc.pb.go +++ b/sdk/plugin/pb/backend_grpc.pb.go @@ -1,4 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: sdk/plugin/pb/backend.proto package pb @@ -14,6 +21,17 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Backend_HandleRequest_FullMethodName = "/pb.Backend/HandleRequest" + Backend_SpecialPaths_FullMethodName = "/pb.Backend/SpecialPaths" + Backend_HandleExistenceCheck_FullMethodName = "/pb.Backend/HandleExistenceCheck" + Backend_Cleanup_FullMethodName = "/pb.Backend/Cleanup" + Backend_InvalidateKey_FullMethodName = "/pb.Backend/InvalidateKey" + Backend_Setup_FullMethodName = "/pb.Backend/Setup" + Backend_Initialize_FullMethodName = "/pb.Backend/Initialize" + Backend_Type_FullMethodName = "/pb.Backend/Type" +) + // BackendClient is the client API for Backend service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -65,7 +83,7 @@ func NewBackendClient(cc grpc.ClientConnInterface) BackendClient { func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) { out := new(HandleRequestReply) - err := c.cc.Invoke(ctx, "/pb.Backend/HandleRequest", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_HandleRequest_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +92,7 @@ func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) { out := new(SpecialPathsReply) - err := c.cc.Invoke(ctx, "/pb.Backend/SpecialPaths", in, out, opts...) 
+ err := c.cc.Invoke(ctx, Backend_SpecialPaths_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -83,7 +101,7 @@ func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grp func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) { out := new(HandleExistenceCheckReply) - err := c.cc.Invoke(ctx, "/pb.Backend/HandleExistenceCheck", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_HandleExistenceCheck_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -92,7 +110,7 @@ func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExis func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Backend/Cleanup", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Cleanup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -101,7 +119,7 @@ func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.Cal func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/pb.Backend/InvalidateKey", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_InvalidateKey_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -110,7 +128,7 @@ func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) { out := new(SetupReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Setup", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Setup_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -119,7 +137,7 @@ func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.C func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) { out := new(InitializeReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Initialize", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Initialize_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -128,7 +146,7 @@ func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) { out := new(TypeReply) - err := c.cc.Invoke(ctx, "/pb.Backend/Type", in, out, opts...) + err := c.cc.Invoke(ctx, Backend_Type_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -228,7 +246,7 @@ func _Backend_HandleRequest_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/HandleRequest", + FullMethod: Backend_HandleRequest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).HandleRequest(ctx, req.(*HandleRequestArgs)) @@ -246,7 +264,7 @@ func _Backend_SpecialPaths_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/SpecialPaths", + FullMethod: Backend_SpecialPaths_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).SpecialPaths(ctx, req.(*Empty)) @@ -264,7 +282,7 @@ func _Backend_HandleExistenceCheck_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/HandleExistenceCheck", + FullMethod: Backend_HandleExistenceCheck_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).HandleExistenceCheck(ctx, req.(*HandleExistenceCheckArgs)) @@ -282,7 +300,7 @@ func _Backend_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Cleanup", + FullMethod: Backend_Cleanup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Cleanup(ctx, req.(*Empty)) @@ -300,7 +318,7 @@ func _Backend_InvalidateKey_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/InvalidateKey", + FullMethod: Backend_InvalidateKey_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).InvalidateKey(ctx, req.(*InvalidateKeyArgs)) @@ -318,7 +336,7 @@ func _Backend_Setup_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Setup", + FullMethod: Backend_Setup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Setup(ctx, req.(*SetupArgs)) @@ -336,7 +354,7 @@ func _Backend_Initialize_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Initialize", + FullMethod: Backend_Initialize_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Initialize(ctx, req.(*InitializeArgs)) @@ -354,7 +372,7 @@ func _Backend_Type_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Backend/Type", + FullMethod: Backend_Type_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BackendServer).Type(ctx, req.(*Empty)) @@ -406,6 +424,13 @@ var Backend_ServiceDesc = grpc.ServiceDesc{ Metadata: "sdk/plugin/pb/backend.proto", } +const ( + Storage_List_FullMethodName = "/pb.Storage/List" + Storage_Get_FullMethodName = "/pb.Storage/Get" + Storage_Put_FullMethodName = "/pb.Storage/Put" + Storage_Delete_FullMethodName = "/pb.Storage/Delete" +) + // StorageClient is the client API for Storage service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -426,7 +451,7 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) { out := new(StorageListReply) - err := c.cc.Invoke(ctx, "/pb.Storage/List", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_List_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -435,7 +460,7 @@ func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...g func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) { out := new(StorageGetReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Get", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Get_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -444,7 +469,7 @@ func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grp func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) { out := new(StoragePutReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Put", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Put_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -453,7 +478,7 @@ func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grp func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) { out := new(StorageDeleteReply) - err := c.cc.Invoke(ctx, "/pb.Storage/Delete", in, out, opts...) + err := c.cc.Invoke(ctx, Storage_Delete_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -510,7 +535,7 @@ func _Storage_List_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/List", + FullMethod: Storage_List_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).List(ctx, req.(*StorageListArgs)) @@ -528,7 +553,7 @@ func _Storage_Get_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Get", + FullMethod: Storage_Get_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Get(ctx, req.(*StorageGetArgs)) @@ -546,7 +571,7 @@ func _Storage_Put_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Put", + FullMethod: Storage_Put_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Put(ctx, req.(*StoragePutArgs)) @@ -564,7 +589,7 @@ func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.Storage/Delete", + FullMethod: Storage_Delete_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(StorageServer).Delete(ctx, req.(*StorageDeleteArgs)) @@ -600,6 +625,23 @@ var Storage_ServiceDesc = grpc.ServiceDesc{ Metadata: "sdk/plugin/pb/backend.proto", } +const ( + SystemView_DefaultLeaseTTL_FullMethodName = "/pb.SystemView/DefaultLeaseTTL" + SystemView_MaxLeaseTTL_FullMethodName = "/pb.SystemView/MaxLeaseTTL" + SystemView_Tainted_FullMethodName = "/pb.SystemView/Tainted" + SystemView_CachingDisabled_FullMethodName = "/pb.SystemView/CachingDisabled" + SystemView_ReplicationState_FullMethodName = "/pb.SystemView/ReplicationState" + SystemView_ResponseWrapData_FullMethodName = "/pb.SystemView/ResponseWrapData" + SystemView_MlockEnabled_FullMethodName = "/pb.SystemView/MlockEnabled" + SystemView_LocalMount_FullMethodName = "/pb.SystemView/LocalMount" + SystemView_EntityInfo_FullMethodName = "/pb.SystemView/EntityInfo" + SystemView_PluginEnv_FullMethodName = "/pb.SystemView/PluginEnv" + SystemView_GroupsForEntity_FullMethodName = "/pb.SystemView/GroupsForEntity" + SystemView_GeneratePasswordFromPolicy_FullMethodName = "/pb.SystemView/GeneratePasswordFromPolicy" + SystemView_ClusterInfo_FullMethodName = "/pb.SystemView/ClusterInfo" + SystemView_GenerateIdentityToken_FullMethodName = "/pb.SystemView/GenerateIdentityToken" +) + // SystemViewClient is the client API for SystemView service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -642,6 +684,10 @@ type SystemViewClient interface { GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) // GeneratePasswordFromPolicy generates a password from an existing password policy GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. 
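A hypothetical caller of the two methods being added to this client interface might look like the sketch below; connection setup is elided, the audience string is invented, and the units of `Ttl` are an assumption (the diff does not say):

```go
package example

import (
	"context"
	"time"

	"github.com/hashicorp/vault/sdk/plugin/pb"
	"google.golang.org/grpc"
)

// callNewSystemViewRPCs exercises the two RPCs added in this diff. Field
// names follow the generated code above; values are illustrative only.
func callNewSystemViewRPCs(ctx context.Context, conn *grpc.ClientConn) error {
	sys := pb.NewSystemViewClient(conn)

	if _, err := sys.ClusterInfo(ctx, &pb.Empty{}); err != nil {
		return err
	}

	_, err := sys.GenerateIdentityToken(ctx, &pb.GenerateIdentityTokenRequest{
		Audience: "https://example.invalid",      // invented audience
		Ttl:      int64(time.Hour / time.Second), // assuming seconds
	})
	return err
}
```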
+ ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) + // GenerateIdentityToken returns an identity token for the requesting plugin. + GenerateIdentityToken(ctx context.Context, in *GenerateIdentityTokenRequest, opts ...grpc.CallOption) (*GenerateIdentityTokenResponse, error) } type systemViewClient struct { @@ -654,7 +700,7 @@ func NewSystemViewClient(cc grpc.ClientConnInterface) SystemViewClient { func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { out := new(TTLReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/DefaultLeaseTTL", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_DefaultLeaseTTL_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -663,7 +709,7 @@ func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { out := new(TTLReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/MaxLeaseTTL", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_MaxLeaseTTL_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -672,7 +718,7 @@ func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...g func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) { out := new(TaintedReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/Tainted", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_Tainted_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -681,7 +727,7 @@ func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc. func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) { out := new(CachingDisabledReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/CachingDisabled", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_CachingDisabled_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -690,7 +736,7 @@ func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) { out := new(ReplicationStateReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ReplicationState", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_ReplicationState_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -699,7 +745,7 @@ func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) { out := new(ResponseWrapDataReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/ResponseWrapData", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_ResponseWrapData_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -708,7 +754,7 @@ func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWra func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) { out := new(MlockEnabledReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/MlockEnabled", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_MlockEnabled_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -717,7 +763,7 @@ func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ... func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) { out := new(LocalMountReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/LocalMount", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_LocalMount_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -726,7 +772,7 @@ func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...gr func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) { out := new(EntityInfoReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/EntityInfo", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_EntityInfo_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -735,7 +781,7 @@ func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, o func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) { out := new(PluginEnvReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/PluginEnv", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_PluginEnv_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -744,7 +790,7 @@ func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grp func (c *systemViewClient) GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) { out := new(GroupsForEntityReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/GroupsForEntity", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_GroupsForEntity_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -753,7 +799,25 @@ func (c *systemViewClient) GroupsForEntity(ctx context.Context, in *EntityInfoAr func (c *systemViewClient) GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) { out := new(GeneratePasswordFromPolicyReply) - err := c.cc.Invoke(ctx, "/pb.SystemView/GeneratePasswordFromPolicy", in, out, opts...) + err := c.cc.Invoke(ctx, SystemView_GeneratePasswordFromPolicy_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) { + out := new(ClusterInfoReply) + err := c.cc.Invoke(ctx, SystemView_ClusterInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) GenerateIdentityToken(ctx context.Context, in *GenerateIdentityTokenRequest, opts ...grpc.CallOption) (*GenerateIdentityTokenResponse, error) { + out := new(GenerateIdentityTokenResponse) + err := c.cc.Invoke(ctx, SystemView_GenerateIdentityToken_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -802,6 +866,10 @@ type SystemViewServer interface { GroupsForEntity(context.Context, *EntityInfoArgs) (*GroupsForEntityReply, error) // GeneratePasswordFromPolicy generates a password from an existing password policy GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. 
+ ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) + // GenerateIdentityToken returns an identity token for the requesting plugin. + GenerateIdentityToken(context.Context, *GenerateIdentityTokenRequest) (*GenerateIdentityTokenResponse, error) mustEmbedUnimplementedSystemViewServer() } @@ -845,6 +913,12 @@ func (UnimplementedSystemViewServer) GroupsForEntity(context.Context, *EntityInf func (UnimplementedSystemViewServer) GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) { return nil, status.Errorf(codes.Unimplemented, "method GeneratePasswordFromPolicy not implemented") } +func (UnimplementedSystemViewServer) ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (UnimplementedSystemViewServer) GenerateIdentityToken(context.Context, *GenerateIdentityTokenRequest) (*GenerateIdentityTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateIdentityToken not implemented") +} func (UnimplementedSystemViewServer) mustEmbedUnimplementedSystemViewServer() {} // UnsafeSystemViewServer may be embedded to opt out of forward compatibility for this service. @@ -868,7 +942,7 @@ func _SystemView_DefaultLeaseTTL_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/DefaultLeaseTTL", + FullMethod: SystemView_DefaultLeaseTTL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).DefaultLeaseTTL(ctx, req.(*Empty)) @@ -886,7 +960,7 @@ func _SystemView_MaxLeaseTTL_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/MaxLeaseTTL", + FullMethod: SystemView_MaxLeaseTTL_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).MaxLeaseTTL(ctx, req.(*Empty)) @@ -904,7 +978,7 @@ func _SystemView_Tainted_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/Tainted", + FullMethod: SystemView_Tainted_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).Tainted(ctx, req.(*Empty)) @@ -922,7 +996,7 @@ func _SystemView_CachingDisabled_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/CachingDisabled", + FullMethod: SystemView_CachingDisabled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).CachingDisabled(ctx, req.(*Empty)) @@ -940,7 +1014,7 @@ func _SystemView_ReplicationState_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/ReplicationState", + FullMethod: SystemView_ReplicationState_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).ReplicationState(ctx, req.(*Empty)) @@ -958,7 +1032,7 @@ func _SystemView_ResponseWrapData_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/ResponseWrapData", + FullMethod: SystemView_ResponseWrapData_FullMethodName, } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { return srv.(SystemViewServer).ResponseWrapData(ctx, req.(*ResponseWrapDataArgs)) @@ -976,7 +1050,7 @@ func _SystemView_MlockEnabled_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/MlockEnabled", + FullMethod: SystemView_MlockEnabled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).MlockEnabled(ctx, req.(*Empty)) @@ -994,7 +1068,7 @@ func _SystemView_LocalMount_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/LocalMount", + FullMethod: SystemView_LocalMount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).LocalMount(ctx, req.(*Empty)) @@ -1012,7 +1086,7 @@ func _SystemView_EntityInfo_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/EntityInfo", + FullMethod: SystemView_EntityInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).EntityInfo(ctx, req.(*EntityInfoArgs)) @@ -1030,7 +1104,7 @@ func _SystemView_PluginEnv_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/PluginEnv", + FullMethod: SystemView_PluginEnv_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).PluginEnv(ctx, req.(*Empty)) @@ -1048,7 +1122,7 @@ func _SystemView_GroupsForEntity_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/GroupsForEntity", + FullMethod: SystemView_GroupsForEntity_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).GroupsForEntity(ctx, req.(*EntityInfoArgs)) @@ -1066,7 +1140,7 @@ func _SystemView_GeneratePasswordFromPolicy_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/pb.SystemView/GeneratePasswordFromPolicy", + FullMethod: SystemView_GeneratePasswordFromPolicy_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SystemViewServer).GeneratePasswordFromPolicy(ctx, req.(*GeneratePasswordFromPolicyRequest)) @@ -1074,6 +1148,42 @@ func _SystemView_GeneratePasswordFromPolicy_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } +func _SystemView_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ClusterInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SystemView_ClusterInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ClusterInfo(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_GenerateIdentityToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateIdentityTokenRequest) + if err := dec(in); err != nil { + return 
nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).GenerateIdentityToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SystemView_GenerateIdentityToken_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).GenerateIdentityToken(ctx, req.(*GenerateIdentityTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + // SystemView_ServiceDesc is the grpc.ServiceDesc for SystemView service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1129,6 +1239,104 @@ var SystemView_ServiceDesc = grpc.ServiceDesc{ MethodName: "GeneratePasswordFromPolicy", Handler: _SystemView_GeneratePasswordFromPolicy_Handler, }, + { + MethodName: "ClusterInfo", + Handler: _SystemView_ClusterInfo_Handler, + }, + { + MethodName: "GenerateIdentityToken", + Handler: _SystemView_GenerateIdentityToken_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +const ( + Events_SendEvent_FullMethodName = "/pb.Events/SendEvent" +) + +// EventsClient is the client API for Events service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EventsClient interface { + SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) +} + +type eventsClient struct { + cc grpc.ClientConnInterface +} + +func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { + return &eventsClient{cc} +} + +func (c *eventsClient) SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, Events_SendEvent_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EventsServer is the server API for Events service. +// All implementations must embed UnimplementedEventsServer +// for forward compatibility +type EventsServer interface { + SendEvent(context.Context, *SendEventRequest) (*Empty, error) + mustEmbedUnimplementedEventsServer() +} + +// UnimplementedEventsServer must be embedded to have forward compatible implementations. +type UnimplementedEventsServer struct { +} + +func (UnimplementedEventsServer) SendEvent(context.Context, *SendEventRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendEvent not implemented") +} +func (UnimplementedEventsServer) mustEmbedUnimplementedEventsServer() {} + +// UnsafeEventsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EventsServer will +// result in compilation errors. 
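The new Events service reduces to a single unary RPC; a hypothetical client-side call is sketched below (the event type and payload are invented, and `logical.EventData`'s field names are assumed from the SDK's event message):

```go
package example

import (
	"context"

	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/plugin/pb"
	"google.golang.org/grpc"
)

// sendOneEvent sketches a caller of the Events service defined above. The
// event type and EventData contents are illustrative only.
func sendOneEvent(ctx context.Context, conn *grpc.ClientConn) error {
	events := pb.NewEventsClient(conn)
	_, err := events.SendEvent(ctx, &pb.SendEventRequest{
		EventType: "example/write",              // invented event type
		Event:     &logical.EventData{Id: "e1"}, // assumed field name
	})
	return err
}
```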
+type UnsafeEventsServer interface { + mustEmbedUnimplementedEventsServer() +} + +func RegisterEventsServer(s grpc.ServiceRegistrar, srv EventsServer) { + s.RegisterService(&Events_ServiceDesc, srv) +} + +func _Events_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendEventRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).SendEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Events_SendEvent_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).SendEvent(ctx, req.(*SendEventRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Events_ServiceDesc is the grpc.ServiceDesc for Events service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Events_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Events", + HandlerType: (*EventsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendEvent", + Handler: _Events_SendEvent_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "sdk/plugin/pb/backend.proto", diff --git a/sdk/plugin/pb/translation.go b/sdk/plugin/pb/translation.go index 732086b45e3c..5a349b5b6672 100644 --- a/sdk/plugin/pb/translation.go +++ b/sdk/plugin/pb/translation.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package pb import ( @@ -401,13 +404,14 @@ func ProtoResponseToLogicalResponse(r *Response) (*logical.Response, error) { } return &logical.Response{ - Secret: secret, - Auth: auth, - Data: data, - Redirect: r.Redirect, - Warnings: r.Warnings, - WrapInfo: wrapInfo, - Headers: headers, + Secret: secret, + Auth: auth, + Data: data, + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + MountType: r.MountType, }, nil } @@ -488,13 +492,14 @@ func LogicalResponseToProtoResponse(r *logical.Response) (*Response, error) { } return &Response{ - Secret: secret, - Auth: auth, - Data: string(buf[:]), - Redirect: r.Redirect, - Warnings: r.Warnings, - WrapInfo: wrapInfo, - Headers: headers, + Secret: secret, + Auth: auth, + Data: string(buf[:]), + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + MountType: r.MountType, }, nil } @@ -620,6 +625,7 @@ func LogicalTokenEntryToProtoTokenEntry(t *logical.TokenEntry) *TokenEntry { NamespaceID: t.NamespaceID, CubbyholeID: t.CubbyholeID, Type: uint32(t.Type), + ExternalID: t.ExternalID, } } @@ -660,6 +666,7 @@ func ProtoTokenEntryToLogicalTokenEntry(t *TokenEntry) (*logical.TokenEntry, err NamespaceID: t.NamespaceID, CubbyholeID: t.CubbyholeID, Type: logical.TokenType(t.Type), + ExternalID: t.ExternalID, }, nil } diff --git a/sdk/plugin/pb/translation_test.go b/sdk/plugin/pb/translation_test.go index 83cad401e553..4386ae59b21b 100644 --- a/sdk/plugin/pb/translation_test.go +++ b/sdk/plugin/pb/translation_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package pb import ( @@ -262,6 +265,7 @@ func TestTranslation_Response(t *testing.T) { CreationPath: "test/foo", SealWrap: true, }, + MountType: "mountType", }, } diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go index edbffcd6983a..ec58417ec7d2 100644 --- a/sdk/plugin/plugin.go +++ b/sdk/plugin/plugin.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -151,14 +154,4 @@ func (b *BackendPluginClient) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -func (b *BackendPluginClient) IsExternal() bool { - if externaler, ok := b.Backend.(logical.Externaler); ok { - return externaler.IsExternal() - } - return true // default to true since this is only used for GRPC plugins -} - -var ( - _ logical.PluginVersioner = (*BackendPluginClient)(nil) - _ logical.Externaler = (*BackendPluginClient)(nil) -) +var _ logical.PluginVersioner = (*BackendPluginClient)(nil) diff --git a/sdk/plugin/plugin_v5.go b/sdk/plugin/plugin_v5.go index 2adf020a48ee..cc2d1383775d 100644 --- a/sdk/plugin/plugin_v5.go +++ b/sdk/plugin/plugin_v5.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -52,10 +55,7 @@ func (b *BackendPluginClientV5) PluginVersion() logical.PluginVersion { return logical.EmptyPluginVersion } -var ( - _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) - _ logical.Externaler = (*BackendPluginClientV5)(nil) -) +var _ logical.PluginVersioner = (*BackendPluginClientV5)(nil) // NewBackendV5 will return an instance of an RPC-based client implementation of // the backend for external plugins, or a concrete implementation of the diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go index 0da143f769b8..37e0c729a57e 100644 --- a/sdk/plugin/serve.go +++ b/sdk/plugin/serve.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( @@ -94,7 +97,7 @@ func ServeMultiplex(opts *ServeOpts) error { logger := opts.Logger if logger == nil { logger = log.New(&log.LoggerOptions{ - Level: log.Info, + Level: log.Trace, Output: os.Stderr, JSONFormat: true, }) diff --git a/sdk/plugin/storage_test.go b/sdk/plugin/storage_test.go index 651d36199042..61a5deec6720 100644 --- a/sdk/plugin/storage_test.go +++ b/sdk/plugin/storage_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package plugin import ( diff --git a/sdk/queue/priority_queue.go b/sdk/queue/priority_queue.go index 3994841773e6..802a538587b3 100644 --- a/sdk/queue/priority_queue.go +++ b/sdk/queue/priority_queue.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Package queue provides Vault plugins with a Priority Queue. It can be used // as an in-memory list of queue.Item sorted by their priority, and offers // methods to find or remove items by their key. Internally it uses diff --git a/sdk/queue/priority_queue_test.go b/sdk/queue/priority_queue_test.go index 928442b5246b..108a26cc0edc 100644 --- a/sdk/queue/priority_queue_test.go +++ b/sdk/queue/priority_queue_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package queue import ( diff --git a/sdk/version/cgo.go b/sdk/version/cgo.go deleted file mode 100644 index 5bc93e5bfcda..000000000000 --- a/sdk/version/cgo.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build cgo - -package version - -func init() { - CgoEnabled = true -} diff --git a/sdk/version/version.go b/sdk/version/version.go deleted file mode 100644 index 78b8eb829cdd..000000000000 --- a/sdk/version/version.go +++ /dev/null @@ -1,80 +0,0 @@ -package version - -import ( - "bytes" - "fmt" -) - -// VersionInfo -type VersionInfo struct { - Revision string `json:"revision,omitempty"` - Version string `json:"version,omitempty"` - VersionPrerelease string `json:"version_prerelease,omitempty"` - VersionMetadata string `json:"version_metadata,omitempty"` - BuildDate string `json:"build_date,omitempty"` -} - -func GetVersion() *VersionInfo { - ver := Version - rel := VersionPrerelease - md := VersionMetadata - if GitDescribe != "" { - ver = GitDescribe - } - if GitDescribe == "" && rel == "" && VersionPrerelease != "" { - rel = "dev" - } - - return &VersionInfo{ - Revision: GitCommit, - Version: ver, - VersionPrerelease: rel, - VersionMetadata: md, - BuildDate: BuildDate, - } -} - -func (c *VersionInfo) VersionNumber() string { - if Version == "unknown" && VersionPrerelease == "unknown" { - return "(version unknown)" - } - - version := c.Version - - if c.VersionPrerelease != "" { - version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) - } - - return version -} - -func (c *VersionInfo) FullVersionNumber(rev bool) string { - var versionString bytes.Buffer - - if Version == "unknown" && VersionPrerelease == "unknown" { - return "Vault (version unknown)" - } - - fmt.Fprintf(&versionString, "Vault v%s", c.Version) - if c.VersionPrerelease != "" { - fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) - } - - if c.VersionMetadata != "" { - fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) - } - - if rev && c.Revision != "" { - fmt.Fprintf(&versionString, " (%s)", c.Revision) - } - - if c.BuildDate != "" { - fmt.Fprintf(&versionString, ", built %s", c.BuildDate) - } - - return versionString.String() -} diff --git a/sdk/version/version_base.go b/sdk/version/version_base.go deleted file mode 100644 index e45626e2cd8a..000000000000 --- a/sdk/version/version_base.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -var ( - // The git commit that was compiled. This will be filled in by the compiler. - GitCommit string - GitDescribe string - - // The compilation date. This will be filled in by the compiler. - BuildDate string - - // Whether cgo is enabled or not; set at build time - CgoEnabled bool - - Version = "1.13.0" - VersionPrerelease = "dev1" - VersionMetadata = "" -) diff --git a/serviceregistration/consul/consul_service_registration.go b/serviceregistration/consul/consul_service_registration.go index 79008967a569..a3534e4ff37b 100644 --- a/serviceregistration/consul/consul_service_registration.go +++ b/serviceregistration/consul/consul_service_registration.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -85,6 +88,10 @@ type serviceRegistration struct { // NewConsulServiceRegistration constructs a Consul-based ServiceRegistration. 
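The hunk just below swaps `ParseDedupLowercaseAndSortStrings` for `ParseDedupAndSortStrings`, so Consul service tags keep their original case. A sketch of the behavioral difference, matching the new test further down (the strutil import path shown is one plausible home for these helpers):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/strutil"
)

func main() {
	// New behavior: split, trim, dedup, sort; case preserved.
	fmt.Println(strutil.ParseDedupAndSortStrings("FooBar, Feedface", ","))
	// [Feedface FooBar]

	// Old behavior lowercased tags before sorting.
	fmt.Println(strutil.ParseDedupLowercaseAndSortStrings("FooBar, Feedface", ","))
	// [feedface foobar]
}
```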
func NewServiceRegistration(conf map[string]string, logger log.Logger, state sr.State) (sr.ServiceRegistration, error) { + if logger == nil { + return nil, errors.New("logger is required") + } + // Allow admins to disable consul integration disableReg, ok := conf["disable_registration"] var disableRegistration bool @@ -165,7 +172,7 @@ func NewServiceRegistration(conf map[string]string, logger log.Logger, state sr. logger: logger, serviceName: service, - serviceTags: strutil.ParseDedupLowercaseAndSortStrings(tags, ","), + serviceTags: strutil.ParseDedupAndSortStrings(tags, ","), serviceAddress: serviceAddr, checkTimeout: checkTimeout, disableRegistration: disableRegistration, @@ -309,7 +316,7 @@ func (c *serviceRegistration) NotifyInitializedStateChange(isInitialized bool) e default: // NOTE: If this occurs Vault's initialized status could be out of // sync with Consul until checkTimer expires. - c.logger.Warn("concurrent initalize state change notify dropped") + c.logger.Warn("concurrent initialize state change notify dropped") } return nil @@ -395,9 +402,9 @@ func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdow } c.serviceLock.Lock() - defer c.serviceLock.Unlock() - registeredServiceID = serviceID + c.serviceLock.Unlock() + return } }() diff --git a/serviceregistration/consul/consul_service_registration_test.go b/serviceregistration/consul/consul_service_registration_test.go index 21b2b2573b53..20bc764bedbe 100644 --- a/serviceregistration/consul/consul_service_registration_test.go +++ b/serviceregistration/consul/consul_service_registration_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package consul import ( @@ -7,6 +10,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/go-test/deep" "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" @@ -560,3 +565,44 @@ func TestConsul_serviceID(t *testing.T) { } } } + +// TestConsul_NewServiceRegistration_serviceTags ensures that we do not modify +// the case of any 'service_tags' set by the config. +// We do expect tags to be sorted in lexicographic order (A-Z). +func TestConsul_NewServiceRegistration_serviceTags(t *testing.T) { + tests := map[string]struct { + Tags string + ExpectedTags []string + }{ + "lowercase": { + Tags: "foo,bar", + ExpectedTags: []string{"bar", "foo"}, + }, + "uppercase": { + Tags: "FOO,BAR", + ExpectedTags: []string{"BAR", "FOO"}, + }, + "PascalCase": { + Tags: "FooBar, Feedface", + ExpectedTags: []string{"Feedface", "FooBar"}, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + cfg := map[string]string{"service_tags": tc.Tags} + logger := logging.NewVaultLogger(log.Trace) + be, err := NewServiceRegistration(cfg, logger, sr.State{}) + require.NoError(t, err) + require.NotNil(t, be) + c, ok := be.(*serviceRegistration) + require.True(t, ok) + require.NotNil(t, c) + require.Equal(t, tc.ExpectedTags, c.serviceTags) + }) + } +} diff --git a/serviceregistration/kubernetes/client/client.go b/serviceregistration/kubernetes/client/client.go index 934d3bad908c..0fbef7267b0b 100644 --- a/serviceregistration/kubernetes/client/client.go +++ b/serviceregistration/kubernetes/client/client.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + package client import ( diff --git a/serviceregistration/kubernetes/client/client_test.go b/serviceregistration/kubernetes/client/client_test.go index 9f0dfad6e9a4..a02749e39155 100644 --- a/serviceregistration/kubernetes/client/client_test.go +++ b/serviceregistration/kubernetes/client/client_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package client import ( diff --git a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go index 9eb031a362c6..3110facc8098 100644 --- a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go +++ b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package main // This code builds a minimal binary of the lightweight kubernetes diff --git a/serviceregistration/kubernetes/client/config.go b/serviceregistration/kubernetes/client/config.go index 4e6a0f45848c..e8210964826c 100644 --- a/serviceregistration/kubernetes/client/config.go +++ b/serviceregistration/kubernetes/client/config.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package client import ( diff --git a/serviceregistration/kubernetes/retry_handler.go b/serviceregistration/kubernetes/retry_handler.go index 68afa8cdc576..3a4397c3b70b 100644 --- a/serviceregistration/kubernetes/retry_handler.go +++ b/serviceregistration/kubernetes/retry_handler.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( diff --git a/serviceregistration/kubernetes/retry_handler_test.go b/serviceregistration/kubernetes/retry_handler_test.go index e2be809d0a09..19f1c5b31ce6 100644 --- a/serviceregistration/kubernetes/retry_handler_test.go +++ b/serviceregistration/kubernetes/retry_handler_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( diff --git a/serviceregistration/kubernetes/service_registration.go b/serviceregistration/kubernetes/service_registration.go index f1c9a3c8ce40..1c22888016a3 100644 --- a/serviceregistration/kubernetes/service_registration.go +++ b/serviceregistration/kubernetes/service_registration.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( diff --git a/serviceregistration/kubernetes/service_registration_test.go b/serviceregistration/kubernetes/service_registration_test.go index a1bf001f1642..808807d1a9dc 100644 --- a/serviceregistration/kubernetes/service_registration_test.go +++ b/serviceregistration/kubernetes/service_registration_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package kubernetes import ( diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go index 50232a2e573e..225431fc1fae 100644 --- a/serviceregistration/kubernetes/testing/testserver.go +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -1,6 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package testing import ( + _ "embed" "encoding/json" "fmt" "io/ioutil" @@ -18,15 +22,27 @@ import ( const ( ExpectedNamespace = "default" ExpectedPodName = "shell-demo" - - // File names of samples pulled from real life. 
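The testserver.go hunk that follows replaces run-time file reads with compile-time embedding: the //go:embed directives below bake the fixture files into the test binary, so the tests no longer depend on whatever working directory the CI worker runs them from. Note that //go:embed only applies to package-level variable declarations, which is why the data moves out of Server() and up to package scope. The general pattern, with a hypothetical package and fixture name:

package fixtures

import (
	_ "embed" // the blank import is required when //go:embed is the only use
)

// sampleJSON holds the contents of testdata/sample.json, resolved relative
// to this source file at build time rather than the process cwd at run time.
//
//go:embed testdata/sample.json
var sampleJSON string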
- caCrtFile = "ca.crt" - respGetPod = "resp-get-pod.json" - respNotFound = "resp-not-found.json" - respUpdatePod = "resp-update-pod.json" - tokenFile = "token" ) +// Pull real-life-based testing data in from files at compile time. +// We decided to embed them in the test binary because of past issues +// with reading files that we encountered on CI workers. + +//go:embed ca.crt +var caCrt string + +//go:embed resp-get-pod.json +var getPodResponse string + +//go:embed resp-not-found.json +var notFoundResponse string + +//go:embed resp-update-pod.json +var updatePodTagsResponse string + +//go:embed token +var token string + var ( // ReturnGatewayTimeouts toggles whether the test server should return, // well, gateway timeouts... @@ -78,28 +94,6 @@ func Server(t *testing.T) (testState *State, testConf *Conf, closeFunc func()) { } } - // Read in our sample files. - token, err := readFile(tokenFile) - if err != nil { - t.Fatal(err) - } - caCrt, err := readFile(caCrtFile) - if err != nil { - t.Fatal(err) - } - notFoundResponse, err := readFile(respNotFound) - if err != nil { - t.Fatal(err) - } - getPodResponse, err := readFile(respGetPod) - if err != nil { - t.Fatal(err) - } - updatePodTagsResponse, err := readFile(respUpdatePod) - if err != nil { - t.Fatal(err) - } - // Plant our token in a place where it can be read for the config. tmpToken, err := ioutil.TempFile("", "token") if err != nil { diff --git a/serviceregistration/service_registration.go b/serviceregistration/service_registration.go index 5dc67af2bee1..4eb560793d42 100644 --- a/serviceregistration/service_registration.go +++ b/serviceregistration/service_registration.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + package serviceregistration /* diff --git a/shamir/.copywrite.hcl b/shamir/.copywrite.hcl new file mode 100644 index 000000000000..c4b09f33640c --- /dev/null +++ b/shamir/.copywrite.hcl @@ -0,0 +1,8 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2024 + + header_ignore = [] +} diff --git a/shamir/LICENSE b/shamir/LICENSE new file mode 100644 index 000000000000..f4f97ee5853a --- /dev/null +++ b/shamir/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. 
"License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/shamir/shamir.go b/shamir/shamir.go index e311f501d9bc..d9c0271a7098 100644 --- a/shamir/shamir.go +++ b/shamir/shamir.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package shamir import ( @@ -86,31 +89,40 @@ func div(a, b uint8) uint8 { panic("divide by zero") } - log_a := logTable[a] - log_b := logTable[b] - diff := ((int(log_a) - int(log_b)) + 255) % 255 - - ret := int(expTable[diff]) + ret := int(mult(a, inverse(b))) // Ensure we return zero if a is zero but aren't subject to timing attacks ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) return uint8(ret) } +// inverse calculates the inverse of a number in GF(2^8) +func inverse(a uint8) uint8 { + b := mult(a, a) + c := mult(a, b) + b = mult(c, c) + b = mult(b, b) + c = mult(b, c) + b = mult(b, b) + b = mult(b, b) + b = mult(b, c) + b = mult(b, b) + b = mult(a, b) + + return mult(b, b) +} + // mult multiplies two numbers in GF(2^8) func mult(a, b uint8) (out uint8) { - log_a := logTable[a] - log_b := logTable[b] - sum := (int(log_a) + int(log_b)) % 255 + var r uint8 = 0 + var i uint8 = 8 - ret := int(expTable[sum]) - - // Ensure we return zero if either a or b are zero but aren't subject to - // timing attacks - ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) - ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(b, 0), 0, ret) + for i > 0 { + i-- + r = (-(b >> i & 1) & a) ^ (-(r >> 7) & 0x1B) ^ (r + r) + } - return uint8(ret) + return r } // add combines two numbers in GF(2^8) diff --git a/shamir/shamir_test.go b/shamir/shamir_test.go index 90a7c371c294..940a34ecf167 100644 --- a/shamir/shamir_test.go +++ b/shamir/shamir_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
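The shamir.go hunk above removes the data-dependent table lookups in favor of branchless arithmetic. mult is a shift-and-add multiply over GF(2^8): each iteration conditionally XORs in a via the all-ones/all-zeros mask -(b>>i&1), and reduces overflow by the AES polynomial x^8 + x^4 + x^3 + x + 1, whose low byte is 0x1B. inverse then raises its argument to the 254th power through a fixed chain of squarings and multiplies; the nonzero field elements form a multiplicative group of order 255, so a^255 = 1 and therefore a^254 = a^-1. A self-contained sanity check of that identity (a sketch to illustrate the math, not part of the patch):

package main

import "fmt"

// mult mirrors the branchless GF(2^8) multiply introduced by the diff.
func mult(a, b uint8) uint8 {
	var r uint8
	for i := uint8(8); i > 0; {
		i--
		r = (-(b >> i & 1) & a) ^ (-(r >> 7) & 0x1B) ^ (r + r)
	}
	return r
}

func main() {
	// a^254 must be the multiplicative inverse of a for every nonzero a.
	for a := 1; a < 256; a++ {
		inv := uint8(1)
		for k := 0; k < 254; k++ {
			inv = mult(inv, uint8(a))
		}
		if mult(uint8(a), inv) != 1 {
			fmt.Printf("inversion failed for %#x\n", a)
			return
		}
	}
	fmt.Println("a^254 inverts every nonzero element of GF(2^8)")
}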
+// SPDX-License-Identifier: MPL-2.0 + package shamir import ( diff --git a/shamir/tables.go b/shamir/tables.go deleted file mode 100644 index 07ec4e5283fc..000000000000 --- a/shamir/tables.go +++ /dev/null @@ -1,79 +0,0 @@ -package shamir - -// Tables taken from http://www.samiam.org/galois.html -// They use 0xe5 (229) as the generator - -var ( - // logTable provides the log(X)/log(g) at each index X - logTable = [256]uint8{ - 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36, - 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18, - 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f, - 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e, - 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53, - 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3, - 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21, - 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74, - 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4, - 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1, - 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13, - 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80, - 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12, - 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5, - 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56, - 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba, - 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3, - 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47, - 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf, - 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05, - 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67, - 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd, - 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34, - 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 0x49, 0xec, - 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7, - 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e, - 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a, - 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d, - 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c, - 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d, - 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0, - 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38, - } - - // expTable provides the anti-log or exponentiation value - // for the equivalent index - expTable = [256]uint8{ - 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12, - 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36, - 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a, - 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee, - 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29, - 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b, - 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d, - 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c, - 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f, - 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a, - 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85, - 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94, - 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7, - 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2, - 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d, - 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17, - 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39, - 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b, - 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd, - 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c, - 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84, - 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97, - 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2, - 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd, - 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c, - 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24, - 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c, - 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4, - 
0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7, - 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52, - 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6, - 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01, - } -) diff --git a/shamir/tables_test.go b/shamir/tables_test.go deleted file mode 100644 index 81aa983b1087..000000000000 --- a/shamir/tables_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package shamir - -import "testing" - -func TestTables(t *testing.T) { - for i := 1; i < 256; i++ { - logV := logTable[i] - expV := expTable[logV] - if expV != uint8(i) { - t.Fatalf("bad: %d log: %d exp: %d", i, logV, expV) - } - } -} diff --git a/tools/codechecker/main.go b/tools/codechecker/main.go new file mode 100644 index 000000000000..e9c761c369cd --- /dev/null +++ b/tools/codechecker/main.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +import ( + "github.com/hashicorp/vault/tools/codechecker/pkg/godoctests" + "github.com/hashicorp/vault/tools/codechecker/pkg/gonilnilfunctions" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main(gonilnilfunctions.Analyzer, godoctests.Analyzer) +} diff --git a/tools/codechecker/pkg/godoctests/analyzer.go b/tools/codechecker/pkg/godoctests/analyzer.go new file mode 100644 index 000000000000..98b17a111be9 --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package godoctests + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "godoctests", + Doc: "Verifies that every go test has a go doc", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // starts with 'Test' + if !strings.HasPrefix(funcDecl.Name.Name, "Test") { + return + } + + // has one parameter + params := funcDecl.Type.Params.List + if len(params) != 1 { + return + } + + // parameter is a pointer + firstParamType, ok := params[0].Type.(*ast.StarExpr) + if !ok { + return + } + + selector, ok := firstParamType.X.(*ast.SelectorExpr) + if !ok { + return + } + + // the pointer comes from package 'testing' + selectorIdent, ok := selector.X.(*ast.Ident) + if !ok { + return + } + if selectorIdent.Name != "testing" { + return + } + + // the pointer has type 'T' + if selector.Sel == nil || selector.Sel.Name != "T" { + return + } + + // then there must be a godoc + if funcDecl.Doc == nil { + pass.Reportf(node.Pos(), "Test %s is missing a go doc", + funcDecl.Name.Name) + } else if !strings.HasPrefix(funcDecl.Doc.Text(), funcDecl.Name.Name) { + pass.Reportf(node.Pos(), "Test %s must have a go doc beginning with the function name", + funcDecl.Name.Name) + } + }) + return nil, nil +} diff --git a/tools/codechecker/pkg/godoctests/analyzer_test.go b/tools/codechecker/pkg/godoctests/analyzer_test.go new file mode 100644 index 000000000000..65bf6af1ddcd --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. 
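The godoctests analyzer above is a standard go/analysis pass: it filters FuncDecls down to Test-prefixed functions taking exactly one *testing.T, then reports any that lack a doc comment or whose doc comment does not begin with the function name. Because it is built on the analysis framework, it can be driven by any compatible driver, not only codechecker's multichecker binary. A minimal sketch of a standalone driver (hypothetical, not part of the patch), which would also make the analyzer usable through go vet's -vettool flag:

package main

import (
	"github.com/hashicorp/vault/tools/codechecker/pkg/godoctests"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// singlechecker wires up flag parsing, package loading, and reporting
	// for one analyzer, mirroring what multichecker does for several.
	singlechecker.Main(godoctests.Analyzer)
}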
+// SPDX-License-Identifier: BUSL-1.1
+
+package godoctests
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"golang.org/x/tools/go/analysis/analysistest"
+)
+
+// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against
+// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected
+// not to report anything.
+func TestAnalyzer(t *testing.T) {
+	f, err := os.Getwd()
+	if err != nil {
+		t.Fatal("failed to get working directory", err)
+	}
+	analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".")
+}
diff --git a/tools/codechecker/pkg/godoctests/testdata/funcs.go b/tools/codechecker/pkg/godoctests/testdata/funcs.go
new file mode 100644
index 000000000000..ddaf56bfd2a4
--- /dev/null
+++ b/tools/codechecker/pkg/godoctests/testdata/funcs.go
@@ -0,0 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package testdata
+
+import "testing"
+
+// Test_GoDocOK is a test that has a go doc
+func Test_GoDocOK(t *testing.T) {}
+
+func Test_NoGoDocFails(t *testing.T) {} // want "Test Test_NoGoDocFails is missing a go doc"
+
+// This test does not have a go doc beginning with the function name
+func Test_BadGoDocFails(t *testing.T) {} // want "Test Test_BadGoDocFails must have a go doc beginning with the function name"
+
+func test_TestHelperNoGoDocOK(t *testing.T) {}
+
+func Test_DifferentSignatureNoGoDocOK() {}
+
+func Test_DifferentSignature2NoGoDocOK(t *testing.T, a int) {}
diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go
new file mode 100644
index 000000000000..fbb32dcf1f0e
--- /dev/null
+++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go
@@ -0,0 +1,171 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package gonilnilfunctions
+
+import (
+	"go/ast"
+	"go/types"
+	"reflect"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+var Analyzer = &analysis.Analyzer{
+	Name:       "gonilnilfunctions",
+	Doc:        "Verifies that every go function with error as one of its two return types cannot return nil, nil",
+	Run:        run,
+	ResultType: reflect.TypeOf((interface{})(nil)),
+	Requires:   []*analysis.Analyzer{inspect.Analyzer},
+}
+
+// getNestedReturnStatements searches the AST for return statements and returns
+// them as a single flattened list.
+func getNestedReturnStatements(s ast.Stmt, returns []*ast.ReturnStmt) []*ast.ReturnStmt {
+	switch s := s.(type) {
+	case *ast.BlockStmt:
+		statements := make([]*ast.ReturnStmt, 0)
+		for _, stmt := range s.List {
+			statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...)
+		}
+
+		return append(returns, statements...)
+ case *ast.BranchStmt: + return returns + case *ast.ForStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.IfStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.LabeledStmt: + return getNestedReturnStatements(s.Stmt, returns) + case *ast.RangeStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.ReturnStmt: + return append(returns, s) + case *ast.SwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.SelectStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.TypeSwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.CommClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.CaseClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.ExprStmt: + return returns + } + return returns +} + +// run runs the analysis, failing for functions whose signatures contain two results including one error +// (e.g. (something, error)), that contain multiple nil returns +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // If the function has the "Ignore" godoc comment, skip it + if strings.Contains(funcDecl.Doc.Text(), "ignore-nil-nil-function-check") { + return + } + + // The function returns something + if funcDecl == nil || funcDecl.Type == nil || funcDecl.Type.Results == nil { + return + } + + // The function has more than 1 return value + results := funcDecl.Type.Results.List + if len(results) < 2 { + return + } + + // isError is a helper function to check if a Field is of error type + isError := func(field *ast.Field) bool { + if named, ok := pass.TypesInfo.TypeOf(field.Type).(*types.Named); ok { + namedObject := named.Obj() + return namedObject != nil && namedObject.Pkg() == nil && namedObject.Name() == "error" + } + return false + } + + // one of the return values is error + var errorFound bool + for _, result := range results { + if isError(result) { + errorFound = true + break + } + } + + if !errorFound { + return + } + + // Since these statements might be e.g. blocks with + // other statements inside, we need to get the return statements + // from inside them, first. + statements := funcDecl.Body.List + + returnStatements := make([]*ast.ReturnStmt, 0) + for _, statement := range statements { + returnStatements = append(returnStatements, getNestedReturnStatements(statement, make([]*ast.ReturnStmt, 0))...) 
+ } + + for _, returnStatement := range returnStatements { + numResultsNil := 0 + results := returnStatement.Results + + // We only want two-arg functions (something, nil) + // We can remove this block in the future if we change our mind + if len(results) != 2 { + continue + } + + for _, result := range results { + // nil is an ident + ident, isIdent := result.(*ast.Ident) + if isIdent { + if ident.Name == "nil" { + // We found one nil in the return list + numResultsNil++ + } + } + } + // We found N nils, and our function returns N results, so this fails the check + if numResultsNil == len(results) { + // All the return values are nil, so we fail the report + pass.Reportf(node.Pos(), "Function %s can return an error, and has a statement that returns only nils", + funcDecl.Name.Name) + + // We break out of the loop of checking return statements, so that we don't repeat ourselves + break + } + } + }) + + var success interface{} + return success, nil +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go new file mode 100644 index 000000000000..4cfac4af4825 --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package gonilnilfunctions + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against +// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected +// not to report anything. +func TestAnalyzer(t *testing.T) { + f, err := os.Getwd() + if err != nil { + t.Fatal("failed to get working directory", err) + } + analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go new file mode 100644 index 000000000000..73f3ee9f589b --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package testdata + +func ReturnReturnOkay() (any, error) { + var i interface{} + return i, nil +} + +func OneGoodOneBad() (any, error) { // want "Function OneGoodOneBad can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return i, nil + } + return nil, nil +} + +func OneBadOneGood() (any, error) { // want "Function OneBadOneGood can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return nil, nil + } + return i, nil +} + +func EmptyFunc() {} + +func TwoNilNils() (any, error) { // want "Function TwoNilNils can return an error, and has a statement that returns only nils" + if true { + return nil, nil + } + return nil, nil +} + +// ThreeResults should not fail, as while it returns nil, nil, nil, it has three results, not two. 
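One behavioral detail of the walker above is worth noting: getNestedReturnStatements recurses into the bodies of if, for, range, switch, and select statements, but not into the else branch of an if, so a nil, nil return hidden behind an else would slip past (the testdata below happens not to exercise that shape). An alternative that sweeps every node is ast.Inspect; a sketch of what that would look like, under the assumption that nested function literals should be skipped, as the hand-rolled walker effectively skips them:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

// collectReturns gathers every return statement under root, including those
// in else branches, but does not descend into nested function literals.
func collectReturns(root ast.Node) []*ast.ReturnStmt {
	var returns []*ast.ReturnStmt
	ast.Inspect(root, func(n ast.Node) bool {
		switch n := n.(type) {
		case *ast.FuncLit:
			return false // a closure's returns belong to the closure
		case *ast.ReturnStmt:
			returns = append(returns, n)
		}
		return true
	})
	return returns
}

func main() {
	src := `package p
func f(ok bool) (any, error) {
	if ok {
		return struct{}{}, nil
	} else {
		return nil, nil
	}
}`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(collectReturns(file))) // 2: the else branch is found too
}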
+func ThreeResults() (any, any, error) { + return nil, nil, nil +} + +func TwoArgsNoError() (any, any) { + return nil, nil +} + +func NestedReturn() (any, error) { // want "Function NestedReturn can return an error, and has a statement that returns only nils" + { + { + { + return nil, nil + } + } + } +} + +func NestedForReturn() (any, error) { // want "Function NestedForReturn can return an error, and has a statement that returns only nils" + for { + for i := 0; i < 100; i++ { + { + return nil, nil + } + } + } +} + +func AnyErrorNilNil() (any, error) { // want "Function AnyErrorNilNil can return an error, and has a statement that returns only nils" + return nil, nil +} + +// Skipped should be skipped because of the following line: +// ignore-nil-nil-function-check +func Skipped() (any, error) { + return nil, nil +} diff --git a/tools/semgrep/ci/atomic.yml b/tools/semgrep/ci/atomic.yml index 2fea38bd77b4..97bad1ba858d 100644 --- a/tools/semgrep/ci/atomic.yml +++ b/tools/semgrep/ci/atomic.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: atomics-64bit-safety patterns: diff --git a/tools/semgrep/ci/bad-multierror-append.yml b/tools/semgrep/ci/bad-multierror-append.yml index 86b637577d9d..166d0564ba70 100644 --- a/tools/semgrep/ci/bad-multierror-append.yml +++ b/tools/semgrep/ci/bad-multierror-append.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: bad-multierror-append patterns: diff --git a/tools/semgrep/ci/bad-nil-guard.yml b/tools/semgrep/ci/bad-nil-guard.yml index f5fd122e8c96..01e51d298312 100644 --- a/tools/semgrep/ci/bad-nil-guard.yml +++ b/tools/semgrep/ci/bad-nil-guard.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: bad-nil-guard patterns: diff --git a/tools/semgrep/ci/error-shadowing.yml b/tools/semgrep/ci/error-shadowing.yml index 8362df5f7a7e..490a6667f5fe 100644 --- a/tools/semgrep/ci/error-shadowing.yml +++ b/tools/semgrep/ci/error-shadowing.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: error-shadow-check-types patterns: diff --git a/tools/semgrep/ci/fmt-printf.yml b/tools/semgrep/ci/fmt-printf.yml index 18777cabeffc..47e298ebf001 100644 --- a/tools/semgrep/ci/fmt-printf.yml +++ b/tools/semgrep/ci/fmt-printf.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: fmt.Printf languages: [go] diff --git a/tools/semgrep/ci/hashsum.yml b/tools/semgrep/ci/hashsum.yml index 47dfc02a98c4..8ef8ca7f0de7 100644 --- a/tools/semgrep/ci/hashsum.yml +++ b/tools/semgrep/ci/hashsum.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: hash-sum-without-write patterns: diff --git a/tools/semgrep/ci/hmac-bytes.yml b/tools/semgrep/ci/hmac-bytes.yml index 629a8fe6c5b5..e5ce32ef2d6b 100644 --- a/tools/semgrep/ci/hmac-bytes.yml +++ b/tools/semgrep/ci/hmac-bytes.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: use-hmac-equal patterns: diff --git a/tools/semgrep/ci/hmac-hash.yml b/tools/semgrep/ci/hmac-hash.yml index 625d271c18b4..76e1e9e726fc 100644 --- a/tools/semgrep/ci/hmac-hash.yml +++ b/tools/semgrep/ci/hmac-hash.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
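Two of the semgrep rules above guard well-documented hashing pitfalls: judging by its id, hash-sum-without-write flags a Sum taken without a preceding Write (which would MAC nothing), and use-hmac-equal flags MAC comparison via bytes.Equal, which short-circuits on the first differing byte and therefore leaks timing. A sketch of the safe shape, with an illustrative key:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

func main() {
	key := []byte("demo-key") // illustrative only, not a real secret
	msg := []byte("payload")

	mac := hmac.New(sha256.New, key)
	mac.Write(msg) // Write before Sum, or the MAC covers nothing
	sum := mac.Sum(nil)

	// hmac.Equal compares in constant time, so the comparison itself
	// cannot be used as a byte-by-byte timing oracle.
	fmt.Println(hmac.Equal(sum, sum)) // true
}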
+# SPDX-License-Identifier: BUSL-1.1 + rules: - id: hmac-needs-new patterns: diff --git a/tools/semgrep/lock-not-unlocked-on-return.yml b/tools/semgrep/ci/lock-not-unlocked-on-return.yml similarity index 96% rename from tools/semgrep/lock-not-unlocked-on-return.yml rename to tools/semgrep/ci/lock-not-unlocked-on-return.yml index 2d097f1aa0ff..958d8dfc17e5 100644 --- a/tools/semgrep/lock-not-unlocked-on-return.yml +++ b/tools/semgrep/ci/lock-not-unlocked-on-return.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: lock_not_unlocked message: | @@ -86,6 +89,10 @@ rules: ... return ... } + # Another lock object that returns an error + - pattern-not: | + $ERR = $LOCK.Lock() + ... # deferred unlock with release function - pattern-not: | $LOCK.Lock() @@ -290,4 +297,4 @@ rules: if $COND { ... return ... - } \ No newline at end of file + } diff --git a/tools/semgrep/ci/logger-format-string.yml b/tools/semgrep/ci/logger-format-string.yml index bb1b83e209bc..136c4eb1486c 100644 --- a/tools/semgrep/ci/logger-format-string.yml +++ b/tools/semgrep/ci/logger-format-string.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: logger-used-with-format-string patterns: diff --git a/tools/semgrep/ci/loop-time-after.yml b/tools/semgrep/ci/loop-time-after.yml new file mode 100644 index 000000000000..4104c9568e03 --- /dev/null +++ b/tools/semgrep/ci/loop-time-after.yml @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +rules: + - id: loop-time-after + pattern: | + for ... { + ... + select { + case ... + case <-time.After(...): + ... + case ... + } + ... + } + message: <-time.After() used in for loop, consider using a ticker or a timer instead + languages: + - go + severity: WARNING \ No newline at end of file diff --git a/tools/semgrep/ci/loopclosure.yml b/tools/semgrep/ci/loopclosure.yml index 967376127db8..ce358e951b0b 100644 --- a/tools/semgrep/ci/loopclosure.yml +++ b/tools/semgrep/ci/loopclosure.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: loopclosure patterns: diff --git a/tools/semgrep/ci/no-nil-check.yml b/tools/semgrep/ci/no-nil-check.yml index c39bbb5420f8..00c64c3641bb 100644 --- a/tools/semgrep/ci/no-nil-check.yml +++ b/tools/semgrep/ci/no-nil-check.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: nil-check-logical-storage patterns: diff --git a/tools/semgrep/ci/oddifsequence.yml b/tools/semgrep/ci/oddifsequence.yml index bee36d06fad2..8e4e12d143da 100644 --- a/tools/semgrep/ci/oddifsequence.yml +++ b/tools/semgrep/ci/oddifsequence.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: odd-sequence-ifs patterns: diff --git a/tools/semgrep/ci/return-nil-error.yml b/tools/semgrep/ci/return-nil-error.yml index 0b6f7f677c65..18a8bdff86a1 100644 --- a/tools/semgrep/ci/return-nil-error.yml +++ b/tools/semgrep/ci/return-nil-error.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: return-nil patterns: diff --git a/tools/semgrep/ci/return-nil.yml b/tools/semgrep/ci/return-nil.yml index 18910c4a5fbb..66fd310aa692 100644 --- a/tools/semgrep/ci/return-nil.yml +++ b/tools/semgrep/ci/return-nil.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
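The new loop-time-after rule above exists because each <-time.After(d) inside a loop allocates a fresh runtime timer per iteration, and in Go releases contemporary with this change those timers were not eligible for collection until they fired, so a tight select loop could accumulate garbage for the full duration d on every pass. A sketch of the rewrite the rule's message suggests, reusing one ticker across iterations:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Created once, outside the loop, so only one timer ever exists.
	deadline := time.After(250 * time.Millisecond)

	tick := time.NewTicker(50 * time.Millisecond)
	defer tick.Stop() // unlike time.After, a ticker can be released explicitly

	for {
		select {
		case <-tick.C:
			fmt.Println("tick")
		case <-deadline:
			fmt.Println("done")
			return
		}
	}
}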
+# SPDX-License-Identifier: BUSL-1.1 + rules: - id: hc-return-nil patterns: diff --git a/tools/semgrep/ci/time-parse-duration.yml b/tools/semgrep/ci/time-parse-duration.yml new file mode 100644 index 000000000000..3c1746d69f0f --- /dev/null +++ b/tools/semgrep/ci/time-parse-duration.yml @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +rules: + - id: time-parse-duration + patterns: + - pattern: time.ParseDuration + message: "Usage of time.ParseDuration. Use parseutil.ParseDurationSeconds, instead!" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/wrongerrcall.yml b/tools/semgrep/ci/wrongerrcall.yml index 5a1a4d37af8d..8de9627b45a5 100644 --- a/tools/semgrep/ci/wrongerrcall.yml +++ b/tools/semgrep/ci/wrongerrcall.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: maybe-wrong-err patterns: diff --git a/tools/semgrep/ci/wronglock.yml b/tools/semgrep/ci/wronglock.yml index 5f8422ce46ca..5b408b6a0fe4 100644 --- a/tools/semgrep/ci/wronglock.yml +++ b/tools/semgrep/ci/wronglock.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: wrong-lock-unlock patterns: diff --git a/tools/semgrep/hostport.yml b/tools/semgrep/hostport.yml index c47510e8814b..f687ae707f29 100644 --- a/tools/semgrep/hostport.yml +++ b/tools/semgrep/hostport.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + # https://github.com/golang/go/issues/28308, from @stapelberg rules: - id: sprintf-host-port diff --git a/tools/semgrep/joinpath.yml b/tools/semgrep/joinpath.yml index 173aba0ab0a0..bc58134f7ee3 100644 --- a/tools/semgrep/joinpath.yml +++ b/tools/semgrep/joinpath.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: use-strings-join-path patterns: diff --git a/tools/semgrep/logger-sprintf.yml b/tools/semgrep/logger-sprintf.yml index 3f58ba18ba8f..a478f085b16a 100644 --- a/tools/semgrep/logger-sprintf.yml +++ b/tools/semgrep/logger-sprintf.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: logger-used-with-sprintf patterns: diff --git a/tools/semgrep/paths-with-callbacks-and-operations.yml b/tools/semgrep/paths-with-callbacks-and-operations.yml index 08e9c1ec1e39..33b710012004 100644 --- a/tools/semgrep/paths-with-callbacks-and-operations.yml +++ b/tools/semgrep/paths-with-callbacks-and-operations.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: path-has-both-callbacks-and-operations patterns: diff --git a/tools/semgrep/paths-with-callbacks.yml b/tools/semgrep/paths-with-callbacks.yml index 3a122cc6d9e4..d04a85ea3d76 100644 --- a/tools/semgrep/paths-with-callbacks.yml +++ b/tools/semgrep/paths-with-callbacks.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: uses-path-callbacks patterns: diff --git a/tools/semgrep/physical-storage.yml b/tools/semgrep/physical-storage.yml index 970c77693ed3..660a7c179438 100644 --- a/tools/semgrep/physical-storage.yml +++ b/tools/semgrep/physical-storage.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. 
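Among the rules above, sprintf-host-port (hostport.yml) targets a concrete correctness bug rather than style: fmt.Sprintf("%s:%d", host, port) produces an undialable address when host is an IPv6 literal, because the port separator is indistinguishable from the address's own colons. net.JoinHostPort adds the required brackets:

package main

import (
	"fmt"
	"net"
)

func main() {
	host := "2001:db8::1" // IPv6 literal from the documentation range

	fmt.Printf("%s:8200\n", host)               // 2001:db8::1:8200 -- ambiguous, won't dial
	fmt.Println(net.JoinHostPort(host, "8200")) // [2001:db8::1]:8200 -- correct
}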
+# SPDX-License-Identifier: BUSL-1.1 + rules: - id: physical-storage-bypass-encryption patterns: diff --git a/tools/semgrep/replication-has-state.yml b/tools/semgrep/replication-has-state.yml index 416a59e6af6a..d97bff493691 100644 --- a/tools/semgrep/replication-has-state.yml +++ b/tools/semgrep/replication-has-state.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: replication-state-should-use-IsPerfSecondary patterns: diff --git a/tools/semgrep/self-equals.yml b/tools/semgrep/self-equals.yml index 7cc5243f2b07..1e43b253f8b9 100644 --- a/tools/semgrep/self-equals.yml +++ b/tools/semgrep/self-equals.yml @@ -1,3 +1,6 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + rules: - id: self-equals patterns: diff --git a/tools/stubmaker/main.go b/tools/stubmaker/main.go new file mode 100644 index 000000000000..187ca219e226 --- /dev/null +++ b/tools/stubmaker/main.go @@ -0,0 +1,324 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package main + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/hashicorp/go-hclog" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/imports" +) + +var logger hclog.Logger + +func fatal(err error) { + logger.Error("fatal error", "error", err) + os.Exit(1) +} + +type generator struct { + file *ast.File + fset *token.FileSet +} + +func main() { + logger = hclog.New(&hclog.LoggerOptions{ + Name: "stubmaker", + Level: hclog.Trace, + }) + + // Setup git, both so we can determine if we're running on enterprise, and + // so we can make sure we don't clobber a non-transient file. + repo, err := git.PlainOpenWithOptions(".", &git.PlainOpenOptions{ + DetectDotGit: true, + }) + if err != nil { + fatal(err) + } + + wt, err := repo.Worktree() + if err != nil { + fatal(err) + } + if !isEnterprise(wt) { + return + } + + // Read the file and figure out if we need to do anything. + inputFile := os.Getenv("GOFILE") + if !strings.HasSuffix(inputFile, "_stubs_oss.go") { + fatal(fmt.Errorf("stubmaker should only be invoked from files ending in _stubs_oss.go")) + } + + baseFilename := strings.TrimSuffix(inputFile, "_stubs_oss.go") + outputFile := baseFilename + "_stubs_ent.go" + b, err := os.ReadFile(inputFile) + if err != nil { + fatal(err) + } + + inputParsed, err := parseFile(b) + if err != nil { + fatal(err) + } + needed, existing, err := inputParsed.areStubsNeeded() + if err != nil { + fatal(err) + } + if !needed { + return + } + + // We'd like to write the file, but first make sure that we're not going + // to blow away anyone's work or overwrite a file already in git. 
+ head, err := repo.Head() + if err != nil { + fatal(err) + } + obj, err := repo.Object(plumbing.AnyObject, head.Hash()) + if err != nil { + fatal(err) + } + + st, err := wt.Status() + if err != nil { + fatal(err) + } + + tracked, err := inGit(wt, st, obj, outputFile) + if err != nil { + fatal(err) + } + if tracked { + fatal(fmt.Errorf("output file %s exists in git, not overwriting", outputFile)) + } + + // Now we can finally write the file + output, err := os.Create(outputFile + ".tmp") + if err != nil { + fatal(err) + } + err = inputParsed.writeStubs(output, existing) + if err != nil { + // If we don't end up writing to the file, delete it. + os.Remove(outputFile + ".tmp") + } else { + os.Rename(outputFile+".tmp", outputFile) + } + if err != nil { + fatal(err) + } +} + +func (g *generator) writeStubs(output *os.File, existingFuncs map[string]struct{}) error { + // delete all functions/methods that are already defined + g.modifyAST(existingFuncs) + + // write the updated code to buf + buf := new(bytes.Buffer) + err := format.Node(buf, g.fset, g.file) + if err != nil { + return err + } + + // remove any unneeded imports + res, err := imports.Process("", buf.Bytes(), &imports.Options{ + Fragment: true, + AllErrors: false, + Comments: true, + FormatOnly: false, + }) + if err != nil { + return err + } + + // add the code generation line and update the build tags + outputLines, err := fixGeneratedComments(res) + if err != nil { + return err + } + _, err = output.WriteString(strings.Join(outputLines, "\n") + "\n") + return err +} + +func fixGeneratedComments(b []byte) ([]string, error) { + warning := "// Code generated by tools/stubmaker; DO NOT EDIT." + goGenerate := "//go:generate go run github.com/hashicorp/vault/tools/stubmaker" + + scanner := bufio.NewScanner(bytes.NewBuffer(b)) + var outputLines []string + for scanner.Scan() { + line := scanner.Text() + switch { + case strings.Contains(line, "//go:build ") && strings.Contains(line, "!enterprise"): + outputLines = append(outputLines, warning, "") + line = strings.ReplaceAll(line, "!enterprise", "enterprise") + case line == goGenerate: + continue + } + outputLines = append(outputLines, line) + } + return outputLines, scanner.Err() +} + +func inGit(wt *git.Worktree, st git.Status, obj object.Object, path string) (bool, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return false, fmt.Errorf("path %s can't be made absolute: %w", path, err) + } + relPath, err := filepath.Rel(wt.Filesystem.Root(), absPath) + if err != nil { + return false, fmt.Errorf("path %s can't be made relative: %w", absPath, err) + } + + fst := st.File(relPath) + if fst.Worktree != git.Untracked || fst.Staging != git.Untracked { + return true, nil + } + + curwd, err := os.Getwd() + if err != nil { + return false, err + } + + blob, err := resolve(obj, relPath) + if err != nil && !strings.Contains(err.Error(), "file not found") { + return false, fmt.Errorf("error resolving path %s from %s: %w", relPath, curwd, err) + } + + return blob != nil, nil +} + +func isEnterprise(wt *git.Worktree) bool { + st, err := wt.Filesystem.Stat("enthelpers") + onOss := errors.Is(err, os.ErrNotExist) + onEnt := st != nil + + switch { + case onOss && !onEnt: + case !onOss && onEnt: + default: + fatal(err) + } + return onEnt +} + +// resolve blob at given path from obj. obj can be a commit, tag, tree, or blob. 
+func resolve(obj object.Object, path string) (*object.Blob, error) { + switch o := obj.(type) { + case *object.Commit: + t, err := o.Tree() + if err != nil { + return nil, err + } + return resolve(t, path) + case *object.Tag: + target, err := o.Object() + if err != nil { + return nil, err + } + return resolve(target, path) + case *object.Tree: + file, err := o.File(path) + if err != nil { + return nil, err + } + return &file.Blob, nil + case *object.Blob: + return o, nil + default: + return nil, object.ErrUnsupportedObject + } +} + +// areStubsNeeded checks if all functions and methods defined in the stub file +// are present in the package +func (g *generator) areStubsNeeded() (needed bool, existingStubs map[string]struct{}, err error) { + pkg, err := parsePackage(".", []string{"enterprise"}) + if err != nil { + return false, nil, err + } + + stubFunctions := make(map[string]struct{}) + for _, d := range g.file.Decls { + dFunc, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + stubFunctions[dFunc.Name.Name] = struct{}{} + + } + found := make(map[string]struct{}) + for name, val := range pkg.TypesInfo.Defs { + if val == nil { + continue + } + _, ok := val.Type().(*types.Signature) + if !ok { + continue + } + if _, ok := stubFunctions[name.Name]; ok { + found[name.Name] = struct{}{} + } + } + + return len(found) != len(stubFunctions), found, nil +} + +func (g *generator) modifyAST(exists map[string]struct{}) { + astutil.Apply(g.file, nil, func(c *astutil.Cursor) bool { + switch x := c.Node().(type) { + case *ast.FuncDecl: + if _, ok := exists[x.Name.Name]; ok { + c.Delete() + } + } + + return true + }) +} + +func parsePackage(name string, tags []string) (*packages.Package, error) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + } + pkgs, err := packages.Load(cfg, name) + if err != nil { + return nil, fmt.Errorf("error parsing package %s: %v", name, err) + } + if len(pkgs) != 1 { + return nil, fmt.Errorf("error: %d packages found", len(pkgs)) + } + return pkgs[0], nil +} + +func parseFile(buffer []byte) (*generator, error) { + fs := token.NewFileSet() + f, err := parser.ParseFile(fs, "", buffer, parser.AllErrors|parser.ParseComments) + if err != nil { + return nil, err + } + return &generator{ + file: f, + fset: fs, + }, nil +} diff --git a/tools/tools.go b/tools/tools.go index 7458113cec22..066198c07145 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -1,31 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + //go:build tools -// This file ensures tool dependencies are kept in sync. This is the -// recommended way of doing this according to -// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module -// To install the following tools at the version used by this repo run: -// $ make bootstrap -// or +// This file is here for backwards compat only. You can now use make instead of go generate to +// install tools. 
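Taken together, stubmaker is a go:generate helper for keeping the CE and enterprise trees in sync: invoked from a file named *_stubs_oss.go on an enterprise checkout (on CE it exits without doing anything), it flips the build tag, strips the go:generate directive, drops any stub the enterprise tree already implements, and refuses to clobber a generated file that git is tracking. A hypothetical before/after based on that behavior, with the file and function names invented for illustration and license headers elided:

foo_stubs_oss.go (hand-written; the directive below invokes stubmaker):

//go:build !enterprise

package foo

//go:generate go run github.com/hashicorp/vault/tools/stubmaker

func entPreflight() error { return nil }

foo_stubs_ent.go (what stubmaker would emit, assuming the enterprise tree does not yet implement entPreflight):

// Code generated by tools/stubmaker; DO NOT EDIT.

//go:build enterprise

package foo

func entPreflight() error { return nil }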
+
+// You can replace
 // $ go generate -tags tools tools/tools.go
+// with
+// $ make tools

 package tools

-//go:generate go install golang.org/x/tools/cmd/goimports
-//go:generate go install github.com/client9/misspell/cmd/misspell
-//go:generate go install mvdan.cc/gofumpt
-//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go
-//go:generate go install google.golang.org/grpc/cmd/protoc-gen-go-grpc
-//go:generate go install github.com/favadi/protoc-go-inject-tag
-import (
-	_ "golang.org/x/tools/cmd/goimports"
-
-	_ "github.com/client9/misspell/cmd/misspell"
-
-	_ "mvdan.cc/gofumpt"
-
-	_ "google.golang.org/protobuf/cmd/protoc-gen-go"
-
-	_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
-
-	_ "github.com/favadi/protoc-go-inject-tag"
-)
+//go:generate ./tools.sh install-tools
diff --git a/tools/tools.sh b/tools/tools.sh
new file mode 100755
index 000000000000..16a117b0efd7
--- /dev/null
+++ b/tools/tools.sh
@@ -0,0 +1,156 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+set -euo pipefail
+
+# Determine the root directory of the repository
+repo_root() {
+  git rev-parse --show-toplevel
+}
+
+# Install an external Go tool.
+go_install() {
+  if go install "$1"; then
+    echo "--> $1 ✔"
+  else
+    echo "--> $1 ✖"
+    return 1
+  fi
+}
+
+# Check for a tool binary in the path.
+check_tool() {
+  if builtin type -P "$2" &> /dev/null; then
+    echo "--> $2 ✔"
+  else
+    echo "--> $2 ✖"
+    echo "Could not find required $1 tool $2. Run 'make tools-$1' to install it." 1>&2
+    return 1
+  fi
+}
+
+# Install external tools.
+install_external() {
+  local tools
+  # If you update this please update check_external below as well as our external tools
+  # install action .github/actions/install-external-tools.yml
+  tools=(
+    github.com/bufbuild/buf/cmd/buf@v1.25.0
+    github.com/favadi/protoc-go-inject-tag@latest
+    github.com/golangci/misspell/cmd/misspell@latest
+    github.com/golangci/revgrep/cmd/revgrep@latest
+    golang.org/x/tools/cmd/goimports@latest
+    google.golang.org/protobuf/cmd/protoc-gen-go@latest
+    google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+    gotest.tools/gotestsum@latest
+    honnef.co/go/tools/cmd/staticcheck@latest
+    mvdan.cc/gofumpt@latest
+    github.com/loggerhead/enumer@latest
+  )
+
+  echo "==> Installing external tools..."
+  for tool in "${tools[@]}"; do
+    go_install "$tool"
+  done
+}
+
+# Check that all external tools are installed
+check_external() {
+  # Ensure that all external tools are available. In CI we'll prefer installing pre-built external
+  # tools for speed instead of go install so that we don't require downloading Go modules and
+  # compiling tools from scratch in every CI job.
+  # See .github/actions/install-external-tools.yml for that workflow.
+  local tools
+  tools=(
+    buf
+    enumer
+    gofumpt
+    goimports
+    gotestsum
+    misspell
+    protoc-gen-go
+    protoc-gen-go-grpc
+    protoc-go-inject-tag
+    revgrep
+    staticcheck
+  )
+
+  echo "==> Checking for external tools..."
+  for tool in "${tools[@]}"; do
+    check_tool external "$tool"
+  done
+}
+
+# Install internal tools.
+install_internal() {
+  local tools
+  # If you update this please update check_internal below.
+  tools=(
+    codechecker
+    stubmaker
+  )
+
+  echo "==> Installing internal tools..."
+  pushd "$(repo_root)" &> /dev/null
+  for tool in "${tools[@]}"; do
+    go_install ./tools/"$tool"
+  done
+  popd &> /dev/null
+}
+
+# Check that all internal tools are installed
+check_internal() {
+  # Ensure that all required internal tools are available.
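The script keeps `install_*` and `check_*` in deliberate pairs: the install functions `go install` each tool (external ones by module path, internal ones from `./tools/` at the repo root), while the check functions only verify that the binaries are on `PATH`, which is what CI relies on after restoring pre-built tools. Given the subcommands wired up in `main` further down, typical invocations would look like:

```bash
# From the repository root: build and install everything a contributor needs.
./tools/tools.sh install

# CI-style verification that the external binaries are already on PATH.
./tools/tools.sh check-external
```

The body of `check_internal`, which mirrors `install_internal`'s tool list, continues below.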
+ local tools + tools=( + codechecker + stubmaker + ) + + echo "==> Checking for internal tools..." + for tool in "${tools[@]}"; do + check_tool internal "$tool" + done +} + +# Install tools. +install() { + install_internal + install_external +} + +# Check tools. +check() { + check_internal + check_external +} + +main() { + case $1 in + install-external) + install_external + ;; + install-internal) + install_internal + ;; + check-external) + check_external + ;; + check-internal) + check_internal + ;; + install) + install + ;; + check) + check + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/ui/.copywrite.hcl b/ui/.copywrite.hcl new file mode 100644 index 000000000000..935bc238e476 --- /dev/null +++ b/ui/.copywrite.hcl @@ -0,0 +1,33 @@ +# (OPTIONAL) Overrides the copywrite config schema version +# Default: 1 +schema_version = 1 + +project { + # (OPTIONAL) SPDX-compatible license identifier + # Leave blank if you don't wish to license the project + # Default: "MPL-2.0" + license = "BUSL-1.1" + + # (OPTIONAL) Represents the copyright holder used in all statements + # Default: HashiCorp, Inc. + # copyright_holder = "" + + # (OPTIONAL) Represents the year that the project initially began + # Default: + # copyright_year = 0 + + # (OPTIONAL) A list of globs that should not have copyright or license headers . + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + # Default: [] + header_ignore = [ + "node_modules/**" + ] + + # (OPTIONAL) Links to an upstream repo for determining repo relationships + # This is for special cases and should not normally be set. + # Default: "" + # upstream = "hashicorp/" +} + + diff --git a/ui/.ember-cli b/ui/.ember-cli index f6e59871ff52..fcd9114b1235 100644 --- a/ui/.ember-cli +++ b/ui/.ember-cli @@ -9,8 +9,8 @@ "output-path": "../http/web_ui", /** - Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript - rather than JavaScript by default, when a TypeScript version of a given blueprint is available. + Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript + rather than JavaScript by default, when a TypeScript version of a given blueprint is available. */ "isTypeScriptProject": false } diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index 79b54b4eed05..8ec37474d360 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -1,15 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. 
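The new `ui/.copywrite.hcl` scopes HashiCorp's `copywrite` header tool to the UI tree, switching the license identifier to BUSL-1.1 and skipping `node_modules/**`. A rough usage sketch follows; the flags are from the copywrite CLI's general usage rather than this diff, so verify them against your installed version:

```bash
# Dry run: report files that are missing the required header without changing them.
copywrite headers --plan

# Insert the BUSL-1.1 headers according to the nearest .copywrite.hcl.
copywrite headers
```

The UI lint configuration changes that carry the same header convention follow.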
+ * SPDX-License-Identifier: BUSL-1.1 + */ + /* eslint-disable no-undef */ 'use strict'; module.exports = { - parser: 'babel-eslint', + parser: '@babel/eslint-parser', root: true, parserOptions: { - ecmaVersion: 2018, + ecmaVersion: 'latest', sourceType: 'module', - ecmaFeatures: { - legacyDecorators: true, + requireConfigFile: false, + babelOptions: { + plugins: [['@babel/plugin-proposal-decorators', { decoratorsBeforeExport: true }]], }, }, plugins: ['ember'], @@ -40,6 +46,7 @@ module.exports = { files: [ './.eslintrc.js', './.prettierrc.js', + './.stylelintrc.js', './.template-lintrc.js', './ember-cli-build.js', './testem.js', @@ -55,13 +62,7 @@ module.exports = { browser: false, node: true, }, - plugins: ['node'], - extends: ['plugin:node/recommended'], - rules: { - // this can be removed once the following is fixed - // https://github.com/mysticatea/eslint-plugin-node/issues/77 - 'node/no-unpublished-require': 'off', - }, + extends: ['plugin:n/recommended'], }, { // test files diff --git a/ui/.github/workflows/ci.yml b/ui/.github/workflows/ci.yml deleted file mode 100644 index 6287d32644de..000000000000 --- a/ui/.github/workflows/ci.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: CI - -on: - push: - branches: - - main - - master - pull_request: {} - -concurrency: - group: ci-${{ github.head_ref || github.ref }} - cancel-in-progress: true - -jobs: - lint: - name: "Lint" - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Install Node - uses: actions/setup-node@v3 - with: - node-version: 12.x - cache: yarn - - name: Install Dependencies - run: yarn install --frozen-lockfile - - name: Lint - run: yarn lint - - test: - name: "Test" - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - name: Install Node - uses: actions/setup-node@v3 - with: - node-version: 12.x - cache: yarn - - name: Install Dependencies - run: yarn install --frozen-lockfile - - name: Run Tests - run: yarn test diff --git a/ui/.gitignore b/ui/.gitignore index 70da8c051de3..5ba1567f70f1 100644 --- a/ui/.gitignore +++ b/ui/.gitignore @@ -29,3 +29,16 @@ package-lock.json # broccoli-debug /DEBUG/ + +# yarn +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions + +# copywrite tool used in pre-commit hook +.copywrite + diff --git a/ui/.nvmrc b/ui/.nvmrc deleted file mode 100644 index 958b5a36e1fa..000000000000 --- a/ui/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -v14 diff --git a/ui/.prettierrc.js b/ui/.prettierrc.js index 8f507fd9b33d..691cad3025e1 100644 --- a/ui/.prettierrc.js +++ b/ui/.prettierrc.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + 'use strict'; module.exports = { @@ -12,5 +17,11 @@ module.exports = { printWidth: 125, }, }, + { + files: '*.{js,ts}', + options: { + singleQuote: true, + }, + }, ], }; diff --git a/ui/.stylelintignore b/ui/.stylelintignore new file mode 100644 index 000000000000..a0cf71cbd183 --- /dev/null +++ b/ui/.stylelintignore @@ -0,0 +1,8 @@ +# unconventional files +/blueprints/*/files/ + +# compiled output +/dist/ + +# addons +/.node_modules.ember-try/ diff --git a/ui/.stylelintrc.js b/ui/.stylelintrc.js new file mode 100644 index 000000000000..4409325c3e75 --- /dev/null +++ b/ui/.stylelintrc.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. 
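The ESLint changes above track the modern toolchain: the unmaintained `babel-eslint` parser becomes `@babel/eslint-parser` (with the decorators proposal enabled inline via `babelOptions`, so no separate Babel config file is required thanks to `requireConfigFile: false`), and the deprecated `eslint-plugin-node` preset is replaced by its maintained fork `eslint-plugin-n`. A quick way to exercise the new setup locally; the commands are illustrative, and the repo's real lint entry points live in `ui/package.json`:

```bash
cd ui
# Lint JS/TS with the @babel/eslint-parser-based config...
npx eslint . --ext .js,.ts
# ...and styles with the new stylelint config, honoring .stylelintignore.
npx stylelint "**/*.css"
```

The `.stylelintrc.js` that the second command reads continues below.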
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +'use strict'; + +module.exports = { + extends: ['stylelint-config-standard', 'stylelint-prettier/recommended'], +}; diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js index e4e51b09aa4b..3936c7dd4769 100644 --- a/ui/.template-lintrc.js +++ b/ui/.template-lintrc.js @@ -1,54 +1,32 @@ -'use strict'; - -const fs = require('fs'); -let testOverrides = {}; -try { - // ember-template-lint no longer exports anything so we cannot access the rule definitions conventionally - // read file, convert to json string and parse - const toJSON = (str) => { - return JSON.parse( - str - .slice(str.indexOf(':') + 2) // get rid of export statement - .slice(0, -(str.length - str.lastIndexOf(','))) // remove trailing brackets from export - .replace(/:.*,/g, `: ${false},`) // convert values to false - .replace(/,([^,]*)$/, '$1') // remove last comma - .replace(/'/g, '"') // convert to double quotes - .replace(/(\w[^"].*[^"]):/g, '"$1":') // wrap quotes around single word keys - .trim() - ); - }; - const recommended = toJSON( - fs.readFileSync('node_modules/ember-template-lint/lib/config/recommended.js').toString() - ); - const stylistic = toJSON( - fs.readFileSync('node_modules/ember-template-lint/lib/config/stylistic.js').toString() - ); - testOverrides = { - ...recommended, - ...stylistic, - prettier: false, - }; -} catch (error) { - console.log(error); // eslint-disable-line -} +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ module.exports = { plugins: ['ember-template-lint-plugin-prettier'], + extends: ['recommended', 'ember-template-lint-plugin-prettier:recommended'], + rules: { 'no-action': 'off', 'no-implicit-this': { allow: ['supported-auth-backends'], }, 'require-input-label': 'off', + 'no-array-prototype-extensions': 'off', + // from bump to ember-template-lint@6.0.0 + 'no-builtin-form-components': 'off', + 'no-at-ember-render-modifiers': 'off', + 'no-unnecessary-curly-strings': 'off', + 'no-unnecessary-curly-parens': 'off', }, - ignore: ['lib/story-md', 'tests/**'], - // ember language server vscode extension does not currently respect the ignore field - // override all rules manually as workaround to align with cli overrides: [ { files: ['**/*-test.js'], - rules: testOverrides, + rules: { + prettier: false, + }, }, ], }; diff --git a/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch b/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch new file mode 100644 index 000000000000..920eeb901f3f --- /dev/null +++ b/ui/.yarn/patches/lodash.template-npm-4.5.0-5272df3039.patch @@ -0,0 +1,45 @@ +diff --git a/index.js b/index.js +index f051141e362679e1cc12f3dca924d8f6e7f5459b..63815c4c53412263de74fd4d779cfd198be87c8e 100644 +--- a/index.js ++++ b/index.js +@@ -17,6 +17,9 @@ var HOT_COUNT = 800, + var INFINITY = 1 / 0, + MAX_SAFE_INTEGER = 9007199254740991; + ++/** Error message constants. */ ++var INVALID_TEMPL_VAR_ERROR_TEXT = 'Invalid `variable` option passed into `_.template`'; ++ + /** `Object#toString` result references. */ + var argsTag = '[object Arguments]', + arrayTag = '[object Array]', +@@ -1343,6 +1346,18 @@ function keysIn(object) { + return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object); + } + ++/** ++ * Used to validate the `validate` option in `_.template` variable. 
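The vendored patch above carries lodash's hardening of `_.template`'s `variable` option against command injection (the issue tracked upstream as CVE-2021-23337) into the pinned `lodash.template@4.5.0`. With the repo's move to a Yarn berry layout (the `.yarn/patches` directory here, plus the removal of the vendored Yarn 1 release below), a patch like this is normally generated rather than hand-written. A hedged sketch of that workflow, assuming Yarn 3.x is in use; the temp path is a placeholder printed by the first command:

```bash
cd ui
# Check out an editable copy of the package (prints a temp directory)...
yarn patch lodash.template@npm:4.5.0
# ...apply the fix to that copy, then persist it under .yarn/patches and
# record the patch: resolution in the manifest (-s/--save is Yarn 3.x).
yarn patch-commit -s /tmp/xfs-XXXXXX/user
```

The remainder of the patch body, followed by the deletion of the 147,216-line vendored `yarn-1.19.1.js`, continues below.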
++ * ++ * Forbids characters which could potentially change the meaning of the function argument definition: ++ * - "()," (modification of function parameters) ++ * - "=" (default value) ++ * - "[]{}" (destructuring of function parameters) ++ * - "/" (beginning of a comment) ++ * - whitespace ++ */ ++var reForbiddenIdentifierChars = /[()=,{}\[\]\/\s]/; ++ + /** + * Creates a compiled template function that can interpolate data properties + * in "interpolate" delimiters, HTML-escape interpolated data properties in +@@ -1522,6 +1537,11 @@ function template(string, options, guard) { + if (!variable) { + source = 'with (obj) {\n' + source + '\n}\n'; + } ++ // Throw an error if a forbidden character was found in `variable`, to prevent ++ // potential command injection attacks. ++ else if (reForbiddenIdentifierChars.test(variable)) { ++ throw new Error(INVALID_TEMPL_VAR_ERROR_TEXT); ++ } + // Cleanup code by stripping empty strings. + source = (isEvaluating ? source.replace(reEmptyStringLeading, '') : source) + .replace(reEmptyStringMiddle, '$1') diff --git a/ui/.yarn/releases/yarn-1.19.1.js b/ui/.yarn/releases/yarn-1.19.1.js deleted file mode 100755 index 3907b87325d0..000000000000 --- a/ui/.yarn/releases/yarn-1.19.1.js +++ /dev/null @@ -1,147216 +0,0 @@ -#!/usr/bin/env node -module.exports = -/******/ (function(modules) { // webpackBootstrap -/******/ // The module cache -/******/ var installedModules = {}; -/******/ -/******/ // The require function -/******/ function __webpack_require__(moduleId) { -/******/ -/******/ // Check if module is in cache -/******/ if(installedModules[moduleId]) { -/******/ return installedModules[moduleId].exports; -/******/ } -/******/ // Create a new module (and put it into the cache) -/******/ var module = installedModules[moduleId] = { -/******/ i: moduleId, -/******/ l: false, -/******/ exports: {} -/******/ }; -/******/ -/******/ // Execute the module function -/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); -/******/ -/******/ // Flag the module as loaded -/******/ module.l = true; -/******/ -/******/ // Return the exports of the module -/******/ return module.exports; -/******/ } -/******/ -/******/ -/******/ // expose the modules object (__webpack_modules__) -/******/ __webpack_require__.m = modules; -/******/ -/******/ // expose the module cache -/******/ __webpack_require__.c = installedModules; -/******/ -/******/ // identity function for calling harmony imports with the correct context -/******/ __webpack_require__.i = function(value) { return value; }; -/******/ -/******/ // define getter function for harmony exports -/******/ __webpack_require__.d = function(exports, name, getter) { -/******/ if(!__webpack_require__.o(exports, name)) { -/******/ Object.defineProperty(exports, name, { -/******/ configurable: false, -/******/ enumerable: true, -/******/ get: getter -/******/ }); -/******/ } -/******/ }; -/******/ -/******/ // getDefaultExport function for compatibility with non-harmony modules -/******/ __webpack_require__.n = function(module) { -/******/ var getter = module && module.__esModule ? 
-/******/ function getDefault() { return module['default']; } : -/******/ function getModuleExports() { return module; }; -/******/ __webpack_require__.d(getter, 'a', getter); -/******/ return getter; -/******/ }; -/******/ -/******/ // Object.prototype.hasOwnProperty.call -/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; -/******/ -/******/ // __webpack_public_path__ -/******/ __webpack_require__.p = ""; -/******/ -/******/ // Load entry module and return exports -/******/ return __webpack_require__(__webpack_require__.s = 549); -/******/ }) -/************************************************************************/ -/******/ ([ -/* 0 */ -/***/ (function(module, exports) { - -module.exports = require("path"); - -/***/ }), -/* 1 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (immutable) */ __webpack_exports__["a"] = __extends; -/* unused harmony export __assign */ -/* unused harmony export __rest */ -/* unused harmony export __decorate */ -/* unused harmony export __param */ -/* unused harmony export __metadata */ -/* unused harmony export __awaiter */ -/* unused harmony export __generator */ -/* unused harmony export __exportStar */ -/* unused harmony export __values */ -/* unused harmony export __read */ -/* unused harmony export __spread */ -/* unused harmony export __await */ -/* unused harmony export __asyncGenerator */ -/* unused harmony export __asyncDelegator */ -/* unused harmony export __asyncValues */ -/* unused harmony export __makeTemplateObject */ -/* unused harmony export __importStar */ -/* unused harmony export __importDefault */ -/*! ***************************************************************************** -Copyright (c) Microsoft Corporation. All rights reserved. -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at http://www.apache.org/licenses/LICENSE-2.0 - -THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED -WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -MERCHANTABLITY OR NON-INFRINGEMENT. - -See the Apache Version 2.0 License for specific language governing permissions -and limitations under the License. -***************************************************************************** */ -/* global Reflect, Promise */ - -var extendStatics = function(d, b) { - extendStatics = Object.setPrototypeOf || - ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || - function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; - return extendStatics(d, b); -}; - -function __extends(d, b) { - extendStatics(d, b); - function __() { this.constructor = d; } - d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __()); -} - -var __assign = function() { - __assign = Object.assign || function __assign(t) { - for (var s, i = 1, n = arguments.length; i < n; i++) { - s = arguments[i]; - for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; - } - return t; - } - return __assign.apply(this, arguments); -} - -function __rest(s, e) { - var t = {}; - for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0) - t[p] = s[p]; - if (s != null && typeof Object.getOwnPropertySymbols === "function") - for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) if (e.indexOf(p[i]) < 0) - t[p[i]] = s[p[i]]; - return t; -} - -function __decorate(decorators, target, key, desc) { - var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d; - if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc); - else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r; - return c > 3 && r && Object.defineProperty(target, key, r), r; -} - -function __param(paramIndex, decorator) { - return function (target, key) { decorator(target, key, paramIndex); } -} - -function __metadata(metadataKey, metadataValue) { - if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(metadataKey, metadataValue); -} - -function __awaiter(thisArg, _arguments, P, generator) { - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : new P(function (resolve) { resolve(result.value); }).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); -} - -function __generator(thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; - return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (_) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? 
op[1] : void 0, done: true }; - } -} - -function __exportStar(m, exports) { - for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; -} - -function __values(o) { - var m = typeof Symbol === "function" && o[Symbol.iterator], i = 0; - if (m) return m.call(o); - return { - next: function () { - if (o && i >= o.length) o = void 0; - return { value: o && o[i++], done: !o }; - } - }; -} - -function __read(o, n) { - var m = typeof Symbol === "function" && o[Symbol.iterator]; - if (!m) return o; - var i = m.call(o), r, ar = [], e; - try { - while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value); - } - catch (error) { e = { error: error }; } - finally { - try { - if (r && !r.done && (m = i["return"])) m.call(i); - } - finally { if (e) throw e.error; } - } - return ar; -} - -function __spread() { - for (var ar = [], i = 0; i < arguments.length; i++) - ar = ar.concat(__read(arguments[i])); - return ar; -} - -function __await(v) { - return this instanceof __await ? (this.v = v, this) : new __await(v); -} - -function __asyncGenerator(thisArg, _arguments, generator) { - if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); - var g = generator.apply(thisArg, _arguments || []), i, q = []; - return i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i; - function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; } - function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } } - function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); } - function fulfill(value) { resume("next", value); } - function reject(value) { resume("throw", value); } - function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); } -} - -function __asyncDelegator(o) { - var i, p; - return i = {}, verb("next"), verb("throw", function (e) { throw e; }), verb("return"), i[Symbol.iterator] = function () { return this; }, i; - function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === "return" } : f ? f(v) : v; } : f; } -} - -function __asyncValues(o) { - if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined."); - var m = o[Symbol.asyncIterator], i; - return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i); - function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; } - function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); } -} - -function __makeTemplateObject(cooked, raw) { - if (Object.defineProperty) { Object.defineProperty(cooked, "raw", { value: raw }); } else { cooked.raw = raw; } - return cooked; -}; - -function __importStar(mod) { - if (mod && mod.__esModule) return mod; - var result = {}; - if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; - result.default = mod; - return result; -} - -function __importDefault(mod) { - return (mod && mod.__esModule) ? 
mod : { default: mod }; -} - - -/***/ }), -/* 2 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -exports.__esModule = true; - -var _promise = __webpack_require__(227); - -var _promise2 = _interopRequireDefault(_promise); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -exports.default = function (fn) { - return function () { - var gen = fn.apply(this, arguments); - return new _promise2.default(function (resolve, reject) { - function step(key, arg) { - try { - var info = gen[key](arg); - var value = info.value; - } catch (error) { - reject(error); - return; - } - - if (info.done) { - resolve(value); - } else { - return _promise2.default.resolve(value).then(function (value) { - step("next", value); - }, function (err) { - step("throw", err); - }); - } - } - - return step("next"); - }); - }; -}; - -/***/ }), -/* 3 */ -/***/ (function(module, exports) { - -module.exports = require("util"); - -/***/ }), -/* 4 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.getFirstSuitableFolder = exports.readFirstAvailableStream = exports.makeTempDir = exports.hardlinksWork = exports.writeFilePreservingEol = exports.getFileSizeOnDisk = exports.walk = exports.symlink = exports.find = exports.readJsonAndFile = exports.readJson = exports.readFileAny = exports.hardlinkBulk = exports.copyBulk = exports.unlink = exports.glob = exports.link = exports.chmod = exports.lstat = exports.exists = exports.mkdirp = exports.stat = exports.access = exports.rename = exports.readdir = exports.realpath = exports.readlink = exports.writeFile = exports.open = exports.readFileBuffer = exports.lockQueue = exports.constants = undefined; - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} - -let buildActionsForCopy = (() => { - var _ref = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { - - // - let build = (() => { - var _ref5 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - const src = data.src, - dest = data.dest, - type = data.type; - - const onFresh = data.onFresh || noop; - const onDone = data.onDone || noop; - - // TODO https://github.com/yarnpkg/yarn/issues/3751 - // related to bundled dependencies handling - if (files.has(dest.toLowerCase())) { - reporter.verbose(`The case-insensitive file ${dest} shouldn't be copied twice in one bulk copy`); - } else { - files.add(dest.toLowerCase()); - } - - if (type === 'symlink') { - yield mkdirp((_path || _load_path()).default.dirname(dest)); - onFresh(); - actions.symlink.push({ - dest, - linkname: src - }); - onDone(); - return; - } - - if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { - // ignored file - return; - } - - const srcStat = yield lstat(src); - let srcFiles; - - if (srcStat.isDirectory()) { - srcFiles = yield readdir(src); - } - - let destStat; - try { - // try accessing the destination - destStat = yield lstat(dest); - } catch (e) { - // proceed if destination doesn't exist, otherwise error - if (e.code !== 'ENOENT') { - throw e; - } - } - - // if destination exists - if (destStat) { - const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); - const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); - const 
bothFiles = srcStat.isFile() && destStat.isFile(); - - // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving - // us modes that aren't valid. investigate this, it's generally safe to proceed. - - /* if (srcStat.mode !== destStat.mode) { - try { - await access(dest, srcStat.mode); - } catch (err) {} - } */ - - if (bothFiles && artifactFiles.has(dest)) { - // this file gets changed during build, likely by a custom install script. Don't bother checking it. - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); - return; - } - - if (bothFiles && srcStat.size === destStat.size && (0, (_fsNormalized || _load_fsNormalized()).fileDatesEqual)(srcStat.mtime, destStat.mtime)) { - // we can safely assume this is the same file - onDone(); - reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.size, +srcStat.mtime)); - return; - } - - if (bothSymlinks) { - const srcReallink = yield readlink(src); - if (srcReallink === (yield readlink(dest))) { - // if both symlinks are the same then we can continue on - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); - return; - } - } - - if (bothFolders) { - // mark files that aren't in this folder as possibly extraneous - const destFiles = yield readdir(dest); - invariant(srcFiles, 'src files not initialised'); - - for (var _iterator4 = destFiles, _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? _iterator4 : _iterator4[Symbol.iterator]();;) { - var _ref6; - - if (_isArray4) { - if (_i4 >= _iterator4.length) break; - _ref6 = _iterator4[_i4++]; - } else { - _i4 = _iterator4.next(); - if (_i4.done) break; - _ref6 = _i4.value; - } - - const file = _ref6; - - if (srcFiles.indexOf(file) < 0) { - const loc = (_path || _load_path()).default.join(dest, file); - possibleExtraneous.add(loc); - - if ((yield lstat(loc)).isDirectory()) { - for (var _iterator5 = yield readdir(loc), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? _iterator5 : _iterator5[Symbol.iterator]();;) { - var _ref7; - - if (_isArray5) { - if (_i5 >= _iterator5.length) break; - _ref7 = _iterator5[_i5++]; - } else { - _i5 = _iterator5.next(); - if (_i5.done) break; - _ref7 = _i5.value; - } - - const file = _ref7; - - possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); - } - } - } - } - } - } - - if (destStat && destStat.isSymbolicLink()) { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); - destStat = null; - } - - if (srcStat.isSymbolicLink()) { - onFresh(); - const linkname = yield readlink(src); - actions.symlink.push({ - dest, - linkname - }); - onDone(); - } else if (srcStat.isDirectory()) { - if (!destStat) { - reporter.verbose(reporter.lang('verboseFileFolder', dest)); - yield mkdirp(dest); - } - - const destParts = dest.split((_path || _load_path()).default.sep); - while (destParts.length) { - files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); - destParts.pop(); - } - - // push all files to queue - invariant(srcFiles, 'src files not initialised'); - let remaining = srcFiles.length; - if (!remaining) { - onDone(); - } - for (var _iterator6 = srcFiles, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? 
_iterator6 : _iterator6[Symbol.iterator]();;) { - var _ref8; - - if (_isArray6) { - if (_i6 >= _iterator6.length) break; - _ref8 = _iterator6[_i6++]; - } else { - _i6 = _iterator6.next(); - if (_i6.done) break; - _ref8 = _i6.value; - } - - const file = _ref8; - - queue.push({ - dest: (_path || _load_path()).default.join(dest, file), - onFresh, - onDone: function (_onDone) { - function onDone() { - return _onDone.apply(this, arguments); - } - - onDone.toString = function () { - return _onDone.toString(); - }; - - return onDone; - }(function () { - if (--remaining === 0) { - onDone(); - } - }), - src: (_path || _load_path()).default.join(src, file) - }); - } - } else if (srcStat.isFile()) { - onFresh(); - actions.file.push({ - src, - dest, - atime: srcStat.atime, - mtime: srcStat.mtime, - mode: srcStat.mode - }); - onDone(); - } else { - throw new Error(`unsure how to copy this: ${src}`); - } - }); - - return function build(_x5) { - return _ref5.apply(this, arguments); - }; - })(); - - const artifactFiles = new Set(events.artifactFiles || []); - const files = new Set(); - - // initialise events - for (var _iterator = queue, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { - var _ref2; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref2 = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref2 = _i.value; - } - - const item = _ref2; - - const onDone = item.onDone; - item.onDone = function () { - events.onProgress(item.dest); - if (onDone) { - onDone(); - } - }; - } - events.onStart(queue.length); - - // start building actions - const actions = { - file: [], - symlink: [], - link: [] - }; - - // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items - // at a time due to the requirement to push items onto the queue - while (queue.length) { - const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); - yield Promise.all(items.map(build)); - } - - // simulate the existence of some files to prevent considering them extraneous - for (var _iterator2 = artifactFiles, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) { - var _ref3; - - if (_isArray2) { - if (_i2 >= _iterator2.length) break; - _ref3 = _iterator2[_i2++]; - } else { - _i2 = _iterator2.next(); - if (_i2.done) break; - _ref3 = _i2.value; - } - - const file = _ref3; - - if (possibleExtraneous.has(file)) { - reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); - possibleExtraneous.delete(file); - } - } - - for (var _iterator3 = possibleExtraneous, _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? 
_iterator3 : _iterator3[Symbol.iterator]();;) { - var _ref4; - - if (_isArray3) { - if (_i3 >= _iterator3.length) break; - _ref4 = _iterator3[_i3++]; - } else { - _i3 = _iterator3.next(); - if (_i3.done) break; - _ref4 = _i3.value; - } - - const loc = _ref4; - - if (files.has(loc.toLowerCase())) { - possibleExtraneous.delete(loc); - } - } - - return actions; - }); - - return function buildActionsForCopy(_x, _x2, _x3, _x4) { - return _ref.apply(this, arguments); - }; -})(); - -let buildActionsForHardlink = (() => { - var _ref9 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, events, possibleExtraneous, reporter) { - - // - let build = (() => { - var _ref13 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - const src = data.src, - dest = data.dest; - - const onFresh = data.onFresh || noop; - const onDone = data.onDone || noop; - if (files.has(dest.toLowerCase())) { - // Fixes issue https://github.com/yarnpkg/yarn/issues/2734 - // When bulk hardlinking we have A -> B structure that we want to hardlink to A1 -> B1, - // package-linker passes that modules A1 and B1 need to be hardlinked, - // the recursive linking algorithm of A1 ends up scheduling files in B1 to be linked twice which will case - // an exception. - onDone(); - return; - } - files.add(dest.toLowerCase()); - - if (events.ignoreBasenames.indexOf((_path || _load_path()).default.basename(src)) >= 0) { - // ignored file - return; - } - - const srcStat = yield lstat(src); - let srcFiles; - - if (srcStat.isDirectory()) { - srcFiles = yield readdir(src); - } - - const destExists = yield exists(dest); - if (destExists) { - const destStat = yield lstat(dest); - - const bothSymlinks = srcStat.isSymbolicLink() && destStat.isSymbolicLink(); - const bothFolders = srcStat.isDirectory() && destStat.isDirectory(); - const bothFiles = srcStat.isFile() && destStat.isFile(); - - if (srcStat.mode !== destStat.mode) { - try { - yield access(dest, srcStat.mode); - } catch (err) { - // EINVAL access errors sometimes happen which shouldn't because node shouldn't be giving - // us modes that aren't valid. investigate this, it's generally safe to proceed. - reporter.verbose(err); - } - } - - if (bothFiles && artifactFiles.has(dest)) { - // this file gets changed during build, likely by a custom install script. Don't bother checking it. - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipArtifact', src)); - return; - } - - // correct hardlink - if (bothFiles && srcStat.ino !== null && srcStat.ino === destStat.ino) { - onDone(); - reporter.verbose(reporter.lang('verboseFileSkip', src, dest, srcStat.ino)); - return; - } - - if (bothSymlinks) { - const srcReallink = yield readlink(src); - if (srcReallink === (yield readlink(dest))) { - // if both symlinks are the same then we can continue on - onDone(); - reporter.verbose(reporter.lang('verboseFileSkipSymlink', src, dest, srcReallink)); - return; - } - } - - if (bothFolders) { - // mark files that aren't in this folder as possibly extraneous - const destFiles = yield readdir(dest); - invariant(srcFiles, 'src files not initialised'); - - for (var _iterator10 = destFiles, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? 
_iterator10 : _iterator10[Symbol.iterator]();;) { - var _ref14; - - if (_isArray10) { - if (_i10 >= _iterator10.length) break; - _ref14 = _iterator10[_i10++]; - } else { - _i10 = _iterator10.next(); - if (_i10.done) break; - _ref14 = _i10.value; - } - - const file = _ref14; - - if (srcFiles.indexOf(file) < 0) { - const loc = (_path || _load_path()).default.join(dest, file); - possibleExtraneous.add(loc); - - if ((yield lstat(loc)).isDirectory()) { - for (var _iterator11 = yield readdir(loc), _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) { - var _ref15; - - if (_isArray11) { - if (_i11 >= _iterator11.length) break; - _ref15 = _iterator11[_i11++]; - } else { - _i11 = _iterator11.next(); - if (_i11.done) break; - _ref15 = _i11.value; - } - - const file = _ref15; - - possibleExtraneous.add((_path || _load_path()).default.join(loc, file)); - } - } - } - } - } - } - - if (srcStat.isSymbolicLink()) { - onFresh(); - const linkname = yield readlink(src); - actions.symlink.push({ - dest, - linkname - }); - onDone(); - } else if (srcStat.isDirectory()) { - reporter.verbose(reporter.lang('verboseFileFolder', dest)); - yield mkdirp(dest); - - const destParts = dest.split((_path || _load_path()).default.sep); - while (destParts.length) { - files.add(destParts.join((_path || _load_path()).default.sep).toLowerCase()); - destParts.pop(); - } - - // push all files to queue - invariant(srcFiles, 'src files not initialised'); - let remaining = srcFiles.length; - if (!remaining) { - onDone(); - } - for (var _iterator12 = srcFiles, _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? _iterator12 : _iterator12[Symbol.iterator]();;) { - var _ref16; - - if (_isArray12) { - if (_i12 >= _iterator12.length) break; - _ref16 = _iterator12[_i12++]; - } else { - _i12 = _iterator12.next(); - if (_i12.done) break; - _ref16 = _i12.value; - } - - const file = _ref16; - - queue.push({ - onFresh, - src: (_path || _load_path()).default.join(src, file), - dest: (_path || _load_path()).default.join(dest, file), - onDone: function (_onDone2) { - function onDone() { - return _onDone2.apply(this, arguments); - } - - onDone.toString = function () { - return _onDone2.toString(); - }; - - return onDone; - }(function () { - if (--remaining === 0) { - onDone(); - } - }) - }); - } - } else if (srcStat.isFile()) { - onFresh(); - actions.link.push({ - src, - dest, - removeDest: destExists - }); - onDone(); - } else { - throw new Error(`unsure how to copy this: ${src}`); - } - }); - - return function build(_x10) { - return _ref13.apply(this, arguments); - }; - })(); - - const artifactFiles = new Set(events.artifactFiles || []); - const files = new Set(); - - // initialise events - for (var _iterator7 = queue, _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { - var _ref10; - - if (_isArray7) { - if (_i7 >= _iterator7.length) break; - _ref10 = _iterator7[_i7++]; - } else { - _i7 = _iterator7.next(); - if (_i7.done) break; - _ref10 = _i7.value; - } - - const item = _ref10; - - const onDone = item.onDone || noop; - item.onDone = function () { - events.onProgress(item.dest); - onDone(); - }; - } - events.onStart(queue.length); - - // start building actions - const actions = { - file: [], - symlink: [], - link: [] - }; - - // custom concurrency logic as we're always executing stacks of CONCURRENT_QUEUE_ITEMS queue items - // at a time due to the requirement to push items onto the queue - while (queue.length) { - const items = queue.splice(0, CONCURRENT_QUEUE_ITEMS); - yield Promise.all(items.map(build)); - } - - // simulate the existence of some files to prevent considering them extraneous - for (var _iterator8 = artifactFiles, _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { - var _ref11; - - if (_isArray8) { - if (_i8 >= _iterator8.length) break; - _ref11 = _iterator8[_i8++]; - } else { - _i8 = _iterator8.next(); - if (_i8.done) break; - _ref11 = _i8.value; - } - - const file = _ref11; - - if (possibleExtraneous.has(file)) { - reporter.verbose(reporter.lang('verboseFilePhantomExtraneous', file)); - possibleExtraneous.delete(file); - } - } - - for (var _iterator9 = possibleExtraneous, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? _iterator9 : _iterator9[Symbol.iterator]();;) { - var _ref12; - - if (_isArray9) { - if (_i9 >= _iterator9.length) break; - _ref12 = _iterator9[_i9++]; - } else { - _i9 = _iterator9.next(); - if (_i9.done) break; - _ref12 = _i9.value; - } - - const loc = _ref12; - - if (files.has(loc.toLowerCase())) { - possibleExtraneous.delete(loc); - } - } - - return actions; - }); - - return function buildActionsForHardlink(_x6, _x7, _x8, _x9) { - return _ref9.apply(this, arguments); - }; -})(); - -let copyBulk = exports.copyBulk = (() => { - var _ref17 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { - const events = { - onStart: _events && _events.onStart || noop, - onProgress: _events && _events.onProgress || noop, - possibleExtraneous: _events ? 
_events.possibleExtraneous : new Set(), - ignoreBasenames: _events && _events.ignoreBasenames || [], - artifactFiles: _events && _events.artifactFiles || [] - }; - - const actions = yield buildActionsForCopy(queue, events, events.possibleExtraneous, reporter); - events.onStart(actions.file.length + actions.symlink.length + actions.link.length); - - const fileActions = actions.file; - - const currentlyWriting = new Map(); - - yield (_promise || _load_promise()).queue(fileActions, (() => { - var _ref18 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - let writePromise; - while (writePromise = currentlyWriting.get(data.dest)) { - yield writePromise; - } - - reporter.verbose(reporter.lang('verboseFileCopy', data.src, data.dest)); - const copier = (0, (_fsNormalized || _load_fsNormalized()).copyFile)(data, function () { - return currentlyWriting.delete(data.dest); - }); - currentlyWriting.set(data.dest, copier); - events.onProgress(data.dest); - return copier; - }); - - return function (_x14) { - return _ref18.apply(this, arguments); - }; - })(), CONCURRENT_QUEUE_ITEMS); - - // we need to copy symlinks last as they could reference files we were copying - const symlinkActions = actions.symlink; - yield (_promise || _load_promise()).queue(symlinkActions, function (data) { - const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); - reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); - return symlink(linkname, data.dest); - }); - }); - - return function copyBulk(_x11, _x12, _x13) { - return _ref17.apply(this, arguments); - }; -})(); - -let hardlinkBulk = exports.hardlinkBulk = (() => { - var _ref19 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (queue, reporter, _events) { - const events = { - onStart: _events && _events.onStart || noop, - onProgress: _events && _events.onProgress || noop, - possibleExtraneous: _events ? 
_events.possibleExtraneous : new Set(), - artifactFiles: _events && _events.artifactFiles || [], - ignoreBasenames: [] - }; - - const actions = yield buildActionsForHardlink(queue, events, events.possibleExtraneous, reporter); - events.onStart(actions.file.length + actions.symlink.length + actions.link.length); - - const fileActions = actions.link; - - yield (_promise || _load_promise()).queue(fileActions, (() => { - var _ref20 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (data) { - reporter.verbose(reporter.lang('verboseFileLink', data.src, data.dest)); - if (data.removeDest) { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(data.dest); - } - yield link(data.src, data.dest); - }); - - return function (_x18) { - return _ref20.apply(this, arguments); - }; - })(), CONCURRENT_QUEUE_ITEMS); - - // we need to copy symlinks last as they could reference files we were copying - const symlinkActions = actions.symlink; - yield (_promise || _load_promise()).queue(symlinkActions, function (data) { - const linkname = (_path || _load_path()).default.resolve((_path || _load_path()).default.dirname(data.dest), data.linkname); - reporter.verbose(reporter.lang('verboseFileSymlink', data.dest, linkname)); - return symlink(linkname, data.dest); - }); - }); - - return function hardlinkBulk(_x15, _x16, _x17) { - return _ref19.apply(this, arguments); - }; -})(); - -let readFileAny = exports.readFileAny = (() => { - var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (files) { - for (var _iterator13 = files, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? _iterator13 : _iterator13[Symbol.iterator]();;) { - var _ref22; - - if (_isArray13) { - if (_i13 >= _iterator13.length) break; - _ref22 = _iterator13[_i13++]; - } else { - _i13 = _iterator13.next(); - if (_i13.done) break; - _ref22 = _i13.value; - } - - const file = _ref22; - - if (yield exists(file)) { - return readFile(file); - } - } - return null; - }); - - return function readFileAny(_x19) { - return _ref21.apply(this, arguments); - }; -})(); - -let readJson = exports.readJson = (() => { - var _ref23 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - return (yield readJsonAndFile(loc)).object; - }); - - return function readJson(_x20) { - return _ref23.apply(this, arguments); - }; -})(); - -let readJsonAndFile = exports.readJsonAndFile = (() => { - var _ref24 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - const file = yield readFile(loc); - try { - return { - object: (0, (_map || _load_map()).default)(JSON.parse(stripBOM(file))), - content: file - }; - } catch (err) { - err.message = `${loc}: ${err.message}`; - throw err; - } - }); - - return function readJsonAndFile(_x21) { - return _ref24.apply(this, arguments); - }; -})(); - -let find = exports.find = (() => { - var _ref25 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (filename, dir) { - const parts = dir.split((_path || _load_path()).default.sep); - - while (parts.length) { - const loc = parts.concat(filename).join((_path || _load_path()).default.sep); - - if (yield exists(loc)) { - return loc; - } else { - parts.pop(); - } - } - - return false; - }); - - return function find(_x22, _x23) { - return _ref25.apply(this, arguments); - }; -})(); - -let symlink = exports.symlink = (() => { - var _ref26 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (src, dest) { - if (process.platform 
!== 'win32') { - // use relative paths otherwise which will be retained if the directory is moved - src = (_path || _load_path()).default.relative((_path || _load_path()).default.dirname(dest), src); - // When path.relative returns an empty string for the current directory, we should instead use - // '.', which is a valid fs.symlink target. - src = src || '.'; - } - - try { - const stats = yield lstat(dest); - if (stats.isSymbolicLink()) { - const resolved = dest; - if (resolved === src) { - return; - } - } - } catch (err) { - if (err.code !== 'ENOENT') { - throw err; - } - } - - // We use rimraf for unlink which never throws an ENOENT on missing target - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dest); - - if (process.platform === 'win32') { - // use directory junctions if possible on win32, this requires absolute paths - yield fsSymlink(src, dest, 'junction'); - } else { - yield fsSymlink(src, dest); - } - }); - - return function symlink(_x24, _x25) { - return _ref26.apply(this, arguments); - }; -})(); - -let walk = exports.walk = (() => { - var _ref27 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir, relativeDir, ignoreBasenames = new Set()) { - let files = []; - - let filenames = yield readdir(dir); - if (ignoreBasenames.size) { - filenames = filenames.filter(function (name) { - return !ignoreBasenames.has(name); - }); - } - - for (var _iterator14 = filenames, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? _iterator14 : _iterator14[Symbol.iterator]();;) { - var _ref28; - - if (_isArray14) { - if (_i14 >= _iterator14.length) break; - _ref28 = _iterator14[_i14++]; - } else { - _i14 = _iterator14.next(); - if (_i14.done) break; - _ref28 = _i14.value; - } - - const name = _ref28; - - const relative = relativeDir ? 
(_path || _load_path()).default.join(relativeDir, name) : name; - const loc = (_path || _load_path()).default.join(dir, name); - const stat = yield lstat(loc); - - files.push({ - relative, - basename: name, - absolute: loc, - mtime: +stat.mtime - }); - - if (stat.isDirectory()) { - files = files.concat((yield walk(loc, relative, ignoreBasenames))); - } - } - - return files; - }); - - return function walk(_x26, _x27) { - return _ref27.apply(this, arguments); - }; -})(); - -let getFileSizeOnDisk = exports.getFileSizeOnDisk = (() => { - var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (loc) { - const stat = yield lstat(loc); - const size = stat.size, - blockSize = stat.blksize; - - - return Math.ceil(size / blockSize) * blockSize; - }); - - return function getFileSizeOnDisk(_x28) { - return _ref29.apply(this, arguments); - }; -})(); - -let getEolFromFile = (() => { - var _ref30 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path) { - if (!(yield exists(path))) { - return undefined; - } - - const buffer = yield readFileBuffer(path); - - for (let i = 0; i < buffer.length; ++i) { - if (buffer[i] === cr) { - return '\r\n'; - } - if (buffer[i] === lf) { - return '\n'; - } - } - return undefined; - }); - - return function getEolFromFile(_x29) { - return _ref30.apply(this, arguments); - }; -})(); - -let writeFilePreservingEol = exports.writeFilePreservingEol = (() => { - var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (path, data) { - const eol = (yield getEolFromFile(path)) || (_os || _load_os()).default.EOL; - if (eol !== '\n') { - data = data.replace(/\n/g, eol); - } - yield writeFile(path, data); - }); - - return function writeFilePreservingEol(_x30, _x31) { - return _ref31.apply(this, arguments); - }; -})(); - -let hardlinksWork = exports.hardlinksWork = (() => { - var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (dir) { - const filename = 'test-file' + Math.random(); - const file = (_path || _load_path()).default.join(dir, filename); - const fileLink = (_path || _load_path()).default.join(dir, filename + '-link'); - try { - yield writeFile(file, 'test'); - yield link(file, fileLink); - } catch (err) { - return false; - } finally { - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(file); - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(fileLink); - } - return true; - }); - - return function hardlinksWork(_x32) { - return _ref32.apply(this, arguments); - }; -})(); - -// not a strict polyfill for Node's fs.mkdtemp - - -let makeTempDir = exports.makeTempDir = (() => { - var _ref33 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (prefix) { - const dir = (_path || _load_path()).default.join((_os || _load_os()).default.tmpdir(), `yarn-${prefix || ''}-${Date.now()}-${Math.random()}`); - yield (0, (_fsNormalized || _load_fsNormalized()).unlink)(dir); - yield mkdirp(dir); - return dir; - }); - - return function makeTempDir(_x33) { - return _ref33.apply(this, arguments); - }; -})(); - -let readFirstAvailableStream = exports.readFirstAvailableStream = (() => { - var _ref34 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths) { - for (var _iterator15 = paths, _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? 
_iterator15 : _iterator15[Symbol.iterator]();;) { - var _ref35; - - if (_isArray15) { - if (_i15 >= _iterator15.length) break; - _ref35 = _iterator15[_i15++]; - } else { - _i15 = _iterator15.next(); - if (_i15.done) break; - _ref35 = _i15.value; - } - - const path = _ref35; - - try { - const fd = yield open(path, 'r'); - return (_fs || _load_fs()).default.createReadStream(path, { fd }); - } catch (err) { - // Try the next one - } - } - return null; - }); - - return function readFirstAvailableStream(_x34) { - return _ref34.apply(this, arguments); - }; -})(); - -let getFirstSuitableFolder = exports.getFirstSuitableFolder = (() => { - var _ref36 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (paths, mode = constants.W_OK | constants.X_OK) { - const result = { - skipped: [], - folder: null - }; - - for (var _iterator16 = paths, _isArray16 = Array.isArray(_iterator16), _i16 = 0, _iterator16 = _isArray16 ? _iterator16 : _iterator16[Symbol.iterator]();;) { - var _ref37; - - if (_isArray16) { - if (_i16 >= _iterator16.length) break; - _ref37 = _iterator16[_i16++]; - } else { - _i16 = _iterator16.next(); - if (_i16.done) break; - _ref37 = _i16.value; - } - - const folder = _ref37; - - try { - yield mkdirp(folder); - yield access(folder, mode); - - result.folder = folder; - - return result; - } catch (error) { - result.skipped.push({ - error, - folder - }); - } - } - return result; - }); - - return function getFirstSuitableFolder(_x35) { - return _ref36.apply(this, arguments); - }; -})(); - -exports.copy = copy; -exports.readFile = readFile; -exports.readFileRaw = readFileRaw; -exports.normalizeOS = normalizeOS; - -var _fs; - -function _load_fs() { - return _fs = _interopRequireDefault(__webpack_require__(5)); -} - -var _glob; - -function _load_glob() { - return _glob = _interopRequireDefault(__webpack_require__(99)); -} - -var _os; - -function _load_os() { - return _os = _interopRequireDefault(__webpack_require__(49)); -} - -var _path; - -function _load_path() { - return _path = _interopRequireDefault(__webpack_require__(0)); -} - -var _blockingQueue; - -function _load_blockingQueue() { - return _blockingQueue = _interopRequireDefault(__webpack_require__(110)); -} - -var _promise; - -function _load_promise() { - return _promise = _interopRequireWildcard(__webpack_require__(50)); -} - -var _promise2; - -function _load_promise2() { - return _promise2 = __webpack_require__(50); -} - -var _map; - -function _load_map() { - return _map = _interopRequireDefault(__webpack_require__(29)); -} - -var _fsNormalized; - -function _load_fsNormalized() { - return _fsNormalized = __webpack_require__(218); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const constants = exports.constants = typeof (_fs || _load_fs()).default.constants !== 'undefined' ? 
(_fs || _load_fs()).default.constants : { - R_OK: (_fs || _load_fs()).default.R_OK, - W_OK: (_fs || _load_fs()).default.W_OK, - X_OK: (_fs || _load_fs()).default.X_OK -}; - -const lockQueue = exports.lockQueue = new (_blockingQueue || _load_blockingQueue()).default('fs lock'); - -const readFileBuffer = exports.readFileBuffer = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readFile); -const open = exports.open = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.open); -const writeFile = exports.writeFile = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.writeFile); -const readlink = exports.readlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readlink); -const realpath = exports.realpath = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.realpath); -const readdir = exports.readdir = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.readdir); -const rename = exports.rename = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.rename); -const access = exports.access = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.access); -const stat = exports.stat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.stat); -const mkdirp = exports.mkdirp = (0, (_promise2 || _load_promise2()).promisify)(__webpack_require__(145)); -const exists = exports.exists = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.exists, true); -const lstat = exports.lstat = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.lstat); -const chmod = exports.chmod = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.chmod); -const link = exports.link = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.link); -const glob = exports.glob = (0, (_promise2 || _load_promise2()).promisify)((_glob || _load_glob()).default); -exports.unlink = (_fsNormalized || _load_fsNormalized()).unlink; - -// fs.copyFile uses the native file copying instructions on the system, performing much better -// than any JS-based solution and consumes fewer resources. Repeated testing to fine tune the -// concurrency level revealed 128 as the sweet spot on a quad-core, 16 CPU Intel system with SSD. - -const CONCURRENT_QUEUE_ITEMS = (_fs || _load_fs()).default.copyFile ? 
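/* Editorial note on the ternary this comment sits inside: it feature-detects
 * the native fs.copyFile (available since Node 8.5) and sizes the copy queue
 * accordingly -- 128 parallel copies when the kernel does the work, a far
 * more conservative 4 for the JS stream fallback. Equivalent standalone
 * sketch:
 *
 *   const concurrency = typeof fs.copyFile === 'function' ? 128 : 4;
 */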
128 : 4; - -const fsSymlink = (0, (_promise2 || _load_promise2()).promisify)((_fs || _load_fs()).default.symlink); -const invariant = __webpack_require__(9); -const stripBOM = __webpack_require__(160); - -const noop = () => {}; - -function copy(src, dest, reporter) { - return copyBulk([{ src, dest }], reporter); -} - -function _readFile(loc, encoding) { - return new Promise((resolve, reject) => { - (_fs || _load_fs()).default.readFile(loc, encoding, function (err, content) { - if (err) { - reject(err); - } else { - resolve(content); - } - }); - }); -} - -function readFile(loc) { - return _readFile(loc, 'utf8').then(normalizeOS); -} - -function readFileRaw(loc) { - return _readFile(loc, 'binary'); -} - -function normalizeOS(body) { - return body.replace(/\r\n/g, '\n'); -} - -const cr = '\r'.charCodeAt(0); -const lf = '\n'.charCodeAt(0); - -/***/ }), -/* 5 */ -/***/ (function(module, exports) { - -module.exports = require("fs"); - -/***/ }), -/* 6 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -class MessageError extends Error { - constructor(msg, code) { - super(msg); - this.code = code; - } - -} - -exports.MessageError = MessageError; -class ProcessSpawnError extends MessageError { - constructor(msg, code, process) { - super(msg, code); - this.process = process; - } - -} - -exports.ProcessSpawnError = ProcessSpawnError; -class SecurityError extends MessageError {} - -exports.SecurityError = SecurityError; -class ProcessTermError extends MessageError {} - -exports.ProcessTermError = ProcessTermError; -class ResponseError extends Error { - constructor(msg, responseCode) { - super(msg); - this.responseCode = responseCode; - } - -} - -exports.ResponseError = ResponseError; -class OneTimePasswordError extends Error {} -exports.OneTimePasswordError = OneTimePasswordError; - -/***/ }), -/* 7 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscriber; }); -/* unused harmony export SafeSubscriber */ -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isFunction__ = __webpack_require__(154); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Observer__ = __webpack_require__(420); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__ = __webpack_require__(321); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__config__ = __webpack_require__(185); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__util_hostReportError__ = __webpack_require__(323); -/** PURE_IMPORTS_START tslib,_util_isFunction,_Observer,_Subscription,_internal_symbol_rxSubscriber,_config,_util_hostReportError PURE_IMPORTS_END */ - - - - - - - -var Subscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subscriber, _super); - function Subscriber(destinationOrNext, error, complete) { - var _this = _super.call(this) || this; - _this.syncErrorValue = null; - _this.syncErrorThrown = false; - _this.syncErrorThrowable = false; - _this.isStopped = false; - _this._parentSubscription = null; - switch (arguments.length) { - case 0: - _this.destination = __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; - break; - 
case 1: - if (!destinationOrNext) { - _this.destination = __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]; - break; - } - if (typeof destinationOrNext === 'object') { - if (destinationOrNext instanceof Subscriber) { - _this.syncErrorThrowable = destinationOrNext.syncErrorThrowable; - _this.destination = destinationOrNext; - destinationOrNext.add(_this); - } - else { - _this.syncErrorThrowable = true; - _this.destination = new SafeSubscriber(_this, destinationOrNext); - } - break; - } - default: - _this.syncErrorThrowable = true; - _this.destination = new SafeSubscriber(_this, destinationOrNext, error, complete); - break; - } - return _this; - } - Subscriber.prototype[__WEBPACK_IMPORTED_MODULE_4__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { return this; }; - Subscriber.create = function (next, error, complete) { - var subscriber = new Subscriber(next, error, complete); - subscriber.syncErrorThrowable = false; - return subscriber; - }; - Subscriber.prototype.next = function (value) { - if (!this.isStopped) { - this._next(value); - } - }; - Subscriber.prototype.error = function (err) { - if (!this.isStopped) { - this.isStopped = true; - this._error(err); - } - }; - Subscriber.prototype.complete = function () { - if (!this.isStopped) { - this.isStopped = true; - this._complete(); - } - }; - Subscriber.prototype.unsubscribe = function () { - if (this.closed) { - return; - } - this.isStopped = true; - _super.prototype.unsubscribe.call(this); - }; - Subscriber.prototype._next = function (value) { - this.destination.next(value); - }; - Subscriber.prototype._error = function (err) { - this.destination.error(err); - this.unsubscribe(); - }; - Subscriber.prototype._complete = function () { - this.destination.complete(); - this.unsubscribe(); - }; - Subscriber.prototype._unsubscribeAndRecycle = function () { - var _a = this, _parent = _a._parent, _parents = _a._parents; - this._parent = null; - this._parents = null; - this.unsubscribe(); - this.closed = false; - this.isStopped = false; - this._parent = _parent; - this._parents = _parents; - this._parentSubscription = null; - return this; - }; - return Subscriber; -}(__WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */])); - -var SafeSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SafeSubscriber, _super); - function SafeSubscriber(_parentSubscriber, observerOrNext, error, complete) { - var _this = _super.call(this) || this; - _this._parentSubscriber = _parentSubscriber; - var next; - var context = _this; - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(observerOrNext)) { - next = observerOrNext; - } - else if (observerOrNext) { - next = observerOrNext.next; - error = observerOrNext.error; - complete = observerOrNext.complete; - if (observerOrNext !== __WEBPACK_IMPORTED_MODULE_2__Observer__["a" /* empty */]) { - context = Object.create(observerOrNext); - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isFunction__["a" /* isFunction */])(context.unsubscribe)) { - _this.add(context.unsubscribe.bind(context)); - } - context.unsubscribe = _this.unsubscribe.bind(_this); - } - } - _this._context = context; - _this._next = next; - _this._error = error; - _this._complete = complete; - return _this; - } - SafeSubscriber.prototype.next = function (value) { - if (!this.isStopped && this._next) { - var _parentSubscriber = this._parentSubscriber; - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* 
config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(this._next, value); - } - else if (this.__tryOrSetError(_parentSubscriber, this._next, value)) { - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.error = function (err) { - if (!this.isStopped) { - var _parentSubscriber = this._parentSubscriber; - var useDeprecatedSynchronousErrorHandling = __WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling; - if (this._error) { - if (!useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(this._error, err); - this.unsubscribe(); - } - else { - this.__tryOrSetError(_parentSubscriber, this._error, err); - this.unsubscribe(); - } - } - else if (!_parentSubscriber.syncErrorThrowable) { - this.unsubscribe(); - if (useDeprecatedSynchronousErrorHandling) { - throw err; - } - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - else { - if (useDeprecatedSynchronousErrorHandling) { - _parentSubscriber.syncErrorValue = err; - _parentSubscriber.syncErrorThrown = true; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.complete = function () { - var _this = this; - if (!this.isStopped) { - var _parentSubscriber = this._parentSubscriber; - if (this._complete) { - var wrappedComplete = function () { return _this._complete.call(_this._context); }; - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling || !_parentSubscriber.syncErrorThrowable) { - this.__tryOrUnsub(wrappedComplete); - this.unsubscribe(); - } - else { - this.__tryOrSetError(_parentSubscriber, wrappedComplete); - this.unsubscribe(); - } - } - else { - this.unsubscribe(); - } - } - }; - SafeSubscriber.prototype.__tryOrUnsub = function (fn, value) { - try { - fn.call(this._context, value); - } - catch (err) { - this.unsubscribe(); - if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - throw err; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - } - } - }; - SafeSubscriber.prototype.__tryOrSetError = function (parent, fn, value) { - if (!__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - throw new Error('bad call'); - } - try { - fn.call(this._context, value); - } - catch (err) { - if (__WEBPACK_IMPORTED_MODULE_5__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - parent.syncErrorValue = err; - parent.syncErrorThrown = true; - return true; - } - else { - __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_6__util_hostReportError__["a" /* hostReportError */])(err); - return true; - } - } - return false; - }; - SafeSubscriber.prototype._unsubscribe = function () { - var _parentSubscriber = this._parentSubscriber; - this._context = null; - this._parentSubscriber = null; - _parentSubscriber.unsubscribe(); - }; - return SafeSubscriber; -}(Subscriber)); - -//# sourceMappingURL=Subscriber.js.map - - -/***/ }), -/* 8 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.getPathKey = getPathKey; -const os = __webpack_require__(49); -const path = __webpack_require__(0); -const 
userHome = __webpack_require__(67).default; - -var _require = __webpack_require__(225); - -const getCacheDir = _require.getCacheDir, - getConfigDir = _require.getConfigDir, - getDataDir = _require.getDataDir; - -const isWebpackBundle = __webpack_require__(278); - -const DEPENDENCY_TYPES = exports.DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies', 'peerDependencies']; -const OWNED_DEPENDENCY_TYPES = exports.OWNED_DEPENDENCY_TYPES = ['devDependencies', 'dependencies', 'optionalDependencies']; - -const RESOLUTIONS = exports.RESOLUTIONS = 'resolutions'; -const MANIFEST_FIELDS = exports.MANIFEST_FIELDS = [RESOLUTIONS, ...DEPENDENCY_TYPES]; - -const SUPPORTED_NODE_VERSIONS = exports.SUPPORTED_NODE_VERSIONS = '^4.8.0 || ^5.7.0 || ^6.2.2 || >=8.0.0'; - -const YARN_REGISTRY = exports.YARN_REGISTRY = 'https://registry.yarnpkg.com'; -const NPM_REGISTRY_RE = exports.NPM_REGISTRY_RE = /https?:\/\/registry\.npmjs\.org/g; - -const YARN_DOCS = exports.YARN_DOCS = 'https://yarnpkg.com/en/docs/cli/'; -const YARN_INSTALLER_SH = exports.YARN_INSTALLER_SH = 'https://yarnpkg.com/install.sh'; -const YARN_INSTALLER_MSI = exports.YARN_INSTALLER_MSI = 'https://yarnpkg.com/latest.msi'; - -const SELF_UPDATE_VERSION_URL = exports.SELF_UPDATE_VERSION_URL = 'https://yarnpkg.com/latest-version'; - -// cache version, bump whenever we make backwards incompatible changes -const CACHE_VERSION = exports.CACHE_VERSION = 6; - -// lockfile version, bump whenever we make backwards incompatible changes -const LOCKFILE_VERSION = exports.LOCKFILE_VERSION = 1; - -// max amount of network requests to perform concurrently -const NETWORK_CONCURRENCY = exports.NETWORK_CONCURRENCY = 8; - -// HTTP timeout used when downloading packages -const NETWORK_TIMEOUT = exports.NETWORK_TIMEOUT = 30 * 1000; // in milliseconds - -// max amount of child processes to execute concurrently -const CHILD_CONCURRENCY = exports.CHILD_CONCURRENCY = 5; - -const REQUIRED_PACKAGE_KEYS = exports.REQUIRED_PACKAGE_KEYS = ['name', 'version', '_uid']; - -function getPreferredCacheDirectories() { - const preferredCacheDirectories = [getCacheDir()]; - - if (process.getuid) { - // $FlowFixMe: process.getuid exists, dammit - preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache-${process.getuid()}`)); - } - - preferredCacheDirectories.push(path.join(os.tmpdir(), `.yarn-cache`)); - - return preferredCacheDirectories; -} - -const PREFERRED_MODULE_CACHE_DIRECTORIES = exports.PREFERRED_MODULE_CACHE_DIRECTORIES = getPreferredCacheDirectories(); -const CONFIG_DIRECTORY = exports.CONFIG_DIRECTORY = getConfigDir(); -const DATA_DIRECTORY = exports.DATA_DIRECTORY = getDataDir(); -const LINK_REGISTRY_DIRECTORY = exports.LINK_REGISTRY_DIRECTORY = path.join(DATA_DIRECTORY, 'link'); -const GLOBAL_MODULE_DIRECTORY = exports.GLOBAL_MODULE_DIRECTORY = path.join(DATA_DIRECTORY, 'global'); - -const NODE_BIN_PATH = exports.NODE_BIN_PATH = process.execPath; -const YARN_BIN_PATH = exports.YARN_BIN_PATH = getYarnBinPath(); - -// Webpack needs to be configured with node.__dirname/__filename = false -function getYarnBinPath() { - if (isWebpackBundle) { - return __filename; - } else { - return path.join(__dirname, '..', 'bin', 'yarn.js'); - } -} - -const NODE_MODULES_FOLDER = exports.NODE_MODULES_FOLDER = 'node_modules'; -const NODE_PACKAGE_JSON = exports.NODE_PACKAGE_JSON = 'package.json'; - -const PNP_FILENAME = exports.PNP_FILENAME = '.pnp.js'; - -const POSIX_GLOBAL_PREFIX = exports.POSIX_GLOBAL_PREFIX = `${process.env.DESTDIR || ''}/usr/local`; -const 
FALLBACK_GLOBAL_PREFIX = exports.FALLBACK_GLOBAL_PREFIX = path.join(userHome, '.yarn'); - -const META_FOLDER = exports.META_FOLDER = '.yarn-meta'; -const INTEGRITY_FILENAME = exports.INTEGRITY_FILENAME = '.yarn-integrity'; -const LOCKFILE_FILENAME = exports.LOCKFILE_FILENAME = 'yarn.lock'; -const METADATA_FILENAME = exports.METADATA_FILENAME = '.yarn-metadata.json'; -const TARBALL_FILENAME = exports.TARBALL_FILENAME = '.yarn-tarball.tgz'; -const CLEAN_FILENAME = exports.CLEAN_FILENAME = '.yarnclean'; - -const NPM_LOCK_FILENAME = exports.NPM_LOCK_FILENAME = 'package-lock.json'; -const NPM_SHRINKWRAP_FILENAME = exports.NPM_SHRINKWRAP_FILENAME = 'npm-shrinkwrap.json'; - -const DEFAULT_INDENT = exports.DEFAULT_INDENT = ' '; -const SINGLE_INSTANCE_PORT = exports.SINGLE_INSTANCE_PORT = 31997; -const SINGLE_INSTANCE_FILENAME = exports.SINGLE_INSTANCE_FILENAME = '.yarn-single-instance'; - -const ENV_PATH_KEY = exports.ENV_PATH_KEY = getPathKey(process.platform, process.env); - -function getPathKey(platform, env) { - let pathKey = 'PATH'; - - // windows calls its path "Path" usually, but this is not guaranteed. - if (platform === 'win32') { - pathKey = 'Path'; - - for (const key in env) { - if (key.toLowerCase() === 'path') { - pathKey = key; - } - } - } - - return pathKey; -} - -const VERSION_COLOR_SCHEME = exports.VERSION_COLOR_SCHEME = { - major: 'red', - premajor: 'red', - minor: 'yellow', - preminor: 'yellow', - patch: 'green', - prepatch: 'green', - prerelease: 'red', - unchanged: 'white', - unknown: 'red' -}; - -/***/ }), -/* 9 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; -/** - * Copyright (c) 2013-present, Facebook, Inc. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */ - - - -/** - * Use invariant() to assert state which your program assumes to be true. - * - * Provide sprintf-style format (only %s is supported) and arguments - * to provide information about what broke and what you were - * expecting. - * - * The invariant message will be stripped in production, but the invariant - * will remain to ensure logic does not differ in production. - */ - -var NODE_ENV = process.env.NODE_ENV; - -var invariant = function(condition, format, a, b, c, d, e, f) { - if (NODE_ENV !== 'production') { - if (format === undefined) { - throw new Error('invariant requires an error message argument'); - } - } - - if (!condition) { - var error; - if (format === undefined) { - error = new Error( - 'Minified exception occurred; use the non-minified dev environment ' + - 'for the full error message and additional helpful warnings.' 
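/* Hedged usage sketch for invariant() as documented above; only %s
 * placeholders are substituted, and per that docstring the message may be
 * stripped by production builds, leaving the generic minified-exception
 * error:
 *
 *   invariant(ref, 'Package %s is missing a reference', pkg.name);
 *   // a falsy `ref` throws an Error whose name is 'Invariant Violation'
 *   // and whose message has pkg.name spliced in for %s
 */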
- ); - } else { - var args = [a, b, c, d, e, f]; - var argIndex = 0; - error = new Error( - format.replace(/%s/g, function() { return args[argIndex++]; }) - ); - error.name = 'Invariant Violation'; - } - - error.framesToPop = 1; // we don't care about invariant's own frame - throw error; - } -}; - -module.exports = invariant; - - -/***/ }), -/* 10 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -var YAMLException = __webpack_require__(54); - -var TYPE_CONSTRUCTOR_OPTIONS = [ - 'kind', - 'resolve', - 'construct', - 'instanceOf', - 'predicate', - 'represent', - 'defaultStyle', - 'styleAliases' -]; - -var YAML_NODE_KINDS = [ - 'scalar', - 'sequence', - 'mapping' -]; - -function compileStyleAliases(map) { - var result = {}; - - if (map !== null) { - Object.keys(map).forEach(function (style) { - map[style].forEach(function (alias) { - result[String(alias)] = style; - }); - }); - } - - return result; -} - -function Type(tag, options) { - options = options || {}; - - Object.keys(options).forEach(function (name) { - if (TYPE_CONSTRUCTOR_OPTIONS.indexOf(name) === -1) { - throw new YAMLException('Unknown option "' + name + '" is met in definition of "' + tag + '" YAML type.'); - } - }); - - // TODO: Add tag format check. - this.tag = tag; - this.kind = options['kind'] || null; - this.resolve = options['resolve'] || function () { return true; }; - this.construct = options['construct'] || function (data) { return data; }; - this.instanceOf = options['instanceOf'] || null; - this.predicate = options['predicate'] || null; - this.represent = options['represent'] || null; - this.defaultStyle = options['defaultStyle'] || null; - this.styleAliases = compileStyleAliases(options['styleAliases'] || null); - - if (YAML_NODE_KINDS.indexOf(this.kind) === -1) { - throw new YAMLException('Unknown kind "' + this.kind + '" is specified for "' + tag + '" YAML type.'); - } -} - -module.exports = Type; - - -/***/ }), -/* 11 */ -/***/ (function(module, exports) { - -module.exports = require("crypto"); - -/***/ }), -/* 12 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Observable; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_canReportError__ = __webpack_require__(322); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__ = __webpack_require__(932); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__ = __webpack_require__(117); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_pipe__ = __webpack_require__(324); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__config__ = __webpack_require__(185); -/** PURE_IMPORTS_START _util_canReportError,_util_toSubscriber,_internal_symbol_observable,_util_pipe,_config PURE_IMPORTS_END */ - - - - - -var Observable = /*@__PURE__*/ (function () { - function Observable(subscribe) { - this._isScalar = false; - if (subscribe) { - this._subscribe = subscribe; - } - } - Observable.prototype.lift = function (operator) { - var observable = new Observable(); - observable.source = this; - observable.operator = operator; - return observable; - }; - Observable.prototype.subscribe = function (observerOrNext, error, complete) { - var operator = this.operator; - var sink = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_toSubscriber__["a" /* toSubscriber */])(observerOrNext, error, complete); - if (operator) { - operator.call(sink, 
this.source); - } - else { - sink.add(this.source || (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling && !sink.syncErrorThrowable) ? - this._subscribe(sink) : - this._trySubscribe(sink)); - } - if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - if (sink.syncErrorThrowable) { - sink.syncErrorThrowable = false; - if (sink.syncErrorThrown) { - throw sink.syncErrorValue; - } - } - } - return sink; - }; - Observable.prototype._trySubscribe = function (sink) { - try { - return this._subscribe(sink); - } - catch (err) { - if (__WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].useDeprecatedSynchronousErrorHandling) { - sink.syncErrorThrown = true; - sink.syncErrorValue = err; - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_canReportError__["a" /* canReportError */])(sink)) { - sink.error(err); - } - else { - console.warn(err); - } - } - }; - Observable.prototype.forEach = function (next, promiseCtor) { - var _this = this; - promiseCtor = getPromiseCtor(promiseCtor); - return new promiseCtor(function (resolve, reject) { - var subscription; - subscription = _this.subscribe(function (value) { - try { - next(value); - } - catch (err) { - reject(err); - if (subscription) { - subscription.unsubscribe(); - } - } - }, reject, resolve); - }); - }; - Observable.prototype._subscribe = function (subscriber) { - var source = this.source; - return source && source.subscribe(subscriber); - }; - Observable.prototype[__WEBPACK_IMPORTED_MODULE_2__internal_symbol_observable__["a" /* observable */]] = function () { - return this; - }; - Observable.prototype.pipe = function () { - var operations = []; - for (var _i = 0; _i < arguments.length; _i++) { - operations[_i] = arguments[_i]; - } - if (operations.length === 0) { - return this; - } - return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_pipe__["b" /* pipeFromArray */])(operations)(this); - }; - Observable.prototype.toPromise = function (promiseCtor) { - var _this = this; - promiseCtor = getPromiseCtor(promiseCtor); - return new promiseCtor(function (resolve, reject) { - var value; - _this.subscribe(function (x) { return value = x; }, function (err) { return reject(err); }, function () { return resolve(value); }); - }); - }; - Observable.create = function (subscribe) { - return new Observable(subscribe); - }; - return Observable; -}()); - -function getPromiseCtor(promiseCtor) { - if (!promiseCtor) { - promiseCtor = __WEBPACK_IMPORTED_MODULE_4__config__["a" /* config */].Promise || Promise; - } - if (!promiseCtor) { - throw new Error('no Promise impl found'); - } - return promiseCtor; -} -//# sourceMappingURL=Observable.js.map - - -/***/ }), -/* 13 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return OuterSubscriber; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Subscriber__ = __webpack_require__(7); -/** PURE_IMPORTS_START tslib,_Subscriber PURE_IMPORTS_END */ - - -var OuterSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](OuterSubscriber, _super); - function OuterSubscriber() { - return _super !== null && _super.apply(this, arguments) || this; - } - OuterSubscriber.prototype.notifyNext = function (outerValue, innerValue, outerIndex, innerIndex, 
innerSub) { - this.destination.next(innerValue); - }; - OuterSubscriber.prototype.notifyError = function (error, innerSub) { - this.destination.error(error); - }; - OuterSubscriber.prototype.notifyComplete = function (innerSub) { - this.destination.complete(); - }; - return OuterSubscriber; -}(__WEBPACK_IMPORTED_MODULE_1__Subscriber__["a" /* Subscriber */])); - -//# sourceMappingURL=OuterSubscriber.js.map - - -/***/ }), -/* 14 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (immutable) */ __webpack_exports__["a"] = subscribeToResult; -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__ = __webpack_require__(84); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__subscribeTo__ = __webpack_require__(446); -/** PURE_IMPORTS_START _InnerSubscriber,_subscribeTo PURE_IMPORTS_END */ - - -function subscribeToResult(outerSubscriber, result, outerValue, outerIndex, destination) { - if (destination === void 0) { - destination = new __WEBPACK_IMPORTED_MODULE_0__InnerSubscriber__["a" /* InnerSubscriber */](outerSubscriber, outerValue, outerIndex); - } - if (destination.closed) { - return; - } - return __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__subscribeTo__["a" /* subscribeTo */])(result)(destination); -} -//# sourceMappingURL=subscribeToResult.js.map - - -/***/ }), -/* 15 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; -/* eslint-disable node/no-deprecated-api */ - - - -var buffer = __webpack_require__(64) -var Buffer = buffer.Buffer - -var safer = {} - -var key - -for (key in buffer) { - if (!buffer.hasOwnProperty(key)) continue - if (key === 'SlowBuffer' || key === 'Buffer') continue - safer[key] = buffer[key] -} - -var Safer = safer.Buffer = {} -for (key in Buffer) { - if (!Buffer.hasOwnProperty(key)) continue - if (key === 'allocUnsafe' || key === 'allocUnsafeSlow') continue - Safer[key] = Buffer[key] -} - -safer.Buffer.prototype = Buffer.prototype - -if (!Safer.from || Safer.from === Uint8Array.from) { - Safer.from = function (value, encodingOrOffset, length) { - if (typeof value === 'number') { - throw new TypeError('The "value" argument must not be of type number. Received type ' + typeof value) - } - if (value && typeof value.length === 'undefined') { - throw new TypeError('The first argument must be one of type string, Buffer, ArrayBuffer, Array, or Array-like Object. Received type ' + typeof value) - } - return Buffer(value, encodingOrOffset, length) - } -} - -if (!Safer.alloc) { - Safer.alloc = function (size, fill, encoding) { - if (typeof size !== 'number') { - throw new TypeError('The "size" argument must be of type number. 
Received type ' + typeof size) - } - if (size < 0 || size >= 2 * (1 << 30)) { - throw new RangeError('The value "' + size + '" is invalid for option "size"') - } - var buf = Buffer(size) - if (!fill || fill.length === 0) { - buf.fill(0) - } else if (typeof encoding === 'string') { - buf.fill(fill, encoding) - } else { - buf.fill(fill) - } - return buf - } -} - -if (!safer.kStringMaxLength) { - try { - safer.kStringMaxLength = process.binding('buffer').kStringMaxLength - } catch (e) { - // we can't determine kStringMaxLength in environments where process.binding - // is unsupported, so let's not set it - } -} - -if (!safer.constants) { - safer.constants = { - MAX_LENGTH: safer.kMaxLength - } - if (safer.kStringMaxLength) { - safer.constants.MAX_STRING_LENGTH = safer.kStringMaxLength - } -} - -module.exports = safer - - -/***/ }), -/* 16 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright (c) 2012, Mark Cavage. All rights reserved. -// Copyright 2015 Joyent, Inc. - -var assert = __webpack_require__(28); -var Stream = __webpack_require__(23).Stream; -var util = __webpack_require__(3); - - -///--- Globals - -/* JSSTYLED */ -var UUID_REGEXP = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/; - - -///--- Internal - -function _capitalize(str) { - return (str.charAt(0).toUpperCase() + str.slice(1)); -} - -function _toss(name, expected, oper, arg, actual) { - throw new assert.AssertionError({ - message: util.format('%s (%s) is required', name, expected), - actual: (actual === undefined) ? typeof (arg) : actual(arg), - expected: expected, - operator: oper || '===', - stackStartFunction: _toss.caller - }); -} - -function _getClass(arg) { - return (Object.prototype.toString.call(arg).slice(8, -1)); -} - -function noop() { - // Why even bother with asserts? 
-} - - -///--- Exports - -var types = { - bool: { - check: function (arg) { return typeof (arg) === 'boolean'; } - }, - func: { - check: function (arg) { return typeof (arg) === 'function'; } - }, - string: { - check: function (arg) { return typeof (arg) === 'string'; } - }, - object: { - check: function (arg) { - return typeof (arg) === 'object' && arg !== null; - } - }, - number: { - check: function (arg) { - return typeof (arg) === 'number' && !isNaN(arg); - } - }, - finite: { - check: function (arg) { - return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); - } - }, - buffer: { - check: function (arg) { return Buffer.isBuffer(arg); }, - operator: 'Buffer.isBuffer' - }, - array: { - check: function (arg) { return Array.isArray(arg); }, - operator: 'Array.isArray' - }, - stream: { - check: function (arg) { return arg instanceof Stream; }, - operator: 'instanceof', - actual: _getClass - }, - date: { - check: function (arg) { return arg instanceof Date; }, - operator: 'instanceof', - actual: _getClass - }, - regexp: { - check: function (arg) { return arg instanceof RegExp; }, - operator: 'instanceof', - actual: _getClass - }, - uuid: { - check: function (arg) { - return typeof (arg) === 'string' && UUID_REGEXP.test(arg); - }, - operator: 'isUUID' - } -}; - -function _setExports(ndebug) { - var keys = Object.keys(types); - var out; - - /* re-export standard assert */ - if (process.env.NODE_NDEBUG) { - out = noop; - } else { - out = function (arg, msg) { - if (!arg) { - _toss(msg, 'true', arg); - } - }; - } - - /* standard checks */ - keys.forEach(function (k) { - if (ndebug) { - out[k] = noop; - return; - } - var type = types[k]; - out[k] = function (arg, msg) { - if (!type.check(arg)) { - _toss(msg, k, type.operator, arg, type.actual); - } - }; - }); - - /* optional checks */ - keys.forEach(function (k) { - var name = 'optional' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - out[name] = function (arg, msg) { - if (arg === undefined || arg === null) { - return; - } - if (!type.check(arg)) { - _toss(msg, k, type.operator, arg, type.actual); - } - }; - }); - - /* arrayOf checks */ - keys.forEach(function (k) { - var name = 'arrayOf' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - var expected = '[' + k + ']'; - out[name] = function (arg, msg) { - if (!Array.isArray(arg)) { - _toss(msg, expected, type.operator, arg, type.actual); - } - var i; - for (i = 0; i < arg.length; i++) { - if (!type.check(arg[i])) { - _toss(msg, expected, type.operator, arg, type.actual); - } - } - }; - }); - - /* optionalArrayOf checks */ - keys.forEach(function (k) { - var name = 'optionalArrayOf' + _capitalize(k); - if (ndebug) { - out[name] = noop; - return; - } - var type = types[k]; - var expected = '[' + k + ']'; - out[name] = function (arg, msg) { - if (arg === undefined || arg === null) { - return; - } - if (!Array.isArray(arg)) { - _toss(msg, expected, type.operator, arg, type.actual); - } - var i; - for (i = 0; i < arg.length; i++) { - if (!type.check(arg[i])) { - _toss(msg, expected, type.operator, arg, type.actual); - } - } - }; - }); - - /* re-export built-in assertions */ - Object.keys(assert).forEach(function (k) { - if (k === 'AssertionError') { - out[k] = assert[k]; - return; - } - if (ndebug) { - out[k] = noop; - return; - } - out[k] = assert[k]; - }); - - /* export ourselves (for unit tests _only_) */ - out._setExports = _setExports; - - return out; -} - -module.exports = 
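/* Editorial summary of the assert-plus surface built above, with an
 * illustrative call for each generated flavor (every key in `types` gets
 * plain, optional*, arrayOf*, and optionalArrayOf* variants, and all of
 * them collapse to noop when NODE_NDEBUG is set):
 *
 *   assert.string(name, 'name');          // throws unless typeof name === 'string'
 *   assert.optionalNumber(port, 'port');  // undefined/null are allowed through
 *   assert.arrayOfObject(rows, 'rows');   // the array and each element are checked
 */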
_setExports(process.env.NODE_NDEBUG); - - -/***/ }), -/* 17 */ -/***/ (function(module, exports) { - -// https://github.com/zloirock/core-js/issues/86#issuecomment-115759028 -var global = module.exports = typeof window != 'undefined' && window.Math == Math - ? window : typeof self != 'undefined' && self.Math == Math ? self - // eslint-disable-next-line no-new-func - : Function('return this')(); -if (typeof __g == 'number') __g = global; // eslint-disable-line no-undef - - -/***/ }), -/* 18 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.sortAlpha = sortAlpha; -exports.sortOptionsByFlags = sortOptionsByFlags; -exports.entries = entries; -exports.removePrefix = removePrefix; -exports.removeSuffix = removeSuffix; -exports.addSuffix = addSuffix; -exports.hyphenate = hyphenate; -exports.camelCase = camelCase; -exports.compareSortedArrays = compareSortedArrays; -exports.sleep = sleep; -const _camelCase = __webpack_require__(230); - -function sortAlpha(a, b) { - // sort alphabetically in a deterministic way - const shortLen = Math.min(a.length, b.length); - for (let i = 0; i < shortLen; i++) { - const aChar = a.charCodeAt(i); - const bChar = b.charCodeAt(i); - if (aChar !== bChar) { - return aChar - bChar; - } - } - return a.length - b.length; -} - -function sortOptionsByFlags(a, b) { - const aOpt = a.flags.replace(/-/g, ''); - const bOpt = b.flags.replace(/-/g, ''); - return sortAlpha(aOpt, bOpt); -} - -function entries(obj) { - const entries = []; - if (obj) { - for (const key in obj) { - entries.push([key, obj[key]]); - } - } - return entries; -} - -function removePrefix(pattern, prefix) { - if (pattern.startsWith(prefix)) { - pattern = pattern.slice(prefix.length); - } - - return pattern; -} - -function removeSuffix(pattern, suffix) { - if (pattern.endsWith(suffix)) { - return pattern.slice(0, -suffix.length); - } - - return pattern; -} - -function addSuffix(pattern, suffix) { - if (!pattern.endsWith(suffix)) { - return pattern + suffix; - } - - return pattern; -} - -function hyphenate(str) { - return str.replace(/[A-Z]/g, match => { - return '-' + match.charAt(0).toLowerCase(); - }); -} - -function camelCase(str) { - if (/[A-Z]/.test(str)) { - return null; - } else { - return _camelCase(str); - } -} - -function compareSortedArrays(array1, array2) { - if (array1.length !== array2.length) { - return false; - } - for (let i = 0, len = array1.length; i < len; i++) { - if (array1[i] !== array2[i]) { - return false; - } - } - return true; -} - -function sleep(ms) { - return new Promise(resolve => { - setTimeout(resolve, ms); - }); -} - -/***/ }), -/* 19 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.stringify = exports.parse = undefined; - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} - -var _parse; - -function _load_parse() { - return _parse = __webpack_require__(105); -} - -Object.defineProperty(exports, 'parse', { - enumerable: true, - get: function get() { - return _interopRequireDefault(_parse || _load_parse()).default; - } -}); - -var _stringify; - -function _load_stringify() { - return _stringify = __webpack_require__(199); -} - -Object.defineProperty(exports, 'stringify', { - enumerable: true, - get: function get() { - return _interopRequireDefault(_stringify || 
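/* Editorial examples for sortAlpha (module 18 above): it compares raw char
 * codes and then lengths, so the ordering is deterministic and
 * locale-independent, with uppercase sorting ahead of lowercase:
 *
 *   ['b', 'A'].sort(sortAlpha)   // => ['A', 'b']   (65 < 98)
 *   ['ab', 'a'].sort(sortAlpha)  // => ['a', 'ab']  (equal prefix, shorter first)
 */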
_load_stringify()).default; - } -}); -exports.implodeEntry = implodeEntry; -exports.explodeEntry = explodeEntry; - -var _misc; - -function _load_misc() { - return _misc = __webpack_require__(18); -} - -var _normalizePattern; - -function _load_normalizePattern() { - return _normalizePattern = __webpack_require__(37); -} - -var _parse2; - -function _load_parse2() { - return _parse2 = _interopRequireDefault(__webpack_require__(105)); -} - -var _constants; - -function _load_constants() { - return _constants = __webpack_require__(8); -} - -var _fs; - -function _load_fs() { - return _fs = _interopRequireWildcard(__webpack_require__(4)); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const invariant = __webpack_require__(9); - -const path = __webpack_require__(0); -const ssri = __webpack_require__(65); - -function getName(pattern) { - return (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern).name; -} - -function blankObjectUndefined(obj) { - return obj && Object.keys(obj).length ? obj : undefined; -} - -function keyForRemote(remote) { - return remote.resolved || (remote.reference && remote.hash ? `${remote.reference}#${remote.hash}` : null); -} - -function serializeIntegrity(integrity) { - // We need this because `Integrity.toString()` does not use sorting to ensure a stable string output - // See https://git.io/vx2Hy - return integrity.toString().split(' ').sort().join(' '); -} - -function implodeEntry(pattern, obj) { - const inferredName = getName(pattern); - const integrity = obj.integrity ? serializeIntegrity(obj.integrity) : ''; - const imploded = { - name: inferredName === obj.name ? undefined : obj.name, - version: obj.version, - uid: obj.uid === obj.version ? undefined : obj.uid, - resolved: obj.resolved, - registry: obj.registry === 'npm' ? 
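/* What implodeEntry is doing in the object literal this comment interrupts,
 * sketched with hypothetical values: every field that matches its default
 * (name inferable from the pattern, uid equal to version, registry 'npm',
 * empty dependency maps) becomes undefined so it is omitted from yarn.lock;
 * explodeEntry below reinstates those defaults on read:
 *
 *   implodeEntry('left-pad@^1.3.0', { name: 'left-pad', version: '1.3.0',
 *     uid: '1.3.0', registry: 'npm', dependencies: {} })
 *   // => only `version` survives; every other field above is undefined
 */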
undefined : obj.registry, - dependencies: blankObjectUndefined(obj.dependencies), - optionalDependencies: blankObjectUndefined(obj.optionalDependencies), - permissions: blankObjectUndefined(obj.permissions), - prebuiltVariants: blankObjectUndefined(obj.prebuiltVariants) - }; - if (integrity) { - imploded.integrity = integrity; - } - return imploded; -} - -function explodeEntry(pattern, obj) { - obj.optionalDependencies = obj.optionalDependencies || {}; - obj.dependencies = obj.dependencies || {}; - obj.uid = obj.uid || obj.version; - obj.permissions = obj.permissions || {}; - obj.registry = obj.registry || 'npm'; - obj.name = obj.name || getName(pattern); - const integrity = obj.integrity; - if (integrity && integrity.isIntegrity) { - obj.integrity = ssri.parse(integrity); - } - return obj; -} - -class Lockfile { - constructor({ cache, source, parseResultType } = {}) { - this.source = source || ''; - this.cache = cache; - this.parseResultType = parseResultType; - } - - // source string if the `cache` was parsed - - - // if true, we're parsing an old yarn file and need to update integrity fields - hasEntriesExistWithoutIntegrity() { - if (!this.cache) { - return false; - } - - for (const key in this.cache) { - // $FlowFixMe - `this.cache` is clearly defined at this point - if (!/^.*@(file:|http)/.test(key) && this.cache[key] && !this.cache[key].integrity) { - return true; - } - } - - return false; - } - - static fromDirectory(dir, reporter) { - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // read the manifest in this directory - const lockfileLoc = path.join(dir, (_constants || _load_constants()).LOCKFILE_FILENAME); - - let lockfile; - let rawLockfile = ''; - let parseResult; - - if (yield (_fs || _load_fs()).exists(lockfileLoc)) { - rawLockfile = yield (_fs || _load_fs()).readFile(lockfileLoc); - parseResult = (0, (_parse2 || _load_parse2()).default)(rawLockfile, lockfileLoc); - - if (reporter) { - if (parseResult.type === 'merge') { - reporter.info(reporter.lang('lockfileMerged')); - } else if (parseResult.type === 'conflict') { - reporter.warn(reporter.lang('lockfileConflict')); - } - } - - lockfile = parseResult.object; - } else if (reporter) { - reporter.info(reporter.lang('noLockfileFound')); - } - - if (lockfile && lockfile.__metadata) { - const lockfilev2 = lockfile; - lockfile = {}; - } - - return new Lockfile({ cache: lockfile, source: rawLockfile, parseResultType: parseResult && parseResult.type }); - })(); - } - - getLocked(pattern) { - const cache = this.cache; - if (!cache) { - return undefined; - } - - const shrunk = pattern in cache && cache[pattern]; - - if (typeof shrunk === 'string') { - return this.getLocked(shrunk); - } else if (shrunk) { - explodeEntry(pattern, shrunk); - return shrunk; - } - - return undefined; - } - - removePattern(pattern) { - const cache = this.cache; - if (!cache) { - return; - } - delete cache[pattern]; - } - - getLockfile(patterns) { - const lockfile = {}; - const seen = new Map(); - - // order by name so that lockfile manifest is assigned to the first dependency with this manifest - // the others that have the same remoteKey will just refer to the first - // ordering allows for consistency in lockfile when it is serialized - const sortedPatternsKeys = Object.keys(patterns).sort((_misc || _load_misc()).sortAlpha); - - for (var _iterator = sortedPatternsKeys, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
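/* Editorial sketch of the dedup performed by the loop this comment sits in
 * (illustrative yarn.lock shape): patterns that share a remote key are
 * assigned the very same imploded entry object, so only the first one is
 * serialized in full:
 *
 *   lockfile['lodash@^4.0.0'] = entry;   // first pattern: full entry
 *   lockfile['lodash@^4.1.0'] = entry;   // same remoteKey: reuse the object
 */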
_iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const pattern = _ref; - - const pkg = patterns[pattern]; - const remote = pkg._remote, - ref = pkg._reference; - - invariant(ref, 'Package is missing a reference'); - invariant(remote, 'Package is missing a remote'); - - const remoteKey = keyForRemote(remote); - const seenPattern = remoteKey && seen.get(remoteKey); - if (seenPattern) { - // no point in duplicating it - lockfile[pattern] = seenPattern; - - // if we're relying on our name being inferred and two of the patterns have - // different inferred names then we need to set it - if (!seenPattern.name && getName(pattern) !== pkg.name) { - seenPattern.name = pkg.name; - } - continue; - } - const obj = implodeEntry(pattern, { - name: pkg.name, - version: pkg.version, - uid: pkg._uid, - resolved: remote.resolved, - integrity: remote.integrity, - registry: remote.registry, - dependencies: pkg.dependencies, - peerDependencies: pkg.peerDependencies, - optionalDependencies: pkg.optionalDependencies, - permissions: ref.permissions, - prebuiltVariants: pkg.prebuiltVariants - }); - - lockfile[pattern] = obj; - - if (remoteKey) { - seen.set(remoteKey, obj); - } - } - - return lockfile; - } -} -exports.default = Lockfile; - -/***/ }), -/* 20 */ -/***/ (function(module, exports, __webpack_require__) { - -var store = __webpack_require__(133)('wks'); -var uid = __webpack_require__(137); -var Symbol = __webpack_require__(17).Symbol; -var USE_SYMBOL = typeof Symbol == 'function'; - -var $exports = module.exports = function (name) { - return store[name] || (store[name] = - USE_SYMBOL && Symbol[name] || (USE_SYMBOL ? Symbol : uid)('Symbol.' + name)); -}; - -$exports.store = store; - - -/***/ }), -/* 21 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -exports.__esModule = true; - -var _assign = __webpack_require__(591); - -var _assign2 = _interopRequireDefault(_assign); - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -exports.default = _assign2.default || function (target) { - for (var i = 1; i < arguments.length; i++) { - var source = arguments[i]; - - for (var key in source) { - if (Object.prototype.hasOwnProperty.call(source, key)) { - target[key] = source[key]; - } - } - } - - return target; -}; - -/***/ }), -/* 22 */ -/***/ (function(module, exports) { - -exports = module.exports = SemVer; - -// The debug function is excluded entirely from the minified version. -/* nomin */ var debug; -/* nomin */ if (typeof process === 'object' && - /* nomin */ process.env && - /* nomin */ process.env.NODE_DEBUG && - /* nomin */ /\bsemver\b/i.test(process.env.NODE_DEBUG)) - /* nomin */ debug = function() { - /* nomin */ var args = Array.prototype.slice.call(arguments, 0); - /* nomin */ args.unshift('SEMVER'); - /* nomin */ console.log.apply(console, args); - /* nomin */ }; -/* nomin */ else - /* nomin */ debug = function() {}; - -// Note: this is the semver.org version of the spec that it implements -// Not necessarily the package version of this code. -exports.SEMVER_SPEC_VERSION = '2.0.0'; - -var MAX_LENGTH = 256; -var MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || 9007199254740991; - -// Max safe segment length for coercion. 
-var MAX_SAFE_COMPONENT_LENGTH = 16; - -// The actual regexps go on exports.re -var re = exports.re = []; -var src = exports.src = []; -var R = 0; - -// The following Regular Expressions can be used for tokenizing, -// validating, and parsing SemVer version strings. - -// ## Numeric Identifier -// A single `0`, or a non-zero digit followed by zero or more digits. - -var NUMERICIDENTIFIER = R++; -src[NUMERICIDENTIFIER] = '0|[1-9]\\d*'; -var NUMERICIDENTIFIERLOOSE = R++; -src[NUMERICIDENTIFIERLOOSE] = '[0-9]+'; - - -// ## Non-numeric Identifier -// Zero or more digits, followed by a letter or hyphen, and then zero or -// more letters, digits, or hyphens. - -var NONNUMERICIDENTIFIER = R++; -src[NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'; - - -// ## Main Version -// Three dot-separated numeric identifiers. - -var MAINVERSION = R++; -src[MAINVERSION] = '(' + src[NUMERICIDENTIFIER] + ')\\.' + - '(' + src[NUMERICIDENTIFIER] + ')\\.' + - '(' + src[NUMERICIDENTIFIER] + ')'; - -var MAINVERSIONLOOSE = R++; -src[MAINVERSIONLOOSE] = '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + - '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' + - '(' + src[NUMERICIDENTIFIERLOOSE] + ')'; - -// ## Pre-release Version Identifier -// A numeric identifier, or a non-numeric identifier. - -var PRERELEASEIDENTIFIER = R++; -src[PRERELEASEIDENTIFIER] = '(?:' + src[NUMERICIDENTIFIER] + - '|' + src[NONNUMERICIDENTIFIER] + ')'; - -var PRERELEASEIDENTIFIERLOOSE = R++; -src[PRERELEASEIDENTIFIERLOOSE] = '(?:' + src[NUMERICIDENTIFIERLOOSE] + - '|' + src[NONNUMERICIDENTIFIER] + ')'; - - -// ## Pre-release Version -// Hyphen, followed by one or more dot-separated pre-release version -// identifiers. - -var PRERELEASE = R++; -src[PRERELEASE] = '(?:-(' + src[PRERELEASEIDENTIFIER] + - '(?:\\.' + src[PRERELEASEIDENTIFIER] + ')*))'; - -var PRERELEASELOOSE = R++; -src[PRERELEASELOOSE] = '(?:-?(' + src[PRERELEASEIDENTIFIERLOOSE] + - '(?:\\.' + src[PRERELEASEIDENTIFIERLOOSE] + ')*))'; - -// ## Build Metadata Identifier -// Any combination of digits, letters, or hyphens. - -var BUILDIDENTIFIER = R++; -src[BUILDIDENTIFIER] = '[0-9A-Za-z-]+'; - -// ## Build Metadata -// Plus sign, followed by one or more period-separated build metadata -// identifiers. - -var BUILD = R++; -src[BUILD] = '(?:\\+(' + src[BUILDIDENTIFIER] + - '(?:\\.' + src[BUILDIDENTIFIER] + ')*))'; - - -// ## Full Version String -// A main version, followed optionally by a pre-release version and -// build metadata. - -// Note that the only major, minor, patch, and pre-release sections of -// the version string are capturing groups. The build metadata is not a -// capturing group, because it should not ever be used in version -// comparison. - -var FULL = R++; -var FULLPLAIN = 'v?' + src[MAINVERSION] + - src[PRERELEASE] + '?' + - src[BUILD] + '?'; - -src[FULL] = '^' + FULLPLAIN + '$'; - -// like full, but allows v1.2.3 and =1.2.3, which people do sometimes. -// also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty -// common in the npm registry. -var LOOSEPLAIN = '[v=\\s]*' + src[MAINVERSIONLOOSE] + - src[PRERELEASELOOSE] + '?' + - src[BUILD] + '?'; - -var LOOSE = R++; -src[LOOSE] = '^' + LOOSEPLAIN + '$'; - -var GTLT = R++; -src[GTLT] = '((?:<|>)?=?)'; - -// Something like "2.*" or "1.2.x". -// Note that "x.x" is a valid xRange identifer, meaning "any version" -// Only the first item is strictly required. 
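/* Editorial examples for the xRange grammar compiled below (standard semver
 * semantics; values illustrative):
 *
 *   '1.2.x' matches 1.2.0, 1.2.7, ...   (patch is free)
 *   '1.*'   matches any 1.y.z           (minor and patch are free)
 *   'x'     matches any version whatsoever
 */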
-var XRANGEIDENTIFIERLOOSE = R++; -src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'; -var XRANGEIDENTIFIER = R++; -src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + '|x|X|\\*'; - -var XRANGEPLAIN = R++; -src[XRANGEPLAIN] = '[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' + - '(?:' + src[PRERELEASE] + ')?' + - src[BUILD] + '?' + - ')?)?'; - -var XRANGEPLAINLOOSE = R++; -src[XRANGEPLAINLOOSE] = '[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' + - '(?:' + src[PRERELEASELOOSE] + ')?' + - src[BUILD] + '?' + - ')?)?'; - -var XRANGE = R++; -src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'; -var XRANGELOOSE = R++; -src[XRANGELOOSE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAINLOOSE] + '$'; - -// Coercion. -// Extract anything that could conceivably be a part of a valid semver -var COERCE = R++; -src[COERCE] = '(?:^|[^\\d])' + - '(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '})' + - '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + - '(?:\\.(\\d{1,' + MAX_SAFE_COMPONENT_LENGTH + '}))?' + - '(?:$|[^\\d])'; - -// Tilde ranges. -// Meaning is "reasonably at or greater than" -var LONETILDE = R++; -src[LONETILDE] = '(?:~>?)'; - -var TILDETRIM = R++; -src[TILDETRIM] = '(\\s*)' + src[LONETILDE] + '\\s+'; -re[TILDETRIM] = new RegExp(src[TILDETRIM], 'g'); -var tildeTrimReplace = '$1~'; - -var TILDE = R++; -src[TILDE] = '^' + src[LONETILDE] + src[XRANGEPLAIN] + '$'; -var TILDELOOSE = R++; -src[TILDELOOSE] = '^' + src[LONETILDE] + src[XRANGEPLAINLOOSE] + '$'; - -// Caret ranges. -// Meaning is "at least and backwards compatible with" -var LONECARET = R++; -src[LONECARET] = '(?:\\^)'; - -var CARETTRIM = R++; -src[CARETTRIM] = '(\\s*)' + src[LONECARET] + '\\s+'; -re[CARETTRIM] = new RegExp(src[CARETTRIM], 'g'); -var caretTrimReplace = '$1^'; - -var CARET = R++; -src[CARET] = '^' + src[LONECARET] + src[XRANGEPLAIN] + '$'; -var CARETLOOSE = R++; -src[CARETLOOSE] = '^' + src[LONECARET] + src[XRANGEPLAINLOOSE] + '$'; - -// A simple gt/lt/eq thing, or just "" to indicate "any version" -var COMPARATORLOOSE = R++; -src[COMPARATORLOOSE] = '^' + src[GTLT] + '\\s*(' + LOOSEPLAIN + ')$|^$'; -var COMPARATOR = R++; -src[COMPARATOR] = '^' + src[GTLT] + '\\s*(' + FULLPLAIN + ')$|^$'; - - -// An expression to strip any whitespace between the gtlt and the thing -// it modifies, so that `> 1.2.3` ==> `>1.2.3` -var COMPARATORTRIM = R++; -src[COMPARATORTRIM] = '(\\s*)' + src[GTLT] + - '\\s*(' + LOOSEPLAIN + '|' + src[XRANGEPLAIN] + ')'; - -// this one has to use the /g flag -re[COMPARATORTRIM] = new RegExp(src[COMPARATORTRIM], 'g'); -var comparatorTrimReplace = '$1$2$3'; - - -// Something like `1.2.3 - 1.2.4` -// Note that these all use the loose form, because they'll be -// checked against either the strict or loose comparator form -// later. -var HYPHENRANGE = R++; -src[HYPHENRANGE] = '^\\s*(' + src[XRANGEPLAIN] + ')' + - '\\s+-\\s+' + - '(' + src[XRANGEPLAIN] + ')' + - '\\s*$'; - -var HYPHENRANGELOOSE = R++; -src[HYPHENRANGELOOSE] = '^\\s*(' + src[XRANGEPLAINLOOSE] + ')' + - '\\s+-\\s+' + - '(' + src[XRANGEPLAINLOOSE] + ')' + - '\\s*$'; - -// Star ranges basically just allow anything at all. -var STAR = R++; -src[STAR] = '(<|>)?=?\\s*\\*'; - -// Compile to actual regexp objects. -// All are flag-free, unless they were created above with a flag. 
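/* Editorial worked examples for the tilde and caret ranges defined above
 * (standard semver semantics, not text from the original bundle):
 *
 *   ~1.2.3  :=  >=1.2.3 <1.3.0   "reasonably at or greater than"
 *   ^1.2.3  :=  >=1.2.3 <2.0.0   "at least and backwards compatible with"
 *   ^0.2.3  :=  >=0.2.3 <0.3.0   (caret keeps the first non-zero field fixed)
 */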
-for (var i = 0; i < R; i++) { - debug(i, src[i]); - if (!re[i]) - re[i] = new RegExp(src[i]); -} - -exports.parse = parse; -function parse(version, loose) { - if (version instanceof SemVer) - return version; - - if (typeof version !== 'string') - return null; - - if (version.length > MAX_LENGTH) - return null; - - var r = loose ? re[LOOSE] : re[FULL]; - if (!r.test(version)) - return null; - - try { - return new SemVer(version, loose); - } catch (er) { - return null; - } -} - -exports.valid = valid; -function valid(version, loose) { - var v = parse(version, loose); - return v ? v.version : null; -} - - -exports.clean = clean; -function clean(version, loose) { - var s = parse(version.trim().replace(/^[=v]+/, ''), loose); - return s ? s.version : null; -} - -exports.SemVer = SemVer; - -function SemVer(version, loose) { - if (version instanceof SemVer) { - if (version.loose === loose) - return version; - else - version = version.version; - } else if (typeof version !== 'string') { - throw new TypeError('Invalid Version: ' + version); - } - - if (version.length > MAX_LENGTH) - throw new TypeError('version is longer than ' + MAX_LENGTH + ' characters') - - if (!(this instanceof SemVer)) - return new SemVer(version, loose); - - debug('SemVer', version, loose); - this.loose = loose; - var m = version.trim().match(loose ? re[LOOSE] : re[FULL]); - - if (!m) - throw new TypeError('Invalid Version: ' + version); - - this.raw = version; - - // these are actually numbers - this.major = +m[1]; - this.minor = +m[2]; - this.patch = +m[3]; - - if (this.major > MAX_SAFE_INTEGER || this.major < 0) - throw new TypeError('Invalid major version') - - if (this.minor > MAX_SAFE_INTEGER || this.minor < 0) - throw new TypeError('Invalid minor version') - - if (this.patch > MAX_SAFE_INTEGER || this.patch < 0) - throw new TypeError('Invalid patch version') - - // numberify any prerelease numeric ids - if (!m[4]) - this.prerelease = []; - else - this.prerelease = m[4].split('.').map(function(id) { - if (/^[0-9]+$/.test(id)) { - var num = +id; - if (num >= 0 && num < MAX_SAFE_INTEGER) - return num; - } - return id; - }); - - this.build = m[5] ? m[5].split('.') : []; - this.format(); -} - -SemVer.prototype.format = function() { - this.version = this.major + '.' + this.minor + '.' 
+ this.patch; - if (this.prerelease.length) - this.version += '-' + this.prerelease.join('.'); - return this.version; -}; - -SemVer.prototype.toString = function() { - return this.version; -}; - -SemVer.prototype.compare = function(other) { - debug('SemVer.compare', this.version, this.loose, other); - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - return this.compareMain(other) || this.comparePre(other); -}; - -SemVer.prototype.compareMain = function(other) { - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - return compareIdentifiers(this.major, other.major) || - compareIdentifiers(this.minor, other.minor) || - compareIdentifiers(this.patch, other.patch); -}; - -SemVer.prototype.comparePre = function(other) { - if (!(other instanceof SemVer)) - other = new SemVer(other, this.loose); - - // NOT having a prerelease is > having one - if (this.prerelease.length && !other.prerelease.length) - return -1; - else if (!this.prerelease.length && other.prerelease.length) - return 1; - else if (!this.prerelease.length && !other.prerelease.length) - return 0; - - var i = 0; - do { - var a = this.prerelease[i]; - var b = other.prerelease[i]; - debug('prerelease compare', i, a, b); - if (a === undefined && b === undefined) - return 0; - else if (b === undefined) - return 1; - else if (a === undefined) - return -1; - else if (a === b) - continue; - else - return compareIdentifiers(a, b); - } while (++i); -}; - -// preminor will bump the version up to the next minor release, and immediately -// down to pre-release. premajor and prepatch work the same way. -SemVer.prototype.inc = function(release, identifier) { - switch (release) { - case 'premajor': - this.prerelease.length = 0; - this.patch = 0; - this.minor = 0; - this.major++; - this.inc('pre', identifier); - break; - case 'preminor': - this.prerelease.length = 0; - this.patch = 0; - this.minor++; - this.inc('pre', identifier); - break; - case 'prepatch': - // If this is already a prerelease, it will bump to the next version - // drop any prereleases that might already exist, since they are not - // relevant at this point. - this.prerelease.length = 0; - this.inc('patch', identifier); - this.inc('pre', identifier); - break; - // If the input is a non-prerelease version, this acts the same as - // prepatch. - case 'prerelease': - if (this.prerelease.length === 0) - this.inc('patch', identifier); - this.inc('pre', identifier); - break; - - case 'major': - // If this is a pre-major version, bump up to the same major version. - // Otherwise increment major. - // 1.0.0-5 bumps to 1.0.0 - // 1.1.0 bumps to 2.0.0 - if (this.minor !== 0 || this.patch !== 0 || this.prerelease.length === 0) - this.major++; - this.minor = 0; - this.patch = 0; - this.prerelease = []; - break; - case 'minor': - // If this is a pre-minor version, bump up to the same minor version. - // Otherwise increment minor. - // 1.2.0-5 bumps to 1.2.0 - // 1.2.1 bumps to 1.3.0 - if (this.patch !== 0 || this.prerelease.length === 0) - this.minor++; - this.patch = 0; - this.prerelease = []; - break; - case 'patch': - // If this is not a pre-release version, it will increment the patch. - // If it is a pre-release it will bump up to the same patch version. - // 1.2.0-5 patches to 1.2.0 - // 1.2.0 patches to 1.2.1 - if (this.prerelease.length === 0) - this.patch++; - this.prerelease = []; - break; - // This probably shouldn't be used publicly. - // 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction. 
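/* Editorial examples derived from the switch above (results follow from the
 * code, not from the original comments):
 *
 *   new SemVer('1.2.3').inc('preminor').version    // '1.3.0-0'
 *   new SemVer('1.2.3').inc('prerelease').version  // '1.2.4-0'
 *   new SemVer('1.0.0-5').inc('major').version     // '1.0.0' (just finalizes)
 *   new SemVer('1.1.0').inc('major').version       // '2.0.0'
 */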
- case 'pre': - if (this.prerelease.length === 0) - this.prerelease = [0]; - else { - var i = this.prerelease.length; - while (--i >= 0) { - if (typeof this.prerelease[i] === 'number') { - this.prerelease[i]++; - i = -2; - } - } - if (i === -1) // didn't increment anything - this.prerelease.push(0); - } - if (identifier) { - // 1.2.0-beta.1 bumps to 1.2.0-beta.2, - // 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0 - if (this.prerelease[0] === identifier) { - if (isNaN(this.prerelease[1])) - this.prerelease = [identifier, 0]; - } else - this.prerelease = [identifier, 0]; - } - break; - - default: - throw new Error('invalid increment argument: ' + release); - } - this.format(); - this.raw = this.version; - return this; -}; - -exports.inc = inc; -function inc(version, release, loose, identifier) { - if (typeof(loose) === 'string') { - identifier = loose; - loose = undefined; - } - - try { - return new SemVer(version, loose).inc(release, identifier).version; - } catch (er) { - return null; - } -} - -exports.diff = diff; -function diff(version1, version2) { - if (eq(version1, version2)) { - return null; - } else { - var v1 = parse(version1); - var v2 = parse(version2); - if (v1.prerelease.length || v2.prerelease.length) { - for (var key in v1) { - if (key === 'major' || key === 'minor' || key === 'patch') { - if (v1[key] !== v2[key]) { - return 'pre'+key; - } - } - } - return 'prerelease'; - } - for (var key in v1) { - if (key === 'major' || key === 'minor' || key === 'patch') { - if (v1[key] !== v2[key]) { - return key; - } - } - } - } -} - -exports.compareIdentifiers = compareIdentifiers; - -var numeric = /^[0-9]+$/; -function compareIdentifiers(a, b) { - var anum = numeric.test(a); - var bnum = numeric.test(b); - - if (anum && bnum) { - a = +a; - b = +b; - } - - return (anum && !bnum) ? -1 : - (bnum && !anum) ? 1 : - a < b ? -1 : - a > b ? 
1 : - 0; -} - -exports.rcompareIdentifiers = rcompareIdentifiers; -function rcompareIdentifiers(a, b) { - return compareIdentifiers(b, a); -} - -exports.major = major; -function major(a, loose) { - return new SemVer(a, loose).major; -} - -exports.minor = minor; -function minor(a, loose) { - return new SemVer(a, loose).minor; -} - -exports.patch = patch; -function patch(a, loose) { - return new SemVer(a, loose).patch; -} - -exports.compare = compare; -function compare(a, b, loose) { - return new SemVer(a, loose).compare(new SemVer(b, loose)); -} - -exports.compareLoose = compareLoose; -function compareLoose(a, b) { - return compare(a, b, true); -} - -exports.rcompare = rcompare; -function rcompare(a, b, loose) { - return compare(b, a, loose); -} - -exports.sort = sort; -function sort(list, loose) { - return list.sort(function(a, b) { - return exports.compare(a, b, loose); - }); -} - -exports.rsort = rsort; -function rsort(list, loose) { - return list.sort(function(a, b) { - return exports.rcompare(a, b, loose); - }); -} - -exports.gt = gt; -function gt(a, b, loose) { - return compare(a, b, loose) > 0; -} - -exports.lt = lt; -function lt(a, b, loose) { - return compare(a, b, loose) < 0; -} - -exports.eq = eq; -function eq(a, b, loose) { - return compare(a, b, loose) === 0; -} - -exports.neq = neq; -function neq(a, b, loose) { - return compare(a, b, loose) !== 0; -} - -exports.gte = gte; -function gte(a, b, loose) { - return compare(a, b, loose) >= 0; -} - -exports.lte = lte; -function lte(a, b, loose) { - return compare(a, b, loose) <= 0; -} - -exports.cmp = cmp; -function cmp(a, op, b, loose) { - var ret; - switch (op) { - case '===': - if (typeof a === 'object') a = a.version; - if (typeof b === 'object') b = b.version; - ret = a === b; - break; - case '!==': - if (typeof a === 'object') a = a.version; - if (typeof b === 'object') b = b.version; - ret = a !== b; - break; - case '': case '=': case '==': ret = eq(a, b, loose); break; - case '!=': ret = neq(a, b, loose); break; - case '>': ret = gt(a, b, loose); break; - case '>=': ret = gte(a, b, loose); break; - case '<': ret = lt(a, b, loose); break; - case '<=': ret = lte(a, b, loose); break; - default: throw new TypeError('Invalid operator: ' + op); - } - return ret; -} - -exports.Comparator = Comparator; -function Comparator(comp, loose) { - if (comp instanceof Comparator) { - if (comp.loose === loose) - return comp; - else - comp = comp.value; - } - - if (!(this instanceof Comparator)) - return new Comparator(comp, loose); - - debug('comparator', comp, loose); - this.loose = loose; - this.parse(comp); - - if (this.semver === ANY) - this.value = ''; - else - this.value = this.operator + this.semver.version; - - debug('comp', this); -} - -var ANY = {}; -Comparator.prototype.parse = function(comp) { - var r = this.loose ? re[COMPARATORLOOSE] : re[COMPARATOR]; - var m = comp.match(r); - - if (!m) - throw new TypeError('Invalid comparator: ' + comp); - - this.operator = m[1]; - if (this.operator === '=') - this.operator = ''; - - // if it literally is just '>' or '' then allow anything. 
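-
- // The exported comparison helpers above all reduce to compare()'s
- // -1/0/1 contract. A sketch of hypothetical calls, assuming the module
- // is loaded as `semver`:
- //
- //   semver.gt('1.2.3', '1.2.3-beta.4')         // true: a release outranks its own prereleases
- //   semver.compare('1.10.0', '1.9.0')          // 1: fields compare numerically, not lexically
- //   semver.sort(['1.10.0', '1.2.0', '1.9.0'])  // ['1.2.0', '1.9.0', '1.10.0']
- //   semver.cmp('1.2.3', '>=', '1.2.0')         // true: the operator dispatch cmp() provides
-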
- if (!m[2]) - this.semver = ANY; - else - this.semver = new SemVer(m[2], this.loose); -}; - -Comparator.prototype.toString = function() { - return this.value; -}; - -Comparator.prototype.test = function(version) { - debug('Comparator.test', version, this.loose); - - if (this.semver === ANY) - return true; - - if (typeof version === 'string') - version = new SemVer(version, this.loose); - - return cmp(version, this.operator, this.semver, this.loose); -}; - -Comparator.prototype.intersects = function(comp, loose) { - if (!(comp instanceof Comparator)) { - throw new TypeError('a Comparator is required'); - } - - var rangeTmp; - - if (this.operator === '') { - rangeTmp = new Range(comp.value, loose); - return satisfies(this.value, rangeTmp, loose); - } else if (comp.operator === '') { - rangeTmp = new Range(this.value, loose); - return satisfies(comp.semver, rangeTmp, loose); - } - - var sameDirectionIncreasing = - (this.operator === '>=' || this.operator === '>') && - (comp.operator === '>=' || comp.operator === '>'); - var sameDirectionDecreasing = - (this.operator === '<=' || this.operator === '<') && - (comp.operator === '<=' || comp.operator === '<'); - var sameSemVer = this.semver.version === comp.semver.version; - var differentDirectionsInclusive = - (this.operator === '>=' || this.operator === '<=') && - (comp.operator === '>=' || comp.operator === '<='); - var oppositeDirectionsLessThan = - cmp(this.semver, '<', comp.semver, loose) && - ((this.operator === '>=' || this.operator === '>') && - (comp.operator === '<=' || comp.operator === '<')); - var oppositeDirectionsGreaterThan = - cmp(this.semver, '>', comp.semver, loose) && - ((this.operator === '<=' || this.operator === '<') && - (comp.operator === '>=' || comp.operator === '>')); - - return sameDirectionIncreasing || sameDirectionDecreasing || - (sameSemVer && differentDirectionsInclusive) || - oppositeDirectionsLessThan || oppositeDirectionsGreaterThan; -}; - - -exports.Range = Range; -function Range(range, loose) { - if (range instanceof Range) { - if (range.loose === loose) { - return range; - } else { - return new Range(range.raw, loose); - } - } - - if (range instanceof Comparator) { - return new Range(range.value, loose); - } - - if (!(this instanceof Range)) - return new Range(range, loose); - - this.loose = loose; - - // First, split based on boolean or || - this.raw = range; - this.set = range.split(/\s*\|\|\s*/).map(function(range) { - return this.parseRange(range.trim()); - }, this).filter(function(c) { - // throw out any that are not relevant for whatever reason - return c.length; - }); - - if (!this.set.length) { - throw new TypeError('Invalid SemVer Range: ' + range); - } - - this.format(); -} - -Range.prototype.format = function() { - this.range = this.set.map(function(comps) { - return comps.join(' ').trim(); - }).join('||').trim(); - return this.range; -}; - -Range.prototype.toString = function() { - return this.range; -}; - -Range.prototype.parseRange = function(range) { - var loose = this.loose; - range = range.trim(); - debug('range', range, loose); - // `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4` - var hr = loose ? 
re[HYPHENRANGELOOSE] : re[HYPHENRANGE]; - range = range.replace(hr, hyphenReplace); - debug('hyphen replace', range); - // `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5` - range = range.replace(re[COMPARATORTRIM], comparatorTrimReplace); - debug('comparator trim', range, re[COMPARATORTRIM]); - - // `~ 1.2.3` => `~1.2.3` - range = range.replace(re[TILDETRIM], tildeTrimReplace); - - // `^ 1.2.3` => `^1.2.3` - range = range.replace(re[CARETTRIM], caretTrimReplace); - - // normalize spaces - range = range.split(/\s+/).join(' '); - - // At this point, the range is completely trimmed and - // ready to be split into comparators. - - var compRe = loose ? re[COMPARATORLOOSE] : re[COMPARATOR]; - var set = range.split(' ').map(function(comp) { - return parseComparator(comp, loose); - }).join(' ').split(/\s+/); - if (this.loose) { - // in loose mode, throw out any that are not valid comparators - set = set.filter(function(comp) { - return !!comp.match(compRe); - }); - } - set = set.map(function(comp) { - return new Comparator(comp, loose); - }); - - return set; -}; - -Range.prototype.intersects = function(range, loose) { - if (!(range instanceof Range)) { - throw new TypeError('a Range is required'); - } - - return this.set.some(function(thisComparators) { - return thisComparators.every(function(thisComparator) { - return range.set.some(function(rangeComparators) { - return rangeComparators.every(function(rangeComparator) { - return thisComparator.intersects(rangeComparator, loose); - }); - }); - }); - }); -}; - -// Mostly just for testing and legacy API reasons -exports.toComparators = toComparators; -function toComparators(range, loose) { - return new Range(range, loose).set.map(function(comp) { - return comp.map(function(c) { - return c.value; - }).join(' ').trim().split(' '); - }); -} - -// comprised of xranges, tildes, stars, and gtlt's at this point. -// already replaced the hyphen ranges -// turn into a set of JUST comparators. -function parseComparator(comp, loose) { - debug('comp', comp); - comp = replaceCarets(comp, loose); - debug('caret', comp); - comp = replaceTildes(comp, loose); - debug('tildes', comp); - comp = replaceXRanges(comp, loose); - debug('xrange', comp); - comp = replaceStars(comp, loose); - debug('stars', comp); - return comp; -} - -function isX(id) { - return !id || id.toLowerCase() === 'x' || id === '*'; -} - -// ~, ~> --> * (any, kinda silly) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0 -function replaceTildes(comp, loose) { - return comp.trim().split(/\s+/).map(function(comp) { - return replaceTilde(comp, loose); - }).join(' '); -} - -function replaceTilde(comp, loose) { - var r = loose ? re[TILDELOOSE] : re[TILDE]; - return comp.replace(r, function(_, M, m, p, pr) { - debug('tilde', comp, _, M, m, p, pr); - var ret; - - if (isX(M)) - ret = ''; - else if (isX(m)) - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - else if (isX(p)) - // ~1.2 == >=1.2.0 <1.3.0 - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - else if (pr) { - debug('replaceTilde pr', pr); - if (pr.charAt(0) !== '-') - pr = '-' + pr; - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - // ~1.2.3 == >=1.2.3 <1.3.0 - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' 
+ (+m + 1) + '.0'; - - debug('tilde return', ret); - return ret; - }); -} - -// ^ --> * (any, kinda silly) -// ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0 -// ^2.0, ^2.0.x --> >=2.0.0 <3.0.0 -// ^1.2, ^1.2.x --> >=1.2.0 <2.0.0 -// ^1.2.3 --> >=1.2.3 <2.0.0 -// ^1.2.0 --> >=1.2.0 <2.0.0 -function replaceCarets(comp, loose) { - return comp.trim().split(/\s+/).map(function(comp) { - return replaceCaret(comp, loose); - }).join(' '); -} - -function replaceCaret(comp, loose) { - debug('caret', comp, loose); - var r = loose ? re[CARETLOOSE] : re[CARET]; - return comp.replace(r, function(_, M, m, p, pr) { - debug('caret', comp, _, M, m, p, pr); - var ret; - - if (isX(M)) - ret = ''; - else if (isX(m)) - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - else if (isX(p)) { - if (M === '0') - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - else - ret = '>=' + M + '.' + m + '.0 <' + (+M + 1) + '.0.0'; - } else if (pr) { - debug('replaceCaret pr', pr); - if (pr.charAt(0) !== '-') - pr = '-' + pr; - if (M === '0') { - if (m === '0') - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + m + '.' + (+p + 1); - else - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - ret = '>=' + M + '.' + m + '.' + p + pr + - ' <' + (+M + 1) + '.0.0'; - } else { - debug('no pr'); - if (M === '0') { - if (m === '0') - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' + m + '.' + (+p + 1); - else - ret = '>=' + M + '.' + m + '.' + p + - ' <' + M + '.' + (+m + 1) + '.0'; - } else - ret = '>=' + M + '.' + m + '.' + p + - ' <' + (+M + 1) + '.0.0'; - } - - debug('caret return', ret); - return ret; - }); -} - -function replaceXRanges(comp, loose) { - debug('replaceXRanges', comp, loose); - return comp.split(/\s+/).map(function(comp) { - return replaceXRange(comp, loose); - }).join(' '); -} - -function replaceXRange(comp, loose) { - comp = comp.trim(); - var r = loose ? re[XRANGELOOSE] : re[XRANGE]; - return comp.replace(r, function(ret, gtlt, M, m, p, pr) { - debug('xRange', comp, ret, gtlt, M, m, p, pr); - var xM = isX(M); - var xm = xM || isX(m); - var xp = xm || isX(p); - var anyX = xp; - - if (gtlt === '=' && anyX) - gtlt = ''; - - if (xM) { - if (gtlt === '>' || gtlt === '<') { - // nothing is allowed - ret = '<0.0.0'; - } else { - // nothing is forbidden - ret = '*'; - } - } else if (gtlt && anyX) { - // replace X with 0 - if (xm) - m = 0; - if (xp) - p = 0; - - if (gtlt === '>') { - // >1 => >=2.0.0 - // >1.2 => >=1.3.0 - // >1.2.3 => >= 1.2.4 - gtlt = '>='; - if (xm) { - M = +M + 1; - m = 0; - p = 0; - } else if (xp) { - m = +m + 1; - p = 0; - } - } else if (gtlt === '<=') { - // <=0.7.x is actually <0.8.0, since any 0.7.x should - // pass. Similarly, <=7.x is actually <8.0.0, etc. - gtlt = '<'; - if (xm) - M = +M + 1; - else - m = +m + 1; - } - - ret = gtlt + M + '.' + m + '.' + p; - } else if (xm) { - ret = '>=' + M + '.0.0 <' + (+M + 1) + '.0.0'; - } else if (xp) { - ret = '>=' + M + '.' + m + '.0 <' + M + '.' + (+m + 1) + '.0'; - } - - debug('xRange return', ret); - - return ret; - }); -} - -// Because * is AND-ed with everything else in the comparator, -// and '' means "any version", just remove the *s entirely. -function replaceStars(comp, loose) { - debug('replaceStars', comp, loose); - // Looseness is ignored here. star is always as loose as it gets! 
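-
- // The tilde/caret desugaring tables above can be checked end-to-end via
- // the validRange() helper exported further down. Hypothetical calls,
- // assuming the module is loaded as `semver`:
- //
- //   semver.validRange('~1.2.3')      // '>=1.2.3 <1.3.0'
- //   semver.validRange('^1.2.3')      // '>=1.2.3 <2.0.0'
- //   semver.validRange('^0.2.3')      // '>=0.2.3 <0.3.0' (caret pins the leftmost nonzero field)
- //   semver.validRange('1.2.x')       // '>=1.2.0 <1.3.0'
- //   semver.validRange('1.2.3 - 2.3') // '>=1.2.3 <2.4.0'
-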
- return comp.trim().replace(re[STAR], '');
-}
-
-// This function is passed to string.replace(re[HYPHENRANGE])
-// M, m, patch, prerelease, build
-// 1.2 - 3.4.5 => >=1.2.0 <=3.4.5
-// 1.2.3 - 3.4 => >=1.2.3 <3.5.0 Any 3.4.x will do
-// 1.2 - 3.4 => >=1.2.0 <3.5.0
-function hyphenReplace($0,
- from, fM, fm, fp, fpr, fb,
- to, tM, tm, tp, tpr, tb) {
-
- if (isX(fM))
- from = '';
- else if (isX(fm))
- from = '>=' + fM + '.0.0';
- else if (isX(fp))
- from = '>=' + fM + '.' + fm + '.0';
- else
- from = '>=' + from;
-
- if (isX(tM))
- to = '';
- else if (isX(tm))
- to = '<' + (+tM + 1) + '.0.0';
- else if (isX(tp))
- to = '<' + tM + '.' + (+tm + 1) + '.0';
- else if (tpr)
- to = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr;
- else
- to = '<=' + to;
-
- return (from + ' ' + to).trim();
-}
-
-
-// if ANY of the sets match ALL of its comparators, then pass
-Range.prototype.test = function(version) {
- if (!version)
- return false;
-
- if (typeof version === 'string')
- version = new SemVer(version, this.loose);
-
- for (var i = 0; i < this.set.length; i++) {
- if (testSet(this.set[i], version))
- return true;
- }
- return false;
-};
-
-function testSet(set, version) {
- for (var i = 0; i < set.length; i++) {
- if (!set[i].test(version))
- return false;
- }
-
- if (version.prerelease.length) {
- // Find the set of versions that are allowed to have prereleases
- // For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0
- // That should allow `1.2.3-pr.2` to pass.
- // However, `1.2.4-alpha.notready` should NOT be allowed,
- // even though it's within the range set by the comparators.
- for (var i = 0; i < set.length; i++) {
- debug(set[i].semver);
- if (set[i].semver === ANY)
- continue;
-
- if (set[i].semver.prerelease.length > 0) {
- var allowed = set[i].semver;
- if (allowed.major === version.major &&
- allowed.minor === version.minor &&
- allowed.patch === version.patch)
- return true;
- }
- }
-
- // Version has a -pre, but it's not one of the ones we like.
- return false;
- }
-
- return true;
-}
-
-exports.satisfies = satisfies;
-function satisfies(version, range, loose) {
- try {
- range = new Range(range, loose);
- } catch (er) {
- return false;
- }
- return range.test(version);
-}
-
-exports.maxSatisfying = maxSatisfying;
-function maxSatisfying(versions, range, loose) {
- var max = null;
- var maxSV = null;
- try {
- var rangeObj = new Range(range, loose);
- } catch (er) {
- return null;
- }
- versions.forEach(function (v) {
- if (rangeObj.test(v)) { // satisfies(v, range, loose)
- if (!max || maxSV.compare(v) === -1) { // compare(max, v, true)
- max = v;
- maxSV = new SemVer(max, loose);
- }
- }
- })
- return max;
-}
-
-exports.minSatisfying = minSatisfying;
-function minSatisfying(versions, range, loose) {
- var min = null;
- var minSV = null;
- try {
- var rangeObj = new Range(range, loose);
- } catch (er) {
- return null;
- }
- versions.forEach(function (v) {
- if (rangeObj.test(v)) { // satisfies(v, range, loose)
- if (!min || minSV.compare(v) === 1) { // compare(min, v, true)
- min = v;
- minSV = new SemVer(min, loose);
- }
- }
- })
- return min;
-}
-
-exports.validRange = validRange;
-function validRange(range, loose) {
- try {
- // Return '*' instead of '' so that truthiness works.
- // This will throw if it's invalid anyway
- return new Range(range, loose).range || '*';
- } catch (er) {
- return null;
- }
-}
-
-// Determine if version is less than all the versions possible in the range
-exports.ltr = ltr;
-function ltr(version, range, loose) {
- return outside(version, range, '<', loose);
-}
-
-// Determine if version is greater than all the versions possible in the range.
-exports.gtr = gtr;
-function gtr(version, range, loose) {
- return outside(version, range, '>', loose);
-}
-
-exports.outside = outside;
-function outside(version, range, hilo, loose) {
- version = new SemVer(version, loose);
- range = new Range(range, loose);
-
- var gtfn, ltefn, ltfn, comp, ecomp;
- switch (hilo) {
- case '>':
- gtfn = gt;
- ltefn = lte;
- ltfn = lt;
- comp = '>';
- ecomp = '>=';
- break;
- case '<':
- gtfn = lt;
- ltefn = gte;
- ltfn = gt;
- comp = '<';
- ecomp = '<=';
- break;
- default:
- throw new TypeError('Must provide a hilo val of "<" or ">"');
- }
-
- // If it satisfies the range it is not outside
- if (satisfies(version, range, loose)) {
- return false;
- }
-
- // From now on, variable terms are as if we're in "gtr" mode.
- // but note that everything is flipped for the "ltr" function.
-
- for (var i = 0; i < range.set.length; ++i) {
- var comparators = range.set[i];
-
- var high = null;
- var low = null;
-
- comparators.forEach(function(comparator) {
- if (comparator.semver === ANY) {
- comparator = new Comparator('>=0.0.0')
- }
- high = high || comparator;
- low = low || comparator;
- if (gtfn(comparator.semver, high.semver, loose)) {
- high = comparator;
- } else if (ltfn(comparator.semver, low.semver, loose)) {
- low = comparator;
- }
- });
-
- // If the edge version comparator has an operator then our version
- // isn't outside it
- if (high.operator === comp || high.operator === ecomp) {
- return false;
- }
-
- // If the lowest version comparator has an operator and our version
- // is less than it then it isn't higher than the range
- if ((!low.operator || low.operator === comp) &&
- ltefn(version, low.semver)) {
- return false;
- } else if (low.operator === ecomp && ltfn(version, low.semver)) {
- return false;
- }
- }
- return true;
-}
-
-exports.prerelease = prerelease;
-function prerelease(version, loose) {
- var parsed = parse(version, loose);
- return (parsed && parsed.prerelease.length) ? parsed.prerelease : null;
-}
-
-exports.intersects = intersects;
-function intersects(r1, r2, loose) {
- r1 = new Range(r1, loose)
- r2 = new Range(r2, loose)
- return r1.intersects(r2)
-}
-
-exports.coerce = coerce;
-function coerce(version) {
- if (version instanceof SemVer)
- return version;
-
- if (typeof version !== 'string')
- return null;
-
- var match = version.match(re[COERCE]);
-
- if (match == null)
- return null;
-
- return parse((match[1] || '0') + '.' + (match[2] || '0') + '.'
+ (match[3] || '0')); -} - - -/***/ }), -/* 23 */ -/***/ (function(module, exports) { - -module.exports = require("stream"); - -/***/ }), -/* 24 */ -/***/ (function(module, exports) { - -module.exports = require("url"); - -/***/ }), -/* 25 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subscription; }); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0__util_isArray__ = __webpack_require__(41); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__util_isObject__ = __webpack_require__(444); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__util_isFunction__ = __webpack_require__(154); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__util_tryCatch__ = __webpack_require__(56); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_errorObject__ = __webpack_require__(47); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__ = __webpack_require__(441); -/** PURE_IMPORTS_START _util_isArray,_util_isObject,_util_isFunction,_util_tryCatch,_util_errorObject,_util_UnsubscriptionError PURE_IMPORTS_END */ - - - - - - -var Subscription = /*@__PURE__*/ (function () { - function Subscription(unsubscribe) { - this.closed = false; - this._parent = null; - this._parents = null; - this._subscriptions = null; - if (unsubscribe) { - this._unsubscribe = unsubscribe; - } - } - Subscription.prototype.unsubscribe = function () { - var hasErrors = false; - var errors; - if (this.closed) { - return; - } - var _a = this, _parent = _a._parent, _parents = _a._parents, _unsubscribe = _a._unsubscribe, _subscriptions = _a._subscriptions; - this.closed = true; - this._parent = null; - this._parents = null; - this._subscriptions = null; - var index = -1; - var len = _parents ? _parents.length : 0; - while (_parent) { - _parent.remove(this); - _parent = ++index < len && _parents[index] || null; - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_2__util_isFunction__["a" /* isFunction */])(_unsubscribe)) { - var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(_unsubscribe).call(this); - if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { - hasErrors = true; - errors = errors || (__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */] ? 
- flattenUnsubscriptionErrors(__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e.errors) : [__WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e]); - } - } - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_0__util_isArray__["a" /* isArray */])(_subscriptions)) { - index = -1; - len = _subscriptions.length; - while (++index < len) { - var sub = _subscriptions[index]; - if (__webpack_require__.i(__WEBPACK_IMPORTED_MODULE_1__util_isObject__["a" /* isObject */])(sub)) { - var trial = __webpack_require__.i(__WEBPACK_IMPORTED_MODULE_3__util_tryCatch__["a" /* tryCatch */])(sub.unsubscribe).call(sub); - if (trial === __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */]) { - hasErrors = true; - errors = errors || []; - var err = __WEBPACK_IMPORTED_MODULE_4__util_errorObject__["a" /* errorObject */].e; - if (err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) { - errors = errors.concat(flattenUnsubscriptionErrors(err.errors)); - } - else { - errors.push(err); - } - } - } - } - } - if (hasErrors) { - throw new __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */](errors); - } - }; - Subscription.prototype.add = function (teardown) { - if (!teardown || (teardown === Subscription.EMPTY)) { - return Subscription.EMPTY; - } - if (teardown === this) { - return this; - } - var subscription = teardown; - switch (typeof teardown) { - case 'function': - subscription = new Subscription(teardown); - case 'object': - if (subscription.closed || typeof subscription.unsubscribe !== 'function') { - return subscription; - } - else if (this.closed) { - subscription.unsubscribe(); - return subscription; - } - else if (typeof subscription._addParent !== 'function') { - var tmp = subscription; - subscription = new Subscription(); - subscription._subscriptions = [tmp]; - } - break; - default: - throw new Error('unrecognized teardown ' + teardown + ' added to Subscription.'); - } - var subscriptions = this._subscriptions || (this._subscriptions = []); - subscriptions.push(subscription); - subscription._addParent(this); - return subscription; - }; - Subscription.prototype.remove = function (subscription) { - var subscriptions = this._subscriptions; - if (subscriptions) { - var subscriptionIndex = subscriptions.indexOf(subscription); - if (subscriptionIndex !== -1) { - subscriptions.splice(subscriptionIndex, 1); - } - } - }; - Subscription.prototype._addParent = function (parent) { - var _a = this, _parent = _a._parent, _parents = _a._parents; - if (!_parent || _parent === parent) { - this._parent = parent; - } - else if (!_parents) { - this._parents = [parent]; - } - else if (_parents.indexOf(parent) === -1) { - _parents.push(parent); - } - }; - Subscription.EMPTY = (function (empty) { - empty.closed = true; - return empty; - }(new Subscription())); - return Subscription; -}()); - -function flattenUnsubscriptionErrors(errors) { - return errors.reduce(function (errs, err) { return errs.concat((err instanceof __WEBPACK_IMPORTED_MODULE_5__util_UnsubscriptionError__["a" /* UnsubscriptionError */]) ? err.errors : err); }, []); -} -//# sourceMappingURL=Subscription.js.map - - -/***/ }), -/* 26 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2015 Joyent, Inc. 
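-
-// The compiled Subscription class that closes the module above is RxJS's
-// teardown container. A hypothetical usage sketch, assuming this bundle's
-// rxjs build is reachable via require:
-//
-//   var sub = new Subscription(function () { console.log('teardown A'); });
-//   sub.add(function () { console.log('teardown B'); });
-//   sub.unsubscribe(); // runs both teardowns; UnsubscriptionErrors are flattened
-//   sub.closed;        // true, so a second unsubscribe() is a no-op
-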
- -module.exports = { - bufferSplit: bufferSplit, - addRSAMissing: addRSAMissing, - calculateDSAPublic: calculateDSAPublic, - calculateED25519Public: calculateED25519Public, - calculateX25519Public: calculateX25519Public, - mpNormalize: mpNormalize, - mpDenormalize: mpDenormalize, - ecNormalize: ecNormalize, - countZeros: countZeros, - assertCompatible: assertCompatible, - isCompatible: isCompatible, - opensslKeyDeriv: opensslKeyDeriv, - opensshCipherInfo: opensshCipherInfo, - publicFromPrivateECDSA: publicFromPrivateECDSA, - zeroPadToLength: zeroPadToLength, - writeBitString: writeBitString, - readBitString: readBitString -}; - -var assert = __webpack_require__(16); -var Buffer = __webpack_require__(15).Buffer; -var PrivateKey = __webpack_require__(33); -var Key = __webpack_require__(27); -var crypto = __webpack_require__(11); -var algs = __webpack_require__(32); -var asn1 = __webpack_require__(66); - -var ec, jsbn; -var nacl; - -var MAX_CLASS_DEPTH = 3; - -function isCompatible(obj, klass, needVer) { - if (obj === null || typeof (obj) !== 'object') - return (false); - if (needVer === undefined) - needVer = klass.prototype._sshpkApiVersion; - if (obj instanceof klass && - klass.prototype._sshpkApiVersion[0] == needVer[0]) - return (true); - var proto = Object.getPrototypeOf(obj); - var depth = 0; - while (proto.constructor.name !== klass.name) { - proto = Object.getPrototypeOf(proto); - if (!proto || ++depth > MAX_CLASS_DEPTH) - return (false); - } - if (proto.constructor.name !== klass.name) - return (false); - var ver = proto._sshpkApiVersion; - if (ver === undefined) - ver = klass._oldVersionDetect(obj); - if (ver[0] != needVer[0] || ver[1] < needVer[1]) - return (false); - return (true); -} - -function assertCompatible(obj, klass, needVer, name) { - if (name === undefined) - name = 'object'; - assert.ok(obj, name + ' must not be null'); - assert.object(obj, name + ' must be an object'); - if (needVer === undefined) - needVer = klass.prototype._sshpkApiVersion; - if (obj instanceof klass && - klass.prototype._sshpkApiVersion[0] == needVer[0]) - return; - var proto = Object.getPrototypeOf(obj); - var depth = 0; - while (proto.constructor.name !== klass.name) { - proto = Object.getPrototypeOf(proto); - assert.ok(proto && ++depth <= MAX_CLASS_DEPTH, - name + ' must be a ' + klass.name + ' instance'); - } - assert.strictEqual(proto.constructor.name, klass.name, - name + ' must be a ' + klass.name + ' instance'); - var ver = proto._sshpkApiVersion; - if (ver === undefined) - ver = klass._oldVersionDetect(obj); - assert.ok(ver[0] == needVer[0] && ver[1] >= needVer[1], - name + ' must be compatible with ' + klass.name + ' klass ' + - 'version ' + needVer[0] + '.' 
+ needVer[1]); -} - -var CIPHER_LEN = { - 'des-ede3-cbc': { key: 7, iv: 8 }, - 'aes-128-cbc': { key: 16, iv: 16 } -}; -var PKCS5_SALT_LEN = 8; - -function opensslKeyDeriv(cipher, salt, passphrase, count) { - assert.buffer(salt, 'salt'); - assert.buffer(passphrase, 'passphrase'); - assert.number(count, 'iteration count'); - - var clen = CIPHER_LEN[cipher]; - assert.object(clen, 'supported cipher'); - - salt = salt.slice(0, PKCS5_SALT_LEN); - - var D, D_prev, bufs; - var material = Buffer.alloc(0); - while (material.length < clen.key + clen.iv) { - bufs = []; - if (D_prev) - bufs.push(D_prev); - bufs.push(passphrase); - bufs.push(salt); - D = Buffer.concat(bufs); - for (var j = 0; j < count; ++j) - D = crypto.createHash('md5').update(D).digest(); - material = Buffer.concat([material, D]); - D_prev = D; - } - - return ({ - key: material.slice(0, clen.key), - iv: material.slice(clen.key, clen.key + clen.iv) - }); -} - -/* Count leading zero bits on a buffer */ -function countZeros(buf) { - var o = 0, obit = 8; - while (o < buf.length) { - var mask = (1 << obit); - if ((buf[o] & mask) === mask) - break; - obit--; - if (obit < 0) { - o++; - obit = 8; - } - } - return (o*8 + (8 - obit) - 1); -} - -function bufferSplit(buf, chr) { - assert.buffer(buf); - assert.string(chr); - - var parts = []; - var lastPart = 0; - var matches = 0; - for (var i = 0; i < buf.length; ++i) { - if (buf[i] === chr.charCodeAt(matches)) - ++matches; - else if (buf[i] === chr.charCodeAt(0)) - matches = 1; - else - matches = 0; - - if (matches >= chr.length) { - var newPart = i + 1; - parts.push(buf.slice(lastPart, newPart - matches)); - lastPart = newPart; - matches = 0; - } - } - if (lastPart <= buf.length) - parts.push(buf.slice(lastPart, buf.length)); - - return (parts); -} - -function ecNormalize(buf, addZero) { - assert.buffer(buf); - if (buf[0] === 0x00 && buf[1] === 0x04) { - if (addZero) - return (buf); - return (buf.slice(1)); - } else if (buf[0] === 0x04) { - if (!addZero) - return (buf); - } else { - while (buf[0] === 0x00) - buf = buf.slice(1); - if (buf[0] === 0x02 || buf[0] === 0x03) - throw (new Error('Compressed elliptic curve points ' + - 'are not supported')); - if (buf[0] !== 0x04) - throw (new Error('Not a valid elliptic curve point')); - if (!addZero) - return (buf); - } - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x0; - buf.copy(b, 1); - return (b); -} - -function readBitString(der, tag) { - if (tag === undefined) - tag = asn1.Ber.BitString; - var buf = der.readString(tag, true); - assert.strictEqual(buf[0], 0x00, 'bit strings with unused bits are ' + - 'not supported (0x' + buf[0].toString(16) + ')'); - return (buf.slice(1)); -} - -function writeBitString(der, buf, tag) { - if (tag === undefined) - tag = asn1.Ber.BitString; - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - buf.copy(b, 1); - der.writeBuffer(b, tag); -} - -function mpNormalize(buf) { - assert.buffer(buf); - while (buf.length > 1 && buf[0] === 0x00 && (buf[1] & 0x80) === 0x00) - buf = buf.slice(1); - if ((buf[0] & 0x80) === 0x80) { - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - buf.copy(b, 1); - buf = b; - } - return (buf); -} - -function mpDenormalize(buf) { - assert.buffer(buf); - while (buf.length > 1 && buf[0] === 0x00) - buf = buf.slice(1); - return (buf); -} - -function zeroPadToLength(buf, len) { - assert.buffer(buf); - assert.number(len); - while (buf.length > len) { - assert.equal(buf[0], 0x00); - buf = buf.slice(1); - } - while (buf.length < len) { - var b = Buffer.alloc(buf.length + 1); - b[0] = 0x00; - 
buf.copy(b, 1); - buf = b; - } - return (buf); -} - -function bigintToMpBuf(bigint) { - var buf = Buffer.from(bigint.toByteArray()); - buf = mpNormalize(buf); - return (buf); -} - -function calculateDSAPublic(g, p, x) { - assert.buffer(g); - assert.buffer(p); - assert.buffer(x); - try { - var bigInt = __webpack_require__(81).BigInteger; - } catch (e) { - throw (new Error('To load a PKCS#8 format DSA private key, ' + - 'the node jsbn library is required.')); - } - g = new bigInt(g); - p = new bigInt(p); - x = new bigInt(x); - var y = g.modPow(x, p); - var ybuf = bigintToMpBuf(y); - return (ybuf); -} - -function calculateED25519Public(k) { - assert.buffer(k); - - if (nacl === undefined) - nacl = __webpack_require__(76); - - var kp = nacl.sign.keyPair.fromSeed(new Uint8Array(k)); - return (Buffer.from(kp.publicKey)); -} - -function calculateX25519Public(k) { - assert.buffer(k); - - if (nacl === undefined) - nacl = __webpack_require__(76); - - var kp = nacl.box.keyPair.fromSeed(new Uint8Array(k)); - return (Buffer.from(kp.publicKey)); -} - -function addRSAMissing(key) { - assert.object(key); - assertCompatible(key, PrivateKey, [1, 1]); - try { - var bigInt = __webpack_require__(81).BigInteger; - } catch (e) { - throw (new Error('To write a PEM private key from ' + - 'this source, the node jsbn lib is required.')); - } - - var d = new bigInt(key.part.d.data); - var buf; - - if (!key.part.dmodp) { - var p = new bigInt(key.part.p.data); - var dmodp = d.mod(p.subtract(1)); - - buf = bigintToMpBuf(dmodp); - key.part.dmodp = {name: 'dmodp', data: buf}; - key.parts.push(key.part.dmodp); - } - if (!key.part.dmodq) { - var q = new bigInt(key.part.q.data); - var dmodq = d.mod(q.subtract(1)); - - buf = bigintToMpBuf(dmodq); - key.part.dmodq = {name: 'dmodq', data: buf}; - key.parts.push(key.part.dmodq); - } -} - -function publicFromPrivateECDSA(curveName, priv) { - assert.string(curveName, 'curveName'); - assert.buffer(priv); - if (ec === undefined) - ec = __webpack_require__(139); - if (jsbn === undefined) - jsbn = __webpack_require__(81).BigInteger; - var params = algs.curves[curveName]; - var p = new jsbn(params.p); - var a = new jsbn(params.a); - var b = new jsbn(params.b); - var curve = new ec.ECCurveFp(p, a, b); - var G = curve.decodePointHex(params.G.toString('hex')); - - var d = new jsbn(mpNormalize(priv)); - var pub = G.multiply(d); - pub = Buffer.from(curve.encodePointHex(pub), 'hex'); - - var parts = []; - parts.push({name: 'curve', data: Buffer.from(curveName)}); - parts.push({name: 'Q', data: pub}); - - var key = new Key({type: 'ecdsa', curve: curve, parts: parts}); - return (key); -} - -function opensshCipherInfo(cipher) { - var inf = {}; - switch (cipher) { - case '3des-cbc': - inf.keySize = 24; - inf.blockSize = 8; - inf.opensslName = 'des-ede3-cbc'; - break; - case 'blowfish-cbc': - inf.keySize = 16; - inf.blockSize = 8; - inf.opensslName = 'bf-cbc'; - break; - case 'aes128-cbc': - case 'aes128-ctr': - case 'aes128-gcm@openssh.com': - inf.keySize = 16; - inf.blockSize = 16; - inf.opensslName = 'aes-128-' + cipher.slice(7, 10); - break; - case 'aes192-cbc': - case 'aes192-ctr': - case 'aes192-gcm@openssh.com': - inf.keySize = 24; - inf.blockSize = 16; - inf.opensslName = 'aes-192-' + cipher.slice(7, 10); - break; - case 'aes256-cbc': - case 'aes256-ctr': - case 'aes256-gcm@openssh.com': - inf.keySize = 32; - inf.blockSize = 16; - inf.opensslName = 'aes-256-' + cipher.slice(7, 10); - break; - default: - throw (new Error( - 'Unsupported openssl cipher "' + cipher + '"')); - } - return 
(inf); -} - - -/***/ }), -/* 27 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2017 Joyent, Inc. - -module.exports = Key; - -var assert = __webpack_require__(16); -var algs = __webpack_require__(32); -var crypto = __webpack_require__(11); -var Fingerprint = __webpack_require__(156); -var Signature = __webpack_require__(75); -var DiffieHellman = __webpack_require__(325).DiffieHellman; -var errs = __webpack_require__(74); -var utils = __webpack_require__(26); -var PrivateKey = __webpack_require__(33); -var edCompat; - -try { - edCompat = __webpack_require__(454); -} catch (e) { - /* Just continue through, and bail out if we try to use it. */ -} - -var InvalidAlgorithmError = errs.InvalidAlgorithmError; -var KeyParseError = errs.KeyParseError; - -var formats = {}; -formats['auto'] = __webpack_require__(455); -formats['pem'] = __webpack_require__(86); -formats['pkcs1'] = __webpack_require__(327); -formats['pkcs8'] = __webpack_require__(157); -formats['rfc4253'] = __webpack_require__(103); -formats['ssh'] = __webpack_require__(456); -formats['ssh-private'] = __webpack_require__(192); -formats['openssh'] = formats['ssh-private']; -formats['dnssec'] = __webpack_require__(326); - -function Key(opts) { - assert.object(opts, 'options'); - assert.arrayOfObject(opts.parts, 'options.parts'); - assert.string(opts.type, 'options.type'); - assert.optionalString(opts.comment, 'options.comment'); - - var algInfo = algs.info[opts.type]; - if (typeof (algInfo) !== 'object') - throw (new InvalidAlgorithmError(opts.type)); - - var partLookup = {}; - for (var i = 0; i < opts.parts.length; ++i) { - var part = opts.parts[i]; - partLookup[part.name] = part; - } - - this.type = opts.type; - this.parts = opts.parts; - this.part = partLookup; - this.comment = undefined; - this.source = opts.source; - - /* for speeding up hashing/fingerprint operations */ - this._rfc4253Cache = opts._rfc4253Cache; - this._hashCache = {}; - - var sz; - this.curve = undefined; - if (this.type === 'ecdsa') { - var curve = this.part.curve.data.toString(); - this.curve = curve; - sz = algs.curves[curve].size; - } else if (this.type === 'ed25519' || this.type === 'curve25519') { - sz = 256; - this.curve = 'curve25519'; - } else { - var szPart = this.part[algInfo.sizePart]; - sz = szPart.data.length; - sz = sz * 8 - utils.countZeros(szPart.data); - } - this.size = sz; -} - -Key.formats = formats; - -Key.prototype.toBuffer = function (format, options) { - if (format === undefined) - format = 'ssh'; - assert.string(format, 'format'); - assert.object(formats[format], 'formats[format]'); - assert.optionalObject(options, 'options'); - - if (format === 'rfc4253') { - if (this._rfc4253Cache === undefined) - this._rfc4253Cache = formats['rfc4253'].write(this); - return (this._rfc4253Cache); - } - - return (formats[format].write(this, options)); -}; - -Key.prototype.toString = function (format, options) { - return (this.toBuffer(format, options).toString()); -}; - -Key.prototype.hash = function (algo) { - assert.string(algo, 'algorithm'); - algo = algo.toLowerCase(); - if (algs.hashAlgs[algo] === undefined) - throw (new InvalidAlgorithmError(algo)); - - if (this._hashCache[algo]) - return (this._hashCache[algo]); - var hash = crypto.createHash(algo). 
- update(this.toBuffer('rfc4253')).digest(); - this._hashCache[algo] = hash; - return (hash); -}; - -Key.prototype.fingerprint = function (algo) { - if (algo === undefined) - algo = 'sha256'; - assert.string(algo, 'algorithm'); - var opts = { - type: 'key', - hash: this.hash(algo), - algorithm: algo - }; - return (new Fingerprint(opts)); -}; - -Key.prototype.defaultHashAlgorithm = function () { - var hashAlgo = 'sha1'; - if (this.type === 'rsa') - hashAlgo = 'sha256'; - if (this.type === 'dsa' && this.size > 1024) - hashAlgo = 'sha256'; - if (this.type === 'ed25519') - hashAlgo = 'sha512'; - if (this.type === 'ecdsa') { - if (this.size <= 256) - hashAlgo = 'sha256'; - else if (this.size <= 384) - hashAlgo = 'sha384'; - else - hashAlgo = 'sha512'; - } - return (hashAlgo); -}; - -Key.prototype.createVerify = function (hashAlgo) { - if (hashAlgo === undefined) - hashAlgo = this.defaultHashAlgorithm(); - assert.string(hashAlgo, 'hash algorithm'); - - /* ED25519 is not supported by OpenSSL, use a javascript impl. */ - if (this.type === 'ed25519' && edCompat !== undefined) - return (new edCompat.Verifier(this, hashAlgo)); - if (this.type === 'curve25519') - throw (new Error('Curve25519 keys are not suitable for ' + - 'signing or verification')); - - var v, nm, err; - try { - nm = hashAlgo.toUpperCase(); - v = crypto.createVerify(nm); - } catch (e) { - err = e; - } - if (v === undefined || (err instanceof Error && - err.message.match(/Unknown message digest/))) { - nm = 'RSA-'; - nm += hashAlgo.toUpperCase(); - v = crypto.createVerify(nm); - } - assert.ok(v, 'failed to create verifier'); - var oldVerify = v.verify.bind(v); - var key = this.toBuffer('pkcs8'); - var curve = this.curve; - var self = this; - v.verify = function (signature, fmt) { - if (Signature.isSignature(signature, [2, 0])) { - if (signature.type !== self.type) - return (false); - if (signature.hashAlgorithm && - signature.hashAlgorithm !== hashAlgo) - return (false); - if (signature.curve && self.type === 'ecdsa' && - signature.curve !== curve) - return (false); - return (oldVerify(key, signature.toBuffer('asn1'))); - - } else if (typeof (signature) === 'string' || - Buffer.isBuffer(signature)) { - return (oldVerify(key, signature, fmt)); - - /* - * Avoid doing this on valid arguments, walking the prototype - * chain can be quite slow. 
- */ - } else if (Signature.isSignature(signature, [1, 0])) { - throw (new Error('signature was created by too old ' + - 'a version of sshpk and cannot be verified')); - - } else { - throw (new TypeError('signature must be a string, ' + - 'Buffer, or Signature object')); - } - }; - return (v); -}; - -Key.prototype.createDiffieHellman = function () { - if (this.type === 'rsa') - throw (new Error('RSA keys do not support Diffie-Hellman')); - - return (new DiffieHellman(this)); -}; -Key.prototype.createDH = Key.prototype.createDiffieHellman; - -Key.parse = function (data, format, options) { - if (typeof (data) !== 'string') - assert.buffer(data, 'data'); - if (format === undefined) - format = 'auto'; - assert.string(format, 'format'); - if (typeof (options) === 'string') - options = { filename: options }; - assert.optionalObject(options, 'options'); - if (options === undefined) - options = {}; - assert.optionalString(options.filename, 'options.filename'); - if (options.filename === undefined) - options.filename = '(unnamed)'; - - assert.object(formats[format], 'formats[format]'); - - try { - var k = formats[format].read(data, options); - if (k instanceof PrivateKey) - k = k.toPublic(); - if (!k.comment) - k.comment = options.filename; - return (k); - } catch (e) { - if (e.name === 'KeyEncryptedError') - throw (e); - throw (new KeyParseError(options.filename, format, e)); - } -}; - -Key.isKey = function (obj, ver) { - return (utils.isCompatible(obj, Key, ver)); -}; - -/* - * API versions for Key: - * [1,0] -- initial ver, may take Signature for createVerify or may not - * [1,1] -- added pkcs1, pkcs8 formats - * [1,2] -- added auto, ssh-private, openssh formats - * [1,3] -- added defaultHashAlgorithm - * [1,4] -- added ed support, createDH - * [1,5] -- first explicitly tagged version - * [1,6] -- changed ed25519 part names - */ -Key.prototype._sshpkApiVersion = [1, 6]; - -Key._oldVersionDetect = function (obj) { - assert.func(obj.toBuffer); - assert.func(obj.fingerprint); - if (obj.createDH) - return ([1, 4]); - if (obj.defaultHashAlgorithm) - return ([1, 3]); - if (obj.formats['auto']) - return ([1, 2]); - if (obj.formats['pkcs1']) - return ([1, 1]); - return ([1, 0]); -}; - - -/***/ }), -/* 28 */ -/***/ (function(module, exports) { - -module.exports = require("assert"); - -/***/ }), -/* 29 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.default = nullify; -function nullify(obj = {}) { - if (Array.isArray(obj)) { - for (var _iterator = obj, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? 
_iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const item = _ref; - - nullify(item); - } - } else if (obj !== null && typeof obj === 'object' || typeof obj === 'function') { - Object.setPrototypeOf(obj, null); - - // for..in can only be applied to 'object', not 'function' - if (typeof obj === 'object') { - for (const key in obj) { - nullify(obj[key]); - } - } - } - - return obj; -} - -/***/ }), -/* 30 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - -const escapeStringRegexp = __webpack_require__(388); -const ansiStyles = __webpack_require__(506); -const stdoutColor = __webpack_require__(598).stdout; - -const template = __webpack_require__(599); - -const isSimpleWindowsTerm = process.platform === 'win32' && !(process.env.TERM || '').toLowerCase().startsWith('xterm'); - -// `supportsColor.level` → `ansiStyles.color[name]` mapping -const levelMapping = ['ansi', 'ansi', 'ansi256', 'ansi16m']; - -// `color-convert` models to exclude from the Chalk API due to conflicts and such -const skipModels = new Set(['gray']); - -const styles = Object.create(null); - -function applyOptions(obj, options) { - options = options || {}; - - // Detect level if not set manually - const scLevel = stdoutColor ? stdoutColor.level : 0; - obj.level = options.level === undefined ? scLevel : options.level; - obj.enabled = 'enabled' in options ? options.enabled : obj.level > 0; -} - -function Chalk(options) { - // We check for this.template here since calling `chalk.constructor()` - // by itself will have a `this` of a previously constructed chalk object - if (!this || !(this instanceof Chalk) || this.template) { - const chalk = {}; - applyOptions(chalk, options); - - chalk.template = function () { - const args = [].slice.call(arguments); - return chalkTag.apply(null, [chalk.template].concat(args)); - }; - - Object.setPrototypeOf(chalk, Chalk.prototype); - Object.setPrototypeOf(chalk.template, chalk); - - chalk.template.constructor = Chalk; - - return chalk.template; - } - - applyOptions(this, options); -} - -// Use bright blue on Windows as the normal blue color is illegible -if (isSimpleWindowsTerm) { - ansiStyles.blue.open = '\u001B[94m'; -} - -for (const key of Object.keys(ansiStyles)) { - ansiStyles[key].closeRe = new RegExp(escapeStringRegexp(ansiStyles[key].close), 'g'); - - styles[key] = { - get() { - const codes = ansiStyles[key]; - return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, key); - } - }; -} - -styles.visible = { - get() { - return build.call(this, this._styles || [], true, 'visible'); - } -}; - -ansiStyles.color.closeRe = new RegExp(escapeStringRegexp(ansiStyles.color.close), 'g'); -for (const model of Object.keys(ansiStyles.color.ansi)) { - if (skipModels.has(model)) { - continue; - } - - styles[model] = { - get() { - const level = this.level; - return function () { - const open = ansiStyles.color[levelMapping[level]][model].apply(null, arguments); - const codes = { - open, - close: ansiStyles.color.close, - closeRe: ansiStyles.color.closeRe - }; - return build.call(this, this._styles ? 
this._styles.concat(codes) : [codes], this._empty, model);
- };
- }
- };
-}
-
-ansiStyles.bgColor.closeRe = new RegExp(escapeStringRegexp(ansiStyles.bgColor.close), 'g');
-for (const model of Object.keys(ansiStyles.bgColor.ansi)) {
- if (skipModels.has(model)) {
- continue;
- }
-
- const bgModel = 'bg' + model[0].toUpperCase() + model.slice(1);
- styles[bgModel] = {
- get() {
- const level = this.level;
- return function () {
- const open = ansiStyles.bgColor[levelMapping[level]][model].apply(null, arguments);
- const codes = {
- open,
- close: ansiStyles.bgColor.close,
- closeRe: ansiStyles.bgColor.closeRe
- };
- return build.call(this, this._styles ? this._styles.concat(codes) : [codes], this._empty, model);
- };
- }
- };
-}
-
-const proto = Object.defineProperties(() => {}, styles);
-
-function build(_styles, _empty, key) {
- const builder = function () {
- return applyStyle.apply(builder, arguments);
- };
-
- builder._styles = _styles;
- builder._empty = _empty;
-
- const self = this;
-
- Object.defineProperty(builder, 'level', {
- enumerable: true,
- get() {
- return self.level;
- },
- set(level) {
- self.level = level;
- }
- });
-
- Object.defineProperty(builder, 'enabled', {
- enumerable: true,
- get() {
- return self.enabled;
- },
- set(enabled) {
- self.enabled = enabled;
- }
- });
-
- // See below for fix regarding invisible grey/dim combination on Windows
- builder.hasGrey = this.hasGrey || key === 'gray' || key === 'grey';
-
- // `__proto__` is used because we must return a function, but there is
- // no way to create a function with a different prototype
- builder.__proto__ = proto; // eslint-disable-line no-proto
-
- return builder;
-}
-
-function applyStyle() {
- // Support varargs, but simply cast to string in case there's only one arg
- const args = arguments;
- const argsLen = args.length;
- let str = String(arguments[0]);
-
- if (argsLen === 0) {
- return '';
- }
-
- if (argsLen > 1) {
- // Don't slice `arguments`, it prevents V8 optimizations
- for (let a = 1; a < argsLen; a++) {
- str += ' ' + args[a];
- }
- }
-
- if (!this.enabled || this.level <= 0 || !str) {
- return this._empty ? '' : str;
- }
-
- // Turns out that on Windows dimmed gray text becomes invisible in cmd.exe,
- // see https://github.com/chalk/chalk/issues/58
- // If we're on Windows and we're dealing with a gray color, temporarily make 'dim' a noop.
- const originalDim = ansiStyles.dim.open;
- if (isSimpleWindowsTerm && this.hasGrey) {
- ansiStyles.dim.open = '';
- }
-
- for (const code of this._styles.slice().reverse()) {
- // Replace any instances already present with a re-opening code
- // otherwise only the part of the string until said closing code
- // will be colored, and the rest will simply be 'plain'.
- str = code.open + str.replace(code.closeRe, code.open) + code.close;
-
- // Close the styling before a linebreak and reopen
- // after next line to fix a bleed issue on macOS
- // https://github.com/chalk/chalk/pull/92
- str = str.replace(/\r?\n/g, `${code.close}$&${code.open}`);
- }
-
- // Reset the original `dim` if we changed it to work around the Windows dimmed gray issue
- ansiStyles.dim.open = originalDim;
-
- return str;
-}
-
-function chalkTag(chalk, strings) {
- if (!Array.isArray(strings)) {
- // If chalk() was called by itself or with a string,
- // return the string itself as a string.
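-
- // The builder/getter machinery above is what the public chained API rides
- // on. A hypothetical usage sketch, assuming this chalk build is loaded as
- // `chalk` (`ms` is an illustrative variable, not defined here):
- //
- //   chalk.red('error')                     // string wrapped in ANSI open/close codes
- //   chalk.bold.bgBlue('status')            // styles accumulate through the getters
- //   chalk`{green ok} took {bold ${ms}ms}`  // tagged templates land in this function
- //   chalk.level = 0                        // applyStyle above then passes text through
-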
- return [].slice.call(arguments, 1).join(' '); - } - - const args = [].slice.call(arguments, 2); - const parts = [strings.raw[0]]; - - for (let i = 1; i < strings.length; i++) { - parts.push(String(args[i - 1]).replace(/[{}\\]/g, '\\$&')); - parts.push(String(strings.raw[i])); - } - - return template(chalk, parts.join('')); -} - -Object.defineProperties(Chalk.prototype, styles); - -module.exports = Chalk(); // eslint-disable-line new-cap -module.exports.supportsColor = stdoutColor; -module.exports.default = module.exports; // For TypeScript - - -/***/ }), -/* 31 */ -/***/ (function(module, exports) { - -var core = module.exports = { version: '2.5.7' }; -if (typeof __e == 'number') __e = core; // eslint-disable-line no-undef - - -/***/ }), -/* 32 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2015 Joyent, Inc. - -var Buffer = __webpack_require__(15).Buffer; - -var algInfo = { - 'dsa': { - parts: ['p', 'q', 'g', 'y'], - sizePart: 'p' - }, - 'rsa': { - parts: ['e', 'n'], - sizePart: 'n' - }, - 'ecdsa': { - parts: ['curve', 'Q'], - sizePart: 'Q' - }, - 'ed25519': { - parts: ['A'], - sizePart: 'A' - } -}; -algInfo['curve25519'] = algInfo['ed25519']; - -var algPrivInfo = { - 'dsa': { - parts: ['p', 'q', 'g', 'y', 'x'] - }, - 'rsa': { - parts: ['n', 'e', 'd', 'iqmp', 'p', 'q'] - }, - 'ecdsa': { - parts: ['curve', 'Q', 'd'] - }, - 'ed25519': { - parts: ['A', 'k'] - } -}; -algPrivInfo['curve25519'] = algPrivInfo['ed25519']; - -var hashAlgs = { - 'md5': true, - 'sha1': true, - 'sha256': true, - 'sha384': true, - 'sha512': true -}; - -/* - * Taken from - * http://csrc.nist.gov/groups/ST/toolkit/documents/dss/NISTReCur.pdf - */ -var curves = { - 'nistp256': { - size: 256, - pkcs8oid: '1.2.840.10045.3.1.7', - p: Buffer.from(('00' + - 'ffffffff 00000001 00000000 00000000' + - '00000000 ffffffff ffffffff ffffffff'). - replace(/ /g, ''), 'hex'), - a: Buffer.from(('00' + - 'FFFFFFFF 00000001 00000000 00000000' + - '00000000 FFFFFFFF FFFFFFFF FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(( - '5ac635d8 aa3a93e7 b3ebbd55 769886bc' + - '651d06b0 cc53b0f6 3bce3c3e 27d2604b'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'c49d3608 86e70493 6a6678e1 139d26b7' + - '819f7e90'). - replace(/ /g, ''), 'hex'), - n: Buffer.from(('00' + - 'ffffffff 00000000 ffffffff ffffffff' + - 'bce6faad a7179e84 f3b9cac2 fc632551'). - replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - '6b17d1f2 e12c4247 f8bce6e5 63a440f2' + - '77037d81 2deb33a0 f4a13945 d898c296' + - '4fe342e2 fe1a7f9b 8ee7eb4a 7c0f9e16' + - '2bce3357 6b315ece cbb64068 37bf51f5'). - replace(/ /g, ''), 'hex') - }, - 'nistp384': { - size: 384, - pkcs8oid: '1.3.132.0.34', - p: Buffer.from(('00' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff fffffffe' + - 'ffffffff 00000000 00000000 ffffffff'). - replace(/ /g, ''), 'hex'), - a: Buffer.from(('00' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE' + - 'FFFFFFFF 00000000 00000000 FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(( - 'b3312fa7 e23ee7e4 988e056b e3f82d19' + - '181d9c6e fe814112 0314088f 5013875a' + - 'c656398d 8a2ed19d 2a85c8ed d3ec2aef'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'a335926a a319a27a 1d00896a 6773a482' + - '7acdac73'). - replace(/ /g, ''), 'hex'), - n: Buffer.from(('00' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff c7634d81 f4372ddf' + - '581a0db2 48b0a77a ecec196a ccc52973'). 
- replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - 'aa87ca22 be8b0537 8eb1c71e f320ad74' + - '6e1d3b62 8ba79b98 59f741e0 82542a38' + - '5502f25d bf55296c 3a545e38 72760ab7' + - '3617de4a 96262c6f 5d9e98bf 9292dc29' + - 'f8f41dbd 289a147c e9da3113 b5f0b8c0' + - '0a60b1ce 1d7e819d 7a431d7c 90ea0e5f'). - replace(/ /g, ''), 'hex') - }, - 'nistp521': { - size: 521, - pkcs8oid: '1.3.132.0.35', - p: Buffer.from(( - '01ffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffff').replace(/ /g, ''), 'hex'), - a: Buffer.from(('01FF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF' + - 'FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFC'). - replace(/ /g, ''), 'hex'), - b: Buffer.from(('51' + - '953eb961 8e1c9a1f 929a21a0 b68540ee' + - 'a2da725b 99b315f3 b8b48991 8ef109e1' + - '56193951 ec7e937b 1652c0bd 3bb1bf07' + - '3573df88 3d2c34f1 ef451fd4 6b503f00'). - replace(/ /g, ''), 'hex'), - s: Buffer.from(('00' + - 'd09e8800 291cb853 96cc6717 393284aa' + - 'a0da64ba').replace(/ /g, ''), 'hex'), - n: Buffer.from(('01ff' + - 'ffffffff ffffffff ffffffff ffffffff' + - 'ffffffff ffffffff ffffffff fffffffa' + - '51868783 bf2f966b 7fcc0148 f709a5d0' + - '3bb5c9b8 899c47ae bb6fb71e 91386409'). - replace(/ /g, ''), 'hex'), - G: Buffer.from(('04' + - '00c6 858e06b7 0404e9cd 9e3ecb66 2395b442' + - '9c648139 053fb521 f828af60 6b4d3dba' + - 'a14b5e77 efe75928 fe1dc127 a2ffa8de' + - '3348b3c1 856a429b f97e7e31 c2e5bd66' + - '0118 39296a78 9a3bc004 5c8a5fb4 2c7d1bd9' + - '98f54449 579b4468 17afbd17 273e662c' + - '97ee7299 5ef42640 c550b901 3fad0761' + - '353c7086 a272c240 88be9476 9fd16650'). - replace(/ /g, ''), 'hex') - } -}; - -module.exports = { - info: algInfo, - privInfo: algPrivInfo, - hashAlgs: hashAlgs, - curves: curves -}; - - -/***/ }), -/* 33 */ -/***/ (function(module, exports, __webpack_require__) { - -// Copyright 2017 Joyent, Inc. - -module.exports = PrivateKey; - -var assert = __webpack_require__(16); -var Buffer = __webpack_require__(15).Buffer; -var algs = __webpack_require__(32); -var crypto = __webpack_require__(11); -var Fingerprint = __webpack_require__(156); -var Signature = __webpack_require__(75); -var errs = __webpack_require__(74); -var util = __webpack_require__(3); -var utils = __webpack_require__(26); -var dhe = __webpack_require__(325); -var generateECDSA = dhe.generateECDSA; -var generateED25519 = dhe.generateED25519; -var edCompat; -var nacl; - -try { - edCompat = __webpack_require__(454); -} catch (e) { - /* Just continue through, and bail out if we try to use it. 
*/ -} - -var Key = __webpack_require__(27); - -var InvalidAlgorithmError = errs.InvalidAlgorithmError; -var KeyParseError = errs.KeyParseError; -var KeyEncryptedError = errs.KeyEncryptedError; - -var formats = {}; -formats['auto'] = __webpack_require__(455); -formats['pem'] = __webpack_require__(86); -formats['pkcs1'] = __webpack_require__(327); -formats['pkcs8'] = __webpack_require__(157); -formats['rfc4253'] = __webpack_require__(103); -formats['ssh-private'] = __webpack_require__(192); -formats['openssh'] = formats['ssh-private']; -formats['ssh'] = formats['ssh-private']; -formats['dnssec'] = __webpack_require__(326); - -function PrivateKey(opts) { - assert.object(opts, 'options'); - Key.call(this, opts); - - this._pubCache = undefined; -} -util.inherits(PrivateKey, Key); - -PrivateKey.formats = formats; - -PrivateKey.prototype.toBuffer = function (format, options) { - if (format === undefined) - format = 'pkcs1'; - assert.string(format, 'format'); - assert.object(formats[format], 'formats[format]'); - assert.optionalObject(options, 'options'); - - return (formats[format].write(this, options)); -}; - -PrivateKey.prototype.hash = function (algo) { - return (this.toPublic().hash(algo)); -}; - -PrivateKey.prototype.toPublic = function () { - if (this._pubCache) - return (this._pubCache); - - var algInfo = algs.info[this.type]; - var pubParts = []; - for (var i = 0; i < algInfo.parts.length; ++i) { - var p = algInfo.parts[i]; - pubParts.push(this.part[p]); - } - - this._pubCache = new Key({ - type: this.type, - source: this, - parts: pubParts - }); - if (this.comment) - this._pubCache.comment = this.comment; - return (this._pubCache); -}; - -PrivateKey.prototype.derive = function (newType) { - assert.string(newType, 'type'); - var priv, pub, pair; - - if (this.type === 'ed25519' && newType === 'curve25519') { - if (nacl === undefined) - nacl = __webpack_require__(76); - - priv = this.part.k.data; - if (priv[0] === 0x00) - priv = priv.slice(1); - - pair = nacl.box.keyPair.fromSecretKey(new Uint8Array(priv)); - pub = Buffer.from(pair.publicKey); - - return (new PrivateKey({ - type: 'curve25519', - parts: [ - { name: 'A', data: utils.mpNormalize(pub) }, - { name: 'k', data: utils.mpNormalize(priv) } - ] - })); - } else if (this.type === 'curve25519' && newType === 'ed25519') { - if (nacl === undefined) - nacl = __webpack_require__(76); - - priv = this.part.k.data; - if (priv[0] === 0x00) - priv = priv.slice(1); - - pair = nacl.sign.keyPair.fromSeed(new Uint8Array(priv)); - pub = Buffer.from(pair.publicKey); - - return (new PrivateKey({ - type: 'ed25519', - parts: [ - { name: 'A', data: utils.mpNormalize(pub) }, - { name: 'k', data: utils.mpNormalize(priv) } - ] - })); - } - throw (new Error('Key derivation not supported from ' + this.type + - ' to ' + newType)); -}; - -PrivateKey.prototype.createVerify = function (hashAlgo) { - return (this.toPublic().createVerify(hashAlgo)); -}; - -PrivateKey.prototype.createSign = function (hashAlgo) { - if (hashAlgo === undefined) - hashAlgo = this.defaultHashAlgorithm(); - assert.string(hashAlgo, 'hash algorithm'); - - /* ED25519 is not supported by OpenSSL, use a javascript impl. 
*/ - if (this.type === 'ed25519' && edCompat !== undefined) - return (new edCompat.Signer(this, hashAlgo)); - if (this.type === 'curve25519') - throw (new Error('Curve25519 keys are not suitable for ' + - 'signing or verification')); - - var v, nm, err; - try { - nm = hashAlgo.toUpperCase(); - v = crypto.createSign(nm); - } catch (e) { - err = e; - } - if (v === undefined || (err instanceof Error && - err.message.match(/Unknown message digest/))) { - nm = 'RSA-'; - nm += hashAlgo.toUpperCase(); - v = crypto.createSign(nm); - } - assert.ok(v, 'failed to create verifier'); - var oldSign = v.sign.bind(v); - var key = this.toBuffer('pkcs1'); - var type = this.type; - var curve = this.curve; - v.sign = function () { - var sig = oldSign(key); - if (typeof (sig) === 'string') - sig = Buffer.from(sig, 'binary'); - sig = Signature.parse(sig, type, 'asn1'); - sig.hashAlgorithm = hashAlgo; - sig.curve = curve; - return (sig); - }; - return (v); -}; - -PrivateKey.parse = function (data, format, options) { - if (typeof (data) !== 'string') - assert.buffer(data, 'data'); - if (format === undefined) - format = 'auto'; - assert.string(format, 'format'); - if (typeof (options) === 'string') - options = { filename: options }; - assert.optionalObject(options, 'options'); - if (options === undefined) - options = {}; - assert.optionalString(options.filename, 'options.filename'); - if (options.filename === undefined) - options.filename = '(unnamed)'; - - assert.object(formats[format], 'formats[format]'); - - try { - var k = formats[format].read(data, options); - assert.ok(k instanceof PrivateKey, 'key is not a private key'); - if (!k.comment) - k.comment = options.filename; - return (k); - } catch (e) { - if (e.name === 'KeyEncryptedError') - throw (e); - throw (new KeyParseError(options.filename, format, e)); - } -}; - -PrivateKey.isPrivateKey = function (obj, ver) { - return (utils.isCompatible(obj, PrivateKey, ver)); -}; - -PrivateKey.generate = function (type, options) { - if (options === undefined) - options = {}; - assert.object(options, 'options'); - - switch (type) { - case 'ecdsa': - if (options.curve === undefined) - options.curve = 'nistp256'; - assert.string(options.curve, 'options.curve'); - return (generateECDSA(options.curve)); - case 'ed25519': - return (generateED25519()); - default: - throw (new Error('Key generation not supported with key ' + - 'type "' + type + '"')); - } -}; - -/* - * API versions for PrivateKey: - * [1,0] -- initial ver - * [1,1] -- added auto, pkcs[18], openssh/ssh-private formats - * [1,2] -- added defaultHashAlgorithm - * [1,3] -- added derive, ed, createDH - * [1,4] -- first tagged version - * [1,5] -- changed ed25519 part names and format - */ -PrivateKey.prototype._sshpkApiVersion = [1, 5]; - -PrivateKey._oldVersionDetect = function (obj) { - assert.func(obj.toPublic); - assert.func(obj.createSign); - if (obj.derive) - return ([1, 3]); - if (obj.defaultHashAlgorithm) - return ([1, 2]); - if (obj.formats['auto']) - return ([1, 1]); - return ([1, 0]); -}; - - -/***/ }), -/* 34 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.wrapLifecycle = exports.run = exports.install = exports.Install = undefined; - -var _extends2; - -function _load_extends() { - return _extends2 = _interopRequireDefault(__webpack_require__(21)); -} - -var _asyncToGenerator2; - -function _load_asyncToGenerator() { - return _asyncToGenerator2 = _interopRequireDefault(__webpack_require__(2)); -} 
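// ---------------------------------------------------------------------------
// Editorial sketch, not part of the original bundle: module 34 wires up every
// dependency through the lazy-require pattern seen in `_load_extends` and
// `_load_asyncToGenerator` above -- a cache slot plus a `_load_*` accessor, so
// call sites written as `(_foo || _load_foo())` pay the require() cost only on
// first use. A minimal, runnable illustration of the same pattern ('path'
// stands in for any heavy dependency):
var _path;
function _load_path() {
  // Runs require() at most once; afterwards the cache slot is truthy
  // and call sites short-circuit on `_path ||`.
  return _path = require('path');
}
// First use triggers the load, later uses hit the cache:
console.log((_path || _load_path()).join('pkg', 'node_modules')); // 'pkg/node_modules' on POSIX
// --------------------------------------------------------------------------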
- -let install = exports.install = (() => { - var _ref29 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, lockfile) { - yield wrapLifecycle(config, flags, (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const install = new Install(flags, config, reporter, lockfile); - yield install.init(); - })); - }); - - return function install(_x7, _x8, _x9, _x10) { - return _ref29.apply(this, arguments); - }; -})(); - -let run = exports.run = (() => { - var _ref31 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, reporter, flags, args) { - let lockfile; - let error = 'installCommandRenamed'; - if (flags.lockfile === false) { - lockfile = new (_lockfile || _load_lockfile()).default(); - } else { - lockfile = yield (_lockfile || _load_lockfile()).default.fromDirectory(config.lockfileFolder, reporter); - } - - if (args.length) { - const exampleArgs = args.slice(); - - if (flags.saveDev) { - exampleArgs.push('--dev'); - } - if (flags.savePeer) { - exampleArgs.push('--peer'); - } - if (flags.saveOptional) { - exampleArgs.push('--optional'); - } - if (flags.saveExact) { - exampleArgs.push('--exact'); - } - if (flags.saveTilde) { - exampleArgs.push('--tilde'); - } - let command = 'add'; - if (flags.global) { - error = 'globalFlagRemoved'; - command = 'global add'; - } - throw new (_errors || _load_errors()).MessageError(reporter.lang(error, `yarn ${command} ${exampleArgs.join(' ')}`)); - } - - yield install(config, reporter, flags, lockfile); - }); - - return function run(_x11, _x12, _x13, _x14) { - return _ref31.apply(this, arguments); - }; -})(); - -let wrapLifecycle = exports.wrapLifecycle = (() => { - var _ref32 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (config, flags, factory) { - yield config.executeLifecycleScript('preinstall'); - - yield factory(); - - // npm behaviour, seems kinda funky but yay compatibility - yield config.executeLifecycleScript('install'); - yield config.executeLifecycleScript('postinstall'); - - if (!config.production) { - if (!config.disablePrepublish) { - yield config.executeLifecycleScript('prepublish'); - } - yield config.executeLifecycleScript('prepare'); - } - }); - - return function wrapLifecycle(_x15, _x16, _x17) { - return _ref32.apply(this, arguments); - }; -})(); - -exports.hasWrapper = hasWrapper; -exports.setFlags = setFlags; - -var _objectPath; - -function _load_objectPath() { - return _objectPath = _interopRequireDefault(__webpack_require__(304)); -} - -var _hooks; - -function _load_hooks() { - return _hooks = __webpack_require__(374); -} - -var _index; - -function _load_index() { - return _index = _interopRequireDefault(__webpack_require__(220)); -} - -var _errors; - -function _load_errors() { - return _errors = __webpack_require__(6); -} - -var _integrityChecker; - -function _load_integrityChecker() { - return _integrityChecker = _interopRequireDefault(__webpack_require__(208)); -} - -var _lockfile; - -function _load_lockfile() { - return _lockfile = _interopRequireDefault(__webpack_require__(19)); -} - -var _lockfile2; - -function _load_lockfile2() { - return _lockfile2 = __webpack_require__(19); -} - -var _packageFetcher; - -function _load_packageFetcher() { - return _packageFetcher = _interopRequireWildcard(__webpack_require__(210)); -} - -var _packageInstallScripts; - -function _load_packageInstallScripts() { - return _packageInstallScripts = _interopRequireDefault(__webpack_require__(557)); -} - -var 
_packageCompatibility; - -function _load_packageCompatibility() { - return _packageCompatibility = _interopRequireWildcard(__webpack_require__(209)); -} - -var _packageResolver; - -function _load_packageResolver() { - return _packageResolver = _interopRequireDefault(__webpack_require__(366)); -} - -var _packageLinker; - -function _load_packageLinker() { - return _packageLinker = _interopRequireDefault(__webpack_require__(211)); -} - -var _index2; - -function _load_index2() { - return _index2 = __webpack_require__(57); -} - -var _index3; - -function _load_index3() { - return _index3 = __webpack_require__(78); -} - -var _autoclean; - -function _load_autoclean() { - return _autoclean = __webpack_require__(354); -} - -var _constants; - -function _load_constants() { - return _constants = _interopRequireWildcard(__webpack_require__(8)); -} - -var _normalizePattern; - -function _load_normalizePattern() { - return _normalizePattern = __webpack_require__(37); -} - -var _fs; - -function _load_fs() { - return _fs = _interopRequireWildcard(__webpack_require__(4)); -} - -var _map; - -function _load_map() { - return _map = _interopRequireDefault(__webpack_require__(29)); -} - -var _yarnVersion; - -function _load_yarnVersion() { - return _yarnVersion = __webpack_require__(120); -} - -var _generatePnpMap; - -function _load_generatePnpMap() { - return _generatePnpMap = __webpack_require__(579); -} - -var _workspaceLayout; - -function _load_workspaceLayout() { - return _workspaceLayout = _interopRequireDefault(__webpack_require__(90)); -} - -var _resolutionMap; - -function _load_resolutionMap() { - return _resolutionMap = _interopRequireDefault(__webpack_require__(214)); -} - -var _guessName; - -function _load_guessName() { - return _guessName = _interopRequireDefault(__webpack_require__(169)); -} - -var _audit; - -function _load_audit() { - return _audit = _interopRequireDefault(__webpack_require__(353)); -} - -function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } } - -function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } - -const deepEqual = __webpack_require__(631); - -const emoji = __webpack_require__(302); -const invariant = __webpack_require__(9); -const path = __webpack_require__(0); -const semver = __webpack_require__(22); -const uuid = __webpack_require__(119); -const ssri = __webpack_require__(65); - -const ONE_DAY = 1000 * 60 * 60 * 24; - -/** - * Try and detect the installation method for Yarn and provide a command to update it with. 
- */ - -function getUpdateCommand(installationMethod) { - if (installationMethod === 'tar') { - return `curl --compressed -o- -L ${(_constants || _load_constants()).YARN_INSTALLER_SH} | bash`; - } - - if (installationMethod === 'homebrew') { - return 'brew upgrade yarn'; - } - - if (installationMethod === 'deb') { - return 'sudo apt-get update && sudo apt-get install yarn'; - } - - if (installationMethod === 'rpm') { - return 'sudo yum install yarn'; - } - - if (installationMethod === 'npm') { - return 'npm install --global yarn'; - } - - if (installationMethod === 'chocolatey') { - return 'choco upgrade yarn'; - } - - if (installationMethod === 'apk') { - return 'apk update && apk add -u yarn'; - } - - if (installationMethod === 'portage') { - return 'sudo emerge --sync && sudo emerge -au sys-apps/yarn'; - } - - return null; -} - -function getUpdateInstaller(installationMethod) { - // Windows - if (installationMethod === 'msi') { - return (_constants || _load_constants()).YARN_INSTALLER_MSI; - } - - return null; -} - -function normalizeFlags(config, rawFlags) { - const flags = { - // install - har: !!rawFlags.har, - ignorePlatform: !!rawFlags.ignorePlatform, - ignoreEngines: !!rawFlags.ignoreEngines, - ignoreScripts: !!rawFlags.ignoreScripts, - ignoreOptional: !!rawFlags.ignoreOptional, - force: !!rawFlags.force, - flat: !!rawFlags.flat, - lockfile: rawFlags.lockfile !== false, - pureLockfile: !!rawFlags.pureLockfile, - updateChecksums: !!rawFlags.updateChecksums, - skipIntegrityCheck: !!rawFlags.skipIntegrityCheck, - frozenLockfile: !!rawFlags.frozenLockfile, - linkDuplicates: !!rawFlags.linkDuplicates, - checkFiles: !!rawFlags.checkFiles, - audit: !!rawFlags.audit, - - // add - peer: !!rawFlags.peer, - dev: !!rawFlags.dev, - optional: !!rawFlags.optional, - exact: !!rawFlags.exact, - tilde: !!rawFlags.tilde, - ignoreWorkspaceRootCheck: !!rawFlags.ignoreWorkspaceRootCheck, - - // outdated, update-interactive - includeWorkspaceDeps: !!rawFlags.includeWorkspaceDeps, - - // add, remove, update - workspaceRootIsCwd: rawFlags.workspaceRootIsCwd !== false - }; - - if (config.getOption('ignore-scripts')) { - flags.ignoreScripts = true; - } - - if (config.getOption('ignore-platform')) { - flags.ignorePlatform = true; - } - - if (config.getOption('ignore-engines')) { - flags.ignoreEngines = true; - } - - if (config.getOption('ignore-optional')) { - flags.ignoreOptional = true; - } - - if (config.getOption('force')) { - flags.force = true; - } - - return flags; -} - -class Install { - constructor(flags, config, reporter, lockfile) { - this.rootManifestRegistries = []; - this.rootPatternsToOrigin = (0, (_map || _load_map()).default)(); - this.lockfile = lockfile; - this.reporter = reporter; - this.config = config; - this.flags = normalizeFlags(config, flags); - this.resolutions = (0, (_map || _load_map()).default)(); // Legacy resolutions field used for flat install mode - this.resolutionMap = new (_resolutionMap || _load_resolutionMap()).default(config); // Selective resolutions for nested dependencies - this.resolver = new (_packageResolver || _load_packageResolver()).default(config, lockfile, this.resolutionMap); - this.integrityChecker = new (_integrityChecker || _load_integrityChecker()).default(config); - this.linker = new (_packageLinker || _load_packageLinker()).default(config, this.resolver); - this.scripts = new (_packageInstallScripts || _load_packageInstallScripts()).default(config, this.resolver, this.flags.force); - } - - /** - * Create a list of dependency requests from the current 
directories manifests. - */ - - fetchRequestFromCwd(excludePatterns = [], ignoreUnusedPatterns = false) { - var _this = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const patterns = []; - const deps = []; - let resolutionDeps = []; - const manifest = {}; - - const ignorePatterns = []; - const usedPatterns = []; - let workspaceLayout; - - // some commands should always run in the context of the entire workspace - const cwd = _this.flags.includeWorkspaceDeps || _this.flags.workspaceRootIsCwd ? _this.config.lockfileFolder : _this.config.cwd; - - // non-workspaces are always root, otherwise check for workspace root - const cwdIsRoot = !_this.config.workspaceRootFolder || _this.config.lockfileFolder === cwd; - - // exclude package names that are in install args - const excludeNames = []; - for (var _iterator = excludePatterns, _isArray = Array.isArray(_iterator), _i = 0, _iterator = _isArray ? _iterator : _iterator[Symbol.iterator]();;) { - var _ref; - - if (_isArray) { - if (_i >= _iterator.length) break; - _ref = _iterator[_i++]; - } else { - _i = _iterator.next(); - if (_i.done) break; - _ref = _i.value; - } - - const pattern = _ref; - - if ((0, (_index3 || _load_index3()).getExoticResolver)(pattern)) { - excludeNames.push((0, (_guessName || _load_guessName()).default)(pattern)); - } else { - // extract the name - const parts = (0, (_normalizePattern || _load_normalizePattern()).normalizePattern)(pattern); - excludeNames.push(parts.name); - } - } - - const stripExcluded = function stripExcluded(manifest) { - for (var _iterator2 = excludeNames, _isArray2 = Array.isArray(_iterator2), _i2 = 0, _iterator2 = _isArray2 ? _iterator2 : _iterator2[Symbol.iterator]();;) { - var _ref2; - - if (_isArray2) { - if (_i2 >= _iterator2.length) break; - _ref2 = _iterator2[_i2++]; - } else { - _i2 = _iterator2.next(); - if (_i2.done) break; - _ref2 = _i2.value; - } - - const exclude = _ref2; - - if (manifest.dependencies && manifest.dependencies[exclude]) { - delete manifest.dependencies[exclude]; - } - if (manifest.devDependencies && manifest.devDependencies[exclude]) { - delete manifest.devDependencies[exclude]; - } - if (manifest.optionalDependencies && manifest.optionalDependencies[exclude]) { - delete manifest.optionalDependencies[exclude]; - } - } - }; - - for (var _iterator3 = Object.keys((_index2 || _load_index2()).registries), _isArray3 = Array.isArray(_iterator3), _i3 = 0, _iterator3 = _isArray3 ? _iterator3 : _iterator3[Symbol.iterator]();;) { - var _ref3; - - if (_isArray3) { - if (_i3 >= _iterator3.length) break; - _ref3 = _iterator3[_i3++]; - } else { - _i3 = _iterator3.next(); - if (_i3.done) break; - _ref3 = _i3.value; - } - - const registry = _ref3; - - const filename = (_index2 || _load_index2()).registries[registry].filename; - - const loc = path.join(cwd, filename); - if (!(yield (_fs || _load_fs()).exists(loc))) { - continue; - } - - _this.rootManifestRegistries.push(registry); - - const projectManifestJson = yield _this.config.readJson(loc); - yield (0, (_index || _load_index()).default)(projectManifestJson, cwd, _this.config, cwdIsRoot); - - Object.assign(_this.resolutions, projectManifestJson.resolutions); - Object.assign(manifest, projectManifestJson); - - _this.resolutionMap.init(_this.resolutions); - for (var _iterator4 = Object.keys(_this.resolutionMap.resolutionsByPackage), _isArray4 = Array.isArray(_iterator4), _i4 = 0, _iterator4 = _isArray4 ? 
_iterator4 : _iterator4[Symbol.iterator]();;) { - var _ref4; - - if (_isArray4) { - if (_i4 >= _iterator4.length) break; - _ref4 = _iterator4[_i4++]; - } else { - _i4 = _iterator4.next(); - if (_i4.done) break; - _ref4 = _i4.value; - } - - const packageName = _ref4; - - const optional = (_objectPath || _load_objectPath()).default.has(manifest.optionalDependencies, packageName) && _this.flags.ignoreOptional; - for (var _iterator8 = _this.resolutionMap.resolutionsByPackage[packageName], _isArray8 = Array.isArray(_iterator8), _i8 = 0, _iterator8 = _isArray8 ? _iterator8 : _iterator8[Symbol.iterator]();;) { - var _ref9; - - if (_isArray8) { - if (_i8 >= _iterator8.length) break; - _ref9 = _iterator8[_i8++]; - } else { - _i8 = _iterator8.next(); - if (_i8.done) break; - _ref9 = _i8.value; - } - - const _ref8 = _ref9; - const pattern = _ref8.pattern; - - resolutionDeps = [...resolutionDeps, { registry, pattern, optional, hint: 'resolution' }]; - } - } - - const pushDeps = function pushDeps(depType, manifest, { hint, optional }, isUsed) { - if (ignoreUnusedPatterns && !isUsed) { - return; - } - // We only take unused dependencies into consideration to get deterministic hoisting. - // Since flat mode doesn't care about hoisting and everything is top level and specified then we can safely - // leave these out. - if (_this.flags.flat && !isUsed) { - return; - } - const depMap = manifest[depType]; - for (const name in depMap) { - if (excludeNames.indexOf(name) >= 0) { - continue; - } - - let pattern = name; - if (!_this.lockfile.getLocked(pattern)) { - // when we use --save we save the dependency to the lockfile with just the name rather than the - // version combo - pattern += '@' + depMap[name]; - } - - // normalization made sure packages are mentioned only once - if (isUsed) { - usedPatterns.push(pattern); - } else { - ignorePatterns.push(pattern); - } - - _this.rootPatternsToOrigin[pattern] = depType; - patterns.push(pattern); - deps.push({ pattern, registry, hint, optional, workspaceName: manifest.name, workspaceLoc: manifest._loc }); - } - }; - - if (cwdIsRoot) { - pushDeps('dependencies', projectManifestJson, { hint: null, optional: false }, true); - pushDeps('devDependencies', projectManifestJson, { hint: 'dev', optional: false }, !_this.config.production); - pushDeps('optionalDependencies', projectManifestJson, { hint: 'optional', optional: true }, true); - } - - if (_this.config.workspaceRootFolder) { - const workspaceLoc = cwdIsRoot ? loc : path.join(_this.config.lockfileFolder, filename); - const workspacesRoot = path.dirname(workspaceLoc); - - let workspaceManifestJson = projectManifestJson; - if (!cwdIsRoot) { - // the manifest we read before was a child workspace, so get the root - workspaceManifestJson = yield _this.config.readJson(workspaceLoc); - yield (0, (_index || _load_index()).default)(workspaceManifestJson, workspacesRoot, _this.config, true); - } - - const workspaces = yield _this.config.resolveWorkspaces(workspacesRoot, workspaceManifestJson); - workspaceLayout = new (_workspaceLayout || _load_workspaceLayout()).default(workspaces, _this.config); - - // add virtual manifest that depends on all workspaces, this way package hoisters and resolvers will work fine - const workspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.dependencies); - for (var _iterator5 = Object.keys(workspaces), _isArray5 = Array.isArray(_iterator5), _i5 = 0, _iterator5 = _isArray5 ? 
_iterator5 : _iterator5[Symbol.iterator]();;) { - var _ref5; - - if (_isArray5) { - if (_i5 >= _iterator5.length) break; - _ref5 = _iterator5[_i5++]; - } else { - _i5 = _iterator5.next(); - if (_i5.done) break; - _ref5 = _i5.value; - } - - const workspaceName = _ref5; - - const workspaceManifest = workspaces[workspaceName].manifest; - workspaceDependencies[workspaceName] = workspaceManifest.version; - - // include dependencies from all workspaces - if (_this.flags.includeWorkspaceDeps) { - pushDeps('dependencies', workspaceManifest, { hint: null, optional: false }, true); - pushDeps('devDependencies', workspaceManifest, { hint: 'dev', optional: false }, !_this.config.production); - pushDeps('optionalDependencies', workspaceManifest, { hint: 'optional', optional: true }, true); - } - } - const virtualDependencyManifest = { - _uid: '', - name: `workspace-aggregator-${uuid.v4()}`, - version: '1.0.0', - _registry: 'npm', - _loc: workspacesRoot, - dependencies: workspaceDependencies, - devDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.devDependencies), - optionalDependencies: (0, (_extends2 || _load_extends()).default)({}, workspaceManifestJson.optionalDependencies), - private: workspaceManifestJson.private, - workspaces: workspaceManifestJson.workspaces - }; - workspaceLayout.virtualManifestName = virtualDependencyManifest.name; - const virtualDep = {}; - virtualDep[virtualDependencyManifest.name] = virtualDependencyManifest.version; - workspaces[virtualDependencyManifest.name] = { loc: workspacesRoot, manifest: virtualDependencyManifest }; - - // ensure dependencies that should be excluded are stripped from the correct manifest - stripExcluded(cwdIsRoot ? virtualDependencyManifest : workspaces[projectManifestJson.name].manifest); - - pushDeps('workspaces', { workspaces: virtualDep }, { hint: 'workspaces', optional: false }, true); - - const implicitWorkspaceDependencies = (0, (_extends2 || _load_extends()).default)({}, workspaceDependencies); - - for (var _iterator6 = (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES, _isArray6 = Array.isArray(_iterator6), _i6 = 0, _iterator6 = _isArray6 ? _iterator6 : _iterator6[Symbol.iterator]();;) { - var _ref6; - - if (_isArray6) { - if (_i6 >= _iterator6.length) break; - _ref6 = _iterator6[_i6++]; - } else { - _i6 = _iterator6.next(); - if (_i6.done) break; - _ref6 = _i6.value; - } - - const type = _ref6; - - for (var _iterator7 = Object.keys(projectManifestJson[type] || {}), _isArray7 = Array.isArray(_iterator7), _i7 = 0, _iterator7 = _isArray7 ? 
_iterator7 : _iterator7[Symbol.iterator]();;) { - var _ref7; - - if (_isArray7) { - if (_i7 >= _iterator7.length) break; - _ref7 = _iterator7[_i7++]; - } else { - _i7 = _iterator7.next(); - if (_i7.done) break; - _ref7 = _i7.value; - } - - const dependencyName = _ref7; - - delete implicitWorkspaceDependencies[dependencyName]; - } - } - - pushDeps('dependencies', { dependencies: implicitWorkspaceDependencies }, { hint: 'workspaces', optional: false }, true); - } - - break; - } - - // inherit root flat flag - if (manifest.flat) { - _this.flags.flat = true; - } - - return { - requests: [...resolutionDeps, ...deps], - patterns, - manifest, - usedPatterns, - ignorePatterns, - workspaceLayout - }; - })(); - } - - /** - * TODO description - */ - - prepareRequests(requests) { - return requests; - } - - preparePatterns(patterns) { - return patterns; - } - preparePatternsForLinking(patterns, cwdManifest, cwdIsRoot) { - return patterns; - } - - prepareManifests() { - var _this2 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const manifests = yield _this2.config.getRootManifests(); - return manifests; - })(); - } - - bailout(patterns, workspaceLayout) { - var _this3 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // We don't want to skip the audit - it could yield important errors - if (_this3.flags.audit) { - return false; - } - // PNP is so fast that the integrity check isn't pertinent - if (_this3.config.plugnplayEnabled) { - return false; - } - if (_this3.flags.skipIntegrityCheck || _this3.flags.force) { - return false; - } - const lockfileCache = _this3.lockfile.cache; - if (!lockfileCache) { - return false; - } - const lockfileClean = _this3.lockfile.parseResultType === 'success'; - const match = yield _this3.integrityChecker.check(patterns, lockfileCache, _this3.flags, workspaceLayout); - if (_this3.flags.frozenLockfile && (!lockfileClean || match.missingPatterns.length > 0)) { - throw new (_errors || _load_errors()).MessageError(_this3.reporter.lang('frozenLockfileError')); - } - - const haveLockfile = yield (_fs || _load_fs()).exists(path.join(_this3.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME)); - - const lockfileIntegrityPresent = !_this3.lockfile.hasEntriesExistWithoutIntegrity(); - const integrityBailout = lockfileIntegrityPresent || !_this3.config.autoAddIntegrity; - - if (match.integrityMatches && haveLockfile && lockfileClean && integrityBailout) { - _this3.reporter.success(_this3.reporter.lang('upToDate')); - return true; - } - - if (match.integrityFileMissing && haveLockfile) { - // Integrity file missing, force script installations - _this3.scripts.setForce(true); - return false; - } - - if (match.hardRefreshRequired) { - // e.g. node version doesn't match, force script installations - _this3.scripts.setForce(true); - return false; - } - - if (!patterns.length && !match.integrityFileMissing) { - _this3.reporter.success(_this3.reporter.lang('nothingToInstall')); - yield _this3.createEmptyManifestFolders(); - yield _this3.saveLockfileAndIntegrity(patterns, workspaceLayout); - return true; - } - - return false; - })(); - } - - /** - * Produce empty folders for all used root manifests. 
- */ - - createEmptyManifestFolders() { - var _this4 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - if (_this4.config.modulesFolder) { - // already created - return; - } - - for (var _iterator9 = _this4.rootManifestRegistries, _isArray9 = Array.isArray(_iterator9), _i9 = 0, _iterator9 = _isArray9 ? _iterator9 : _iterator9[Symbol.iterator]();;) { - var _ref10; - - if (_isArray9) { - if (_i9 >= _iterator9.length) break; - _ref10 = _iterator9[_i9++]; - } else { - _i9 = _iterator9.next(); - if (_i9.done) break; - _ref10 = _i9.value; - } - - const registryName = _ref10; - const folder = _this4.config.registries[registryName].folder; - - yield (_fs || _load_fs()).mkdirp(path.join(_this4.config.lockfileFolder, folder)); - } - })(); - } - - /** - * TODO description - */ - - markIgnored(patterns) { - for (var _iterator10 = patterns, _isArray10 = Array.isArray(_iterator10), _i10 = 0, _iterator10 = _isArray10 ? _iterator10 : _iterator10[Symbol.iterator]();;) { - var _ref11; - - if (_isArray10) { - if (_i10 >= _iterator10.length) break; - _ref11 = _iterator10[_i10++]; - } else { - _i10 = _iterator10.next(); - if (_i10.done) break; - _ref11 = _i10.value; - } - - const pattern = _ref11; - - const manifest = this.resolver.getStrictResolvedPattern(pattern); - const ref = manifest._reference; - invariant(ref, 'expected package reference'); - - // just mark the package as ignored. if the package is used by a required package, the hoister - // will take care of that. - ref.ignore = true; - } - } - - /** - * helper method that gets only recent manifests - * used by global.ls command - */ - getFlattenedDeps() { - var _this5 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - var _ref12 = yield _this5.fetchRequestFromCwd(); - - const depRequests = _ref12.requests, - rawPatterns = _ref12.patterns; - - - yield _this5.resolver.init(depRequests, {}); - - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this5.resolver.getManifests(), _this5.config); - _this5.resolver.updateManifests(manifests); - - return _this5.flatten(rawPatterns); - })(); - } - - /** - * TODO description - */ - - init() { - var _this6 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.checkUpdate(); - - // warn if we have a shrinkwrap - if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_SHRINKWRAP_FILENAME))) { - _this6.reporter.warn(_this6.reporter.lang('shrinkwrapWarning')); - } - - // warn if we have an npm lockfile - if (yield (_fs || _load_fs()).exists(path.join(_this6.config.lockfileFolder, (_constants || _load_constants()).NPM_LOCK_FILENAME))) { - _this6.reporter.warn(_this6.reporter.lang('npmLockfileWarning')); - } - - if (_this6.config.plugnplayEnabled) { - _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L1')); - _this6.reporter.info(_this6.reporter.lang('plugnplaySuggestV2L2')); - } - - let flattenedTopLevelPatterns = []; - const steps = []; - - var _ref13 = yield _this6.fetchRequestFromCwd(); - - const depRequests = _ref13.requests, - rawPatterns = _ref13.patterns, - ignorePatterns = _ref13.ignorePatterns, - workspaceLayout = _ref13.workspaceLayout, - manifest = _ref13.manifest; - - let topLevelPatterns = []; - - const artifacts = yield _this6.integrityChecker.getArtifacts(); - if (artifacts) { - _this6.linker.setArtifacts(artifacts); - _this6.scripts.setArtifacts(artifacts); - } - - if 
((_packageCompatibility || _load_packageCompatibility()).shouldCheck(manifest, _this6.flags)) { - steps.push((() => { - var _ref14 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - _this6.reporter.step(curr, total, _this6.reporter.lang('checkingManifest'), emoji.get('mag')); - yield _this6.checkCompatibility(); - }); - - return function (_x, _x2) { - return _ref14.apply(this, arguments); - }; - })()); - } - - const audit = new (_audit || _load_audit()).default(_this6.config, _this6.reporter, { groups: (_constants || _load_constants()).OWNED_DEPENDENCY_TYPES }); - let auditFoundProblems = false; - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('resolveStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.reporter.lang('resolvingPackages'), emoji.get('mag')); - yield _this6.resolver.init(_this6.prepareRequests(depRequests), { - isFlat: _this6.flags.flat, - isFrozen: _this6.flags.frozenLockfile, - workspaceLayout - }); - topLevelPatterns = _this6.preparePatterns(rawPatterns); - flattenedTopLevelPatterns = yield _this6.flatten(topLevelPatterns); - return { bailout: !_this6.flags.audit && (yield _this6.bailout(topLevelPatterns, workspaceLayout)) }; - })); - }); - - if (_this6.flags.audit) { - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('auditStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.reporter.lang('auditRunning'), emoji.get('mag')); - if (_this6.flags.offline) { - _this6.reporter.warn(_this6.reporter.lang('auditOffline')); - return { bailout: false }; - } - const preparedManifests = yield _this6.prepareManifests(); - // $FlowFixMe - Flow considers `m` in the map operation to be "mixed", so does not recognize `m.object` - const mergedManifest = Object.assign({}, ...Object.values(preparedManifests).map(function (m) { - return m.object; - })); - const auditVulnerabilityCounts = yield audit.performAudit(mergedManifest, _this6.lockfile, _this6.resolver, _this6.linker, topLevelPatterns); - auditFoundProblems = auditVulnerabilityCounts.info || auditVulnerabilityCounts.low || auditVulnerabilityCounts.moderate || auditVulnerabilityCounts.high || auditVulnerabilityCounts.critical; - return { bailout: yield _this6.bailout(topLevelPatterns, workspaceLayout) }; - })); - }); - } - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('fetchStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.markIgnored(ignorePatterns); - _this6.reporter.step(curr, total, _this6.reporter.lang('fetchingPackages'), emoji.get('truck')); - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this6.resolver.getManifests(), _this6.config); - _this6.resolver.updateManifests(manifests); - yield (_packageCompatibility || _load_packageCompatibility()).check(_this6.resolver.getManifests(), _this6.config, _this6.flags.ignoreEngines); - })); - }); - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('linkStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // remove integrity hash to make this operation atomic - yield _this6.integrityChecker.removeIntegrityFile(); - _this6.reporter.step(curr, total, _this6.reporter.lang('linkingDependencies'), emoji.get('link')); - 
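// (editorial note) preparePatternsForLinking is a subclass hook -- a no-op in
// the base Install class above -- that may adjust the flattened pattern list
// before the linker materializes node_modules.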
flattenedTopLevelPatterns = _this6.preparePatternsForLinking(flattenedTopLevelPatterns, manifest, _this6.config.lockfileFolder === _this6.config.cwd); - yield _this6.linker.init(flattenedTopLevelPatterns, workspaceLayout, { - linkDuplicates: _this6.flags.linkDuplicates, - ignoreOptional: _this6.flags.ignoreOptional - }); - })); - }); - - if (_this6.config.plugnplayEnabled) { - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('pnpStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const pnpPath = `${_this6.config.lockfileFolder}/${(_constants || _load_constants()).PNP_FILENAME}`; - - const code = yield (0, (_generatePnpMap || _load_generatePnpMap()).generatePnpMap)(_this6.config, flattenedTopLevelPatterns, { - resolver: _this6.resolver, - reporter: _this6.reporter, - targetPath: pnpPath, - workspaceLayout - }); - - try { - const file = yield (_fs || _load_fs()).readFile(pnpPath); - if (file === code) { - return; - } - } catch (error) {} - - yield (_fs || _load_fs()).writeFile(pnpPath, code); - yield (_fs || _load_fs()).chmod(pnpPath, 0o755); - })); - }); - } - - steps.push(function (curr, total) { - return (0, (_hooks || _load_hooks()).callThroughHook)('buildStep', (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - _this6.reporter.step(curr, total, _this6.flags.force ? _this6.reporter.lang('rebuildingPackages') : _this6.reporter.lang('buildingFreshPackages'), emoji.get('hammer')); - - if (_this6.config.ignoreScripts) { - _this6.reporter.warn(_this6.reporter.lang('ignoredScripts')); - } else { - yield _this6.scripts.init(flattenedTopLevelPatterns); - } - })); - }); - - if (_this6.flags.har) { - steps.push((() => { - var _ref21 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - const formattedDate = new Date().toISOString().replace(/:/g, '-'); - const filename = `yarn-install_${formattedDate}.har`; - _this6.reporter.step(curr, total, _this6.reporter.lang('savingHar', filename), emoji.get('black_circle_for_record')); - yield _this6.config.requestManager.saveHar(filename); - }); - - return function (_x3, _x4) { - return _ref21.apply(this, arguments); - }; - })()); - } - - if (yield _this6.shouldClean()) { - steps.push((() => { - var _ref22 = (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* (curr, total) { - _this6.reporter.step(curr, total, _this6.reporter.lang('cleaningModules'), emoji.get('recycle')); - yield (0, (_autoclean || _load_autoclean()).clean)(_this6.config, _this6.reporter); - }); - - return function (_x5, _x6) { - return _ref22.apply(this, arguments); - }; - })()); - } - - let currentStep = 0; - for (var _iterator11 = steps, _isArray11 = Array.isArray(_iterator11), _i11 = 0, _iterator11 = _isArray11 ? _iterator11 : _iterator11[Symbol.iterator]();;) { - var _ref23; - - if (_isArray11) { - if (_i11 >= _iterator11.length) break; - _ref23 = _iterator11[_i11++]; - } else { - _i11 = _iterator11.next(); - if (_i11.done) break; - _ref23 = _i11.value; - } - - const step = _ref23; - - const stepResult = yield step(++currentStep, steps.length); - if (stepResult && stepResult.bailout) { - if (_this6.flags.audit) { - audit.summary(); - } - if (auditFoundProblems) { - _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); - } - _this6.maybeOutputUpdate(); - return flattenedTopLevelPatterns; - } - } - - // fin! 
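// (editorial note) Reaching this point means no step requested a bailout:
// print the audit summary, write yarn.lock plus the integrity file, persist
// any manifest changes, and surface a pending self-update notice before
// returning the flattened top-level patterns.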
- if (_this6.flags.audit) { - audit.summary(); - } - if (auditFoundProblems) { - _this6.reporter.warn(_this6.reporter.lang('auditRunAuditForDetails')); - } - yield _this6.saveLockfileAndIntegrity(topLevelPatterns, workspaceLayout); - yield _this6.persistChanges(); - _this6.maybeOutputUpdate(); - _this6.config.requestManager.clearCache(); - return flattenedTopLevelPatterns; - })(); - } - - checkCompatibility() { - var _this7 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - var _ref24 = yield _this7.fetchRequestFromCwd(); - - const manifest = _ref24.manifest; - - yield (_packageCompatibility || _load_packageCompatibility()).checkOne(manifest, _this7.config, _this7.flags.ignoreEngines); - })(); - } - - persistChanges() { - var _this8 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - // get all the different registry manifests in this folder - const manifests = yield _this8.config.getRootManifests(); - - if (yield _this8.applyChanges(manifests)) { - yield _this8.config.saveRootManifests(manifests); - } - })(); - } - - applyChanges(manifests) { - let hasChanged = false; - - if (this.config.plugnplayPersist) { - const object = manifests.npm.object; - - - if (typeof object.installConfig !== 'object') { - object.installConfig = {}; - } - - if (this.config.plugnplayEnabled && object.installConfig.pnp !== true) { - object.installConfig.pnp = true; - hasChanged = true; - } else if (!this.config.plugnplayEnabled && typeof object.installConfig.pnp !== 'undefined') { - delete object.installConfig.pnp; - hasChanged = true; - } - - if (Object.keys(object.installConfig).length === 0) { - delete object.installConfig; - } - } - - return Promise.resolve(hasChanged); - } - - /** - * Check if we should run the cleaning step. - */ - - shouldClean() { - return (_fs || _load_fs()).exists(path.join(this.config.lockfileFolder, (_constants || _load_constants()).CLEAN_FILENAME)); - } - - /** - * TODO - */ - - flatten(patterns) { - var _this9 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - if (!_this9.flags.flat) { - return patterns; - } - - const flattenedPatterns = []; - - for (var _iterator12 = _this9.resolver.getAllDependencyNamesByLevelOrder(patterns), _isArray12 = Array.isArray(_iterator12), _i12 = 0, _iterator12 = _isArray12 ? 
_iterator12 : _iterator12[Symbol.iterator]();;) { - var _ref25; - - if (_isArray12) { - if (_i12 >= _iterator12.length) break; - _ref25 = _iterator12[_i12++]; - } else { - _i12 = _iterator12.next(); - if (_i12.done) break; - _ref25 = _i12.value; - } - - const name = _ref25; - - const infos = _this9.resolver.getAllInfoForPackageName(name).filter(function (manifest) { - const ref = manifest._reference; - invariant(ref, 'expected package reference'); - return !ref.ignore; - }); - - if (infos.length === 0) { - continue; - } - - if (infos.length === 1) { - // single version of this package - // take out a single pattern as multiple patterns may have resolved to this package - flattenedPatterns.push(_this9.resolver.patternsByPackage[name][0]); - continue; - } - - const options = infos.map(function (info) { - const ref = info._reference; - invariant(ref, 'expected reference'); - return { - // TODO `and is required by {PARENT}`, - name: _this9.reporter.lang('manualVersionResolutionOption', ref.patterns.join(', '), info.version), - - value: info.version - }; - }); - const versions = infos.map(function (info) { - return info.version; - }); - let version; - - const resolutionVersion = _this9.resolutions[name]; - if (resolutionVersion && versions.indexOf(resolutionVersion) >= 0) { - // use json `resolution` version - version = resolutionVersion; - } else { - version = yield _this9.reporter.select(_this9.reporter.lang('manualVersionResolution', name), _this9.reporter.lang('answer'), options); - _this9.resolutions[name] = version; - } - - flattenedPatterns.push(_this9.resolver.collapseAllVersionsOfPackage(name, version)); - } - - // save resolutions to their appropriate root manifest - if (Object.keys(_this9.resolutions).length) { - const manifests = yield _this9.config.getRootManifests(); - - for (const name in _this9.resolutions) { - const version = _this9.resolutions[name]; - - const patterns = _this9.resolver.patternsByPackage[name]; - if (!patterns) { - continue; - } - - let manifest; - for (var _iterator13 = patterns, _isArray13 = Array.isArray(_iterator13), _i13 = 0, _iterator13 = _isArray13 ? 
_iterator13 : _iterator13[Symbol.iterator]();;) { - var _ref26; - - if (_isArray13) { - if (_i13 >= _iterator13.length) break; - _ref26 = _iterator13[_i13++]; - } else { - _i13 = _iterator13.next(); - if (_i13.done) break; - _ref26 = _i13.value; - } - - const pattern = _ref26; - - manifest = _this9.resolver.getResolvedPattern(pattern); - if (manifest) { - break; - } - } - invariant(manifest, 'expected manifest'); - - const ref = manifest._reference; - invariant(ref, 'expected reference'); - - const object = manifests[ref.registry].object; - object.resolutions = object.resolutions || {}; - object.resolutions[name] = version; - } - - yield _this9.config.saveRootManifests(manifests); - } - - return flattenedPatterns; - })(); - } - - /** - * Remove offline tarballs that are no longer required - */ - - pruneOfflineMirror(lockfile) { - var _this10 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const mirror = _this10.config.getOfflineMirrorPath(); - if (!mirror) { - return; - } - - const requiredTarballs = new Set(); - for (const dependency in lockfile) { - const resolved = lockfile[dependency].resolved; - if (resolved) { - const basename = path.basename(resolved.split('#')[0]); - if (dependency[0] === '@' && basename[0] !== '@') { - requiredTarballs.add(`${dependency.split('/')[0]}-${basename}`); - } - requiredTarballs.add(basename); - } - } - - const mirrorFiles = yield (_fs || _load_fs()).walk(mirror); - for (var _iterator14 = mirrorFiles, _isArray14 = Array.isArray(_iterator14), _i14 = 0, _iterator14 = _isArray14 ? _iterator14 : _iterator14[Symbol.iterator]();;) { - var _ref27; - - if (_isArray14) { - if (_i14 >= _iterator14.length) break; - _ref27 = _iterator14[_i14++]; - } else { - _i14 = _iterator14.next(); - if (_i14.done) break; - _ref27 = _i14.value; - } - - const file = _ref27; - - const isTarball = path.extname(file.basename) === '.tgz'; - // if using experimental-pack-script-packages-in-mirror flag, don't unlink prebuilt packages - const hasPrebuiltPackage = file.relative.startsWith('prebuilt/'); - if (isTarball && !hasPrebuiltPackage && !requiredTarballs.has(file.basename)) { - yield (_fs || _load_fs()).unlink(file.absolute); - } - } - })(); - } - - /** - * Save updated integrity and lockfiles. 
- */ - - saveLockfileAndIntegrity(patterns, workspaceLayout) { - var _this11 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const resolvedPatterns = {}; - Object.keys(_this11.resolver.patterns).forEach(function (pattern) { - if (!workspaceLayout || !workspaceLayout.getManifestByPattern(pattern)) { - resolvedPatterns[pattern] = _this11.resolver.patterns[pattern]; - } - }); - - // TODO this code is duplicated in a few places, need a common way to filter out workspace patterns from lockfile - patterns = patterns.filter(function (p) { - return !workspaceLayout || !workspaceLayout.getManifestByPattern(p); - }); - - const lockfileBasedOnResolver = _this11.lockfile.getLockfile(resolvedPatterns); - - if (_this11.config.pruneOfflineMirror) { - yield _this11.pruneOfflineMirror(lockfileBasedOnResolver); - } - - // write integrity hash - if (!_this11.config.plugnplayEnabled) { - yield _this11.integrityChecker.save(patterns, lockfileBasedOnResolver, _this11.flags, workspaceLayout, _this11.scripts.getArtifacts()); - } - - // --no-lockfile or --pure-lockfile or --frozen-lockfile - if (_this11.flags.lockfile === false || _this11.flags.pureLockfile || _this11.flags.frozenLockfile) { - return; - } - - const lockFileHasAllPatterns = patterns.every(function (p) { - return _this11.lockfile.getLocked(p); - }); - const lockfilePatternsMatch = Object.keys(_this11.lockfile.cache || {}).every(function (p) { - return lockfileBasedOnResolver[p]; - }); - const resolverPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { - const manifest = _this11.lockfile.getLocked(pattern); - return manifest && manifest.resolved === lockfileBasedOnResolver[pattern].resolved && deepEqual(manifest.prebuiltVariants, lockfileBasedOnResolver[pattern].prebuiltVariants); - }); - const integrityPatternsAreSameAsInLockfile = Object.keys(lockfileBasedOnResolver).every(function (pattern) { - const existingIntegrityInfo = lockfileBasedOnResolver[pattern].integrity; - if (!existingIntegrityInfo) { - // if this entry does not have an integrity, no need to re-write the lockfile because of it - return true; - } - const manifest = _this11.lockfile.getLocked(pattern); - if (manifest && manifest.integrity) { - const manifestIntegrity = ssri.stringify(manifest.integrity); - return manifestIntegrity === existingIntegrityInfo; - } - return false; - }); - - // remove command is followed by install with force, lockfile will be rewritten in any case then - if (!_this11.flags.force && _this11.lockfile.parseResultType === 'success' && lockFileHasAllPatterns && lockfilePatternsMatch && resolverPatternsAreSameAsInLockfile && integrityPatternsAreSameAsInLockfile && patterns.length) { - return; - } - - // build lockfile location - const loc = path.join(_this11.config.lockfileFolder, (_constants || _load_constants()).LOCKFILE_FILENAME); - - // write lockfile - const lockSource = (0, (_lockfile2 || _load_lockfile2()).stringify)(lockfileBasedOnResolver, false, _this11.config.enableLockfileVersions); - yield (_fs || _load_fs()).writeFilePreservingEol(loc, lockSource); - - _this11._logSuccessSaveLockfile(); - })(); - } - - _logSuccessSaveLockfile() { - this.reporter.success(this.reporter.lang('savedLockfile')); - } - - /** - * Load the dependency graph of the current install. Only does package resolving and wont write to the cwd. 
- */ - hydrate(ignoreUnusedPatterns) { - var _this12 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - const request = yield _this12.fetchRequestFromCwd([], ignoreUnusedPatterns); - const depRequests = request.requests, - rawPatterns = request.patterns, - ignorePatterns = request.ignorePatterns, - workspaceLayout = request.workspaceLayout; - - - yield _this12.resolver.init(depRequests, { - isFlat: _this12.flags.flat, - isFrozen: _this12.flags.frozenLockfile, - workspaceLayout - }); - yield _this12.flatten(rawPatterns); - _this12.markIgnored(ignorePatterns); - - // fetch packages, should hit cache most of the time - const manifests = yield (_packageFetcher || _load_packageFetcher()).fetch(_this12.resolver.getManifests(), _this12.config); - _this12.resolver.updateManifests(manifests); - yield (_packageCompatibility || _load_packageCompatibility()).check(_this12.resolver.getManifests(), _this12.config, _this12.flags.ignoreEngines); - - // expand minimal manifests - for (var _iterator15 = _this12.resolver.getManifests(), _isArray15 = Array.isArray(_iterator15), _i15 = 0, _iterator15 = _isArray15 ? _iterator15 : _iterator15[Symbol.iterator]();;) { - var _ref28; - - if (_isArray15) { - if (_i15 >= _iterator15.length) break; - _ref28 = _iterator15[_i15++]; - } else { - _i15 = _iterator15.next(); - if (_i15.done) break; - _ref28 = _i15.value; - } - - const manifest = _ref28; - - const ref = manifest._reference; - invariant(ref, 'expected reference'); - const type = ref.remote.type; - // link specifier won't ever hit cache - - let loc = ''; - if (type === 'link') { - continue; - } else if (type === 'workspace') { - if (!ref.remote.reference) { - continue; - } - loc = ref.remote.reference; - } else { - loc = _this12.config.generateModuleCachePath(ref); - } - const newPkg = yield _this12.config.readManifest(loc); - yield _this12.resolver.updateManifest(ref, newPkg); - } - - return request; - })(); - } - - /** - * Check for updates every day and output a nag message if there's a newer version. 
- */ - - checkUpdate() { - if (this.config.nonInteractive) { - // don't show upgrade dialog on CI or non-TTY terminals - return; - } - - // don't check if disabled - if (this.config.getOption('disable-self-update-check')) { - return; - } - - // only check for updates once a day - const lastUpdateCheck = Number(this.config.getOption('lastUpdateCheck')) || 0; - if (lastUpdateCheck && Date.now() - lastUpdateCheck < ONE_DAY) { - return; - } - - // don't bug for updates on tagged releases - if ((_yarnVersion || _load_yarnVersion()).version.indexOf('-') >= 0) { - return; - } - - this._checkUpdate().catch(() => { - // swallow errors - }); - } - - _checkUpdate() { - var _this13 = this; - - return (0, (_asyncToGenerator2 || _load_asyncToGenerator()).default)(function* () { - let latestVersion = yield _this13.config.requestManager.request({ - url: (_constants || _load_constants()).SELF_UPDATE_VERSION_URL - }); - invariant(typeof latestVersion === 'string', 'expected string'); - latestVersion = latestVersion.trim(); - if (!semver.valid(latestVersion)) { - return; - } - - // ensure we only check for updates periodically - _this13.config.registries.yarn.saveHomeConfig({ - lastUpdateCheck: Date.now() - }); - - if (semver.gt(latestVersion, (_yarnVersion || _load_yarnVersion()).version)) { - const installationMethod = yield (0, (_yarnVersion || _load_yarnVersion()).getInstallationMethod)(); - _this13.maybeOutputUpdate = function () { - _this13.reporter.warn(_this13.reporter.lang('yarnOutdated', latestVersion, (_yarnVersion || _load_yarnVersion()).version)); - - const command = getUpdateCommand(installationMethod); - if (command) { - _this13.reporter.info(_this13.reporter.lang('yarnOutdatedCommand')); - _this13.reporter.command(command); - } else { - const installer = getUpdateInstaller(installationMethod); - if (installer) { - _this13.reporter.info(_this13.reporter.lang('yarnOutdatedInstaller', installer)); - } - } - }; - } - })(); - } - - /** - * Method to override with a possible upgrade message. 
- */ - - maybeOutputUpdate() {} -} - -exports.Install = Install; -function hasWrapper(commander, args) { - return true; -} - -function setFlags(commander) { - commander.description('Yarn install is used to install all dependencies for a project.'); - commander.usage('install [flags]'); - commander.option('-A, --audit', 'Run vulnerability audit on installed packages'); - commander.option('-g, --global', 'DEPRECATED'); - commander.option('-S, --save', 'DEPRECATED - save package to your `dependencies`'); - commander.option('-D, --save-dev', 'DEPRECATED - save package to your `devDependencies`'); - commander.option('-P, --save-peer', 'DEPRECATED - save package to your `peerDependencies`'); - commander.option('-O, --save-optional', 'DEPRECATED - save package to your `optionalDependencies`'); - commander.option('-E, --save-exact', 'DEPRECATED'); - commander.option('-T, --save-tilde', 'DEPRECATED'); -} - -/***/ }), -/* 35 */ -/***/ (function(module, exports, __webpack_require__) { - -var isObject = __webpack_require__(52); -module.exports = function (it) { - if (!isObject(it)) throw TypeError(it + ' is not an object!'); - return it; -}; - - -/***/ }), -/* 36 */ -/***/ (function(module, __webpack_exports__, __webpack_require__) { - -"use strict"; -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return SubjectSubscriber; }); -/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Subject; }); -/* unused harmony export AnonymousSubject */ -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_0_tslib__ = __webpack_require__(1); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_1__Observable__ = __webpack_require__(12); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_2__Subscriber__ = __webpack_require__(7); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_3__Subscription__ = __webpack_require__(25); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__ = __webpack_require__(189); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__ = __webpack_require__(422); -/* harmony import */ var __WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__ = __webpack_require__(321); -/** PURE_IMPORTS_START tslib,_Observable,_Subscriber,_Subscription,_util_ObjectUnsubscribedError,_SubjectSubscription,_internal_symbol_rxSubscriber PURE_IMPORTS_END */ - - - - - - - -var SubjectSubscriber = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](SubjectSubscriber, _super); - function SubjectSubscriber(destination) { - var _this = _super.call(this, destination) || this; - _this.destination = destination; - return _this; - } - return SubjectSubscriber; -}(__WEBPACK_IMPORTED_MODULE_2__Subscriber__["a" /* Subscriber */])); - -var Subject = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](Subject, _super); - function Subject() { - var _this = _super.call(this) || this; - _this.observers = []; - _this.closed = false; - _this.isStopped = false; - _this.hasError = false; - _this.thrownError = null; - return _this; - } - Subject.prototype[__WEBPACK_IMPORTED_MODULE_6__internal_symbol_rxSubscriber__["a" /* rxSubscriber */]] = function () { - return new SubjectSubscriber(this); - }; - Subject.prototype.lift = function (operator) { - var subject = new AnonymousSubject(this, this); - subject.operator = operator; - return subject; - }; - Subject.prototype.next = function (value) { - if (this.closed) { 
- throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - if (!this.isStopped) { - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].next(value); - } - } - }; - Subject.prototype.error = function (err) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - this.hasError = true; - this.thrownError = err; - this.isStopped = true; - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].error(err); - } - this.observers.length = 0; - }; - Subject.prototype.complete = function () { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - this.isStopped = true; - var observers = this.observers; - var len = observers.length; - var copy = observers.slice(); - for (var i = 0; i < len; i++) { - copy[i].complete(); - } - this.observers.length = 0; - }; - Subject.prototype.unsubscribe = function () { - this.isStopped = true; - this.closed = true; - this.observers = null; - }; - Subject.prototype._trySubscribe = function (subscriber) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - else { - return _super.prototype._trySubscribe.call(this, subscriber); - } - }; - Subject.prototype._subscribe = function (subscriber) { - if (this.closed) { - throw new __WEBPACK_IMPORTED_MODULE_4__util_ObjectUnsubscribedError__["a" /* ObjectUnsubscribedError */](); - } - else if (this.hasError) { - subscriber.error(this.thrownError); - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - else if (this.isStopped) { - subscriber.complete(); - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - else { - this.observers.push(subscriber); - return new __WEBPACK_IMPORTED_MODULE_5__SubjectSubscription__["a" /* SubjectSubscription */](this, subscriber); - } - }; - Subject.prototype.asObservable = function () { - var observable = new __WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */](); - observable.source = this; - return observable; - }; - Subject.create = function (destination, source) { - return new AnonymousSubject(destination, source); - }; - return Subject; -}(__WEBPACK_IMPORTED_MODULE_1__Observable__["a" /* Observable */])); - -var AnonymousSubject = /*@__PURE__*/ (function (_super) { - __WEBPACK_IMPORTED_MODULE_0_tslib__["a" /* __extends */](AnonymousSubject, _super); - function AnonymousSubject(destination, source) { - var _this = _super.call(this) || this; - _this.destination = destination; - _this.source = source; - return _this; - } - AnonymousSubject.prototype.next = function (value) { - var destination = this.destination; - if (destination && destination.next) { - destination.next(value); - } - }; - AnonymousSubject.prototype.error = function (err) { - var destination = this.destination; - if (destination && destination.error) { - this.destination.error(err); - } - }; - AnonymousSubject.prototype.complete = function () { - var destination = this.destination; - if (destination && destination.complete) { - this.destination.complete(); - } - }; - AnonymousSubject.prototype._subscribe = function (subscriber) { - var source = this.source; - if (source) { - return 
this.source.subscribe(subscriber); - } - else { - return __WEBPACK_IMPORTED_MODULE_3__Subscription__["a" /* Subscription */].EMPTY; - } - }; - return AnonymousSubject; -}(Subject)); - -//# sourceMappingURL=Subject.js.map - - -/***/ }), -/* 37 */ -/***/ (function(module, exports, __webpack_require__) { - -"use strict"; - - -Object.defineProperty(exports, "__esModule", { - value: true -}); -exports.normalizePattern = normalizePattern; - -/** - * Explode and normalize a pattern into its name and range. - */ - -function normalizePattern(pattern) { - let hasVersion = false; - let range = 'latest'; - let name = pattern; - - // if we're a scope then remove the @ and add it back later - let isScoped = false; - if (name[0] === '@') { - isScoped = true; - name = name.slice(1); - } - - // take first part as the name - const parts = name.split('@'); - if (parts.length > 1) { - name = parts.shift(); - range = parts.join('@'); - - if (range) { - hasVersion = true; - } else { - range = '*'; - } - } - - // add back @ scope suffix - if (isScoped) { - name = `@${name}`; - } - - return { name, range, hasVersion }; -} - -/***/ }), -/* 38 */ -/***/ (function(module, exports, __webpack_require__) { - -/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;/** - * @license - * Lodash - * Copyright JS Foundation and other contributors - * Released under MIT license - * Based on Underscore.js 1.8.3 - * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors - */ -;(function() { - - /** Used as a safe reference for `undefined` in pre-ES5 environments. */ - var undefined; - - /** Used as the semantic version number. */ - var VERSION = '4.17.10'; - - /** Used as the size to enable large array optimizations. */ - var LARGE_ARRAY_SIZE = 200; - - /** Error message constants. */ - var CORE_ERROR_TEXT = 'Unsupported core-js use. Try https://npms.io/search?q=ponyfill.', - FUNC_ERROR_TEXT = 'Expected a function'; - - /** Used to stand-in for `undefined` hash values. */ - var HASH_UNDEFINED = '__lodash_hash_undefined__'; - - /** Used as the maximum memoize cache size. */ - var MAX_MEMOIZE_SIZE = 500; - - /** Used as the internal argument placeholder. */ - var PLACEHOLDER = '__lodash_placeholder__'; - - /** Used to compose bitmasks for cloning. */ - var CLONE_DEEP_FLAG = 1, - CLONE_FLAT_FLAG = 2, - CLONE_SYMBOLS_FLAG = 4; - - /** Used to compose bitmasks for value comparisons. */ - var COMPARE_PARTIAL_FLAG = 1, - COMPARE_UNORDERED_FLAG = 2; - - /** Used to compose bitmasks for function metadata. */ - var WRAP_BIND_FLAG = 1, - WRAP_BIND_KEY_FLAG = 2, - WRAP_CURRY_BOUND_FLAG = 4, - WRAP_CURRY_FLAG = 8, - WRAP_CURRY_RIGHT_FLAG = 16, - WRAP_PARTIAL_FLAG = 32, - WRAP_PARTIAL_RIGHT_FLAG = 64, - WRAP_ARY_FLAG = 128, - WRAP_REARG_FLAG = 256, - WRAP_FLIP_FLAG = 512; - - /** Used as default options for `_.truncate`. */ - var DEFAULT_TRUNC_LENGTH = 30, - DEFAULT_TRUNC_OMISSION = '...'; - - /** Used to detect hot functions by number of calls within a span of milliseconds. */ - var HOT_COUNT = 800, - HOT_SPAN = 16; - - /** Used to indicate the type of lazy iteratees. */ - var LAZY_FILTER_FLAG = 1, - LAZY_MAP_FLAG = 2, - LAZY_WHILE_FLAG = 3; - - /** Used as references for various `Number` constants. */ - var INFINITY = 1 / 0, - MAX_SAFE_INTEGER = 9007199254740991, - MAX_INTEGER = 1.7976931348623157e+308, - NAN = 0 / 0; - - /** Used as references for the maximum length and index of an array. 
*/ - var MAX_ARRAY_LENGTH = 4294967295, - MAX_ARRAY_INDEX = MAX_ARRAY_LENGTH - 1, - HALF_MAX_ARRAY_LENGTH = MAX_ARRAY_LENGTH >>> 1; - - /** Used to associate wrap methods with their bit flags. */ - var wrapFlags = [ - ['ary', WRAP_ARY_FLAG], - ['bind', WRAP_BIND_FLAG], - ['bindKey', WRAP_BIND_KEY_FLAG], - ['curry', WRAP_CURRY_FLAG], - ['curryRight', WRAP_CURRY_RIGHT_FLAG], - ['flip', WRAP_FLIP_FLAG], - ['partial', WRAP_PARTIAL_FLAG], - ['partialRight', WRAP_PARTIAL_RIGHT_FLAG], - ['rearg', WRAP_REARG_FLAG] - ]; - - /** `Object#toString` result references. */ - var argsTag = '[object Arguments]', - arrayTag = '[object Array]', - asyncTag = '[object AsyncFunction]', - boolTag = '[object Boolean]', - dateTag = '[object Date]', - domExcTag = '[object DOMException]', - errorTag = '[object Error]', - funcTag = '[object Function]', - genTag = '[object GeneratorFunction]', - mapTag = '[object Map]', - numberTag = '[object Number]', - nullTag = '[object Null]', - objectTag = '[object Object]', - promiseTag = '[object Promise]', - proxyTag = '[object Proxy]', - regexpTag = '[object RegExp]', - setTag = '[object Set]', - stringTag = '[object String]', - symbolTag = '[object Symbol]', - undefinedTag = '[object Undefined]', - weakMapTag = '[object WeakMap]', - weakSetTag = '[object WeakSet]'; - - var arrayBufferTag = '[object ArrayBuffer]', - dataViewTag = '[object DataView]', - float32Tag = '[object Float32Array]', - float64Tag = '[object Float64Array]', - int8Tag = '[object Int8Array]', - int16Tag = '[object Int16Array]', - int32Tag = '[object Int32Array]', - uint8Tag = '[object Uint8Array]', - uint8ClampedTag = '[object Uint8ClampedArray]', - uint16Tag = '[object Uint16Array]', - uint32Tag = '[object Uint32Array]'; - - /** Used to match empty string literals in compiled template source. */ - var reEmptyStringLeading = /\b__p \+= '';/g, - reEmptyStringMiddle = /\b(__p \+=) '' \+/g, - reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g; - - /** Used to match HTML entities and HTML characters. */ - var reEscapedHtml = /&(?:amp|lt|gt|quot|#39);/g, - reUnescapedHtml = /[&<>"']/g, - reHasEscapedHtml = RegExp(reEscapedHtml.source), - reHasUnescapedHtml = RegExp(reUnescapedHtml.source); - - /** Used to match template delimiters. */ - var reEscape = /<%-([\s\S]+?)%>/g, - reEvaluate = /<%([\s\S]+?)%>/g, - reInterpolate = /<%=([\s\S]+?)%>/g; - - /** Used to match property names within property paths. */ - var reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/, - reIsPlainProp = /^\w*$/, - rePropName = /[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g; - - /** - * Used to match `RegExp` - * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns). - */ - var reRegExpChar = /[\\^$.*+?()[\]{}|]/g, - reHasRegExpChar = RegExp(reRegExpChar.source); - - /** Used to match leading and trailing whitespace. */ - var reTrim = /^\s+|\s+$/g, - reTrimStart = /^\s+/, - reTrimEnd = /\s+$/; - - /** Used to match wrap detail comments. */ - var reWrapComment = /\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/, - reWrapDetails = /\{\n\/\* \[wrapped with (.+)\] \*/, - reSplitDetails = /,? & /; - - /** Used to match words composed of alphanumeric characters. */ - var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g; - - /** Used to match backslashes in property paths. */ - var reEscapeChar = /\\(\\)?/g; - - /** - * Used to match - * [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components). 
- */ - var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g; - - /** Used to match `RegExp` flags from their coerced string values. */ - var reFlags = /\w*$/; - - /** Used to detect bad signed hexadecimal string values. */ - var reIsBadHex = /^[-+]0x[0-9a-f]+$/i; - - /** Used to detect binary string values. */ - var reIsBinary = /^0b[01]+$/i; - - /** Used to detect host constructors (Safari). */ - var reIsHostCtor = /^\[object .+?Constructor\]$/; - - /** Used to detect octal string values. */ - var reIsOctal = /^0o[0-7]+$/i; - - /** Used to detect unsigned integer values. */ - var reIsUint = /^(?:0|[1-9]\d*)$/; - - /** Used to match Latin Unicode letters (excluding mathematical operators). */ - var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g; - - /** Used to ensure capturing order of template delimiters. */ - var reNoMatch = /($^)/; - - /** Used to match unescaped characters in compiled string literals. */ - var reUnescapedString = /['\n\r\u2028\u2029\\]/g; - - /** Used to compose unicode character classes. */ - var rsAstralRange = '\\ud800-\\udfff', - rsComboMarksRange = '\\u0300-\\u036f', - reComboHalfMarksRange = '\\ufe20-\\ufe2f', - rsComboSymbolsRange = '\\u20d0-\\u20ff', - rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange, - rsDingbatRange = '\\u2700-\\u27bf', - rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff', - rsMathOpRange = '\\xac\\xb1\\xd7\\xf7', - rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf', - rsPunctuationRange = '\\u2000-\\u206f', - rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000', - rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde', - rsVarRange = '\\ufe0e\\ufe0f', - rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange; - - /** Used to compose unicode capture groups. */ - var rsApos = "['\u2019]", - rsAstral = '[' + rsAstralRange + ']', - rsBreak = '[' + rsBreakRange + ']', - rsCombo = '[' + rsComboRange + ']', - rsDigits = '\\d+', - rsDingbat = '[' + rsDingbatRange + ']', - rsLower = '[' + rsLowerRange + ']', - rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']', - rsFitz = '\\ud83c[\\udffb-\\udfff]', - rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')', - rsNonAstral = '[^' + rsAstralRange + ']', - rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}', - rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]', - rsUpper = '[' + rsUpperRange + ']', - rsZWJ = '\\u200d'; - - /** Used to compose unicode regexes. */ - var rsMiscLower = '(?:' + rsLower + '|' + rsMisc + ')', - rsMiscUpper = '(?:' + rsUpper + '|' + rsMisc + ')', - rsOptContrLower = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?', - rsOptContrUpper = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?', - reOptMod = rsModifier + '?', - rsOptVar = '[' + rsVarRange + ']?', - rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*', - rsOrdLower = '\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])', - rsOrdUpper = '\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])', - rsSeq = rsOptVar + reOptMod + rsOptJoin, - rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq, - rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')'; - - /** Used to match apostrophes. 
*/ - var reApos = RegExp(rsApos, 'g'); - - /** - * Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and - * [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols). - */ - var reComboMark = RegExp(rsCombo, 'g'); - - /** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). */ - var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g'); - - /** Used to match complex or compound words. */ - var reUnicodeWord = RegExp([ - rsUpper + '?' + rsLower + '+' + rsOptContrLower + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')', - rsMiscUpper + '+' + rsOptContrUpper + '(?=' + [rsBreak, rsUpper + rsMiscLower, '$'].join('|') + ')', - rsUpper + '?' + rsMiscLower + '+' + rsOptContrLower, - rsUpper + '+' + rsOptContrUpper, - rsOrdUpper, - rsOrdLower, - rsDigits, - rsEmoji - ].join('|'), 'g'); - - /** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */ - var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']'); - - /** Used to detect strings that need a more robust regexp to match words. */ - var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2,}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/; - - /** Used to assign default `context` object properties. */ - var contextProps = [ - 'Array', 'Buffer', 'DataView', 'Date', 'Error', 'Float32Array', 'Float64Array', - 'Function', 'Int8Array', 'Int16Array', 'Int32Array', 'Map', 'Math', 'Object', - 'Promise', 'RegExp', 'Set', 'String', 'Symbol', 'TypeError', 'Uint8Array', - 'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', 'WeakMap', - '_', 'clearTimeout', 'isFinite', 'parseInt', 'setTimeout' - ]; - - /** Used to make template sourceURLs easier to identify. */ - var templateCounter = -1; - - /** Used to identify `toStringTag` values of typed arrays. */ - var typedArrayTags = {}; - typedArrayTags[float32Tag] = typedArrayTags[float64Tag] = - typedArrayTags[int8Tag] = typedArrayTags[int16Tag] = - typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] = - typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] = - typedArrayTags[uint32Tag] = true; - typedArrayTags[argsTag] = typedArrayTags[arrayTag] = - typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] = - typedArrayTags[dataViewTag] = typedArrayTags[dateTag] = - typedArrayTags[errorTag] = typedArrayTags[funcTag] = - typedArrayTags[mapTag] = typedArrayTags[numberTag] = - typedArrayTags[objectTag] = typedArrayTags[regexpTag] = - typedArrayTags[setTag] = typedArrayTags[stringTag] = - typedArrayTags[weakMapTag] = false; - - /** Used to identify `toStringTag` values supported by `_.clone`. 
*/ - var cloneableTags = {}; - cloneableTags[argsTag] = cloneableTags[arrayTag] = - cloneableTags[arrayBufferTag] = cloneableTags[dataViewTag] = - cloneableTags[boolTag] = cloneableTags[dateTag] = - cloneableTags[float32Tag] = cloneableTags[float64Tag] = - cloneableTags[int8Tag] = cloneableTags[int16Tag] = - cloneableTags[int32Tag] = cloneableTags[mapTag] = - cloneableTags[numberTag] = cloneableTags[objectTag] = - cloneableTags[regexpTag] = cloneableTags[setTag] = - cloneableTags[stringTag] = cloneableTags[symbolTag] = - cloneableTags[uint8Tag] = cloneableTags[uint8ClampedTag] = - cloneableTags[uint16Tag] = cloneableTags[uint32Tag] = true; - cloneableTags[errorTag] = cloneableTags[funcTag] = - cloneableTags[weakMapTag] = false; - - /** Used to map Latin Unicode letters to basic Latin letters. */ - var deburredLetters = { - // Latin-1 Supplement block. - '\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A', - '\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a', - '\xc7': 'C', '\xe7': 'c', - '\xd0': 'D', '\xf0': 'd', - '\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E', - '\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e', - '\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I', - '\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i', - '\xd1': 'N', '\xf1': 'n', - '\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O', - '\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o', - '\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U', - '\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u', - '\xdd': 'Y', '\xfd': 'y', '\xff': 'y', - '\xc6': 'Ae', '\xe6': 'ae', - '\xde': 'Th', '\xfe': 'th', - '\xdf': 'ss', - // Latin Extended-A block. - '\u0100': 'A', '\u0102': 'A', '\u0104': 'A', - '\u0101': 'a', '\u0103': 'a', '\u0105': 'a', - '\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C', - '\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c', - '\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd', - '\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E', - '\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e', - '\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G', - '\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g', - '\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h', - '\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I', - '\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i', - '\u0134': 'J', '\u0135': 'j', - '\u0136': 'K', '\u0137': 'k', '\u0138': 'k', - '\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L', - '\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l', - '\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N', - '\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 'n', - '\u014c': 'O', '\u014e': 'O', '\u0150': 'O', - '\u014d': 'o', '\u014f': 'o', '\u0151': 'o', - '\u0154': 'R', '\u0156': 'R', '\u0158': 'R', - '\u0155': 'r', '\u0157': 'r', '\u0159': 'r', - '\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S', - '\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's', - '\u0162': 'T', '\u0164': 'T', '\u0166': 'T', - '\u0163': 't', '\u0165': 't', '\u0167': 't', - '\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U', - '\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u', - '\u0174': 'W', '\u0175': 'w', - '\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y', - '\u0179': 'Z', 
'\u017b': 'Z', '\u017d': 'Z', - '\u017a': 'z', '\u017c': 'z', '\u017e': 'z', - '\u0132': 'IJ', '\u0133': 'ij', - '\u0152': 'Oe', '\u0153': 'oe', - '\u0149': "'n", '\u017f': 's' - }; - - /** Used to map characters to HTML entities. */ - var htmlEscapes = { - '&': '&amp;', - '<': '&lt;', - '>': '&gt;', - '"': '&quot;', - "'": '&#39;' - }; - - /** Used to map HTML entities to characters. */ - var htmlUnescapes = { - '&amp;': '&', - '&lt;': '<', - '&gt;': '>', - '&quot;': '"', - '&#39;': "'" - }; - - /** Used to escape characters for inclusion in compiled string literals. */ - var stringEscapes = { - '\\': '\\', - "'": "'", - '\n': 'n', - '\r': 'r', - '\u2028': 'u2028', - '\u2029': 'u2029' - }; - - /** Built-in method references without a dependency on `root`. */ - var freeParseFloat = parseFloat, - freeParseInt = parseInt; - - /** Detect free variable `global` from Node.js. */ - var freeGlobal = typeof global == 'object' && global && global.Object === Object && global; - - /** Detect free variable `self`. */ - var freeSelf = typeof self == 'object' && self && self.Object === Object && self; - - /** Used as a reference to the global object. */ - var root = freeGlobal || freeSelf || Function('return this')(); - - /** Detect free variable `exports`. */ - var freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports; - - /** Detect free variable `module`. */ - var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module; - - /** Detect the popular CommonJS extension `module.exports`. */ - var moduleExports = freeModule && freeModule.exports === freeExports; - - /** Detect free variable `process` from Node.js. */ - var freeProcess = moduleExports && freeGlobal.process; - - /** Used to access faster Node.js helpers. */ - var nodeUtil = (function() { - try { - // Use `util.types` for Node.js 10+. - var types = freeModule && freeModule.require && freeModule.require('util').types; - - if (types) { - return types; - } - - // Legacy `process.binding('util')` for Node.js < 10. - return freeProcess && freeProcess.binding && freeProcess.binding('util'); - } catch (e) {} - }()); - - /* Node.js helper references. */ - var nodeIsArrayBuffer = nodeUtil && nodeUtil.isArrayBuffer, - nodeIsDate = nodeUtil && nodeUtil.isDate, - nodeIsMap = nodeUtil && nodeUtil.isMap, - nodeIsRegExp = nodeUtil && nodeUtil.isRegExp, - nodeIsSet = nodeUtil && nodeUtil.isSet, - nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray; - - /*--------------------------------------------------------------------------*/ - - /** - * A faster alternative to `Function#apply`, this function invokes `func` - * with the `this` binding of `thisArg` and the arguments of `args`. - * - * @private - * @param {Function} func The function to invoke. - * @param {*} thisArg The `this` binding of `func`. - * @param {Array} args The arguments to invoke `func` with. - * @returns {*} Returns the result of `func`. - */ - function apply(func, thisArg, args) { - switch (args.length) { - case 0: return func.call(thisArg); - case 1: return func.call(thisArg, args[0]); - case 2: return func.call(thisArg, args[0], args[1]); - case 3: return func.call(thisArg, args[0], args[1], args[2]); - } - return func.apply(thisArg, args); - } - - /** - * A specialized version of `baseAggregator` for arrays. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} setter The function to set `accumulator` values. - * @param {Function} iteratee The iteratee to transform keys.
- * @param {Object} accumulator The initial aggregated object. - * @returns {Function} Returns `accumulator`. - */ - function arrayAggregator(array, setter, iteratee, accumulator) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - var value = array[index]; - setter(accumulator, value, iteratee(value), array); - } - return accumulator; - } - - /** - * A specialized version of `_.forEach` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns `array`. - */ - function arrayEach(array, iteratee) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (iteratee(array[index], index, array) === false) { - break; - } - } - return array; - } - - /** - * A specialized version of `_.forEachRight` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns `array`. - */ - function arrayEachRight(array, iteratee) { - var length = array == null ? 0 : array.length; - - while (length--) { - if (iteratee(array[length], length, array) === false) { - break; - } - } - return array; - } - - /** - * A specialized version of `_.every` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if all elements pass the predicate check, - * else `false`. - */ - function arrayEvery(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (!predicate(array[index], index, array)) { - return false; - } - } - return true; - } - - /** - * A specialized version of `_.filter` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - */ - function arrayFilter(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index]; - if (predicate(value, index, array)) { - result[resIndex++] = value; - } - } - return result; - } - - /** - * A specialized version of `_.includes` for arrays without support for - * specifying an index to search from. - * - * @private - * @param {Array} [array] The array to inspect. - * @param {*} target The value to search for. - * @returns {boolean} Returns `true` if `target` is found, else `false`. - */ - function arrayIncludes(array, value) { - var length = array == null ? 0 : array.length; - return !!length && baseIndexOf(array, value, 0) > -1; - } - - /** - * This function is like `arrayIncludes` except that it accepts a comparator. - * - * @private - * @param {Array} [array] The array to inspect. - * @param {*} target The value to search for. - * @param {Function} comparator The comparator invoked per element. - * @returns {boolean} Returns `true` if `target` is found, else `false`. - */ - function arrayIncludesWith(array, value, comparator) { - var index = -1, - length = array == null ? 
0 : array.length; - - while (++index < length) { - if (comparator(value, array[index])) { - return true; - } - } - return false; - } - - /** - * A specialized version of `_.map` for arrays without support for iteratee - * shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - */ - function arrayMap(array, iteratee) { - var index = -1, - length = array == null ? 0 : array.length, - result = Array(length); - - while (++index < length) { - result[index] = iteratee(array[index], index, array); - } - return result; - } - - /** - * Appends the elements of `values` to `array`. - * - * @private - * @param {Array} array The array to modify. - * @param {Array} values The values to append. - * @returns {Array} Returns `array`. - */ - function arrayPush(array, values) { - var index = -1, - length = values.length, - offset = array.length; - - while (++index < length) { - array[offset + index] = values[index]; - } - return array; - } - - /** - * A specialized version of `_.reduce` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @param {boolean} [initAccum] Specify using the first element of `array` as - * the initial value. - * @returns {*} Returns the accumulated value. - */ - function arrayReduce(array, iteratee, accumulator, initAccum) { - var index = -1, - length = array == null ? 0 : array.length; - - if (initAccum && length) { - accumulator = array[++index]; - } - while (++index < length) { - accumulator = iteratee(accumulator, array[index], index, array); - } - return accumulator; - } - - /** - * A specialized version of `_.reduceRight` for arrays without support for - * iteratee shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @param {boolean} [initAccum] Specify using the last element of `array` as - * the initial value. - * @returns {*} Returns the accumulated value. - */ - function arrayReduceRight(array, iteratee, accumulator, initAccum) { - var length = array == null ? 0 : array.length; - if (initAccum && length) { - accumulator = array[--length]; - } - while (length--) { - accumulator = iteratee(accumulator, array[length], length, array); - } - return accumulator; - } - - /** - * A specialized version of `_.some` for arrays without support for iteratee - * shorthands. - * - * @private - * @param {Array} [array] The array to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - */ - function arraySome(array, predicate) { - var index = -1, - length = array == null ? 0 : array.length; - - while (++index < length) { - if (predicate(array[index], index, array)) { - return true; - } - } - return false; - } - - /** - * Gets the size of an ASCII `string`. - * - * @private - * @param {string} string The string inspect. - * @returns {number} Returns the string size. - */ - var asciiSize = baseProperty('length'); - - /** - * Converts an ASCII `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. 
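 * @example
 *
 * // (illustrative example added in this edit; not present in the deleted file)
 * asciiToArray('abc');
 * // => ['a', 'b', 'c']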
- */ - function asciiToArray(string) { - return string.split(''); - } - - /** - * Splits an ASCII `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. - * @returns {Array} Returns the words of `string`. - */ - function asciiWords(string) { - return string.match(reAsciiWord) || []; - } - - /** - * The base implementation of methods like `_.findKey` and `_.findLastKey`, - * without support for iteratee shorthands, which iterates over `collection` - * using `eachFunc`. - * - * @private - * @param {Array|Object} collection The collection to inspect. - * @param {Function} predicate The function invoked per iteration. - * @param {Function} eachFunc The function to iterate over `collection`. - * @returns {*} Returns the found element or its key, else `undefined`. - */ - function baseFindKey(collection, predicate, eachFunc) { - var result; - eachFunc(collection, function(value, key, collection) { - if (predicate(value, key, collection)) { - result = key; - return false; - } - }); - return result; - } - - /** - * The base implementation of `_.findIndex` and `_.findLastIndex` without - * support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} predicate The function invoked per iteration. - * @param {number} fromIndex The index to search from. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseFindIndex(array, predicate, fromIndex, fromRight) { - var length = array.length, - index = fromIndex + (fromRight ? 1 : -1); - - while ((fromRight ? index-- : ++index < length)) { - if (predicate(array[index], index, array)) { - return index; - } - } - return -1; - } - - /** - * The base implementation of `_.indexOf` without `fromIndex` bounds checks. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseIndexOf(array, value, fromIndex) { - return value === value - ? strictIndexOf(array, value, fromIndex) - : baseFindIndex(array, baseIsNaN, fromIndex); - } - - /** - * This function is like `baseIndexOf` except that it accepts a comparator. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @param {Function} comparator The comparator invoked per element. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function baseIndexOfWith(array, value, fromIndex, comparator) { - var index = fromIndex - 1, - length = array.length; - - while (++index < length) { - if (comparator(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * The base implementation of `_.isNaN` without support for number objects. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. - */ - function baseIsNaN(value) { - return value !== value; - } - - /** - * The base implementation of `_.mean` and `_.meanBy` without support for - * iteratee shorthands. - * - * @private - * @param {Array} array The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {number} Returns the mean. 
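 * @example
 *
 * // (illustrative example added in this edit; not present in the deleted file)
 * baseMean([4, 2, 8, 6], function(n) { return n; });
 * // => 5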
- */ - function baseMean(array, iteratee) { - var length = array == null ? 0 : array.length; - return length ? (baseSum(array, iteratee) / length) : NAN; - } - - /** - * The base implementation of `_.property` without support for deep paths. - * - * @private - * @param {string} key The key of the property to get. - * @returns {Function} Returns the new accessor function. - */ - function baseProperty(key) { - return function(object) { - return object == null ? undefined : object[key]; - }; - } - - /** - * The base implementation of `_.propertyOf` without support for deep paths. - * - * @private - * @param {Object} object The object to query. - * @returns {Function} Returns the new accessor function. - */ - function basePropertyOf(object) { - return function(key) { - return object == null ? undefined : object[key]; - }; - } - - /** - * The base implementation of `_.reduce` and `_.reduceRight`, without support - * for iteratee shorthands, which iterates over `collection` using `eachFunc`. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {*} accumulator The initial value. - * @param {boolean} initAccum Specify using the first or last element of - * `collection` as the initial value. - * @param {Function} eachFunc The function to iterate over `collection`. - * @returns {*} Returns the accumulated value. - */ - function baseReduce(collection, iteratee, accumulator, initAccum, eachFunc) { - eachFunc(collection, function(value, index, collection) { - accumulator = initAccum - ? (initAccum = false, value) - : iteratee(accumulator, value, index, collection); - }); - return accumulator; - } - - /** - * The base implementation of `_.sortBy` which uses `comparer` to define the - * sort order of `array` and replaces criteria objects with their corresponding - * values. - * - * @private - * @param {Array} array The array to sort. - * @param {Function} comparer The function to define sort order. - * @returns {Array} Returns `array`. - */ - function baseSortBy(array, comparer) { - var length = array.length; - - array.sort(comparer); - while (length--) { - array[length] = array[length].value; - } - return array; - } - - /** - * The base implementation of `_.sum` and `_.sumBy` without support for - * iteratee shorthands. - * - * @private - * @param {Array} array The array to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {number} Returns the sum. - */ - function baseSum(array, iteratee) { - var result, - index = -1, - length = array.length; - - while (++index < length) { - var current = iteratee(array[index]); - if (current !== undefined) { - result = result === undefined ? current : (result + current); - } - } - return result; - } - - /** - * The base implementation of `_.times` without support for iteratee shorthands - * or max array length checks. - * - * @private - * @param {number} n The number of times to invoke `iteratee`. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the array of results. - */ - function baseTimes(n, iteratee) { - var index = -1, - result = Array(n); - - while (++index < n) { - result[index] = iteratee(index); - } - return result; - } - - /** - * The base implementation of `_.toPairs` and `_.toPairsIn` which creates an array - * of key-value pairs for `object` corresponding to the property names of `props`. - * - * @private - * @param {Object} object The object to query. 
- * @param {Array} props The property names to get values for. - * @returns {Object} Returns the key-value pairs. - */ - function baseToPairs(object, props) { - return arrayMap(props, function(key) { - return [key, object[key]]; - }); - } - - /** - * The base implementation of `_.unary` without support for storing metadata. - * - * @private - * @param {Function} func The function to cap arguments for. - * @returns {Function} Returns the new capped function. - */ - function baseUnary(func) { - return function(value) { - return func(value); - }; - } - - /** - * The base implementation of `_.values` and `_.valuesIn` which creates an - * array of `object` property values corresponding to the property names - * of `props`. - * - * @private - * @param {Object} object The object to query. - * @param {Array} props The property names to get values for. - * @returns {Object} Returns the array of property values. - */ - function baseValues(object, props) { - return arrayMap(props, function(key) { - return object[key]; - }); - } - - /** - * Checks if a `cache` value for `key` exists. - * - * @private - * @param {Object} cache The cache to query. - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function cacheHas(cache, key) { - return cache.has(key); - } - - /** - * Used by `_.trim` and `_.trimStart` to get the index of the first string symbol - * that is not found in the character symbols. - * - * @private - * @param {Array} strSymbols The string symbols to inspect. - * @param {Array} chrSymbols The character symbols to find. - * @returns {number} Returns the index of the first unmatched string symbol. - */ - function charsStartIndex(strSymbols, chrSymbols) { - var index = -1, - length = strSymbols.length; - - while (++index < length && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} - return index; - } - - /** - * Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol - * that is not found in the character symbols. - * - * @private - * @param {Array} strSymbols The string symbols to inspect. - * @param {Array} chrSymbols The character symbols to find. - * @returns {number} Returns the index of the last unmatched string symbol. - */ - function charsEndIndex(strSymbols, chrSymbols) { - var index = strSymbols.length; - - while (index-- && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {} - return index; - } - - /** - * Gets the number of `placeholder` occurrences in `array`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} placeholder The placeholder to search for. - * @returns {number} Returns the placeholder count. - */ - function countHolders(array, placeholder) { - var length = array.length, - result = 0; - - while (length--) { - if (array[length] === placeholder) { - ++result; - } - } - return result; - } - - /** - * Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A - * letters to basic Latin letters. - * - * @private - * @param {string} letter The matched letter to deburr. - * @returns {string} Returns the deburred letter. - */ - var deburrLetter = basePropertyOf(deburredLetters); - - /** - * Used by `_.escape` to convert characters to HTML entities. - * - * @private - * @param {string} chr The matched character to escape. - * @returns {string} Returns the escaped character. 
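 * @example
 *
 * // (illustrative example added in this edit; not present in the deleted file,
 * // and relying on the `htmlEscapes` map defined earlier in this module)
 * escapeHtmlChar('<');
 * // => '&lt;'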
- */ - var escapeHtmlChar = basePropertyOf(htmlEscapes); - - /** - * Used by `_.template` to escape characters for inclusion in compiled string literals. - * - * @private - * @param {string} chr The matched character to escape. - * @returns {string} Returns the escaped character. - */ - function escapeStringChar(chr) { - return '\\' + stringEscapes[chr]; - } - - /** - * Gets the value at `key` of `object`. - * - * @private - * @param {Object} [object] The object to query. - * @param {string} key The key of the property to get. - * @returns {*} Returns the property value. - */ - function getValue(object, key) { - return object == null ? undefined : object[key]; - } - - /** - * Checks if `string` contains Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a symbol is found, else `false`. - */ - function hasUnicode(string) { - return reHasUnicode.test(string); - } - - /** - * Checks if `string` contains a word composed of Unicode symbols. - * - * @private - * @param {string} string The string to inspect. - * @returns {boolean} Returns `true` if a word is found, else `false`. - */ - function hasUnicodeWord(string) { - return reHasUnicodeWord.test(string); - } - - /** - * Converts `iterator` to an array. - * - * @private - * @param {Object} iterator The iterator to convert. - * @returns {Array} Returns the converted array. - */ - function iteratorToArray(iterator) { - var data, - result = []; - - while (!(data = iterator.next()).done) { - result.push(data.value); - } - return result; - } - - /** - * Converts `map` to its key-value pairs. - * - * @private - * @param {Object} map The map to convert. - * @returns {Array} Returns the key-value pairs. - */ - function mapToArray(map) { - var index = -1, - result = Array(map.size); - - map.forEach(function(value, key) { - result[++index] = [key, value]; - }); - return result; - } - - /** - * Creates a unary function that invokes `func` with its argument transformed. - * - * @private - * @param {Function} func The function to wrap. - * @param {Function} transform The argument transform. - * @returns {Function} Returns the new function. - */ - function overArg(func, transform) { - return function(arg) { - return func(transform(arg)); - }; - } - - /** - * Replaces all `placeholder` elements in `array` with an internal placeholder - * and returns an array of their indexes. - * - * @private - * @param {Array} array The array to modify. - * @param {*} placeholder The placeholder to replace. - * @returns {Array} Returns the new array of placeholder indexes. - */ - function replaceHolders(array, placeholder) { - var index = -1, - length = array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index]; - if (value === placeholder || value === PLACEHOLDER) { - array[index] = PLACEHOLDER; - result[resIndex++] = index; - } - } - return result; - } - - /** - * Gets the value at `key`, unless `key` is "__proto__". - * - * @private - * @param {Object} object The object to query. - * @param {string} key The key of the property to get. - * @returns {*} Returns the property value. - */ - function safeGet(object, key) { - return key == '__proto__' - ? undefined - : object[key]; - } - - /** - * Converts `set` to an array of its values. - * - * @private - * @param {Object} set The set to convert. - * @returns {Array} Returns the values. 
- */ - function setToArray(set) { - var index = -1, - result = Array(set.size); - - set.forEach(function(value) { - result[++index] = value; - }); - return result; - } - - /** - * Converts `set` to its value-value pairs. - * - * @private - * @param {Object} set The set to convert. - * @returns {Array} Returns the value-value pairs. - */ - function setToPairs(set) { - var index = -1, - result = Array(set.size); - - set.forEach(function(value) { - result[++index] = [value, value]; - }); - return result; - } - - /** - * A specialized version of `_.indexOf` which performs strict equality - * comparisons of values, i.e. `===`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function strictIndexOf(array, value, fromIndex) { - var index = fromIndex - 1, - length = array.length; - - while (++index < length) { - if (array[index] === value) { - return index; - } - } - return -1; - } - - /** - * A specialized version of `_.lastIndexOf` which performs strict equality - * comparisons of values, i.e. `===`. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} fromIndex The index to search from. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function strictLastIndexOf(array, value, fromIndex) { - var index = fromIndex + 1; - while (index--) { - if (array[index] === value) { - return index; - } - } - return index; - } - - /** - * Gets the number of symbols in `string`. - * - * @private - * @param {string} string The string to inspect. - * @returns {number} Returns the string size. - */ - function stringSize(string) { - return hasUnicode(string) - ? unicodeSize(string) - : asciiSize(string); - } - - /** - * Converts `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ - function stringToArray(string) { - return hasUnicode(string) - ? unicodeToArray(string) - : asciiToArray(string); - } - - /** - * Used by `_.unescape` to convert HTML entities to characters. - * - * @private - * @param {string} chr The matched character to unescape. - * @returns {string} Returns the unescaped character. - */ - var unescapeHtmlChar = basePropertyOf(htmlUnescapes); - - /** - * Gets the size of a Unicode `string`. - * - * @private - * @param {string} string The string inspect. - * @returns {number} Returns the string size. - */ - function unicodeSize(string) { - var result = reUnicode.lastIndex = 0; - while (reUnicode.test(string)) { - ++result; - } - return result; - } - - /** - * Converts a Unicode `string` to an array. - * - * @private - * @param {string} string The string to convert. - * @returns {Array} Returns the converted array. - */ - function unicodeToArray(string) { - return string.match(reUnicode) || []; - } - - /** - * Splits a Unicode `string` into an array of its words. - * - * @private - * @param {string} The string to inspect. - * @returns {Array} Returns the words of `string`. - */ - function unicodeWords(string) { - return string.match(reUnicodeWord) || []; - } - - /*--------------------------------------------------------------------------*/ - - /** - * Create a new pristine `lodash` function using the `context` object. 
- * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Util - * @param {Object} [context=root] The context object. - * @returns {Function} Returns a new `lodash` function. - * @example - * - * _.mixin({ 'foo': _.constant('foo') }); - * - * var lodash = _.runInContext(); - * lodash.mixin({ 'bar': lodash.constant('bar') }); - * - * _.isFunction(_.foo); - * // => true - * _.isFunction(_.bar); - * // => false - * - * lodash.isFunction(lodash.foo); - * // => false - * lodash.isFunction(lodash.bar); - * // => true - * - * // Create a suped-up `defer` in Node.js. - * var defer = _.runInContext({ 'setTimeout': setImmediate }).defer; - */ - var runInContext = (function runInContext(context) { - context = context == null ? root : _.defaults(root.Object(), context, _.pick(root, contextProps)); - - /** Built-in constructor references. */ - var Array = context.Array, - Date = context.Date, - Error = context.Error, - Function = context.Function, - Math = context.Math, - Object = context.Object, - RegExp = context.RegExp, - String = context.String, - TypeError = context.TypeError; - - /** Used for built-in method references. */ - var arrayProto = Array.prototype, - funcProto = Function.prototype, - objectProto = Object.prototype; - - /** Used to detect overreaching core-js shims. */ - var coreJsData = context['__core-js_shared__']; - - /** Used to resolve the decompiled source of functions. */ - var funcToString = funcProto.toString; - - /** Used to check objects for own properties. */ - var hasOwnProperty = objectProto.hasOwnProperty; - - /** Used to generate unique IDs. */ - var idCounter = 0; - - /** Used to detect methods masquerading as native. */ - var maskSrcKey = (function() { - var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || ''); - return uid ? ('Symbol(src)_1.' + uid) : ''; - }()); - - /** - * Used to resolve the - * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) - * of values. - */ - var nativeObjectToString = objectProto.toString; - - /** Used to infer the `Object` constructor. */ - var objectCtorString = funcToString.call(Object); - - /** Used to restore the original `_` reference in `_.noConflict`. */ - var oldDash = root._; - - /** Used to detect if a method is native. */ - var reIsNative = RegExp('^' + - funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&') - .replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$' - ); - - /** Built-in value references. */ - var Buffer = moduleExports ? context.Buffer : undefined, - Symbol = context.Symbol, - Uint8Array = context.Uint8Array, - allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined, - getPrototype = overArg(Object.getPrototypeOf, Object), - objectCreate = Object.create, - propertyIsEnumerable = objectProto.propertyIsEnumerable, - splice = arrayProto.splice, - spreadableSymbol = Symbol ? Symbol.isConcatSpreadable : undefined, - symIterator = Symbol ? Symbol.iterator : undefined, - symToStringTag = Symbol ? Symbol.toStringTag : undefined; - - var defineProperty = (function() { - try { - var func = getNative(Object, 'defineProperty'); - func({}, '', {}); - return func; - } catch (e) {} - }()); - - /** Mocked built-ins. 
*/ - var ctxClearTimeout = context.clearTimeout !== root.clearTimeout && context.clearTimeout, - ctxNow = Date && Date.now !== root.Date.now && Date.now, - ctxSetTimeout = context.setTimeout !== root.setTimeout && context.setTimeout; - - /* Built-in method references for those with the same name as other `lodash` methods. */ - var nativeCeil = Math.ceil, - nativeFloor = Math.floor, - nativeGetSymbols = Object.getOwnPropertySymbols, - nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined, - nativeIsFinite = context.isFinite, - nativeJoin = arrayProto.join, - nativeKeys = overArg(Object.keys, Object), - nativeMax = Math.max, - nativeMin = Math.min, - nativeNow = Date.now, - nativeParseInt = context.parseInt, - nativeRandom = Math.random, - nativeReverse = arrayProto.reverse; - - /* Built-in method references that are verified to be native. */ - var DataView = getNative(context, 'DataView'), - Map = getNative(context, 'Map'), - Promise = getNative(context, 'Promise'), - Set = getNative(context, 'Set'), - WeakMap = getNative(context, 'WeakMap'), - nativeCreate = getNative(Object, 'create'); - - /** Used to store function metadata. */ - var metaMap = WeakMap && new WeakMap; - - /** Used to lookup unminified function names. */ - var realNames = {}; - - /** Used to detect maps, sets, and weakmaps. */ - var dataViewCtorString = toSource(DataView), - mapCtorString = toSource(Map), - promiseCtorString = toSource(Promise), - setCtorString = toSource(Set), - weakMapCtorString = toSource(WeakMap); - - /** Used to convert symbols to primitives and strings. */ - var symbolProto = Symbol ? Symbol.prototype : undefined, - symbolValueOf = symbolProto ? symbolProto.valueOf : undefined, - symbolToString = symbolProto ? symbolProto.toString : undefined; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a `lodash` object which wraps `value` to enable implicit method - * chain sequences. Methods that operate on and return arrays, collections, - * and functions can be chained together. Methods that retrieve a single value - * or may return a primitive value will automatically end the chain sequence - * and return the unwrapped value. Otherwise, the value must be unwrapped - * with `_#value`. - * - * Explicit chain sequences, which must be unwrapped with `_#value`, may be - * enabled using `_.chain`. - * - * The execution of chained methods is lazy, that is, it's deferred until - * `_#value` is implicitly or explicitly called. - * - * Lazy evaluation allows several methods to support shortcut fusion. - * Shortcut fusion is an optimization to merge iteratee calls; this avoids - * the creation of intermediate arrays and can greatly reduce the number of - * iteratee executions. Sections of a chain sequence qualify for shortcut - * fusion if the section is applied to an array and iteratees accept only - * one argument. The heuristic for whether a section qualifies for shortcut - * fusion is subject to change. - * - * Chaining is supported in custom builds as long as the `_#value` method is - * directly or indirectly included in the build. - * - * In addition to lodash methods, wrappers have `Array` and `String` methods. 
- * - * The wrapper `Array` methods are: - * `concat`, `join`, `pop`, `push`, `shift`, `sort`, `splice`, and `unshift` - * - * The wrapper `String` methods are: - * `replace` and `split` - * - * The wrapper methods that support shortcut fusion are: - * `at`, `compact`, `drop`, `dropRight`, `dropWhile`, `filter`, `find`, - * `findLast`, `head`, `initial`, `last`, `map`, `reject`, `reverse`, `slice`, - * `tail`, `take`, `takeRight`, `takeRightWhile`, `takeWhile`, and `toArray` - * - * The chainable wrapper methods are: - * `after`, `ary`, `assign`, `assignIn`, `assignInWith`, `assignWith`, `at`, - * `before`, `bind`, `bindAll`, `bindKey`, `castArray`, `chain`, `chunk`, - * `commit`, `compact`, `concat`, `conforms`, `constant`, `countBy`, `create`, - * `curry`, `debounce`, `defaults`, `defaultsDeep`, `defer`, `delay`, - * `difference`, `differenceBy`, `differenceWith`, `drop`, `dropRight`, - * `dropRightWhile`, `dropWhile`, `extend`, `extendWith`, `fill`, `filter`, - * `flatMap`, `flatMapDeep`, `flatMapDepth`, `flatten`, `flattenDeep`, - * `flattenDepth`, `flip`, `flow`, `flowRight`, `fromPairs`, `functions`, - * `functionsIn`, `groupBy`, `initial`, `intersection`, `intersectionBy`, - * `intersectionWith`, `invert`, `invertBy`, `invokeMap`, `iteratee`, `keyBy`, - * `keys`, `keysIn`, `map`, `mapKeys`, `mapValues`, `matches`, `matchesProperty`, - * `memoize`, `merge`, `mergeWith`, `method`, `methodOf`, `mixin`, `negate`, - * `nthArg`, `omit`, `omitBy`, `once`, `orderBy`, `over`, `overArgs`, - * `overEvery`, `overSome`, `partial`, `partialRight`, `partition`, `pick`, - * `pickBy`, `plant`, `property`, `propertyOf`, `pull`, `pullAll`, `pullAllBy`, - * `pullAllWith`, `pullAt`, `push`, `range`, `rangeRight`, `rearg`, `reject`, - * `remove`, `rest`, `reverse`, `sampleSize`, `set`, `setWith`, `shuffle`, - * `slice`, `sort`, `sortBy`, `splice`, `spread`, `tail`, `take`, `takeRight`, - * `takeRightWhile`, `takeWhile`, `tap`, `throttle`, `thru`, `toArray`, - * `toPairs`, `toPairsIn`, `toPath`, `toPlainObject`, `transform`, `unary`, - * `union`, `unionBy`, `unionWith`, `uniq`, `uniqBy`, `uniqWith`, `unset`, - * `unshift`, `unzip`, `unzipWith`, `update`, `updateWith`, `values`, - * `valuesIn`, `without`, `wrap`, `xor`, `xorBy`, `xorWith`, `zip`, - * `zipObject`, `zipObjectDeep`, and `zipWith` - * - * The wrapper methods that are **not** chainable by default are: - * `add`, `attempt`, `camelCase`, `capitalize`, `ceil`, `clamp`, `clone`, - * `cloneDeep`, `cloneDeepWith`, `cloneWith`, `conformsTo`, `deburr`, - * `defaultTo`, `divide`, `each`, `eachRight`, `endsWith`, `eq`, `escape`, - * `escapeRegExp`, `every`, `find`, `findIndex`, `findKey`, `findLast`, - * `findLastIndex`, `findLastKey`, `first`, `floor`, `forEach`, `forEachRight`, - * `forIn`, `forInRight`, `forOwn`, `forOwnRight`, `get`, `gt`, `gte`, `has`, - * `hasIn`, `head`, `identity`, `includes`, `indexOf`, `inRange`, `invoke`, - * `isArguments`, `isArray`, `isArrayBuffer`, `isArrayLike`, `isArrayLikeObject`, - * `isBoolean`, `isBuffer`, `isDate`, `isElement`, `isEmpty`, `isEqual`, - * `isEqualWith`, `isError`, `isFinite`, `isFunction`, `isInteger`, `isLength`, - * `isMap`, `isMatch`, `isMatchWith`, `isNaN`, `isNative`, `isNil`, `isNull`, - * `isNumber`, `isObject`, `isObjectLike`, `isPlainObject`, `isRegExp`, - * `isSafeInteger`, `isSet`, `isString`, `isUndefined`, `isTypedArray`, - * `isWeakMap`, `isWeakSet`, `join`, `kebabCase`, `last`, `lastIndexOf`, - * `lowerCase`, `lowerFirst`, `lt`, `lte`, `max`, `maxBy`, `mean`, `meanBy`, - * `min`, 
`minBy`, `multiply`, `noConflict`, `noop`, `now`, `nth`, `pad`, - * `padEnd`, `padStart`, `parseInt`, `pop`, `random`, `reduce`, `reduceRight`, - * `repeat`, `result`, `round`, `runInContext`, `sample`, `shift`, `size`, - * `snakeCase`, `some`, `sortedIndex`, `sortedIndexBy`, `sortedLastIndex`, - * `sortedLastIndexBy`, `startCase`, `startsWith`, `stubArray`, `stubFalse`, - * `stubObject`, `stubString`, `stubTrue`, `subtract`, `sum`, `sumBy`, - * `template`, `times`, `toFinite`, `toInteger`, `toJSON`, `toLength`, - * `toLower`, `toNumber`, `toSafeInteger`, `toString`, `toUpper`, `trim`, - * `trimEnd`, `trimStart`, `truncate`, `unescape`, `uniqueId`, `upperCase`, - * `upperFirst`, `value`, and `words` - * - * @name _ - * @constructor - * @category Seq - * @param {*} value The value to wrap in a `lodash` instance. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * function square(n) { - * return n * n; - * } - * - * var wrapped = _([1, 2, 3]); - * - * // Returns an unwrapped value. - * wrapped.reduce(_.add); - * // => 6 - * - * // Returns a wrapped value. - * var squares = wrapped.map(square); - * - * _.isArray(squares); - * // => false - * - * _.isArray(squares.value()); - * // => true - */ - function lodash(value) { - if (isObjectLike(value) && !isArray(value) && !(value instanceof LazyWrapper)) { - if (value instanceof LodashWrapper) { - return value; - } - if (hasOwnProperty.call(value, '__wrapped__')) { - return wrapperClone(value); - } - } - return new LodashWrapper(value); - } - - /** - * The base implementation of `_.create` without support for assigning - * properties to the created object. - * - * @private - * @param {Object} proto The object to inherit from. - * @returns {Object} Returns the new object. - */ - var baseCreate = (function() { - function object() {} - return function(proto) { - if (!isObject(proto)) { - return {}; - } - if (objectCreate) { - return objectCreate(proto); - } - object.prototype = proto; - var result = new object; - object.prototype = undefined; - return result; - }; - }()); - - /** - * The function whose prototype chain sequence wrappers inherit from. - * - * @private - */ - function baseLodash() { - // No operation performed. - } - - /** - * The base constructor for creating `lodash` wrapper objects. - * - * @private - * @param {*} value The value to wrap. - * @param {boolean} [chainAll] Enable explicit method chain sequences. - */ - function LodashWrapper(value, chainAll) { - this.__wrapped__ = value; - this.__actions__ = []; - this.__chain__ = !!chainAll; - this.__index__ = 0; - this.__values__ = undefined; - } - - /** - * By default, the template delimiters used by lodash are like those in - * embedded Ruby (ERB) as well as ES2015 template strings. Change the - * following template settings to use alternative delimiters. - * - * @static - * @memberOf _ - * @type {Object} - */ - lodash.templateSettings = { - - /** - * Used to detect `data` property values to be HTML-escaped. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'escape': reEscape, - - /** - * Used to detect code to be evaluated. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'evaluate': reEvaluate, - - /** - * Used to detect `data` property values to inject. - * - * @memberOf _.templateSettings - * @type {RegExp} - */ - 'interpolate': reInterpolate, - - /** - * Used to reference the data object in the template text. 
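The implicit/explicit distinction documented above is easiest to see side by side. A short illustration using only the public API (assuming a full lodash build is in scope as `_`):

```
var _ = require('lodash');

// Implicit chaining: a value-returning method ends the chain and unwraps.
_([1, 2, 3]).reduce(_.add);
// => 6

// Array-returning methods stay wrapped until `.value()` is called.
var squares = _([1, 2, 3]).map(function(n) { return n * n; });
squares.value();
// => [1, 4, 9]

// Explicit chaining via `_.chain`: even single-value results stay wrapped.
_.chain([1, 2, 3]).map(function(n) { return n * n; }).head().value();
// => 1
```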
- * - * @memberOf _.templateSettings - * @type {string} - */ - 'variable': '', - - /** - * Used to import variables into the compiled template. - * - * @memberOf _.templateSettings - * @type {Object} - */ - 'imports': { - - /** - * A reference to the `lodash` function. - * - * @memberOf _.templateSettings.imports - * @type {Function} - */ - '_': lodash - } - }; - - // Ensure wrappers are instances of `baseLodash`. - lodash.prototype = baseLodash.prototype; - lodash.prototype.constructor = lodash; - - LodashWrapper.prototype = baseCreate(baseLodash.prototype); - LodashWrapper.prototype.constructor = LodashWrapper; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a lazy wrapper object which wraps `value` to enable lazy evaluation. - * - * @private - * @constructor - * @param {*} value The value to wrap. - */ - function LazyWrapper(value) { - this.__wrapped__ = value; - this.__actions__ = []; - this.__dir__ = 1; - this.__filtered__ = false; - this.__iteratees__ = []; - this.__takeCount__ = MAX_ARRAY_LENGTH; - this.__views__ = []; - } - - /** - * Creates a clone of the lazy wrapper object. - * - * @private - * @name clone - * @memberOf LazyWrapper - * @returns {Object} Returns the cloned `LazyWrapper` object. - */ - function lazyClone() { - var result = new LazyWrapper(this.__wrapped__); - result.__actions__ = copyArray(this.__actions__); - result.__dir__ = this.__dir__; - result.__filtered__ = this.__filtered__; - result.__iteratees__ = copyArray(this.__iteratees__); - result.__takeCount__ = this.__takeCount__; - result.__views__ = copyArray(this.__views__); - return result; - } - - /** - * Reverses the direction of lazy iteration. - * - * @private - * @name reverse - * @memberOf LazyWrapper - * @returns {Object} Returns the new reversed `LazyWrapper` object. - */ - function lazyReverse() { - if (this.__filtered__) { - var result = new LazyWrapper(this); - result.__dir__ = -1; - result.__filtered__ = true; - } else { - result = this.clone(); - result.__dir__ *= -1; - } - return result; - } - - /** - * Extracts the unwrapped value from its lazy wrapper. - * - * @private - * @name value - * @memberOf LazyWrapper - * @returns {*} Returns the unwrapped value. - */ - function lazyValue() { - var array = this.__wrapped__.value(), - dir = this.__dir__, - isArr = isArray(array), - isRight = dir < 0, - arrLength = isArr ? array.length : 0, - view = getView(0, arrLength, this.__views__), - start = view.start, - end = view.end, - length = end - start, - index = isRight ? end : (start - 1), - iteratees = this.__iteratees__, - iterLength = iteratees.length, - resIndex = 0, - takeCount = nativeMin(length, this.__takeCount__); - - if (!isArr || (!isRight && arrLength == length && takeCount == length)) { - return baseWrapperValue(array, this.__actions__); - } - var result = []; - - outer: - while (length-- && resIndex < takeCount) { - index += dir; - - var iterIndex = -1, - value = array[index]; - - while (++iterIndex < iterLength) { - var data = iteratees[iterIndex], - iteratee = data.iteratee, - type = data.type, - computed = iteratee(value); - - if (type == LAZY_MAP_FLAG) { - value = computed; - } else if (!computed) { - if (type == LAZY_FILTER_FLAG) { - continue outer; - } else { - break outer; - } - } - } - result[resIndex++] = value; - } - return result; - } - - // Ensure `LazyWrapper` is an instance of `baseLodash`. 
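`LazyWrapper`, `lazyClone`, `lazyReverse`, and `lazyValue` above are the whole lazy-evaluation engine: `lazyValue` runs one fused loop over the source array, applying each queued iteratee per element and stopping once `__takeCount__` results are produced. The effect is observable with a simple call counter (`calls` and `double` are our own probe; fusion only engages for arrays of `LARGE_ARRAY_SIZE` (200) elements or more):

```
var _ = require('lodash');

var calls = 0;
function double(n) { calls++; return n * 2; }

var wrapped = _(_.range(500)).map(double).take(2);
console.log(calls);           // => 0   (nothing runs until `.value()`)

console.log(wrapped.value()); // => [0, 2]
console.log(calls);           // => 2   (not 500: `map` fused with `take`)
```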
- LazyWrapper.prototype = baseCreate(baseLodash.prototype); - LazyWrapper.prototype.constructor = LazyWrapper; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a hash object. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ - function Hash(entries) { - var index = -1, - length = entries == null ? 0 : entries.length; - - this.clear(); - while (++index < length) { - var entry = entries[index]; - this.set(entry[0], entry[1]); - } - } - - /** - * Removes all key-value entries from the hash. - * - * @private - * @name clear - * @memberOf Hash - */ - function hashClear() { - this.__data__ = nativeCreate ? nativeCreate(null) : {}; - this.size = 0; - } - - /** - * Removes `key` and its value from the hash. - * - * @private - * @name delete - * @memberOf Hash - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ - function hashDelete(key) { - var result = this.has(key) && delete this.__data__[key]; - this.size -= result ? 1 : 0; - return result; - } - - /** - * Gets the hash value for `key`. - * - * @private - * @name get - * @memberOf Hash - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function hashGet(key) { - var data = this.__data__; - if (nativeCreate) { - var result = data[key]; - return result === HASH_UNDEFINED ? undefined : result; - } - return hasOwnProperty.call(data, key) ? data[key] : undefined; - } - - /** - * Checks if a hash value for `key` exists. - * - * @private - * @name has - * @memberOf Hash - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function hashHas(key) { - var data = this.__data__; - return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key); - } - - /** - * Sets the hash `key` to `value`. - * - * @private - * @name set - * @memberOf Hash - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the hash instance. - */ - function hashSet(key, value) { - var data = this.__data__; - this.size += this.has(key) ? 0 : 1; - data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value; - return this; - } - - // Add methods to `Hash`. - Hash.prototype.clear = hashClear; - Hash.prototype['delete'] = hashDelete; - Hash.prototype.get = hashGet; - Hash.prototype.has = hashHas; - Hash.prototype.set = hashSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a list cache object. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ - function ListCache(entries) { - var index = -1, - length = entries == null ? 0 : entries.length; - - this.clear(); - while (++index < length) { - var entry = entries[index]; - this.set(entry[0], entry[1]); - } - } - - /** - * Removes all key-value entries from the list cache. - * - * @private - * @name clear - * @memberOf ListCache - */ - function listCacheClear() { - this.__data__ = []; - this.size = 0; - } - - /** - * Removes `key` and its value from the list cache. - * - * @private - * @name delete - * @memberOf ListCache - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`.
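A detail of `Hash` above worth pausing on: when `Object.create` is available, `hashSet` stores the sentinel `HASH_UNDEFINED` in place of a real `undefined`, so `hashHas` and `hashGet` can test presence with a single property read instead of an `in`/`hasOwnProperty` check. The same trick as a standalone sketch (names are ours, not lodash exports):

```
var SENTINEL = '__hash_undefined__'; // stands in for a stored `undefined`
var data = Object.create(null);      // no inherited keys to collide with

function set(key, value) { data[key] = value === undefined ? SENTINEL : value; }
function has(key) { return data[key] !== undefined; }
function get(key) {
  var result = data[key];
  return result === SENTINEL ? undefined : result;
}

set('a', undefined);
console.log(has('a')); // => true  (a stored `undefined` still counts)
console.log(has('b')); // => false
console.log(get('a')); // => undefined
```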
- */ - function listCacheDelete(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - return false; - } - var lastIndex = data.length - 1; - if (index == lastIndex) { - data.pop(); - } else { - splice.call(data, index, 1); - } - --this.size; - return true; - } - - /** - * Gets the list cache value for `key`. - * - * @private - * @name get - * @memberOf ListCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function listCacheGet(key) { - var data = this.__data__, - index = assocIndexOf(data, key); - - return index < 0 ? undefined : data[index][1]; - } - - /** - * Checks if a list cache value for `key` exists. - * - * @private - * @name has - * @memberOf ListCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function listCacheHas(key) { - return assocIndexOf(this.__data__, key) > -1; - } - - /** - * Sets the list cache `key` to `value`. - * - * @private - * @name set - * @memberOf ListCache - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the list cache instance. - */ - function listCacheSet(key, value) { - var data = this.__data__, - index = assocIndexOf(data, key); - - if (index < 0) { - ++this.size; - data.push([key, value]); - } else { - data[index][1] = value; - } - return this; - } - - // Add methods to `ListCache`. - ListCache.prototype.clear = listCacheClear; - ListCache.prototype['delete'] = listCacheDelete; - ListCache.prototype.get = listCacheGet; - ListCache.prototype.has = listCacheHas; - ListCache.prototype.set = listCacheSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a map cache object to store key-value pairs. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ - function MapCache(entries) { - var index = -1, - length = entries == null ? 0 : entries.length; - - this.clear(); - while (++index < length) { - var entry = entries[index]; - this.set(entry[0], entry[1]); - } - } - - /** - * Removes all key-value entries from the map. - * - * @private - * @name clear - * @memberOf MapCache - */ - function mapCacheClear() { - this.size = 0; - this.__data__ = { - 'hash': new Hash, - 'map': new (Map || ListCache), - 'string': new Hash - }; - } - - /** - * Removes `key` and its value from the map. - * - * @private - * @name delete - * @memberOf MapCache - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ - function mapCacheDelete(key) { - var result = getMapData(this, key)['delete'](key); - this.size -= result ? 1 : 0; - return result; - } - - /** - * Gets the map value for `key`. - * - * @private - * @name get - * @memberOf MapCache - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function mapCacheGet(key) { - return getMapData(this, key).get(key); - } - - /** - * Checks if a map value for `key` exists. - * - * @private - * @name has - * @memberOf MapCache - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function mapCacheHas(key) { - return getMapData(this, key).has(key); - } - - /** - * Sets the map `key` to `value`. 
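`mapCacheClear` above provisions three buckets, but the routing between them lives in `getMapData`, defined later in this file: keyable primitives go to one of the two `Hash` buckets ('string' for string keys, 'hash' for numbers, symbols, booleans, and `null`), while everything else, object keys included, falls through to the native `Map` (or the `ListCache` fallback). A rough sketch of that rule (our own paraphrase, not the actual lodash helper):

```
function pickBucket(mapCache, key) {
  var type = typeof key;
  var keyable = (type == 'string' || type == 'number' ||
                 type == 'symbol' || type == 'boolean')
    ? (key !== '__proto__')  // '__proto__' would poison a plain-object hash
    : (key === null);
  return keyable
    ? mapCache.__data__[type == 'string' ? 'string' : 'hash']
    : mapCache.__data__.map;
}
```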
- * - * @private - * @name set - * @memberOf MapCache - * @param {string} key The key of the value to set. - * @param {*} value The value to set. - * @returns {Object} Returns the map cache instance. - */ - function mapCacheSet(key, value) { - var data = getMapData(this, key), - size = data.size; - - data.set(key, value); - this.size += data.size == size ? 0 : 1; - return this; - } - - // Add methods to `MapCache`. - MapCache.prototype.clear = mapCacheClear; - MapCache.prototype['delete'] = mapCacheDelete; - MapCache.prototype.get = mapCacheGet; - MapCache.prototype.has = mapCacheHas; - MapCache.prototype.set = mapCacheSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates an array cache object to store unique values. - * - * @private - * @constructor - * @param {Array} [values] The values to cache. - */ - function SetCache(values) { - var index = -1, - length = values == null ? 0 : values.length; - - this.__data__ = new MapCache; - while (++index < length) { - this.add(values[index]); - } - } - - /** - * Adds `value` to the array cache. - * - * @private - * @name add - * @memberOf SetCache - * @alias push - * @param {*} value The value to cache. - * @returns {Object} Returns the cache instance. - */ - function setCacheAdd(value) { - this.__data__.set(value, HASH_UNDEFINED); - return this; - } - - /** - * Checks if `value` is in the array cache. - * - * @private - * @name has - * @memberOf SetCache - * @param {*} value The value to search for. - * @returns {boolean} Returns `true` if `value` is found, else `false`. - */ - function setCacheHas(value) { - return this.__data__.has(value); - } - - // Add methods to `SetCache`. - SetCache.prototype.add = SetCache.prototype.push = setCacheAdd; - SetCache.prototype.has = setCacheHas; - - /*------------------------------------------------------------------------*/ - - /** - * Creates a stack cache object to store key-value pairs. - * - * @private - * @constructor - * @param {Array} [entries] The key-value pairs to cache. - */ - function Stack(entries) { - var data = this.__data__ = new ListCache(entries); - this.size = data.size; - } - - /** - * Removes all key-value entries from the stack. - * - * @private - * @name clear - * @memberOf Stack - */ - function stackClear() { - this.__data__ = new ListCache; - this.size = 0; - } - - /** - * Removes `key` and its value from the stack. - * - * @private - * @name delete - * @memberOf Stack - * @param {string} key The key of the value to remove. - * @returns {boolean} Returns `true` if the entry was removed, else `false`. - */ - function stackDelete(key) { - var data = this.__data__, - result = data['delete'](key); - - this.size = data.size; - return result; - } - - /** - * Gets the stack value for `key`. - * - * @private - * @name get - * @memberOf Stack - * @param {string} key The key of the value to get. - * @returns {*} Returns the entry value. - */ - function stackGet(key) { - return this.__data__.get(key); - } - - /** - * Checks if a stack value for `key` exists. - * - * @private - * @name has - * @memberOf Stack - * @param {string} key The key of the entry to check. - * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. - */ - function stackHas(key) { - return this.__data__.has(key); - } - - /** - * Sets the stack `key` to `value`. - * - * @private - * @name set - * @memberOf Stack - * @param {string} key The key of the value to set. - * @param {*} value The value to set.
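`SetCache`'s whole surface is `add`/`push` and `has`: values map to `HASH_UNDEFINED` inside a `MapCache`, so a membership test is a single lookup. Since the class isn't exported, here is the same idea standalone (a sketch with our own names, using a native `Map`):

```
function ArraySet(values) {
  this._map = new Map();
  (values || []).forEach(this.add, this);
}
ArraySet.prototype.add = function(value) {
  this._map.set(value, true); // lodash stores HASH_UNDEFINED here
  return this;
};
ArraySet.prototype.has = function(value) {
  return this._map.has(value);
};

var seen = new ArraySet([1, 2, 3]);
console.log(seen.has(2)); // => true
console.log(seen.has(9)); // => false
```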
- * @returns {Object} Returns the stack cache instance. - */ - function stackSet(key, value) { - var data = this.__data__; - if (data instanceof ListCache) { - var pairs = data.__data__; - if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) { - pairs.push([key, value]); - this.size = ++data.size; - return this; - } - data = this.__data__ = new MapCache(pairs); - } - data.set(key, value); - this.size = data.size; - return this; - } - - // Add methods to `Stack`. - Stack.prototype.clear = stackClear; - Stack.prototype['delete'] = stackDelete; - Stack.prototype.get = stackGet; - Stack.prototype.has = stackHas; - Stack.prototype.set = stackSet; - - /*------------------------------------------------------------------------*/ - - /** - * Creates an array of the enumerable property names of the array-like `value`. - * - * @private - * @param {*} value The value to query. - * @param {boolean} inherited Specify returning inherited property names. - * @returns {Array} Returns the array of property names. - */ - function arrayLikeKeys(value, inherited) { - var isArr = isArray(value), - isArg = !isArr && isArguments(value), - isBuff = !isArr && !isArg && isBuffer(value), - isType = !isArr && !isArg && !isBuff && isTypedArray(value), - skipIndexes = isArr || isArg || isBuff || isType, - result = skipIndexes ? baseTimes(value.length, String) : [], - length = result.length; - - for (var key in value) { - if ((inherited || hasOwnProperty.call(value, key)) && - !(skipIndexes && ( - // Safari 9 has enumerable `arguments.length` in strict mode. - key == 'length' || - // Node.js 0.10 has enumerable non-index properties on buffers. - (isBuff && (key == 'offset' || key == 'parent')) || - // PhantomJS 2 has enumerable non-index properties on typed arrays. - (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) || - // Skip index properties. - isIndex(key, length) - ))) { - result.push(key); - } - } - return result; - } - - /** - * A specialized version of `_.sample` for arrays. - * - * @private - * @param {Array} array The array to sample. - * @returns {*} Returns the random element. - */ - function arraySample(array) { - var length = array.length; - return length ? array[baseRandom(0, length - 1)] : undefined; - } - - /** - * A specialized version of `_.sampleSize` for arrays. - * - * @private - * @param {Array} array The array to sample. - * @param {number} n The number of elements to sample. - * @returns {Array} Returns the random elements. - */ - function arraySampleSize(array, n) { - return shuffleSelf(copyArray(array), baseClamp(n, 0, array.length)); - } - - /** - * A specialized version of `_.shuffle` for arrays. - * - * @private - * @param {Array} array The array to shuffle. - * @returns {Array} Returns the new shuffled array. - */ - function arrayShuffle(array) { - return shuffleSelf(copyArray(array)); - } - - /** - * This function is like `assignValue` except that it doesn't assign - * `undefined` values. - * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function assignMergeValue(object, key, value) { - if ((value !== undefined && !eq(object[key], value)) || - (value === undefined && !(key in object))) { - baseAssignValue(object, key, value); - } - } - - /** - * Assigns `value` to `key` of `object` if the existing value is not equivalent - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. 
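`stackSet` above is the method that makes `Stack` adaptive: it starts on a cheap array-backed `ListCache` and transparently rebuilds itself as a `MapCache` once it would hold `LARGE_ARRAY_SIZE - 1` (199) pairs and a native `Map` exists. A standalone miniature of that start-cheap-then-upgrade pattern (our own code; a native `Map` stands in for `MapCache`):

```
function TinyStack(limit) {
  this._pairs = [];          // ListCache-style backing
  this._map = null;          // MapCache-style backing after upgrade
  this._limit = limit || 199;
}
TinyStack.prototype.set = function(key, value) {
  if (!this._map) {
    if (this._pairs.length < this._limit) {
      this._pairs.push([key, value]);
      return this;
    }
    this._map = new Map(this._pairs); // upgrade, keeping existing pairs
    this._pairs = null;
  }
  this._map.set(key, value);
  return this;
};
TinyStack.prototype.get = function(key) {
  if (this._map) return this._map.get(key);
  // Scanning backwards makes the latest write win, standing in for
  // ListCache's update-in-place behavior.
  for (var i = this._pairs.length - 1; i >= 0; i--) {
    if (this._pairs[i][0] === key) return this._pairs[i][1];
  }
};
```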
- * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function assignValue(object, key, value) { - var objValue = object[key]; - if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) || - (value === undefined && !(key in object))) { - baseAssignValue(object, key, value); - } - } - - /** - * Gets the index at which the `key` is found in `array` of key-value pairs. - * - * @private - * @param {Array} array The array to inspect. - * @param {*} key The key to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - */ - function assocIndexOf(array, key) { - var length = array.length; - while (length--) { - if (eq(array[length][0], key)) { - return length; - } - } - return -1; - } - - /** - * Aggregates elements of `collection` on `accumulator` with keys transformed - * by `iteratee` and values set by `setter`. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} setter The function to set `accumulator` values. - * @param {Function} iteratee The iteratee to transform keys. - * @param {Object} accumulator The initial aggregated object. - * @returns {Function} Returns `accumulator`. - */ - function baseAggregator(collection, setter, iteratee, accumulator) { - baseEach(collection, function(value, key, collection) { - setter(accumulator, value, iteratee(value), collection); - }); - return accumulator; - } - - /** - * The base implementation of `_.assign` without support for multiple sources - * or `customizer` functions. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @returns {Object} Returns `object`. - */ - function baseAssign(object, source) { - return object && copyObject(source, keys(source), object); - } - - /** - * The base implementation of `_.assignIn` without support for multiple sources - * or `customizer` functions. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @returns {Object} Returns `object`. - */ - function baseAssignIn(object, source) { - return object && copyObject(source, keysIn(source), object); - } - - /** - * The base implementation of `assignValue` and `assignMergeValue` without - * value checks. - * - * @private - * @param {Object} object The object to modify. - * @param {string} key The key of the property to assign. - * @param {*} value The value to assign. - */ - function baseAssignValue(object, key, value) { - if (key == '__proto__' && defineProperty) { - defineProperty(object, key, { - 'configurable': true, - 'enumerable': true, - 'value': value, - 'writable': true - }); - } else { - object[key] = value; - } - } - - /** - * The base implementation of `_.at` without support for individual paths. - * - * @private - * @param {Object} object The object to iterate over. - * @param {string[]} paths The property paths to pick. - * @returns {Array} Returns the picked elements. - */ - function baseAt(object, paths) { - var index = -1, - length = paths.length, - result = Array(length), - skip = object == null; - - while (++index < length) { - result[index] = skip ? undefined : get(object, paths[index]); - } - return result; - } - - /** - * The base implementation of `_.clamp` which doesn't coerce arguments. - * - * @private - * @param {number} number The number to clamp. - * @param {number} [lower] The lower bound. 
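`baseAggregator` above is the shared engine behind `_.countBy`, `_.groupBy`, and `_.keyBy`: each public method just supplies a different `setter`. Within this file's scope, `_.countBy` reduces to roughly the following (a simplified paraphrase; the real wiring goes through `createAggregator` and supports iteratee shorthands):

```
function countBy(collection, iteratee) {
  return baseAggregator(collection, function(result, value, key) {
    result[key] = (result[key] || 0) + 1; // the countBy "setter"
  }, iteratee, {});
}

countBy([6.1, 4.2, 6.3], Math.floor);
// => { '4': 1, '6': 2 }
```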
- * @param {number} upper The upper bound. - * @returns {number} Returns the clamped number. - */ - function baseClamp(number, lower, upper) { - if (number === number) { - if (upper !== undefined) { - number = number <= upper ? number : upper; - } - if (lower !== undefined) { - number = number >= lower ? number : lower; - } - } - return number; - } - - /** - * The base implementation of `_.clone` and `_.cloneDeep` which tracks - * traversed objects. - * - * @private - * @param {*} value The value to clone. - * @param {boolean} bitmask The bitmask flags. - * 1 - Deep clone - * 2 - Flatten inherited properties - * 4 - Clone symbols - * @param {Function} [customizer] The function to customize cloning. - * @param {string} [key] The key of `value`. - * @param {Object} [object] The parent object of `value`. - * @param {Object} [stack] Tracks traversed objects and their clone counterparts. - * @returns {*} Returns the cloned value. - */ - function baseClone(value, bitmask, customizer, key, object, stack) { - var result, - isDeep = bitmask & CLONE_DEEP_FLAG, - isFlat = bitmask & CLONE_FLAT_FLAG, - isFull = bitmask & CLONE_SYMBOLS_FLAG; - - if (customizer) { - result = object ? customizer(value, key, object, stack) : customizer(value); - } - if (result !== undefined) { - return result; - } - if (!isObject(value)) { - return value; - } - var isArr = isArray(value); - if (isArr) { - result = initCloneArray(value); - if (!isDeep) { - return copyArray(value, result); - } - } else { - var tag = getTag(value), - isFunc = tag == funcTag || tag == genTag; - - if (isBuffer(value)) { - return cloneBuffer(value, isDeep); - } - if (tag == objectTag || tag == argsTag || (isFunc && !object)) { - result = (isFlat || isFunc) ? {} : initCloneObject(value); - if (!isDeep) { - return isFlat - ? copySymbolsIn(value, baseAssignIn(result, value)) - : copySymbols(value, baseAssign(result, value)); - } - } else { - if (!cloneableTags[tag]) { - return object ? value : {}; - } - result = initCloneByTag(value, tag, isDeep); - } - } - // Check for circular references and return its corresponding clone. - stack || (stack = new Stack); - var stacked = stack.get(value); - if (stacked) { - return stacked; - } - stack.set(value, result); - - if (isSet(value)) { - value.forEach(function(subValue) { - result.add(baseClone(subValue, bitmask, customizer, subValue, value, stack)); - }); - - return result; - } - - if (isMap(value)) { - value.forEach(function(subValue, key) { - result.set(key, baseClone(subValue, bitmask, customizer, key, value, stack)); - }); - - return result; - } - - var keysFunc = isFull - ? (isFlat ? getAllKeysIn : getAllKeys) - : (isFlat ? keysIn : keys); - - var props = isArr ? undefined : keysFunc(value); - arrayEach(props || value, function(subValue, key) { - if (props) { - key = subValue; - subValue = value[key]; - } - // Recursively populate clone (susceptible to call stack limits). - assignValue(result, key, baseClone(subValue, bitmask, customizer, key, value, stack)); - }); - return result; - } - - /** - * The base implementation of `_.conforms` which doesn't clone `source`. - * - * @private - * @param {Object} source The object of property predicates to conform to. - * @returns {Function} Returns the new spec function. - */ - function baseConforms(source) { - var props = keys(source); - return function(object) { - return baseConformsTo(object, source, props); - }; - } - - /** - * The base implementation of `_.conformsTo` which accepts `props` to check. 
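The three bitmask flags documented on `baseClone` above compose to form the public clone family; the pairings below come from the full lodash source and are listed here only as a reading aid:

```
var _ = require('lodash');

// _.clone(value)     -> baseClone(value, 4)        (CLONE_SYMBOLS_FLAG)
// _.cloneDeep(value) -> baseClone(value, 1 | 4)    (deep + symbols)
// _.omit(obj, keys)  -> baseClone(obj, 1 | 2 | 4)  (deep + flat + symbols)

var object = { 'a': [{ 'b': 1 }] };

var shallow = _.clone(object);
console.log(shallow.a === object.a); // => true  (nested array is shared)

var deep = _.cloneDeep(object);
console.log(deep.a === object.a);    // => false (recursively copied)
```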
- * - * @private - * @param {Object} object The object to inspect. - * @param {Object} source The object of property predicates to conform to. - * @returns {boolean} Returns `true` if `object` conforms, else `false`. - */ - function baseConformsTo(object, source, props) { - var length = props.length; - if (object == null) { - return !length; - } - object = Object(object); - while (length--) { - var key = props[length], - predicate = source[key], - value = object[key]; - - if ((value === undefined && !(key in object)) || !predicate(value)) { - return false; - } - } - return true; - } - - /** - * The base implementation of `_.delay` and `_.defer` which accepts `args` - * to provide to `func`. - * - * @private - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @param {Array} args The arguments to provide to `func`. - * @returns {number|Object} Returns the timer id or timeout object. - */ - function baseDelay(func, wait, args) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - return setTimeout(function() { func.apply(undefined, args); }, wait); - } - - /** - * The base implementation of methods like `_.difference` without support - * for excluding multiple arrays or iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Array} values The values to exclude. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of filtered values. - */ - function baseDifference(array, values, iteratee, comparator) { - var index = -1, - includes = arrayIncludes, - isCommon = true, - length = array.length, - result = [], - valuesLength = values.length; - - if (!length) { - return result; - } - if (iteratee) { - values = arrayMap(values, baseUnary(iteratee)); - } - if (comparator) { - includes = arrayIncludesWith; - isCommon = false; - } - else if (values.length >= LARGE_ARRAY_SIZE) { - includes = cacheHas; - isCommon = false; - values = new SetCache(values); - } - outer: - while (++index < length) { - var value = array[index], - computed = iteratee == null ? value : iteratee(value); - - value = (comparator || value !== 0) ? value : 0; - if (isCommon && computed === computed) { - var valuesIndex = valuesLength; - while (valuesIndex--) { - if (values[valuesIndex] === computed) { - continue outer; - } - } - result.push(value); - } - else if (!includes(values, computed, comparator)) { - result.push(value); - } - } - return result; - } - - /** - * The base implementation of `_.forEach` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - */ - var baseEach = createBaseEach(baseForOwn); - - /** - * The base implementation of `_.forEachRight` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - */ - var baseEachRight = createBaseEach(baseForOwnRight, true); - - /** - * The base implementation of `_.every` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. 
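`baseDifference` above picks one of three membership strategies: a backwards linear scan for small `values`, `arrayIncludesWith` when a `comparator` is given, and a `SetCache` once `values` reaches `LARGE_ARRAY_SIZE` (200). The size switch as a standalone sketch (our own code; a native `Set` stands in for `SetCache`, and lodash's special `NaN` handling is ignored):

```
function difference(array, values) {
  var seen = values.length >= 200 ? new Set(values) : null;
  return array.filter(function(value) {
    return seen
      ? !seen.has(value)           // one cache hit per element
      : values.indexOf(value) < 0; // linear scan per element
  });
}

console.log(difference([1, 2, 3, 4], [2, 4])); // => [1, 3]
```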
- * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if all elements pass the predicate check, - * else `false` - */ - function baseEvery(collection, predicate) { - var result = true; - baseEach(collection, function(value, index, collection) { - result = !!predicate(value, index, collection); - return result; - }); - return result; - } - - /** - * The base implementation of methods like `_.max` and `_.min` which accepts a - * `comparator` to determine the extremum value. - * - * @private - * @param {Array} array The array to iterate over. - * @param {Function} iteratee The iteratee invoked per iteration. - * @param {Function} comparator The comparator used to compare values. - * @returns {*} Returns the extremum value. - */ - function baseExtremum(array, iteratee, comparator) { - var index = -1, - length = array.length; - - while (++index < length) { - var value = array[index], - current = iteratee(value); - - if (current != null && (computed === undefined - ? (current === current && !isSymbol(current)) - : comparator(current, computed) - )) { - var computed = current, - result = value; - } - } - return result; - } - - /** - * The base implementation of `_.fill` without an iteratee call guard. - * - * @private - * @param {Array} array The array to fill. - * @param {*} value The value to fill `array` with. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns `array`. - */ - function baseFill(array, value, start, end) { - var length = array.length; - - start = toInteger(start); - if (start < 0) { - start = -start > length ? 0 : (length + start); - } - end = (end === undefined || end > length) ? length : toInteger(end); - if (end < 0) { - end += length; - } - end = start > end ? 0 : toLength(end); - while (start < end) { - array[start++] = value; - } - return array; - } - - /** - * The base implementation of `_.filter` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - */ - function baseFilter(collection, predicate) { - var result = []; - baseEach(collection, function(value, index, collection) { - if (predicate(value, index, collection)) { - result.push(value); - } - }); - return result; - } - - /** - * The base implementation of `_.flatten` with support for restricting flattening. - * - * @private - * @param {Array} array The array to flatten. - * @param {number} depth The maximum recursion depth. - * @param {boolean} [predicate=isFlattenable] The function invoked per iteration. - * @param {boolean} [isStrict] Restrict to values that pass `predicate` checks. - * @param {Array} [result=[]] The initial result value. - * @returns {Array} Returns the new flattened array. - */ - function baseFlatten(array, depth, predicate, isStrict, result) { - var index = -1, - length = array.length; - - predicate || (predicate = isFlattenable); - result || (result = []); - - while (++index < length) { - var value = array[index]; - if (depth > 0 && predicate(value)) { - if (depth > 1) { - // Recursively flatten arrays (susceptible to call stack limits). 
- baseFlatten(value, depth - 1, predicate, isStrict, result); - } else { - arrayPush(result, value); - } - } else if (!isStrict) { - result[result.length] = value; - } - } - return result; - } - - /** - * The base implementation of `baseForOwn` which iterates over `object` - * properties returned by `keysFunc` and invokes `iteratee` for each property. - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {Function} keysFunc The function to get the keys of `object`. - * @returns {Object} Returns `object`. - */ - var baseFor = createBaseFor(); - - /** - * This function is like `baseFor` except that it iterates over properties - * in the opposite order. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @param {Function} keysFunc The function to get the keys of `object`. - * @returns {Object} Returns `object`. - */ - var baseForRight = createBaseFor(true); - - /** - * The base implementation of `_.forOwn` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Object} Returns `object`. - */ - function baseForOwn(object, iteratee) { - return object && baseFor(object, iteratee, keys); - } - - /** - * The base implementation of `_.forOwnRight` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Object} Returns `object`. - */ - function baseForOwnRight(object, iteratee) { - return object && baseForRight(object, iteratee, keys); - } - - /** - * The base implementation of `_.functions` which creates an array of - * `object` function property names filtered from `props`. - * - * @private - * @param {Object} object The object to inspect. - * @param {Array} props The property names to filter. - * @returns {Array} Returns the function names. - */ - function baseFunctions(object, props) { - return arrayFilter(props, function(key) { - return isFunction(object[key]); - }); - } - - /** - * The base implementation of `_.get` without support for default values. - * - * @private - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to get. - * @returns {*} Returns the resolved value. - */ - function baseGet(object, path) { - path = castPath(path, object); - - var index = 0, - length = path.length; - - while (object != null && index < length) { - object = object[toKey(path[index++])]; - } - return (index && index == length) ? object : undefined; - } - - /** - * The base implementation of `getAllKeys` and `getAllKeysIn` which uses - * `keysFunc` and `symbolsFunc` to get the enumerable property names and - * symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Function} keysFunc The function to get the keys of `object`. - * @param {Function} symbolsFunc The function to get the symbols of `object`. - * @returns {Array} Returns the array of property names and symbols. - */ - function baseGetAllKeys(object, keysFunc, symbolsFunc) { - var result = keysFunc(object); - return isArray(object) ? 
result : arrayPush(result, symbolsFunc(object)); - } - - /** - * The base implementation of `getTag` without fallbacks for buggy environments. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the `toStringTag`. - */ - function baseGetTag(value) { - if (value == null) { - return value === undefined ? undefinedTag : nullTag; - } - return (symToStringTag && symToStringTag in Object(value)) - ? getRawTag(value) - : objectToString(value); - } - - /** - * The base implementation of `_.gt` which doesn't coerce arguments. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than `other`, - * else `false`. - */ - function baseGt(value, other) { - return value > other; - } - - /** - * The base implementation of `_.has` without support for deep paths. - * - * @private - * @param {Object} [object] The object to query. - * @param {Array|string} key The key to check. - * @returns {boolean} Returns `true` if `key` exists, else `false`. - */ - function baseHas(object, key) { - return object != null && hasOwnProperty.call(object, key); - } - - /** - * The base implementation of `_.hasIn` without support for deep paths. - * - * @private - * @param {Object} [object] The object to query. - * @param {Array|string} key The key to check. - * @returns {boolean} Returns `true` if `key` exists, else `false`. - */ - function baseHasIn(object, key) { - return object != null && key in Object(object); - } - - /** - * The base implementation of `_.inRange` which doesn't coerce arguments. - * - * @private - * @param {number} number The number to check. - * @param {number} start The start of the range. - * @param {number} end The end of the range. - * @returns {boolean} Returns `true` if `number` is in the range, else `false`. - */ - function baseInRange(number, start, end) { - return number >= nativeMin(start, end) && number < nativeMax(start, end); - } - - /** - * The base implementation of methods like `_.intersection`, without support - * for iteratee shorthands, that accepts an array of arrays to inspect. - * - * @private - * @param {Array} arrays The arrays to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of shared values. - */ - function baseIntersection(arrays, iteratee, comparator) { - var includes = comparator ? arrayIncludesWith : arrayIncludes, - length = arrays[0].length, - othLength = arrays.length, - othIndex = othLength, - caches = Array(othLength), - maxLength = Infinity, - result = []; - - while (othIndex--) { - var array = arrays[othIndex]; - if (othIndex && iteratee) { - array = arrayMap(array, baseUnary(iteratee)); - } - maxLength = nativeMin(array.length, maxLength); - caches[othIndex] = !comparator && (iteratee || (length >= 120 && array.length >= 120)) - ? new SetCache(othIndex && array) - : undefined; - } - array = arrays[0]; - - var index = -1, - seen = caches[0]; - - outer: - while (++index < length && result.length < maxLength) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - value = (comparator || value !== 0) ? value : 0; - if (!(seen - ? cacheHas(seen, computed) - : includes(result, computed, comparator) - )) { - othIndex = othLength; - while (--othIndex) { - var cache = caches[othIndex]; - if (!(cache - ? 
cacheHas(cache, computed) - : includes(arrays[othIndex], computed, comparator)) - ) { - continue outer; - } - } - if (seen) { - seen.push(computed); - } - result.push(value); - } - } - return result; - } - - /** - * The base implementation of `_.invert` and `_.invertBy` which inverts - * `object` with values transformed by `iteratee` and set by `setter`. - * - * @private - * @param {Object} object The object to iterate over. - * @param {Function} setter The function to set `accumulator` values. - * @param {Function} iteratee The iteratee to transform values. - * @param {Object} accumulator The initial inverted object. - * @returns {Function} Returns `accumulator`. - */ - function baseInverter(object, setter, iteratee, accumulator) { - baseForOwn(object, function(value, key, object) { - setter(accumulator, iteratee(value), key, object); - }); - return accumulator; - } - - /** - * The base implementation of `_.invoke` without support for individual - * method arguments. - * - * @private - * @param {Object} object The object to query. - * @param {Array|string} path The path of the method to invoke. - * @param {Array} args The arguments to invoke the method with. - * @returns {*} Returns the result of the invoked method. - */ - function baseInvoke(object, path, args) { - path = castPath(path, object); - object = parent(object, path); - var func = object == null ? object : object[toKey(last(path))]; - return func == null ? undefined : apply(func, object, args); - } - - /** - * The base implementation of `_.isArguments`. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an `arguments` object, else `false`. - */ - function baseIsArguments(value) { - return isObjectLike(value) && baseGetTag(value) == argsTag; - } - - /** - * The base implementation of `_.isArrayBuffer` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`. - */ - function baseIsArrayBuffer(value) { - return isObjectLike(value) && baseGetTag(value) == arrayBufferTag; - } - - /** - * The base implementation of `_.isDate` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a date object, else `false`. - */ - function baseIsDate(value) { - return isObjectLike(value) && baseGetTag(value) == dateTag; - } - - /** - * The base implementation of `_.isEqual` which supports partial comparisons - * and tracks traversed objects. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @param {boolean} bitmask The bitmask flags. - * 1 - Unordered comparison - * 2 - Partial comparison - * @param {Function} [customizer] The function to customize comparisons. - * @param {Object} [stack] Tracks traversed `value` and `other` objects. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - */ - function baseIsEqual(value, other, bitmask, customizer, stack) { - if (value === other) { - return true; - } - if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) { - return value !== value && other !== other; - } - return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack); - } - - /** - * A specialized version of `baseIsEqual` for arrays and objects which performs - * deep comparisons and tracks traversed objects enabling objects with circular - * references to be compared.
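The `COMPARE_PARTIAL_FLAG` path in `baseIsEqualDeep` (whose doc comment continues below) and the shared `Stack` of traversed pairs are what give the public API partial comparison and cycle safety:

```
var _ = require('lodash');

// Partial comparison: only the keys of the source need to match.
_.isMatch({ 'a': 1, 'b': 2, 'c': 3 }, { 'b': 2 });
// => true

// Circular references: each traversed object is mapped to its
// counterpart on the stack, so revisiting a pair short-circuits
// instead of recursing forever.
var foo = { 'a': 1 };
var bar = { 'a': 1 };
foo.self = foo;
bar.self = bar;
_.isEqual(foo, bar);
// => true
```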
- * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} [stack] Tracks traversed `object` and `other` objects. - * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) { - var objIsArr = isArray(object), - othIsArr = isArray(other), - objTag = objIsArr ? arrayTag : getTag(object), - othTag = othIsArr ? arrayTag : getTag(other); - - objTag = objTag == argsTag ? objectTag : objTag; - othTag = othTag == argsTag ? objectTag : othTag; - - var objIsObj = objTag == objectTag, - othIsObj = othTag == objectTag, - isSameTag = objTag == othTag; - - if (isSameTag && isBuffer(object)) { - if (!isBuffer(other)) { - return false; - } - objIsArr = true; - objIsObj = false; - } - if (isSameTag && !objIsObj) { - stack || (stack = new Stack); - return (objIsArr || isTypedArray(object)) - ? equalArrays(object, other, bitmask, customizer, equalFunc, stack) - : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack); - } - if (!(bitmask & COMPARE_PARTIAL_FLAG)) { - var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'), - othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__'); - - if (objIsWrapped || othIsWrapped) { - var objUnwrapped = objIsWrapped ? object.value() : object, - othUnwrapped = othIsWrapped ? other.value() : other; - - stack || (stack = new Stack); - return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack); - } - } - if (!isSameTag) { - return false; - } - stack || (stack = new Stack); - return equalObjects(object, other, bitmask, customizer, equalFunc, stack); - } - - /** - * The base implementation of `_.isMap` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a map, else `false`. - */ - function baseIsMap(value) { - return isObjectLike(value) && getTag(value) == mapTag; - } - - /** - * The base implementation of `_.isMatch` without support for iteratee shorthands. - * - * @private - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @param {Array} matchData The property names, values, and compare flags to match. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - */ - function baseIsMatch(object, source, matchData, customizer) { - var index = matchData.length, - length = index, - noCustomizer = !customizer; - - if (object == null) { - return !length; - } - object = Object(object); - while (index--) { - var data = matchData[index]; - if ((noCustomizer && data[2]) - ? data[1] !== object[data[0]] - : !(data[0] in object) - ) { - return false; - } - } - while (++index < length) { - data = matchData[index]; - var key = data[0], - objValue = object[key], - srcValue = data[1]; - - if (noCustomizer && data[2]) { - if (objValue === undefined && !(key in object)) { - return false; - } - } else { - var stack = new Stack; - if (customizer) { - var result = customizer(objValue, srcValue, key, object, source, stack); - } - if (!(result === undefined - ? 
baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack) - : result - )) { - return false; - } - } - } - return true; - } - - /** - * The base implementation of `_.isNative` without bad shim checks. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a native function, - * else `false`. - */ - function baseIsNative(value) { - if (!isObject(value) || isMasked(value)) { - return false; - } - var pattern = isFunction(value) ? reIsNative : reIsHostCtor; - return pattern.test(toSource(value)); - } - - /** - * The base implementation of `_.isRegExp` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. - */ - function baseIsRegExp(value) { - return isObjectLike(value) && baseGetTag(value) == regexpTag; - } - - /** - * The base implementation of `_.isSet` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a set, else `false`. - */ - function baseIsSet(value) { - return isObjectLike(value) && getTag(value) == setTag; - } - - /** - * The base implementation of `_.isTypedArray` without Node.js optimizations. - * - * @private - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. - */ - function baseIsTypedArray(value) { - return isObjectLike(value) && - isLength(value.length) && !!typedArrayTags[baseGetTag(value)]; - } - - /** - * The base implementation of `_.iteratee`. - * - * @private - * @param {*} [value=_.identity] The value to convert to an iteratee. - * @returns {Function} Returns the iteratee. - */ - function baseIteratee(value) { - // Don't store the `typeof` result in a variable to avoid a JIT bug in Safari 9. - // See https://bugs.webkit.org/show_bug.cgi?id=156034 for more details. - if (typeof value == 'function') { - return value; - } - if (value == null) { - return identity; - } - if (typeof value == 'object') { - return isArray(value) - ? baseMatchesProperty(value[0], value[1]) - : baseMatches(value); - } - return property(value); - } - - /** - * The base implementation of `_.keys` which doesn't treat sparse arrays as dense. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - */ - function baseKeys(object) { - if (!isPrototype(object)) { - return nativeKeys(object); - } - var result = []; - for (var key in Object(object)) { - if (hasOwnProperty.call(object, key) && key != 'constructor') { - result.push(key); - } - } - return result; - } - - /** - * The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - */ - function baseKeysIn(object) { - if (!isObject(object)) { - return nativeKeysIn(object); - } - var isProto = isPrototype(object), - result = []; - - for (var key in object) { - if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) { - result.push(key); - } - } - return result; - } - - /** - * The base implementation of `_.lt` which doesn't coerce arguments. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than `other`, - * else `false`. 
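`baseIteratee` above is the dispatcher behind every "iteratee shorthand" in the docs: functions pass through, `null` becomes `identity`, arrays become a `matchesProperty` check, plain objects become a `matches` check, and anything else becomes a `property` accessor. All four shapes through `_.map`:

```
var _ = require('lodash');

var users = [
  { 'user': 'barney', 'age': 36, 'active': true },
  { 'user': 'fred',   'age': 40, 'active': false }
];

_.map(users, function(o) { return o.age; }); // function        => [36, 40]
_.map(users, 'user');                        // property        => ['barney', 'fred']
_.map(users, ['age', 36]);                   // matchesProperty => [true, false]
_.map(users, { 'active': false });           // matches         => [false, true]
```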
- */ - function baseLt(value, other) { - return value < other; - } - - /** - * The base implementation of `_.map` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} iteratee The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - */ - function baseMap(collection, iteratee) { - var index = -1, - result = isArrayLike(collection) ? Array(collection.length) : []; - - baseEach(collection, function(value, key, collection) { - result[++index] = iteratee(value, key, collection); - }); - return result; - } - - /** - * The base implementation of `_.matches` which doesn't clone `source`. - * - * @private - * @param {Object} source The object of property values to match. - * @returns {Function} Returns the new spec function. - */ - function baseMatches(source) { - var matchData = getMatchData(source); - if (matchData.length == 1 && matchData[0][2]) { - return matchesStrictComparable(matchData[0][0], matchData[0][1]); - } - return function(object) { - return object === source || baseIsMatch(object, source, matchData); - }; - } - - /** - * The base implementation of `_.matchesProperty` which doesn't clone `srcValue`. - * - * @private - * @param {string} path The path of the property to get. - * @param {*} srcValue The value to match. - * @returns {Function} Returns the new spec function. - */ - function baseMatchesProperty(path, srcValue) { - if (isKey(path) && isStrictComparable(srcValue)) { - return matchesStrictComparable(toKey(path), srcValue); - } - return function(object) { - var objValue = get(object, path); - return (objValue === undefined && objValue === srcValue) - ? hasIn(object, path) - : baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG); - }; - } - - /** - * The base implementation of `_.merge` without support for multiple sources. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @param {number} srcIndex The index of `source`. - * @param {Function} [customizer] The function to customize merged values. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. - */ - function baseMerge(object, source, srcIndex, customizer, stack) { - if (object === source) { - return; - } - baseFor(source, function(srcValue, key) { - if (isObject(srcValue)) { - stack || (stack = new Stack); - baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack); - } - else { - var newValue = customizer - ? customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack) - : undefined; - - if (newValue === undefined) { - newValue = srcValue; - } - assignMergeValue(object, key, newValue); - } - }, keysIn); - } - - /** - * A specialized version of `baseMerge` for arrays and objects which performs - * deep merges and tracks traversed objects enabling objects with circular - * references to be merged. - * - * @private - * @param {Object} object The destination object. - * @param {Object} source The source object. - * @param {string} key The key of the value to merge. - * @param {number} srcIndex The index of `source`. - * @param {Function} mergeFunc The function to merge values. - * @param {Function} [customizer] The function to customize assigned values. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. 
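The rules `baseMerge` and `baseMergeDeep` encode (arrays and typed arrays merge index-by-index, plain objects merge recursively, and an `undefined` source value never overwrites an existing one, per `assignMergeValue` above) are visible directly through `_.merge`:

```
var _ = require('lodash');

var object = { 'a': [{ 'b': 2 }, { 'd': 4 }] };
var other  = { 'a': [{ 'c': 3 }, { 'e': 5 }] };

_.merge(object, other);
// => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] }

// An `undefined` source value leaves the destination alone.
_.merge({ 'a': 1 }, { 'a': undefined });
// => { 'a': 1 }
```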
- */ - function baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) { - var objValue = safeGet(object, key), - srcValue = safeGet(source, key), - stacked = stack.get(srcValue); - - if (stacked) { - assignMergeValue(object, key, stacked); - return; - } - var newValue = customizer - ? customizer(objValue, srcValue, (key + ''), object, source, stack) - : undefined; - - var isCommon = newValue === undefined; - - if (isCommon) { - var isArr = isArray(srcValue), - isBuff = !isArr && isBuffer(srcValue), - isTyped = !isArr && !isBuff && isTypedArray(srcValue); - - newValue = srcValue; - if (isArr || isBuff || isTyped) { - if (isArray(objValue)) { - newValue = objValue; - } - else if (isArrayLikeObject(objValue)) { - newValue = copyArray(objValue); - } - else if (isBuff) { - isCommon = false; - newValue = cloneBuffer(srcValue, true); - } - else if (isTyped) { - isCommon = false; - newValue = cloneTypedArray(srcValue, true); - } - else { - newValue = []; - } - } - else if (isPlainObject(srcValue) || isArguments(srcValue)) { - newValue = objValue; - if (isArguments(objValue)) { - newValue = toPlainObject(objValue); - } - else if (!isObject(objValue) || (srcIndex && isFunction(objValue))) { - newValue = initCloneObject(srcValue); - } - } - else { - isCommon = false; - } - } - if (isCommon) { - // Recursively merge objects and arrays (susceptible to call stack limits). - stack.set(srcValue, newValue); - mergeFunc(newValue, srcValue, srcIndex, customizer, stack); - stack['delete'](srcValue); - } - assignMergeValue(object, key, newValue); - } - - /** - * The base implementation of `_.nth` which doesn't coerce arguments. - * - * @private - * @param {Array} array The array to query. - * @param {number} n The index of the element to return. - * @returns {*} Returns the nth element of `array`. - */ - function baseNth(array, n) { - var length = array.length; - if (!length) { - return; - } - n += n < 0 ? length : 0; - return isIndex(n, length) ? array[n] : undefined; - } - - /** - * The base implementation of `_.orderBy` without param guards. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function[]|Object[]|string[]} iteratees The iteratees to sort by. - * @param {string[]} orders The sort orders of `iteratees`. - * @returns {Array} Returns the new sorted array. - */ - function baseOrderBy(collection, iteratees, orders) { - var index = -1; - iteratees = arrayMap(iteratees.length ? iteratees : [identity], baseUnary(getIteratee())); - - var result = baseMap(collection, function(value, key, collection) { - var criteria = arrayMap(iteratees, function(iteratee) { - return iteratee(value); - }); - return { 'criteria': criteria, 'index': ++index, 'value': value }; - }); - - return baseSortBy(result, function(object, other) { - return compareMultiple(object, other, orders); - }); - } - - /** - * The base implementation of `_.pick` without support for individual - * property identifiers. - * - * @private - * @param {Object} object The source object. - * @param {string[]} paths The property paths to pick. - * @returns {Object} Returns the new object. - */ - function basePick(object, paths) { - return basePickBy(object, paths, function(value, path) { - return hasIn(object, path); - }); - } - - /** - * The base implementation of `_.pickBy` without support for iteratee shorthands. - * - * @private - * @param {Object} object The source object. - * @param {string[]} paths The property paths to pick. 
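`baseOrderBy` above maps every element to a `criteria` array and hands ordering to `compareMultiple`, which walks the criteria left to right honoring a per-key direction; that is exactly what `_.orderBy` exposes:

```
var _ = require('lodash');

var users = [
  { 'user': 'fred',   'age': 48 },
  { 'user': 'barney', 'age': 34 },
  { 'user': 'fred',   'age': 40 },
  { 'user': 'barney', 'age': 36 }
];

// Ascending by `user`, then descending by `age`.
_.orderBy(users, ['user', 'age'], ['asc', 'desc']);
// => [{ 'user': 'barney', 'age': 36 }, { 'user': 'barney', 'age': 34 },
//     { 'user': 'fred',   'age': 48 }, { 'user': 'fred',   'age': 40 }]
```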
- * @param {Function} predicate The function invoked per property. - * @returns {Object} Returns the new object. - */ - function basePickBy(object, paths, predicate) { - var index = -1, - length = paths.length, - result = {}; - - while (++index < length) { - var path = paths[index], - value = baseGet(object, path); - - if (predicate(value, path)) { - baseSet(result, castPath(path, object), value); - } - } - return result; - } - - /** - * A specialized version of `baseProperty` which supports deep paths. - * - * @private - * @param {Array|string} path The path of the property to get. - * @returns {Function} Returns the new accessor function. - */ - function basePropertyDeep(path) { - return function(object) { - return baseGet(object, path); - }; - } - - /** - * The base implementation of `_.pullAllBy` without support for iteratee - * shorthands. - * - * @private - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns `array`. - */ - function basePullAll(array, values, iteratee, comparator) { - var indexOf = comparator ? baseIndexOfWith : baseIndexOf, - index = -1, - length = values.length, - seen = array; - - if (array === values) { - values = copyArray(values); - } - if (iteratee) { - seen = arrayMap(array, baseUnary(iteratee)); - } - while (++index < length) { - var fromIndex = 0, - value = values[index], - computed = iteratee ? iteratee(value) : value; - - while ((fromIndex = indexOf(seen, computed, fromIndex, comparator)) > -1) { - if (seen !== array) { - splice.call(seen, fromIndex, 1); - } - splice.call(array, fromIndex, 1); - } - } - return array; - } - - /** - * The base implementation of `_.pullAt` without support for individual - * indexes or capturing the removed elements. - * - * @private - * @param {Array} array The array to modify. - * @param {number[]} indexes The indexes of elements to remove. - * @returns {Array} Returns `array`. - */ - function basePullAt(array, indexes) { - var length = array ? indexes.length : 0, - lastIndex = length - 1; - - while (length--) { - var index = indexes[length]; - if (length == lastIndex || index !== previous) { - var previous = index; - if (isIndex(index)) { - splice.call(array, index, 1); - } else { - baseUnset(array, index); - } - } - } - return array; - } - - /** - * The base implementation of `_.random` without support for returning - * floating-point numbers. - * - * @private - * @param {number} lower The lower bound. - * @param {number} upper The upper bound. - * @returns {number} Returns the random number. - */ - function baseRandom(lower, upper) { - return lower + nativeFloor(nativeRandom() * (upper - lower + 1)); - } - - /** - * The base implementation of `_.range` and `_.rangeRight` which doesn't - * coerce arguments. - * - * @private - * @param {number} start The start of the range. - * @param {number} end The end of the range. - * @param {number} step The value to increment or decrement by. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Array} Returns the range of numbers. - */ - function baseRange(start, end, step, fromRight) { - var index = -1, - length = nativeMax(nativeCeil((end - start) / (step || 1)), 0), - result = Array(length); - - while (length--) { - result[fromRight ? 
length : ++index] = start; - start += step; - } - return result; - } - - /** - * The base implementation of `_.repeat` which doesn't coerce arguments. - * - * @private - * @param {string} string The string to repeat. - * @param {number} n The number of times to repeat the string. - * @returns {string} Returns the repeated string. - */ - function baseRepeat(string, n) { - var result = ''; - if (!string || n < 1 || n > MAX_SAFE_INTEGER) { - return result; - } - // Leverage the exponentiation by squaring algorithm for a faster repeat. - // See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more details. - do { - if (n % 2) { - result += string; - } - n = nativeFloor(n / 2); - if (n) { - string += string; - } - } while (n); - - return result; - } - - /** - * The base implementation of `_.rest` which doesn't validate or coerce arguments. - * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @returns {Function} Returns the new function. - */ - function baseRest(func, start) { - return setToString(overRest(func, start, identity), func + ''); - } - - /** - * The base implementation of `_.sample`. - * - * @private - * @param {Array|Object} collection The collection to sample. - * @returns {*} Returns the random element. - */ - function baseSample(collection) { - return arraySample(values(collection)); - } - - /** - * The base implementation of `_.sampleSize` without param guards. - * - * @private - * @param {Array|Object} collection The collection to sample. - * @param {number} n The number of elements to sample. - * @returns {Array} Returns the random elements. - */ - function baseSampleSize(collection, n) { - var array = values(collection); - return shuffleSelf(array, baseClamp(n, 0, array.length)); - } - - /** - * The base implementation of `_.set`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @param {Function} [customizer] The function to customize path creation. - * @returns {Object} Returns `object`. - */ - function baseSet(object, path, value, customizer) { - if (!isObject(object)) { - return object; - } - path = castPath(path, object); - - var index = -1, - length = path.length, - lastIndex = length - 1, - nested = object; - - while (nested != null && ++index < length) { - var key = toKey(path[index]), - newValue = value; - - if (index != lastIndex) { - var objValue = nested[key]; - newValue = customizer ? customizer(objValue, key, nested) : undefined; - if (newValue === undefined) { - newValue = isObject(objValue) - ? objValue - : (isIndex(path[index + 1]) ? [] : {}); - } - } - assignValue(nested, key, newValue); - nested = nested[key]; - } - return object; - } - - /** - * The base implementation of `setData` without support for hot loop shorting. - * - * @private - * @param {Function} func The function to associate metadata with. - * @param {*} data The metadata. - * @returns {Function} Returns `func`. - */ - var baseSetData = !metaMap ? identity : function(func, data) { - metaMap.set(func, data); - return func; - }; - - /** - * The base implementation of `setToString` without support for hot loop shorting. - * - * @private - * @param {Function} func The function to modify. - * @param {Function} string The `toString` result. - * @returns {Function} Returns `func`. - */ - var baseSetToString = !defineProperty ? 
identity : function(func, string) { - return defineProperty(func, 'toString', { - 'configurable': true, - 'enumerable': false, - 'value': constant(string), - 'writable': true - }); - }; - - /** - * The base implementation of `_.shuffle`. - * - * @private - * @param {Array|Object} collection The collection to shuffle. - * @returns {Array} Returns the new shuffled array. - */ - function baseShuffle(collection) { - return shuffleSelf(values(collection)); - } - - /** - * The base implementation of `_.slice` without an iteratee call guard. - * - * @private - * @param {Array} array The array to slice. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the slice of `array`. - */ - function baseSlice(array, start, end) { - var index = -1, - length = array.length; - - if (start < 0) { - start = -start > length ? 0 : (length + start); - } - end = end > length ? length : end; - if (end < 0) { - end += length; - } - length = start > end ? 0 : ((end - start) >>> 0); - start >>>= 0; - - var result = Array(length); - while (++index < length) { - result[index] = array[index + start]; - } - return result; - } - - /** - * The base implementation of `_.some` without support for iteratee shorthands. - * - * @private - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} predicate The function invoked per iteration. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - */ - function baseSome(collection, predicate) { - var result; - - baseEach(collection, function(value, index, collection) { - result = predicate(value, index, collection); - return !result; - }); - return !!result; - } - - /** - * The base implementation of `_.sortedIndex` and `_.sortedLastIndex` which - * performs a binary search of `array` to determine the index at which `value` - * should be inserted into `array` in order to maintain its sort order. - * - * @private - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {boolean} [retHighest] Specify returning the highest qualified index. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - */ - function baseSortedIndex(array, value, retHighest) { - var low = 0, - high = array == null ? low : array.length; - - if (typeof value == 'number' && value === value && high <= HALF_MAX_ARRAY_LENGTH) { - while (low < high) { - var mid = (low + high) >>> 1, - computed = array[mid]; - - if (computed !== null && !isSymbol(computed) && - (retHighest ? (computed <= value) : (computed < value))) { - low = mid + 1; - } else { - high = mid; - } - } - return high; - } - return baseSortedIndexBy(array, value, identity, retHighest); - } - - /** - * The base implementation of `_.sortedIndexBy` and `_.sortedLastIndexBy` - * which invokes `iteratee` for `value` and each element of `array` to compute - * their sort ranking. The iteratee is invoked with one argument; (value). - * - * @private - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} iteratee The iteratee invoked per element. - * @param {boolean} [retHighest] Specify returning the highest qualified index. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. 
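- * @example
- * // Editorial illustration, not part of the original lodash source: this
- * // binary search over iteratee results is what powers `_.sortedIndexBy`:
- * // _.sortedIndexBy([{ 'x': 4 }, { 'x': 5 }], { 'x': 4 }, function(o) { return o.x; });
- * // => 0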
- */ - function baseSortedIndexBy(array, value, iteratee, retHighest) { - value = iteratee(value); - - var low = 0, - high = array == null ? 0 : array.length, - valIsNaN = value !== value, - valIsNull = value === null, - valIsSymbol = isSymbol(value), - valIsUndefined = value === undefined; - - while (low < high) { - var mid = nativeFloor((low + high) / 2), - computed = iteratee(array[mid]), - othIsDefined = computed !== undefined, - othIsNull = computed === null, - othIsReflexive = computed === computed, - othIsSymbol = isSymbol(computed); - - if (valIsNaN) { - var setLow = retHighest || othIsReflexive; - } else if (valIsUndefined) { - setLow = othIsReflexive && (retHighest || othIsDefined); - } else if (valIsNull) { - setLow = othIsReflexive && othIsDefined && (retHighest || !othIsNull); - } else if (valIsSymbol) { - setLow = othIsReflexive && othIsDefined && !othIsNull && (retHighest || !othIsSymbol); - } else if (othIsNull || othIsSymbol) { - setLow = false; - } else { - setLow = retHighest ? (computed <= value) : (computed < value); - } - if (setLow) { - low = mid + 1; - } else { - high = mid; - } - } - return nativeMin(high, MAX_ARRAY_INDEX); - } - - /** - * The base implementation of `_.sortedUniq` and `_.sortedUniqBy` without - * support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. - */ - function baseSortedUniq(array, iteratee) { - var index = -1, - length = array.length, - resIndex = 0, - result = []; - - while (++index < length) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - if (!index || !eq(computed, seen)) { - var seen = computed; - result[resIndex++] = value === 0 ? 0 : value; - } - } - return result; - } - - /** - * The base implementation of `_.toNumber` which doesn't ensure correct - * conversions of binary, hexadecimal, or octal string values. - * - * @private - * @param {*} value The value to process. - * @returns {number} Returns the number. - */ - function baseToNumber(value) { - if (typeof value == 'number') { - return value; - } - if (isSymbol(value)) { - return NAN; - } - return +value; - } - - /** - * The base implementation of `_.toString` which doesn't convert nullish - * values to empty strings. - * - * @private - * @param {*} value The value to process. - * @returns {string} Returns the string. - */ - function baseToString(value) { - // Exit early for strings to avoid a performance hit in some environments. - if (typeof value == 'string') { - return value; - } - if (isArray(value)) { - // Recursively convert values (susceptible to call stack limits). - return arrayMap(value, baseToString) + ''; - } - if (isSymbol(value)) { - return symbolToString ? symbolToString.call(value) : ''; - } - var result = (value + ''); - return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result; - } - - /** - * The base implementation of `_.uniqBy` without support for iteratee shorthands. - * - * @private - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new duplicate free array. 
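- * @example
- * // Editorial illustration, not part of the original lodash source:
- * // _.uniq([2, 1, 2]); // => [2, 1]
- * // _.uniqBy([2.1, 1.2, 2.3], Math.floor); // => [2.1, 1.2]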
- */ - function baseUniq(array, iteratee, comparator) { - var index = -1, - includes = arrayIncludes, - length = array.length, - isCommon = true, - result = [], - seen = result; - - if (comparator) { - isCommon = false; - includes = arrayIncludesWith; - } - else if (length >= LARGE_ARRAY_SIZE) { - var set = iteratee ? null : createSet(array); - if (set) { - return setToArray(set); - } - isCommon = false; - includes = cacheHas; - seen = new SetCache; - } - else { - seen = iteratee ? [] : result; - } - outer: - while (++index < length) { - var value = array[index], - computed = iteratee ? iteratee(value) : value; - - value = (comparator || value !== 0) ? value : 0; - if (isCommon && computed === computed) { - var seenIndex = seen.length; - while (seenIndex--) { - if (seen[seenIndex] === computed) { - continue outer; - } - } - if (iteratee) { - seen.push(computed); - } - result.push(value); - } - else if (!includes(seen, computed, comparator)) { - if (seen !== result) { - seen.push(computed); - } - result.push(value); - } - } - return result; - } - - /** - * The base implementation of `_.unset`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The property path to unset. - * @returns {boolean} Returns `true` if the property is deleted, else `false`. - */ - function baseUnset(object, path) { - path = castPath(path, object); - object = parent(object, path); - return object == null || delete object[toKey(last(path))]; - } - - /** - * The base implementation of `_.update`. - * - * @private - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to update. - * @param {Function} updater The function to produce the updated value. - * @param {Function} [customizer] The function to customize path creation. - * @returns {Object} Returns `object`. - */ - function baseUpdate(object, path, updater, customizer) { - return baseSet(object, path, updater(baseGet(object, path)), customizer); - } - - /** - * The base implementation of methods like `_.dropWhile` and `_.takeWhile` - * without support for iteratee shorthands. - * - * @private - * @param {Array} array The array to query. - * @param {Function} predicate The function invoked per iteration. - * @param {boolean} [isDrop] Specify dropping elements instead of taking them. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Array} Returns the slice of `array`. - */ - function baseWhile(array, predicate, isDrop, fromRight) { - var length = array.length, - index = fromRight ? length : -1; - - while ((fromRight ? index-- : ++index < length) && - predicate(array[index], index, array)) {} - - return isDrop - ? baseSlice(array, (fromRight ? 0 : index), (fromRight ? index + 1 : length)) - : baseSlice(array, (fromRight ? index + 1 : 0), (fromRight ? length : index)); - } - - /** - * The base implementation of `wrapperValue` which returns the result of - * performing a sequence of actions on the unwrapped `value`, where each - * successive action is supplied the return value of the previous. - * - * @private - * @param {*} value The unwrapped value. - * @param {Array} actions Actions to perform to resolve the unwrapped value. - * @returns {*} Returns the resolved value. 
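- * @example
- * // Editorial note, not part of the original lodash source: this is what
- * // resolves `_(value)...value()` chains; queued actions run left to right,
- * // each receiving the previous action's result.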
- */
- function baseWrapperValue(value, actions) {
- var result = value;
- if (result instanceof LazyWrapper) {
- result = result.value();
- }
- return arrayReduce(actions, function(result, action) {
- return action.func.apply(action.thisArg, arrayPush([result], action.args));
- }, result);
- }
-
- /**
- * The base implementation of methods like `_.xor`, without support for
- * iteratee shorthands, that accepts an array of arrays to inspect.
- *
- * @private
- * @param {Array} arrays The arrays to inspect.
- * @param {Function} [iteratee] The iteratee invoked per element.
- * @param {Function} [comparator] The comparator invoked per element.
- * @returns {Array} Returns the new array of values.
- */
- function baseXor(arrays, iteratee, comparator) {
- var length = arrays.length;
- if (length < 2) {
- return length ? baseUniq(arrays[0]) : [];
- }
- var index = -1,
- result = Array(length);
-
- while (++index < length) {
- var array = arrays[index],
- othIndex = -1;
-
- while (++othIndex < length) {
- if (othIndex != index) {
- result[index] = baseDifference(result[index] || array, arrays[othIndex], iteratee, comparator);
- }
- }
- }
- return baseUniq(baseFlatten(result, 1), iteratee, comparator);
- }
-
- /**
- * The base implementation of `_.zipObject` which assigns values using `assignFunc`.
- *
- * @private
- * @param {Array} props The property identifiers.
- * @param {Array} values The property values.
- * @param {Function} assignFunc The function to assign values.
- * @returns {Object} Returns the new object.
- */
- function baseZipObject(props, values, assignFunc) {
- var index = -1,
- length = props.length,
- valsLength = values.length,
- result = {};
-
- while (++index < length) {
- var value = index < valsLength ? values[index] : undefined;
- assignFunc(result, props[index], value);
- }
- return result;
- }
-
- /**
- * Casts `value` to an empty array if it's not an array like object.
- *
- * @private
- * @param {*} value The value to inspect.
- * @returns {Array|Object} Returns the cast array-like object.
- */
- function castArrayLikeObject(value) {
- return isArrayLikeObject(value) ? value : [];
- }
-
- /**
- * Casts `value` to `identity` if it's not a function.
- *
- * @private
- * @param {*} value The value to inspect.
- * @returns {Function} Returns the cast function.
- */
- function castFunction(value) {
- return typeof value == 'function' ? value : identity;
- }
-
- /**
- * Casts `value` to a path array if it's not one.
- *
- * @private
- * @param {*} value The value to inspect.
- * @param {Object} [object] The object to query keys on.
- * @returns {Array} Returns the cast property path array.
- */
- function castPath(value, object) {
- if (isArray(value)) {
- return value;
- }
- return isKey(value, object) ? [value] : stringToPath(toString(value));
- }
-
- /**
- * A `baseRest` alias which can be replaced with `identity` by module
- * replacement plugins.
- *
- * @private
- * @type {Function}
- * @param {Function} func The function to apply a rest parameter to.
- * @returns {Function} Returns the new function.
- */
- var castRest = baseRest;
-
- /**
- * Casts `array` to a slice if it's needed.
- *
- * @private
- * @param {Array} array The array to inspect.
- * @param {number} start The start position.
- * @param {number} [end=array.length] The end position.
- * @returns {Array} Returns the cast slice.
- */
- function castSlice(array, start, end) {
- var length = array.length;
- end = end === undefined ? length : end;
- return (!start && end >= length) ?
array : baseSlice(array, start, end); - } - - /** - * A simple wrapper around the global [`clearTimeout`](https://mdn.io/clearTimeout). - * - * @private - * @param {number|Object} id The timer id or timeout object of the timer to clear. - */ - var clearTimeout = ctxClearTimeout || function(id) { - return root.clearTimeout(id); - }; - - /** - * Creates a clone of `buffer`. - * - * @private - * @param {Buffer} buffer The buffer to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Buffer} Returns the cloned buffer. - */ - function cloneBuffer(buffer, isDeep) { - if (isDeep) { - return buffer.slice(); - } - var length = buffer.length, - result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length); - - buffer.copy(result); - return result; - } - - /** - * Creates a clone of `arrayBuffer`. - * - * @private - * @param {ArrayBuffer} arrayBuffer The array buffer to clone. - * @returns {ArrayBuffer} Returns the cloned array buffer. - */ - function cloneArrayBuffer(arrayBuffer) { - var result = new arrayBuffer.constructor(arrayBuffer.byteLength); - new Uint8Array(result).set(new Uint8Array(arrayBuffer)); - return result; - } - - /** - * Creates a clone of `dataView`. - * - * @private - * @param {Object} dataView The data view to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Object} Returns the cloned data view. - */ - function cloneDataView(dataView, isDeep) { - var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer; - return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength); - } - - /** - * Creates a clone of `regexp`. - * - * @private - * @param {Object} regexp The regexp to clone. - * @returns {Object} Returns the cloned regexp. - */ - function cloneRegExp(regexp) { - var result = new regexp.constructor(regexp.source, reFlags.exec(regexp)); - result.lastIndex = regexp.lastIndex; - return result; - } - - /** - * Creates a clone of the `symbol` object. - * - * @private - * @param {Object} symbol The symbol object to clone. - * @returns {Object} Returns the cloned symbol object. - */ - function cloneSymbol(symbol) { - return symbolValueOf ? Object(symbolValueOf.call(symbol)) : {}; - } - - /** - * Creates a clone of `typedArray`. - * - * @private - * @param {Object} typedArray The typed array to clone. - * @param {boolean} [isDeep] Specify a deep clone. - * @returns {Object} Returns the cloned typed array. - */ - function cloneTypedArray(typedArray, isDeep) { - var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer; - return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length); - } - - /** - * Compares values to sort them in ascending order. - * - * @private - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {number} Returns the sort order indicator for `value`. 
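- * @example
- * // Editorial illustration, not part of the original lodash source:
- * // [3, NaN, 1].sort(compareAscending); // => [1, 3, NaN]
- * // `undefined`, `null`, symbols, and `NaN` all sort after ordinary values.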
- */
- function compareAscending(value, other) {
- if (value !== other) {
- var valIsDefined = value !== undefined,
- valIsNull = value === null,
- valIsReflexive = value === value,
- valIsSymbol = isSymbol(value);
-
- var othIsDefined = other !== undefined,
- othIsNull = other === null,
- othIsReflexive = other === other,
- othIsSymbol = isSymbol(other);
-
- if ((!othIsNull && !othIsSymbol && !valIsSymbol && value > other) ||
- (valIsSymbol && othIsDefined && othIsReflexive && !othIsNull && !othIsSymbol) ||
- (valIsNull && othIsDefined && othIsReflexive) ||
- (!valIsDefined && othIsReflexive) ||
- !valIsReflexive) {
- return 1;
- }
- if ((!valIsNull && !valIsSymbol && !othIsSymbol && value < other) ||
- (othIsSymbol && valIsDefined && valIsReflexive && !valIsNull && !valIsSymbol) ||
- (othIsNull && valIsDefined && valIsReflexive) ||
- (!othIsDefined && valIsReflexive) ||
- !othIsReflexive) {
- return -1;
- }
- }
- return 0;
- }
-
- /**
- * Used by `_.orderBy` to compare multiple properties of a value to another
- * and stable sort them.
- *
- * If `orders` is unspecified, all values are sorted in ascending order. Otherwise,
- * specify an order of "desc" for descending or "asc" for ascending sort order
- * of corresponding values.
- *
- * @private
- * @param {Object} object The object to compare.
- * @param {Object} other The other object to compare.
- * @param {boolean[]|string[]} orders The order to sort by for each property.
- * @returns {number} Returns the sort order indicator for `object`.
- */
- function compareMultiple(object, other, orders) {
- var index = -1,
- objCriteria = object.criteria,
- othCriteria = other.criteria,
- length = objCriteria.length,
- ordersLength = orders.length;
-
- while (++index < length) {
- var result = compareAscending(objCriteria[index], othCriteria[index]);
- if (result) {
- if (index >= ordersLength) {
- return result;
- }
- var order = orders[index];
- return result * (order == 'desc' ? -1 : 1);
- }
- }
- // Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications
- // that causes it, under certain circumstances, to provide the same value for
- // `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247
- // for more details.
- //
- // This also ensures a stable sort in V8 and other engines.
- // See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details.
- return object.index - other.index;
- }
-
- /**
- * Creates an array that is the composition of partially applied arguments,
- * placeholders, and provided arguments into a single array of arguments.
- *
- * @private
- * @param {Array} args The provided arguments.
- * @param {Array} partials The arguments to prepend to those provided.
- * @param {Array} holders The `partials` placeholder indexes.
- * @param {boolean} [isCurried] Specify composing for a curried function.
- * @returns {Array} Returns the new array of composed arguments.
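- * @example
- * // Editorial illustration, not part of the original lodash source, with
- * // `PH` standing for the partial placeholder:
- * // composeArgs([2, 3], [1, PH], [1]); // => [1, 2, 3]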
- */
- function composeArgs(args, partials, holders, isCurried) {
- var argsIndex = -1,
- argsLength = args.length,
- holdersLength = holders.length,
- leftIndex = -1,
- leftLength = partials.length,
- rangeLength = nativeMax(argsLength - holdersLength, 0),
- result = Array(leftLength + rangeLength),
- isUncurried = !isCurried;
-
- while (++leftIndex < leftLength) {
- result[leftIndex] = partials[leftIndex];
- }
- while (++argsIndex < holdersLength) {
- if (isUncurried || argsIndex < argsLength) {
- result[holders[argsIndex]] = args[argsIndex];
- }
- }
- while (rangeLength--) {
- result[leftIndex++] = args[argsIndex++];
- }
- return result;
- }
-
- /**
- * This function is like `composeArgs` except that the arguments composition
- * is tailored for `_.partialRight`.
- *
- * @private
- * @param {Array} args The provided arguments.
- * @param {Array} partials The arguments to append to those provided.
- * @param {Array} holders The `partials` placeholder indexes.
- * @param {boolean} [isCurried] Specify composing for a curried function.
- * @returns {Array} Returns the new array of composed arguments.
- */
- function composeArgsRight(args, partials, holders, isCurried) {
- var argsIndex = -1,
- argsLength = args.length,
- holdersIndex = -1,
- holdersLength = holders.length,
- rightIndex = -1,
- rightLength = partials.length,
- rangeLength = nativeMax(argsLength - holdersLength, 0),
- result = Array(rangeLength + rightLength),
- isUncurried = !isCurried;
-
- while (++argsIndex < rangeLength) {
- result[argsIndex] = args[argsIndex];
- }
- var offset = argsIndex;
- while (++rightIndex < rightLength) {
- result[offset + rightIndex] = partials[rightIndex];
- }
- while (++holdersIndex < holdersLength) {
- if (isUncurried || argsIndex < argsLength) {
- result[offset + holders[holdersIndex]] = args[argsIndex++];
- }
- }
- return result;
- }
-
- /**
- * Copies the values of `source` to `array`.
- *
- * @private
- * @param {Array} source The array to copy values from.
- * @param {Array} [array=[]] The array to copy values to.
- * @returns {Array} Returns `array`.
- */
- function copyArray(source, array) {
- var index = -1,
- length = source.length;
-
- array || (array = Array(length));
- while (++index < length) {
- array[index] = source[index];
- }
- return array;
- }
-
- /**
- * Copies properties of `source` to `object`.
- *
- * @private
- * @param {Object} source The object to copy properties from.
- * @param {Array} props The property identifiers to copy.
- * @param {Object} [object={}] The object to copy properties to.
- * @param {Function} [customizer] The function to customize copied values.
- * @returns {Object} Returns `object`.
- */
- function copyObject(source, props, object, customizer) {
- var isNew = !object;
- object || (object = {});
-
- var index = -1,
- length = props.length;
-
- while (++index < length) {
- var key = props[index];
-
- var newValue = customizer
- ? customizer(object[key], source[key], key, object, source)
- : undefined;
-
- if (newValue === undefined) {
- newValue = source[key];
- }
- if (isNew) {
- baseAssignValue(object, key, newValue);
- } else {
- assignValue(object, key, newValue);
- }
- }
- return object;
- }
-
- /**
- * Copies own symbols of `source` to `object`.
- *
- * @private
- * @param {Object} source The object to copy symbols from.
- * @param {Object} [object={}] The object to copy symbols to.
- * @returns {Object} Returns `object`.
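- * @example
- * // Editorial note, not part of the original lodash source: `getSymbols`
- * // reports only own enumerable symbol keys, so non-enumerable symbols
- * // are skipped here by design.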
- */ - function copySymbols(source, object) { - return copyObject(source, getSymbols(source), object); - } - - /** - * Copies own and inherited symbols of `source` to `object`. - * - * @private - * @param {Object} source The object to copy symbols from. - * @param {Object} [object={}] The object to copy symbols to. - * @returns {Object} Returns `object`. - */ - function copySymbolsIn(source, object) { - return copyObject(source, getSymbolsIn(source), object); - } - - /** - * Creates a function like `_.groupBy`. - * - * @private - * @param {Function} setter The function to set accumulator values. - * @param {Function} [initializer] The accumulator object initializer. - * @returns {Function} Returns the new aggregator function. - */ - function createAggregator(setter, initializer) { - return function(collection, iteratee) { - var func = isArray(collection) ? arrayAggregator : baseAggregator, - accumulator = initializer ? initializer() : {}; - - return func(collection, setter, getIteratee(iteratee, 2), accumulator); - }; - } - - /** - * Creates a function like `_.assign`. - * - * @private - * @param {Function} assigner The function to assign values. - * @returns {Function} Returns the new assigner function. - */ - function createAssigner(assigner) { - return baseRest(function(object, sources) { - var index = -1, - length = sources.length, - customizer = length > 1 ? sources[length - 1] : undefined, - guard = length > 2 ? sources[2] : undefined; - - customizer = (assigner.length > 3 && typeof customizer == 'function') - ? (length--, customizer) - : undefined; - - if (guard && isIterateeCall(sources[0], sources[1], guard)) { - customizer = length < 3 ? undefined : customizer; - length = 1; - } - object = Object(object); - while (++index < length) { - var source = sources[index]; - if (source) { - assigner(object, source, index, customizer); - } - } - return object; - }); - } - - /** - * Creates a `baseEach` or `baseEachRight` function. - * - * @private - * @param {Function} eachFunc The function to iterate over a collection. - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new base function. - */ - function createBaseEach(eachFunc, fromRight) { - return function(collection, iteratee) { - if (collection == null) { - return collection; - } - if (!isArrayLike(collection)) { - return eachFunc(collection, iteratee); - } - var length = collection.length, - index = fromRight ? length : -1, - iterable = Object(collection); - - while ((fromRight ? index-- : ++index < length)) { - if (iteratee(iterable[index], index, iterable) === false) { - break; - } - } - return collection; - }; - } - - /** - * Creates a base function for methods like `_.forIn` and `_.forOwn`. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new base function. - */ - function createBaseFor(fromRight) { - return function(object, iteratee, keysFunc) { - var index = -1, - iterable = Object(object), - props = keysFunc(object), - length = props.length; - - while (length--) { - var key = props[fromRight ? length : ++index]; - if (iteratee(iterable[key], key, iterable) === false) { - break; - } - } - return object; - }; - } - - /** - * Creates a function that wraps `func` to invoke it with the optional `this` - * binding of `thisArg`. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. 
- * @param {*} [thisArg] The `this` binding of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createBind(func, bitmask, thisArg) { - var isBind = bitmask & WRAP_BIND_FLAG, - Ctor = createCtor(func); - - function wrapper() { - var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - return fn.apply(isBind ? thisArg : this, arguments); - } - return wrapper; - } - - /** - * Creates a function like `_.lowerFirst`. - * - * @private - * @param {string} methodName The name of the `String` case method to use. - * @returns {Function} Returns the new case function. - */ - function createCaseFirst(methodName) { - return function(string) { - string = toString(string); - - var strSymbols = hasUnicode(string) - ? stringToArray(string) - : undefined; - - var chr = strSymbols - ? strSymbols[0] - : string.charAt(0); - - var trailing = strSymbols - ? castSlice(strSymbols, 1).join('') - : string.slice(1); - - return chr[methodName]() + trailing; - }; - } - - /** - * Creates a function like `_.camelCase`. - * - * @private - * @param {Function} callback The function to combine each word. - * @returns {Function} Returns the new compounder function. - */ - function createCompounder(callback) { - return function(string) { - return arrayReduce(words(deburr(string).replace(reApos, '')), callback, ''); - }; - } - - /** - * Creates a function that produces an instance of `Ctor` regardless of - * whether it was invoked as part of a `new` expression or by `call` or `apply`. - * - * @private - * @param {Function} Ctor The constructor to wrap. - * @returns {Function} Returns the new wrapped function. - */ - function createCtor(Ctor) { - return function() { - // Use a `switch` statement to work with class constructors. See - // http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist - // for more details. - var args = arguments; - switch (args.length) { - case 0: return new Ctor; - case 1: return new Ctor(args[0]); - case 2: return new Ctor(args[0], args[1]); - case 3: return new Ctor(args[0], args[1], args[2]); - case 4: return new Ctor(args[0], args[1], args[2], args[3]); - case 5: return new Ctor(args[0], args[1], args[2], args[3], args[4]); - case 6: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5]); - case 7: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5], args[6]); - } - var thisBinding = baseCreate(Ctor.prototype), - result = Ctor.apply(thisBinding, args); - - // Mimic the constructor's `return` behavior. - // See https://es5.github.io/#x13.2.2 for more details. - return isObject(result) ? result : thisBinding; - }; - } - - /** - * Creates a function that wraps `func` to enable currying. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {number} arity The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createCurry(func, bitmask, arity) { - var Ctor = createCtor(func); - - function wrapper() { - var length = arguments.length, - args = Array(length), - index = length, - placeholder = getHolder(wrapper); - - while (index--) { - args[index] = arguments[index]; - } - var holders = (length < 3 && args[0] !== placeholder && args[length - 1] !== placeholder) - ? 
[] - : replaceHolders(args, placeholder); - - length -= holders.length; - if (length < arity) { - return createRecurry( - func, bitmask, createHybrid, wrapper.placeholder, undefined, - args, holders, undefined, undefined, arity - length); - } - var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - return apply(fn, this, args); - } - return wrapper; - } - - /** - * Creates a `_.find` or `_.findLast` function. - * - * @private - * @param {Function} findIndexFunc The function to find the collection index. - * @returns {Function} Returns the new find function. - */ - function createFind(findIndexFunc) { - return function(collection, predicate, fromIndex) { - var iterable = Object(collection); - if (!isArrayLike(collection)) { - var iteratee = getIteratee(predicate, 3); - collection = keys(collection); - predicate = function(key) { return iteratee(iterable[key], key, iterable); }; - } - var index = findIndexFunc(collection, predicate, fromIndex); - return index > -1 ? iterable[iteratee ? collection[index] : index] : undefined; - }; - } - - /** - * Creates a `_.flow` or `_.flowRight` function. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new flow function. - */ - function createFlow(fromRight) { - return flatRest(function(funcs) { - var length = funcs.length, - index = length, - prereq = LodashWrapper.prototype.thru; - - if (fromRight) { - funcs.reverse(); - } - while (index--) { - var func = funcs[index]; - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - if (prereq && !wrapper && getFuncName(func) == 'wrapper') { - var wrapper = new LodashWrapper([], true); - } - } - index = wrapper ? index : length; - while (++index < length) { - func = funcs[index]; - - var funcName = getFuncName(func), - data = funcName == 'wrapper' ? getData(func) : undefined; - - if (data && isLaziable(data[0]) && - data[1] == (WRAP_ARY_FLAG | WRAP_CURRY_FLAG | WRAP_PARTIAL_FLAG | WRAP_REARG_FLAG) && - !data[4].length && data[9] == 1 - ) { - wrapper = wrapper[getFuncName(data[0])].apply(wrapper, data[3]); - } else { - wrapper = (func.length == 1 && isLaziable(func)) - ? wrapper[funcName]() - : wrapper.thru(func); - } - } - return function() { - var args = arguments, - value = args[0]; - - if (wrapper && args.length == 1 && isArray(value)) { - return wrapper.plant(value).value(); - } - var index = 0, - result = length ? funcs[index].apply(this, args) : value; - - while (++index < length) { - result = funcs[index].call(this, result); - } - return result; - }; - }); - } - - /** - * Creates a function that wraps `func` to invoke it with optional `this` - * binding of `thisArg`, partial application, and currying. - * - * @private - * @param {Function|string} func The function or method name to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to prepend to those provided to - * the new function. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [partialsRight] The arguments to append to those provided - * to the new function. - * @param {Array} [holdersRight] The `partialsRight` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. - * @param {number} [ary] The arity cap of `func`. - * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. 
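- * @example
- * // Editorial note, not part of the original lodash source: this is the
- * // catch-all wrapper behind combined cases of `_.bind`, `_.curry`,
- * // `_.partial`, `_.rearg`, `_.ary`, and `_.flip`; the bitmask decides
- * // which of the argument transformations below actually run.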
- */ - function createHybrid(func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity) { - var isAry = bitmask & WRAP_ARY_FLAG, - isBind = bitmask & WRAP_BIND_FLAG, - isBindKey = bitmask & WRAP_BIND_KEY_FLAG, - isCurried = bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG), - isFlip = bitmask & WRAP_FLIP_FLAG, - Ctor = isBindKey ? undefined : createCtor(func); - - function wrapper() { - var length = arguments.length, - args = Array(length), - index = length; - - while (index--) { - args[index] = arguments[index]; - } - if (isCurried) { - var placeholder = getHolder(wrapper), - holdersCount = countHolders(args, placeholder); - } - if (partials) { - args = composeArgs(args, partials, holders, isCurried); - } - if (partialsRight) { - args = composeArgsRight(args, partialsRight, holdersRight, isCurried); - } - length -= holdersCount; - if (isCurried && length < arity) { - var newHolders = replaceHolders(args, placeholder); - return createRecurry( - func, bitmask, createHybrid, wrapper.placeholder, thisArg, - args, newHolders, argPos, ary, arity - length - ); - } - var thisBinding = isBind ? thisArg : this, - fn = isBindKey ? thisBinding[func] : func; - - length = args.length; - if (argPos) { - args = reorder(args, argPos); - } else if (isFlip && length > 1) { - args.reverse(); - } - if (isAry && ary < length) { - args.length = ary; - } - if (this && this !== root && this instanceof wrapper) { - fn = Ctor || createCtor(fn); - } - return fn.apply(thisBinding, args); - } - return wrapper; - } - - /** - * Creates a function like `_.invertBy`. - * - * @private - * @param {Function} setter The function to set accumulator values. - * @param {Function} toIteratee The function to resolve iteratees. - * @returns {Function} Returns the new inverter function. - */ - function createInverter(setter, toIteratee) { - return function(object, iteratee) { - return baseInverter(object, setter, toIteratee(iteratee), {}); - }; - } - - /** - * Creates a function that performs a mathematical operation on two values. - * - * @private - * @param {Function} operator The function to perform the operation. - * @param {number} [defaultValue] The value used for `undefined` arguments. - * @returns {Function} Returns the new mathematical operation function. - */ - function createMathOperation(operator, defaultValue) { - return function(value, other) { - var result; - if (value === undefined && other === undefined) { - return defaultValue; - } - if (value !== undefined) { - result = value; - } - if (other !== undefined) { - if (result === undefined) { - return other; - } - if (typeof value == 'string' || typeof other == 'string') { - value = baseToString(value); - other = baseToString(other); - } else { - value = baseToNumber(value); - other = baseToNumber(other); - } - result = operator(value, other); - } - return result; - }; - } - - /** - * Creates a function like `_.over`. - * - * @private - * @param {Function} arrayFunc The function to iterate over iteratees. - * @returns {Function} Returns the new over function. - */ - function createOver(arrayFunc) { - return flatRest(function(iteratees) { - iteratees = arrayMap(iteratees, baseUnary(getIteratee())); - return baseRest(function(args) { - var thisArg = this; - return arrayFunc(iteratees, function(iteratee) { - return apply(iteratee, thisArg, args); - }); - }); - }); - } - - /** - * Creates the padding for `string` based on `length`. The `chars` string - * is truncated if the number of characters exceeds `length`. 
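- * (Editorial illustration, not part of the original lodash source:
- * createPadding(5, 'ab') returns 'ababa'.)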
- * - * @private - * @param {number} length The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padding for `string`. - */ - function createPadding(length, chars) { - chars = chars === undefined ? ' ' : baseToString(chars); - - var charsLength = chars.length; - if (charsLength < 2) { - return charsLength ? baseRepeat(chars, length) : chars; - } - var result = baseRepeat(chars, nativeCeil(length / stringSize(chars))); - return hasUnicode(chars) - ? castSlice(stringToArray(result), 0, length).join('') - : result.slice(0, length); - } - - /** - * Creates a function that wraps `func` to invoke it with the `this` binding - * of `thisArg` and `partials` prepended to the arguments it receives. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {*} thisArg The `this` binding of `func`. - * @param {Array} partials The arguments to prepend to those provided to - * the new function. - * @returns {Function} Returns the new wrapped function. - */ - function createPartial(func, bitmask, thisArg, partials) { - var isBind = bitmask & WRAP_BIND_FLAG, - Ctor = createCtor(func); - - function wrapper() { - var argsIndex = -1, - argsLength = arguments.length, - leftIndex = -1, - leftLength = partials.length, - args = Array(leftLength + argsLength), - fn = (this && this !== root && this instanceof wrapper) ? Ctor : func; - - while (++leftIndex < leftLength) { - args[leftIndex] = partials[leftIndex]; - } - while (argsLength--) { - args[leftIndex++] = arguments[++argsIndex]; - } - return apply(fn, isBind ? thisArg : this, args); - } - return wrapper; - } - - /** - * Creates a `_.range` or `_.rangeRight` function. - * - * @private - * @param {boolean} [fromRight] Specify iterating from right to left. - * @returns {Function} Returns the new range function. - */ - function createRange(fromRight) { - return function(start, end, step) { - if (step && typeof step != 'number' && isIterateeCall(start, end, step)) { - end = step = undefined; - } - // Ensure the sign of `-0` is preserved. - start = toFinite(start); - if (end === undefined) { - end = start; - start = 0; - } else { - end = toFinite(end); - } - step = step === undefined ? (start < end ? 1 : -1) : toFinite(step); - return baseRange(start, end, step, fromRight); - }; - } - - /** - * Creates a function that performs a relational operation on two values. - * - * @private - * @param {Function} operator The function to perform the operation. - * @returns {Function} Returns the new relational operation function. - */ - function createRelationalOperation(operator) { - return function(value, other) { - if (!(typeof value == 'string' && typeof other == 'string')) { - value = toNumber(value); - other = toNumber(other); - } - return operator(value, other); - }; - } - - /** - * Creates a function that wraps `func` to continue currying. - * - * @private - * @param {Function} func The function to wrap. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @param {Function} wrapFunc The function to create the `func` wrapper. - * @param {*} placeholder The placeholder value. - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to prepend to those provided to - * the new function. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. 
- * @param {number} [ary] The arity cap of `func`. - * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createRecurry(func, bitmask, wrapFunc, placeholder, thisArg, partials, holders, argPos, ary, arity) { - var isCurry = bitmask & WRAP_CURRY_FLAG, - newHolders = isCurry ? holders : undefined, - newHoldersRight = isCurry ? undefined : holders, - newPartials = isCurry ? partials : undefined, - newPartialsRight = isCurry ? undefined : partials; - - bitmask |= (isCurry ? WRAP_PARTIAL_FLAG : WRAP_PARTIAL_RIGHT_FLAG); - bitmask &= ~(isCurry ? WRAP_PARTIAL_RIGHT_FLAG : WRAP_PARTIAL_FLAG); - - if (!(bitmask & WRAP_CURRY_BOUND_FLAG)) { - bitmask &= ~(WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG); - } - var newData = [ - func, bitmask, thisArg, newPartials, newHolders, newPartialsRight, - newHoldersRight, argPos, ary, arity - ]; - - var result = wrapFunc.apply(undefined, newData); - if (isLaziable(func)) { - setData(result, newData); - } - result.placeholder = placeholder; - return setWrapToString(result, func, bitmask); - } - - /** - * Creates a function like `_.round`. - * - * @private - * @param {string} methodName The name of the `Math` method to use when rounding. - * @returns {Function} Returns the new round function. - */ - function createRound(methodName) { - var func = Math[methodName]; - return function(number, precision) { - number = toNumber(number); - precision = precision == null ? 0 : nativeMin(toInteger(precision), 292); - if (precision) { - // Shift with exponential notation to avoid floating-point issues. - // See [MDN](https://mdn.io/round#Examples) for more details. - var pair = (toString(number) + 'e').split('e'), - value = func(pair[0] + 'e' + (+pair[1] + precision)); - - pair = (toString(value) + 'e').split('e'); - return +(pair[0] + 'e' + (+pair[1] - precision)); - } - return func(number); - }; - } - - /** - * Creates a set object of `values`. - * - * @private - * @param {Array} values The values to add to the set. - * @returns {Object} Returns the new set. - */ - var createSet = !(Set && (1 / setToArray(new Set([,-0]))[1]) == INFINITY) ? noop : function(values) { - return new Set(values); - }; - - /** - * Creates a `_.toPairs` or `_.toPairsIn` function. - * - * @private - * @param {Function} keysFunc The function to get the keys of a given object. - * @returns {Function} Returns the new pairs function. - */ - function createToPairs(keysFunc) { - return function(object) { - var tag = getTag(object); - if (tag == mapTag) { - return mapToArray(object); - } - if (tag == setTag) { - return setToPairs(object); - } - return baseToPairs(object, keysFunc(object)); - }; - } - - /** - * Creates a function that either curries or invokes `func` with optional - * `this` binding and partially applied arguments. - * - * @private - * @param {Function|string} func The function or method name to wrap. - * @param {number} bitmask The bitmask flags. - * 1 - `_.bind` - * 2 - `_.bindKey` - * 4 - `_.curry` or `_.curryRight` of a bound function - * 8 - `_.curry` - * 16 - `_.curryRight` - * 32 - `_.partial` - * 64 - `_.partialRight` - * 128 - `_.rearg` - * 256 - `_.ary` - * 512 - `_.flip` - * @param {*} [thisArg] The `this` binding of `func`. - * @param {Array} [partials] The arguments to be partially applied. - * @param {Array} [holders] The `partials` placeholder indexes. - * @param {Array} [argPos] The argument positions of the new function. - * @param {number} [ary] The arity cap of `func`. 
- * @param {number} [arity] The arity of `func`. - * @returns {Function} Returns the new wrapped function. - */ - function createWrap(func, bitmask, thisArg, partials, holders, argPos, ary, arity) { - var isBindKey = bitmask & WRAP_BIND_KEY_FLAG; - if (!isBindKey && typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - var length = partials ? partials.length : 0; - if (!length) { - bitmask &= ~(WRAP_PARTIAL_FLAG | WRAP_PARTIAL_RIGHT_FLAG); - partials = holders = undefined; - } - ary = ary === undefined ? ary : nativeMax(toInteger(ary), 0); - arity = arity === undefined ? arity : toInteger(arity); - length -= holders ? holders.length : 0; - - if (bitmask & WRAP_PARTIAL_RIGHT_FLAG) { - var partialsRight = partials, - holdersRight = holders; - - partials = holders = undefined; - } - var data = isBindKey ? undefined : getData(func); - - var newData = [ - func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, - argPos, ary, arity - ]; - - if (data) { - mergeData(newData, data); - } - func = newData[0]; - bitmask = newData[1]; - thisArg = newData[2]; - partials = newData[3]; - holders = newData[4]; - arity = newData[9] = newData[9] === undefined - ? (isBindKey ? 0 : func.length) - : nativeMax(newData[9] - length, 0); - - if (!arity && bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG)) { - bitmask &= ~(WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG); - } - if (!bitmask || bitmask == WRAP_BIND_FLAG) { - var result = createBind(func, bitmask, thisArg); - } else if (bitmask == WRAP_CURRY_FLAG || bitmask == WRAP_CURRY_RIGHT_FLAG) { - result = createCurry(func, bitmask, arity); - } else if ((bitmask == WRAP_PARTIAL_FLAG || bitmask == (WRAP_BIND_FLAG | WRAP_PARTIAL_FLAG)) && !holders.length) { - result = createPartial(func, bitmask, thisArg, partials); - } else { - result = createHybrid.apply(undefined, newData); - } - var setter = data ? baseSetData : setData; - return setWrapToString(setter(result, newData), func, bitmask); - } - - /** - * Used by `_.defaults` to customize its `_.assignIn` use to assign properties - * of source objects to the destination object for all destination properties - * that resolve to `undefined`. - * - * @private - * @param {*} objValue The destination value. - * @param {*} srcValue The source value. - * @param {string} key The key of the property to assign. - * @param {Object} object The parent object of `objValue`. - * @returns {*} Returns the value to assign. - */ - function customDefaultsAssignIn(objValue, srcValue, key, object) { - if (objValue === undefined || - (eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key))) { - return srcValue; - } - return objValue; - } - - /** - * Used by `_.defaultsDeep` to customize its `_.merge` use to merge source - * objects into destination objects that are passed thru. - * - * @private - * @param {*} objValue The destination value. - * @param {*} srcValue The source value. - * @param {string} key The key of the property to merge. - * @param {Object} object The parent object of `objValue`. - * @param {Object} source The parent object of `srcValue`. - * @param {Object} [stack] Tracks traversed source values and their merged - * counterparts. - * @returns {*} Returns the value to assign. - */ - function customDefaultsMerge(objValue, srcValue, key, object, source, stack) { - if (isObject(objValue) && isObject(srcValue)) { - // Recursively merge objects and arrays (susceptible to call stack limits). 
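- // Editorial note, not part of the original lodash source: recording the
- // pair on the stack before recursing is what lets `_.defaultsDeep`
- // tolerate circular source objects.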
- stack.set(srcValue, objValue); - baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack); - stack['delete'](srcValue); - } - return objValue; - } - - /** - * Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain - * objects. - * - * @private - * @param {*} value The value to inspect. - * @param {string} key The key of the property to inspect. - * @returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`. - */ - function customOmitClone(value) { - return isPlainObject(value) ? undefined : value; - } - - /** - * A specialized version of `baseIsEqualDeep` for arrays with support for - * partial deep comparisons. - * - * @private - * @param {Array} array The array to compare. - * @param {Array} other The other array to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `array` and `other` objects. - * @returns {boolean} Returns `true` if the arrays are equivalent, else `false`. - */ - function equalArrays(array, other, bitmask, customizer, equalFunc, stack) { - var isPartial = bitmask & COMPARE_PARTIAL_FLAG, - arrLength = array.length, - othLength = other.length; - - if (arrLength != othLength && !(isPartial && othLength > arrLength)) { - return false; - } - // Assume cyclic values are equal. - var stacked = stack.get(array); - if (stacked && stack.get(other)) { - return stacked == other; - } - var index = -1, - result = true, - seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined; - - stack.set(array, other); - stack.set(other, array); - - // Ignore non-index properties. - while (++index < arrLength) { - var arrValue = array[index], - othValue = other[index]; - - if (customizer) { - var compared = isPartial - ? customizer(othValue, arrValue, index, other, array, stack) - : customizer(arrValue, othValue, index, array, other, stack); - } - if (compared !== undefined) { - if (compared) { - continue; - } - result = false; - break; - } - // Recursively compare arrays (susceptible to call stack limits). - if (seen) { - if (!arraySome(other, function(othValue, othIndex) { - if (!cacheHas(seen, othIndex) && - (arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) { - return seen.push(othIndex); - } - })) { - result = false; - break; - } - } else if (!( - arrValue === othValue || - equalFunc(arrValue, othValue, bitmask, customizer, stack) - )) { - result = false; - break; - } - } - stack['delete'](array); - stack['delete'](other); - return result; - } - - /** - * A specialized version of `baseIsEqualDeep` for comparing objects of - * the same `toStringTag`. - * - * **Note:** This function only supports comparing values with tags of - * `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`. - * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {string} tag The `toStringTag` of the objects to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `object` and `other` objects. 
- * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) { - switch (tag) { - case dataViewTag: - if ((object.byteLength != other.byteLength) || - (object.byteOffset != other.byteOffset)) { - return false; - } - object = object.buffer; - other = other.buffer; - - case arrayBufferTag: - if ((object.byteLength != other.byteLength) || - !equalFunc(new Uint8Array(object), new Uint8Array(other))) { - return false; - } - return true; - - case boolTag: - case dateTag: - case numberTag: - // Coerce booleans to `1` or `0` and dates to milliseconds. - // Invalid dates are coerced to `NaN`. - return eq(+object, +other); - - case errorTag: - return object.name == other.name && object.message == other.message; - - case regexpTag: - case stringTag: - // Coerce regexes to strings and treat strings, primitives and objects, - // as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring - // for more details. - return object == (other + ''); - - case mapTag: - var convert = mapToArray; - - case setTag: - var isPartial = bitmask & COMPARE_PARTIAL_FLAG; - convert || (convert = setToArray); - - if (object.size != other.size && !isPartial) { - return false; - } - // Assume cyclic values are equal. - var stacked = stack.get(object); - if (stacked) { - return stacked == other; - } - bitmask |= COMPARE_UNORDERED_FLAG; - - // Recursively compare objects (susceptible to call stack limits). - stack.set(object, other); - var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack); - stack['delete'](object); - return result; - - case symbolTag: - if (symbolValueOf) { - return symbolValueOf.call(object) == symbolValueOf.call(other); - } - } - return false; - } - - /** - * A specialized version of `baseIsEqualDeep` for objects with support for - * partial deep comparisons. - * - * @private - * @param {Object} object The object to compare. - * @param {Object} other The other object to compare. - * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. - * @param {Function} customizer The function to customize comparisons. - * @param {Function} equalFunc The function to determine equivalents of values. - * @param {Object} stack Tracks traversed `object` and `other` objects. - * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. - */ - function equalObjects(object, other, bitmask, customizer, equalFunc, stack) { - var isPartial = bitmask & COMPARE_PARTIAL_FLAG, - objProps = getAllKeys(object), - objLength = objProps.length, - othProps = getAllKeys(other), - othLength = othProps.length; - - if (objLength != othLength && !isPartial) { - return false; - } - var index = objLength; - while (index--) { - var key = objProps[index]; - if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) { - return false; - } - } - // Assume cyclic values are equal. - var stacked = stack.get(object); - if (stacked && stack.get(other)) { - return stacked == other; - } - var result = true; - stack.set(object, other); - stack.set(other, object); - - var skipCtor = isPartial; - while (++index < objLength) { - key = objProps[index]; - var objValue = object[key], - othValue = other[key]; - - if (customizer) { - var compared = isPartial - ? 
customizer(othValue, objValue, key, other, object, stack) - : customizer(objValue, othValue, key, object, other, stack); - } - // Recursively compare objects (susceptible to call stack limits). - if (!(compared === undefined - ? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack)) - : compared - )) { - result = false; - break; - } - skipCtor || (skipCtor = key == 'constructor'); - } - if (result && !skipCtor) { - var objCtor = object.constructor, - othCtor = other.constructor; - - // Non `Object` object instances with different constructors are not equal. - if (objCtor != othCtor && - ('constructor' in object && 'constructor' in other) && - !(typeof objCtor == 'function' && objCtor instanceof objCtor && - typeof othCtor == 'function' && othCtor instanceof othCtor)) { - result = false; - } - } - stack['delete'](object); - stack['delete'](other); - return result; - } - - /** - * A specialized version of `baseRest` which flattens the rest array. - * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @returns {Function} Returns the new function. - */ - function flatRest(func) { - return setToString(overRest(func, undefined, flatten), func + ''); - } - - /** - * Creates an array of own enumerable property names and symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names and symbols. - */ - function getAllKeys(object) { - return baseGetAllKeys(object, keys, getSymbols); - } - - /** - * Creates an array of own and inherited enumerable property names and - * symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names and symbols. - */ - function getAllKeysIn(object) { - return baseGetAllKeys(object, keysIn, getSymbolsIn); - } - - /** - * Gets metadata for `func`. - * - * @private - * @param {Function} func The function to query. - * @returns {*} Returns the metadata for `func`. - */ - var getData = !metaMap ? noop : function(func) { - return metaMap.get(func); - }; - - /** - * Gets the name of `func`. - * - * @private - * @param {Function} func The function to query. - * @returns {string} Returns the function name. - */ - function getFuncName(func) { - var result = (func.name + ''), - array = realNames[result], - length = hasOwnProperty.call(realNames, result) ? array.length : 0; - - while (length--) { - var data = array[length], - otherFunc = data.func; - if (otherFunc == null || otherFunc == func) { - return data.name; - } - } - return result; - } - - /** - * Gets the argument placeholder value for `func`. - * - * @private - * @param {Function} func The function to inspect. - * @returns {*} Returns the placeholder value. - */ - function getHolder(func) { - var object = hasOwnProperty.call(lodash, 'placeholder') ? lodash : func; - return object.placeholder; - } - - /** - * Gets the appropriate "iteratee" function. If `_.iteratee` is customized, - * this function returns the custom method, otherwise it returns `baseIteratee`. - * If arguments are provided, the chosen function is invoked with them and - * its result is returned. - * - * @private - * @param {*} [value] The value to convert to an iteratee. - * @param {number} [arity] The arity of the created iteratee. - * @returns {Function} Returns the chosen function or its result. - */ - function getIteratee() { - var result = lodash.iteratee || iteratee; - result = result === iteratee ? 
baseIteratee : result; - return arguments.length ? result(arguments[0], arguments[1]) : result; - } - - /** - * Gets the data for `map`. - * - * @private - * @param {Object} map The map to query. - * @param {string} key The reference key. - * @returns {*} Returns the map data. - */ - function getMapData(map, key) { - var data = map.__data__; - return isKeyable(key) - ? data[typeof key == 'string' ? 'string' : 'hash'] - : data.map; - } - - /** - * Gets the property names, values, and compare flags of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the match data of `object`. - */ - function getMatchData(object) { - var result = keys(object), - length = result.length; - - while (length--) { - var key = result[length], - value = object[key]; - - result[length] = [key, value, isStrictComparable(value)]; - } - return result; - } - - /** - * Gets the native function at `key` of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {string} key The key of the method to get. - * @returns {*} Returns the function if it's native, else `undefined`. - */ - function getNative(object, key) { - var value = getValue(object, key); - return baseIsNative(value) ? value : undefined; - } - - /** - * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the raw `toStringTag`. - */ - function getRawTag(value) { - var isOwn = hasOwnProperty.call(value, symToStringTag), - tag = value[symToStringTag]; - - try { - value[symToStringTag] = undefined; - var unmasked = true; - } catch (e) {} - - var result = nativeObjectToString.call(value); - if (unmasked) { - if (isOwn) { - value[symToStringTag] = tag; - } else { - delete value[symToStringTag]; - } - } - return result; - } - - /** - * Creates an array of the own enumerable symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of symbols. - */ - var getSymbols = !nativeGetSymbols ? stubArray : function(object) { - if (object == null) { - return []; - } - object = Object(object); - return arrayFilter(nativeGetSymbols(object), function(symbol) { - return propertyIsEnumerable.call(object, symbol); - }); - }; - - /** - * Creates an array of the own and inherited enumerable symbols of `object`. - * - * @private - * @param {Object} object The object to query. - * @returns {Array} Returns the array of symbols. - */ - var getSymbolsIn = !nativeGetSymbols ? stubArray : function(object) { - var result = []; - while (object) { - arrayPush(result, getSymbols(object)); - object = getPrototype(object); - } - return result; - }; - - /** - * Gets the `toStringTag` of `value`. - * - * @private - * @param {*} value The value to query. - * @returns {string} Returns the `toStringTag`. - */ - var getTag = baseGetTag; - - // Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6. - if ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) || - (Map && getTag(new Map) != mapTag) || - (Promise && getTag(Promise.resolve()) != promiseTag) || - (Set && getTag(new Set) != setTag) || - (WeakMap && getTag(new WeakMap) != weakMapTag)) { - getTag = function(value) { - var result = baseGetTag(value), - Ctor = result == objectTag ? value.constructor : undefined, - ctorString = Ctor ? 
toSource(Ctor) : ''; - - if (ctorString) { - switch (ctorString) { - case dataViewCtorString: return dataViewTag; - case mapCtorString: return mapTag; - case promiseCtorString: return promiseTag; - case setCtorString: return setTag; - case weakMapCtorString: return weakMapTag; - } - } - return result; - }; - } - - /** - * Gets the view, applying any `transforms` to the `start` and `end` positions. - * - * @private - * @param {number} start The start of the view. - * @param {number} end The end of the view. - * @param {Array} transforms The transformations to apply to the view. - * @returns {Object} Returns an object containing the `start` and `end` - * positions of the view. - */ - function getView(start, end, transforms) { - var index = -1, - length = transforms.length; - - while (++index < length) { - var data = transforms[index], - size = data.size; - - switch (data.type) { - case 'drop': start += size; break; - case 'dropRight': end -= size; break; - case 'take': end = nativeMin(end, start + size); break; - case 'takeRight': start = nativeMax(start, end - size); break; - } - } - return { 'start': start, 'end': end }; - } - - /** - * Extracts wrapper details from the `source` body comment. - * - * @private - * @param {string} source The source to inspect. - * @returns {Array} Returns the wrapper details. - */ - function getWrapDetails(source) { - var match = source.match(reWrapDetails); - return match ? match[1].split(reSplitDetails) : []; - } - - /** - * Checks if `path` exists on `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @param {Function} hasFunc The function to check properties. - * @returns {boolean} Returns `true` if `path` exists, else `false`. - */ - function hasPath(object, path, hasFunc) { - path = castPath(path, object); - - var index = -1, - length = path.length, - result = false; - - while (++index < length) { - var key = toKey(path[index]); - if (!(result = object != null && hasFunc(object, key))) { - break; - } - object = object[key]; - } - if (result || ++index != length) { - return result; - } - length = object == null ? 0 : object.length; - return !!length && isLength(length) && isIndex(key, length) && - (isArray(object) || isArguments(object)); - } - - /** - * Initializes an array clone. - * - * @private - * @param {Array} array The array to clone. - * @returns {Array} Returns the initialized clone. - */ - function initCloneArray(array) { - var length = array.length, - result = new array.constructor(length); - - // Add properties assigned by `RegExp#exec`. - if (length && typeof array[0] == 'string' && hasOwnProperty.call(array, 'index')) { - result.index = array.index; - result.input = array.input; - } - return result; - } - - /** - * Initializes an object clone. - * - * @private - * @param {Object} object The object to clone. - * @returns {Object} Returns the initialized clone. - */ - function initCloneObject(object) { - return (typeof object.constructor == 'function' && !isPrototype(object)) - ? baseCreate(getPrototype(object)) - : {}; - } - - /** - * Initializes an object clone based on its `toStringTag`. - * - * **Note:** This function only supports cloning values with tags of - * `Boolean`, `Date`, `Error`, `Map`, `Number`, `RegExp`, `Set`, or `String`. - * - * @private - * @param {Object} object The object to clone. - * @param {string} tag The `toStringTag` of the object to clone. - * @param {boolean} [isDeep] Specify a deep clone. 
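- * (Added illustrative note:) through the public `_.cloneDeep`, this is the
- * step that gives `_.cloneDeep(/ab/g)` a fresh `RegExp` with the same source
- * and flags, and gives maps and sets empty `new Ctor` shells that `baseClone`
- * repopulates afterwards.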
- * @returns {Object} Returns the initialized clone.
- */
- function initCloneByTag(object, tag, isDeep) {
- var Ctor = object.constructor;
- switch (tag) {
- case arrayBufferTag:
- return cloneArrayBuffer(object);
-
- case boolTag:
- case dateTag:
- return new Ctor(+object);
-
- case dataViewTag:
- return cloneDataView(object, isDeep);
-
- case float32Tag: case float64Tag:
- case int8Tag: case int16Tag: case int32Tag:
- case uint8Tag: case uint8ClampedTag: case uint16Tag: case uint32Tag:
- return cloneTypedArray(object, isDeep);
-
- case mapTag:
- return new Ctor;
-
- case numberTag:
- case stringTag:
- return new Ctor(object);
-
- case regexpTag:
- return cloneRegExp(object);
-
- case setTag:
- return new Ctor;
-
- case symbolTag:
- return cloneSymbol(object);
- }
- }
-
- /**
- * Inserts wrapper `details` in a comment at the top of the `source` body.
- *
- * @private
- * @param {string} source The source to modify.
- * @param {Array} details The details to insert.
- * @returns {string} Returns the modified source.
- */
- function insertWrapDetails(source, details) {
- var length = details.length;
- if (!length) {
- return source;
- }
- var lastIndex = length - 1;
- details[lastIndex] = (length > 1 ? '& ' : '') + details[lastIndex];
- details = details.join(length > 2 ? ', ' : ' ');
- return source.replace(reWrapComment, '{\n/* [wrapped with ' + details + '] */\n');
- }
-
- /**
- * Checks if `value` is a flattenable `arguments` object or array.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is flattenable, else `false`.
- */
- function isFlattenable(value) {
- return isArray(value) || isArguments(value) ||
- !!(spreadableSymbol && value && value[spreadableSymbol]);
- }
-
- /**
- * Checks if `value` is a valid array-like index.
- *
- * @private
- * @param {*} value The value to check.
- * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
- * @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
- */
- function isIndex(value, length) {
- var type = typeof value;
- length = length == null ? MAX_SAFE_INTEGER : length;
-
- return !!length &&
- (type == 'number' ||
- (type != 'symbol' && reIsUint.test(value))) &&
- (value > -1 && value % 1 == 0 && value < length);
- }
-
- /**
- * Checks if the given arguments are from an iteratee call.
- *
- * @private
- * @param {*} value The potential iteratee value argument.
- * @param {*} index The potential iteratee index or key argument.
- * @param {*} object The potential iteratee object argument.
- * @returns {boolean} Returns `true` if the arguments are from an iteratee call,
- * else `false`.
- */
- function isIterateeCall(value, index, object) {
- if (!isObject(object)) {
- return false;
- }
- var type = typeof index;
- if (type == 'number'
- ? (isArrayLike(object) && isIndex(index, object.length))
- : (type == 'string' && index in object)
- ) {
- return eq(object[index], value);
- }
- return false;
- }
-
- /**
- * Checks if `value` is a property name and not a property path.
- *
- * @private
- * @param {*} value The value to check.
- * @param {Object} [object] The object to query keys on.
- * @returns {boolean} Returns `true` if `value` is a property name, else `false`.
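- * @example (added, illustrative)
- *
- * // isKey('a');                 => true  (plain property name)
- * // isKey('a.b');               => false (parsed as a path)
- * // isKey('a.b', { 'a.b': 1 }); => true  (an own key of `object` wins)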
- */
- function isKey(value, object) {
- if (isArray(value)) {
- return false;
- }
- var type = typeof value;
- if (type == 'number' || type == 'symbol' || type == 'boolean' ||
- value == null || isSymbol(value)) {
- return true;
- }
- return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||
- (object != null && value in Object(object));
- }
-
- /**
- * Checks if `value` is suitable for use as a unique object key.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is suitable, else `false`.
- */
- function isKeyable(value) {
- var type = typeof value;
- return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')
- ? (value !== '__proto__')
- : (value === null);
- }
-
- /**
- * Checks if `func` has a lazy counterpart.
- *
- * @private
- * @param {Function} func The function to check.
- * @returns {boolean} Returns `true` if `func` has a lazy counterpart,
- * else `false`.
- */
- function isLaziable(func) {
- var funcName = getFuncName(func),
- other = lodash[funcName];
-
- if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) {
- return false;
- }
- if (func === other) {
- return true;
- }
- var data = getData(other);
- return !!data && func === data[0];
- }
-
- /**
- * Checks if `func` has its source masked.
- *
- * @private
- * @param {Function} func The function to check.
- * @returns {boolean} Returns `true` if `func` is masked, else `false`.
- */
- function isMasked(func) {
- return !!maskSrcKey && (maskSrcKey in func);
- }
-
- /**
- * Checks if `func` is capable of being masked.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `func` is maskable, else `false`.
- */
- var isMaskable = coreJsData ? isFunction : stubFalse;
-
- /**
- * Checks if `value` is likely a prototype object.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
- */
- function isPrototype(value) {
- var Ctor = value && value.constructor,
- proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;
-
- return value === proto;
- }
-
- /**
- * Checks if `value` is suitable for strict equality comparisons, i.e. `===`.
- *
- * @private
- * @param {*} value The value to check.
- * @returns {boolean} Returns `true` if `value` is suitable for strict
- * equality comparisons, else `false`.
- */
- function isStrictComparable(value) {
- return value === value && !isObject(value);
- }
-
- /**
- * A specialized version of `matchesProperty` for source values suitable
- * for strict equality comparisons, i.e. `===`.
- *
- * @private
- * @param {string} key The key of the property to get.
- * @param {*} srcValue The value to match.
- * @returns {Function} Returns the new spec function.
- */
- function matchesStrictComparable(key, srcValue) {
- return function(object) {
- if (object == null) {
- return false;
- }
- return object[key] === srcValue &&
- (srcValue !== undefined || (key in Object(object)));
- };
- }
-
- /**
- * A specialized version of `_.memoize` which clears the memoized function's
- * cache when it exceeds `MAX_MEMOIZE_SIZE`.
- *
- * @private
- * @param {Function} func The function to have its output memoized.
- * @returns {Function} Returns the new memoized function.
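- * @example (added, illustrative)
- *
- * // `stringToPath` below is built with this helper: once the cache holds
- * // MAX_MEMOIZE_SIZE (500) entries it is cleared wholesale, trading the
- * // occasional re-parse for a bounded memory footprint.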
- */
- function memoizeCapped(func) {
- var result = memoize(func, function(key) {
- if (cache.size === MAX_MEMOIZE_SIZE) {
- cache.clear();
- }
- return key;
- });
-
- var cache = result.cache;
- return result;
- }
-
- /**
- * Merges the function metadata of `source` into `data`.
- *
- * Merging metadata reduces the number of wrappers used to invoke a function.
- * This is possible because methods like `_.bind`, `_.curry`, and `_.partial`
- * may be applied regardless of execution order. Methods like `_.ary` and
- * `_.rearg` modify function arguments, making the order in which they are
- * executed important, preventing the merging of metadata. However, we make
- * an exception for a safe combined case where curried functions have `_.ary`
- * and/or `_.rearg` applied.
- *
- * @private
- * @param {Array} data The destination metadata.
- * @param {Array} source The source metadata.
- * @returns {Array} Returns `data`.
- */
- function mergeData(data, source) {
- var bitmask = data[1],
- srcBitmask = source[1],
- newBitmask = bitmask | srcBitmask,
- isCommon = newBitmask < (WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG | WRAP_ARY_FLAG);
-
- var isCombo =
- ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_CURRY_FLAG)) ||
- ((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_REARG_FLAG) && (data[7].length <= source[8])) ||
- ((srcBitmask == (WRAP_ARY_FLAG | WRAP_REARG_FLAG)) && (source[7].length <= source[8]) && (bitmask == WRAP_CURRY_FLAG));
-
- // Exit early if metadata can't be merged.
- if (!(isCommon || isCombo)) {
- return data;
- }
- // Use source `thisArg` if available.
- if (srcBitmask & WRAP_BIND_FLAG) {
- data[2] = source[2];
- // Set when currying a bound function.
- newBitmask |= bitmask & WRAP_BIND_FLAG ? 0 : WRAP_CURRY_BOUND_FLAG;
- }
- // Compose partial arguments.
- var value = source[3];
- if (value) {
- var partials = data[3];
- data[3] = partials ? composeArgs(partials, value, source[4]) : value;
- data[4] = partials ? replaceHolders(data[3], PLACEHOLDER) : source[4];
- }
- // Compose partial right arguments.
- value = source[5];
- if (value) {
- partials = data[5];
- data[5] = partials ? composeArgsRight(partials, value, source[6]) : value;
- data[6] = partials ? replaceHolders(data[5], PLACEHOLDER) : source[6];
- }
- // Use source `argPos` if available.
- value = source[7];
- if (value) {
- data[7] = value;
- }
- // Use source `ary` if it's smaller.
- if (srcBitmask & WRAP_ARY_FLAG) {
- data[8] = data[8] == null ? source[8] : nativeMin(data[8], source[8]);
- }
- // Use source `arity` if one is not provided.
- if (data[9] == null) {
- data[9] = source[9];
- }
- // Use source `func` and merge bitmasks.
- data[0] = source[0];
- data[1] = newBitmask;
-
- return data;
- }
-
- /**
- * This function is like
- * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
- * except that it includes inherited enumerable properties.
- *
- * @private
- * @param {Object} object The object to query.
- * @returns {Array} Returns the array of property names.
- */
- function nativeKeysIn(object) {
- var result = [];
- if (object != null) {
- for (var key in Object(object)) {
- result.push(key);
- }
- }
- return result;
- }
-
- /**
- * Converts `value` to a string using `Object.prototype.toString`.
- *
- * @private
- * @param {*} value The value to convert.
- * @returns {string} Returns the converted string.
- */
- function objectToString(value) {
- return nativeObjectToString.call(value);
- }
-
- /**
- * A specialized version of `baseRest` which transforms the rest array.
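- * (Added illustrative note:) e.g. `overRest(fn, 1, flatten)` yields a function
- * that passes argument 0 through untouched and hands `fn` the remaining
- * arguments gathered into one array and flattened; `flatRest` above uses
- * exactly that transform.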
- * - * @private - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @param {Function} transform The rest array transform. - * @returns {Function} Returns the new function. - */ - function overRest(func, start, transform) { - start = nativeMax(start === undefined ? (func.length - 1) : start, 0); - return function() { - var args = arguments, - index = -1, - length = nativeMax(args.length - start, 0), - array = Array(length); - - while (++index < length) { - array[index] = args[start + index]; - } - index = -1; - var otherArgs = Array(start + 1); - while (++index < start) { - otherArgs[index] = args[index]; - } - otherArgs[start] = transform(array); - return apply(func, this, otherArgs); - }; - } - - /** - * Gets the parent value at `path` of `object`. - * - * @private - * @param {Object} object The object to query. - * @param {Array} path The path to get the parent value of. - * @returns {*} Returns the parent value. - */ - function parent(object, path) { - return path.length < 2 ? object : baseGet(object, baseSlice(path, 0, -1)); - } - - /** - * Reorder `array` according to the specified indexes where the element at - * the first index is assigned as the first element, the element at - * the second index is assigned as the second element, and so on. - * - * @private - * @param {Array} array The array to reorder. - * @param {Array} indexes The arranged array indexes. - * @returns {Array} Returns `array`. - */ - function reorder(array, indexes) { - var arrLength = array.length, - length = nativeMin(indexes.length, arrLength), - oldArray = copyArray(array); - - while (length--) { - var index = indexes[length]; - array[length] = isIndex(index, arrLength) ? oldArray[index] : undefined; - } - return array; - } - - /** - * Sets metadata for `func`. - * - * **Note:** If this function becomes hot, i.e. is invoked a lot in a short - * period of time, it will trip its breaker and transition to an identity - * function to avoid garbage collection pauses in V8. See - * [V8 issue 2070](https://bugs.chromium.org/p/v8/issues/detail?id=2070) - * for more details. - * - * @private - * @param {Function} func The function to associate metadata with. - * @param {*} data The metadata. - * @returns {Function} Returns `func`. - */ - var setData = shortOut(baseSetData); - - /** - * A simple wrapper around the global [`setTimeout`](https://mdn.io/setTimeout). - * - * @private - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @returns {number|Object} Returns the timer id or timeout object. - */ - var setTimeout = ctxSetTimeout || function(func, wait) { - return root.setTimeout(func, wait); - }; - - /** - * Sets the `toString` method of `func` to return `string`. - * - * @private - * @param {Function} func The function to modify. - * @param {Function} string The `toString` result. - * @returns {Function} Returns `func`. - */ - var setToString = shortOut(baseSetToString); - - /** - * Sets the `toString` method of `wrapper` to mimic the source of `reference` - * with wrapper details in a comment at the top of the source body. - * - * @private - * @param {Function} wrapper The function to modify. - * @param {Function} reference The reference function. - * @param {number} bitmask The bitmask flags. See `createWrap` for more details. - * @returns {Function} Returns `wrapper`. 
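- * @example (added, illustrative)
- *
- * // String(_.curry(fn)) mimics the source of `fn` but gains a leading
- * // '[wrapped with _.curry]' details comment produced by this helper.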
- */
- function setWrapToString(wrapper, reference, bitmask) {
- var source = (reference + '');
- return setToString(wrapper, insertWrapDetails(source, updateWrapDetails(getWrapDetails(source), bitmask)));
- }
-
- /**
- * Creates a function that'll short out and invoke `identity` instead
- * of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN`
- * milliseconds.
- *
- * @private
- * @param {Function} func The function to restrict.
- * @returns {Function} Returns the new shortable function.
- */
- function shortOut(func) {
- var count = 0,
- lastCalled = 0;
-
- return function() {
- var stamp = nativeNow(),
- remaining = HOT_SPAN - (stamp - lastCalled);
-
- lastCalled = stamp;
- if (remaining > 0) {
- if (++count >= HOT_COUNT) {
- return arguments[0];
- }
- } else {
- count = 0;
- }
- return func.apply(undefined, arguments);
- };
- }
-
- /**
- * A specialized version of `_.shuffle` which mutates and sets the size of `array`.
- *
- * @private
- * @param {Array} array The array to shuffle.
- * @param {number} [size=array.length] The size of `array`.
- * @returns {Array} Returns `array`.
- */
- function shuffleSelf(array, size) {
- var index = -1,
- length = array.length,
- lastIndex = length - 1;
-
- size = size === undefined ? length : size;
- while (++index < size) {
- var rand = baseRandom(index, lastIndex),
- value = array[rand];
-
- array[rand] = array[index];
- array[index] = value;
- }
- array.length = size;
- return array;
- }
-
- /**
- * Converts `string` to a property path array.
- *
- * @private
- * @param {string} string The string to convert.
- * @returns {Array} Returns the property path array.
- */
- var stringToPath = memoizeCapped(function(string) {
- var result = [];
- if (string.charCodeAt(0) === 46 /* . */) {
- result.push('');
- }
- string.replace(rePropName, function(match, number, quote, subString) {
- result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match));
- });
- return result;
- });
-
- /**
- * Converts `value` to a string key if it's not a string or symbol.
- *
- * @private
- * @param {*} value The value to inspect.
- * @returns {string|symbol} Returns the key.
- */
- function toKey(value) {
- if (typeof value == 'string' || isSymbol(value)) {
- return value;
- }
- var result = (value + '');
- return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
- }
-
- /**
- * Converts `func` to its source code.
- *
- * @private
- * @param {Function} func The function to convert.
- * @returns {string} Returns the source code.
- */
- function toSource(func) {
- if (func != null) {
- try {
- return funcToString.call(func);
- } catch (e) {}
- try {
- return (func + '');
- } catch (e) {}
- }
- return '';
- }
-
- /**
- * Updates wrapper `details` based on `bitmask` flags.
- *
- * @private
- * @param {Array} details The details to modify.
- * @param {number} bitmask The bitmask flags. See `createWrap` for more details.
- * @returns {Array} Returns `details`.
- */
- function updateWrapDetails(details, bitmask) {
- arrayEach(wrapFlags, function(pair) {
- var value = '_.' + pair[0];
- if ((bitmask & pair[1]) && !arrayIncludes(details, value)) {
- details.push(value);
- }
- });
- return details.sort();
- }
-
- /**
- * Creates a clone of `wrapper`.
- *
- * @private
- * @param {Object} wrapper The wrapper to clone.
- * @returns {Object} Returns the cloned wrapper.
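- * @example (added, illustrative)
- *
- * // Cloning copies the pending `__actions__` queue, so a forked chain can
- * // have further methods applied without mutating the original wrapper.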
- */
- function wrapperClone(wrapper) {
- if (wrapper instanceof LazyWrapper) {
- return wrapper.clone();
- }
- var result = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__);
- result.__actions__ = copyArray(wrapper.__actions__);
- result.__index__ = wrapper.__index__;
- result.__values__ = wrapper.__values__;
- return result;
- }
-
- /*------------------------------------------------------------------------*/
-
- /**
- * Creates an array of elements split into groups the length of `size`.
- * If `array` can't be split evenly, the final chunk will be the remaining
- * elements.
- *
- * @static
- * @memberOf _
- * @since 3.0.0
- * @category Array
- * @param {Array} array The array to process.
- * @param {number} [size=1] The length of each chunk.
- * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
- * @returns {Array} Returns the new array of chunks.
- * @example
- *
- * _.chunk(['a', 'b', 'c', 'd'], 2);
- * // => [['a', 'b'], ['c', 'd']]
- *
- * _.chunk(['a', 'b', 'c', 'd'], 3);
- * // => [['a', 'b', 'c'], ['d']]
- */
- function chunk(array, size, guard) {
- if ((guard ? isIterateeCall(array, size, guard) : size === undefined)) {
- size = 1;
- } else {
- size = nativeMax(toInteger(size), 0);
- }
- var length = array == null ? 0 : array.length;
- if (!length || size < 1) {
- return [];
- }
- var index = 0,
- resIndex = 0,
- result = Array(nativeCeil(length / size));
-
- while (index < length) {
- result[resIndex++] = baseSlice(array, index, (index += size));
- }
- return result;
- }
-
- /**
- * Creates an array with all falsey values removed. The values `false`, `null`,
- * `0`, `""`, `undefined`, and `NaN` are falsey.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {Array} array The array to compact.
- * @returns {Array} Returns the new array of filtered values.
- * @example
- *
- * _.compact([0, 1, false, 2, '', 3]);
- * // => [1, 2, 3]
- */
- function compact(array) {
- var index = -1,
- length = array == null ? 0 : array.length,
- resIndex = 0,
- result = [];
-
- while (++index < length) {
- var value = array[index];
- if (value) {
- result[resIndex++] = value;
- }
- }
- return result;
- }
-
- /**
- * Creates a new array concatenating `array` with any additional arrays
- * and/or values.
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Array
- * @param {Array} array The array to concatenate.
- * @param {...*} [values] The values to concatenate.
- * @returns {Array} Returns the new concatenated array.
- * @example
- *
- * var array = [1];
- * var other = _.concat(array, 2, [3], [[4]]);
- *
- * console.log(other);
- * // => [1, 2, 3, [4]]
- *
- * console.log(array);
- * // => [1]
- */
- function concat() {
- var length = arguments.length;
- if (!length) {
- return [];
- }
- var args = Array(length - 1),
- array = arguments[0],
- index = length;
-
- while (index--) {
- args[index - 1] = arguments[index];
- }
- return arrayPush(isArray(array) ? copyArray(array) : [array], baseFlatten(args, 1));
- }
-
- /**
- * Creates an array of `array` values not included in the other given arrays
- * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
- * for equality comparisons. The order and references of result values are
- * determined by the first array.
- *
- * **Note:** Unlike `_.pullAll`, this method returns a new array.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {Array} array The array to inspect.
- * @param {...Array} [values] The values to exclude. - * @returns {Array} Returns the new array of filtered values. - * @see _.without, _.xor - * @example - * - * _.difference([2, 1], [2, 3]); - * // => [1] - */ - var difference = baseRest(function(array, values) { - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true)) - : []; - }); - - /** - * This method is like `_.difference` except that it accepts `iteratee` which - * is invoked for each element of `array` and `values` to generate the criterion - * by which they're compared. The order and references of result values are - * determined by the first array. The iteratee is invoked with one argument: - * (value). - * - * **Note:** Unlike `_.pullAllBy`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {...Array} [values] The values to exclude. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of filtered values. - * @example - * - * _.differenceBy([2.1, 1.2], [2.3, 3.4], Math.floor); - * // => [1.2] - * - * // The `_.property` iteratee shorthand. - * _.differenceBy([{ 'x': 2 }, { 'x': 1 }], [{ 'x': 1 }], 'x'); - * // => [{ 'x': 2 }] - */ - var differenceBy = baseRest(function(array, values) { - var iteratee = last(values); - if (isArrayLikeObject(iteratee)) { - iteratee = undefined; - } - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)) - : []; - }); - - /** - * This method is like `_.difference` except that it accepts `comparator` - * which is invoked to compare elements of `array` to `values`. The order and - * references of result values are determined by the first array. The comparator - * is invoked with two arguments: (arrVal, othVal). - * - * **Note:** Unlike `_.pullAllWith`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {...Array} [values] The values to exclude. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of filtered values. - * @example - * - * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; - * - * _.differenceWith(objects, [{ 'x': 1, 'y': 2 }], _.isEqual); - * // => [{ 'x': 2, 'y': 1 }] - */ - var differenceWith = baseRest(function(array, values) { - var comparator = last(values); - if (isArrayLikeObject(comparator)) { - comparator = undefined; - } - return isArrayLikeObject(array) - ? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), undefined, comparator) - : []; - }); - - /** - * Creates a slice of `array` with `n` elements dropped from the beginning. - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to drop. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.drop([1, 2, 3]); - * // => [2, 3] - * - * _.drop([1, 2, 3], 2); - * // => [3] - * - * _.drop([1, 2, 3], 5); - * // => [] - * - * _.drop([1, 2, 3], 0); - * // => [1, 2, 3] - */ - function drop(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 
1 : toInteger(n); - return baseSlice(array, n < 0 ? 0 : n, length); - } - - /** - * Creates a slice of `array` with `n` elements dropped from the end. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to drop. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.dropRight([1, 2, 3]); - * // => [1, 2] - * - * _.dropRight([1, 2, 3], 2); - * // => [1] - * - * _.dropRight([1, 2, 3], 5); - * // => [] - * - * _.dropRight([1, 2, 3], 0); - * // => [1, 2, 3] - */ - function dropRight(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - n = length - n; - return baseSlice(array, 0, n < 0 ? 0 : n); - } - - /** - * Creates a slice of `array` excluding elements dropped from the end. - * Elements are dropped until `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.dropRightWhile(users, function(o) { return !o.active; }); - * // => objects for ['barney'] - * - * // The `_.matches` iteratee shorthand. - * _.dropRightWhile(users, { 'user': 'pebbles', 'active': false }); - * // => objects for ['barney', 'fred'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.dropRightWhile(users, ['active', false]); - * // => objects for ['barney'] - * - * // The `_.property` iteratee shorthand. - * _.dropRightWhile(users, 'active'); - * // => objects for ['barney', 'fred', 'pebbles'] - */ - function dropRightWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3), true, true) - : []; - } - - /** - * Creates a slice of `array` excluding elements dropped from the beginning. - * Elements are dropped until `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.dropWhile(users, function(o) { return !o.active; }); - * // => objects for ['pebbles'] - * - * // The `_.matches` iteratee shorthand. - * _.dropWhile(users, { 'user': 'barney', 'active': false }); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.dropWhile(users, ['active', false]); - * // => objects for ['pebbles'] - * - * // The `_.property` iteratee shorthand. - * _.dropWhile(users, 'active'); - * // => objects for ['barney', 'fred', 'pebbles'] - */ - function dropWhile(array, predicate) { - return (array && array.length) - ? 
baseWhile(array, getIteratee(predicate, 3), true) - : []; - } - - /** - * Fills elements of `array` with `value` from `start` up to, but not - * including, `end`. - * - * **Note:** This method mutates `array`. - * - * @static - * @memberOf _ - * @since 3.2.0 - * @category Array - * @param {Array} array The array to fill. - * @param {*} value The value to fill `array` with. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns `array`. - * @example - * - * var array = [1, 2, 3]; - * - * _.fill(array, 'a'); - * console.log(array); - * // => ['a', 'a', 'a'] - * - * _.fill(Array(3), 2); - * // => [2, 2, 2] - * - * _.fill([4, 6, 8, 10], '*', 1, 3); - * // => [4, '*', '*', 10] - */ - function fill(array, value, start, end) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - if (start && typeof start != 'number' && isIterateeCall(array, value, start)) { - start = 0; - end = length; - } - return baseFill(array, value, start, end); - } - - /** - * This method is like `_.find` except that it returns the index of the first - * element `predicate` returns truthy for instead of the element itself. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=0] The index to search from. - * @returns {number} Returns the index of the found element, else `-1`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.findIndex(users, function(o) { return o.user == 'barney'; }); - * // => 0 - * - * // The `_.matches` iteratee shorthand. - * _.findIndex(users, { 'user': 'fred', 'active': false }); - * // => 1 - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findIndex(users, ['active', false]); - * // => 0 - * - * // The `_.property` iteratee shorthand. - * _.findIndex(users, 'active'); - * // => 2 - */ - function findIndex(array, predicate, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = fromIndex == null ? 0 : toInteger(fromIndex); - if (index < 0) { - index = nativeMax(length + index, 0); - } - return baseFindIndex(array, getIteratee(predicate, 3), index); - } - - /** - * This method is like `_.findIndex` except that it iterates over elements - * of `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=array.length-1] The index to search from. - * @returns {number} Returns the index of the found element, else `-1`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.findLastIndex(users, function(o) { return o.user == 'pebbles'; }); - * // => 2 - * - * // The `_.matches` iteratee shorthand. - * _.findLastIndex(users, { 'user': 'barney', 'active': true }); - * // => 0 - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findLastIndex(users, ['active', false]); - * // => 2 - * - * // The `_.property` iteratee shorthand. 
- * _.findLastIndex(users, 'active'); - * // => 0 - */ - function findLastIndex(array, predicate, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = length - 1; - if (fromIndex !== undefined) { - index = toInteger(fromIndex); - index = fromIndex < 0 - ? nativeMax(length + index, 0) - : nativeMin(index, length - 1); - } - return baseFindIndex(array, getIteratee(predicate, 3), index, true); - } - - /** - * Flattens `array` a single level deep. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to flatten. - * @returns {Array} Returns the new flattened array. - * @example - * - * _.flatten([1, [2, [3, [4]], 5]]); - * // => [1, 2, [3, [4]], 5] - */ - function flatten(array) { - var length = array == null ? 0 : array.length; - return length ? baseFlatten(array, 1) : []; - } - - /** - * Recursively flattens `array`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to flatten. - * @returns {Array} Returns the new flattened array. - * @example - * - * _.flattenDeep([1, [2, [3, [4]], 5]]); - * // => [1, 2, 3, 4, 5] - */ - function flattenDeep(array) { - var length = array == null ? 0 : array.length; - return length ? baseFlatten(array, INFINITY) : []; - } - - /** - * Recursively flatten `array` up to `depth` times. - * - * @static - * @memberOf _ - * @since 4.4.0 - * @category Array - * @param {Array} array The array to flatten. - * @param {number} [depth=1] The maximum recursion depth. - * @returns {Array} Returns the new flattened array. - * @example - * - * var array = [1, [2, [3, [4]], 5]]; - * - * _.flattenDepth(array, 1); - * // => [1, 2, [3, [4]], 5] - * - * _.flattenDepth(array, 2); - * // => [1, 2, 3, [4], 5] - */ - function flattenDepth(array, depth) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - depth = depth === undefined ? 1 : toInteger(depth); - return baseFlatten(array, depth); - } - - /** - * The inverse of `_.toPairs`; this method returns an object composed - * from key-value `pairs`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} pairs The key-value pairs. - * @returns {Object} Returns the new object. - * @example - * - * _.fromPairs([['a', 1], ['b', 2]]); - * // => { 'a': 1, 'b': 2 } - */ - function fromPairs(pairs) { - var index = -1, - length = pairs == null ? 0 : pairs.length, - result = {}; - - while (++index < length) { - var pair = pairs[index]; - result[pair[0]] = pair[1]; - } - return result; - } - - /** - * Gets the first element of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @alias first - * @category Array - * @param {Array} array The array to query. - * @returns {*} Returns the first element of `array`. - * @example - * - * _.head([1, 2, 3]); - * // => 1 - * - * _.head([]); - * // => undefined - */ - function head(array) { - return (array && array.length) ? array[0] : undefined; - } - - /** - * Gets the index at which the first occurrence of `value` is found in `array` - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. If `fromIndex` is negative, it's used as the - * offset from the end of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @param {number} [fromIndex=0] The index to search from. 
- * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.indexOf([1, 2, 1, 2], 2); - * // => 1 - * - * // Search from the `fromIndex`. - * _.indexOf([1, 2, 1, 2], 2, 2); - * // => 3 - */ - function indexOf(array, value, fromIndex) { - var length = array == null ? 0 : array.length; - if (!length) { - return -1; - } - var index = fromIndex == null ? 0 : toInteger(fromIndex); - if (index < 0) { - index = nativeMax(length + index, 0); - } - return baseIndexOf(array, value, index); - } - - /** - * Gets all but the last element of `array`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to query. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.initial([1, 2, 3]); - * // => [1, 2] - */ - function initial(array) { - var length = array == null ? 0 : array.length; - return length ? baseSlice(array, 0, -1) : []; - } - - /** - * Creates an array of unique values that are included in all given arrays - * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. The order and references of result values are - * determined by the first array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @returns {Array} Returns the new array of intersecting values. - * @example - * - * _.intersection([2, 1], [2, 3]); - * // => [2] - */ - var intersection = baseRest(function(arrays) { - var mapped = arrayMap(arrays, castArrayLikeObject); - return (mapped.length && mapped[0] === arrays[0]) - ? baseIntersection(mapped) - : []; - }); - - /** - * This method is like `_.intersection` except that it accepts `iteratee` - * which is invoked for each element of each `arrays` to generate the criterion - * by which they're compared. The order and references of result values are - * determined by the first array. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of intersecting values. - * @example - * - * _.intersectionBy([2.1, 1.2], [2.3, 3.4], Math.floor); - * // => [2.1] - * - * // The `_.property` iteratee shorthand. - * _.intersectionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); - * // => [{ 'x': 1 }] - */ - var intersectionBy = baseRest(function(arrays) { - var iteratee = last(arrays), - mapped = arrayMap(arrays, castArrayLikeObject); - - if (iteratee === last(mapped)) { - iteratee = undefined; - } else { - mapped.pop(); - } - return (mapped.length && mapped[0] === arrays[0]) - ? baseIntersection(mapped, getIteratee(iteratee, 2)) - : []; - }); - - /** - * This method is like `_.intersection` except that it accepts `comparator` - * which is invoked to compare elements of `arrays`. The order and references - * of result values are determined by the first array. The comparator is - * invoked with two arguments: (arrVal, othVal). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of intersecting values. 
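- * (Added note:) the trailing `comparator` is only honored when it is a
- * function; any other trailing value is treated as one more array to inspect.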
- * @example
- *
- * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
- * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
- *
- * _.intersectionWith(objects, others, _.isEqual);
- * // => [{ 'x': 1, 'y': 2 }]
- */
- var intersectionWith = baseRest(function(arrays) {
- var comparator = last(arrays),
- mapped = arrayMap(arrays, castArrayLikeObject);
-
- comparator = typeof comparator == 'function' ? comparator : undefined;
- if (comparator) {
- mapped.pop();
- }
- return (mapped.length && mapped[0] === arrays[0])
- ? baseIntersection(mapped, undefined, comparator)
- : [];
- });
-
- /**
- * Converts all elements in `array` into a string separated by `separator`.
- *
- * @static
- * @memberOf _
- * @since 4.0.0
- * @category Array
- * @param {Array} array The array to convert.
- * @param {string} [separator=','] The element separator.
- * @returns {string} Returns the joined string.
- * @example
- *
- * _.join(['a', 'b', 'c'], '~');
- * // => 'a~b~c'
- */
- function join(array, separator) {
- return array == null ? '' : nativeJoin.call(array, separator);
- }
-
- /**
- * Gets the last element of `array`.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {Array} array The array to query.
- * @returns {*} Returns the last element of `array`.
- * @example
- *
- * _.last([1, 2, 3]);
- * // => 3
- */
- function last(array) {
- var length = array == null ? 0 : array.length;
- return length ? array[length - 1] : undefined;
- }
-
- /**
- * This method is like `_.indexOf` except that it iterates over elements of
- * `array` from right to left.
- *
- * @static
- * @memberOf _
- * @since 0.1.0
- * @category Array
- * @param {Array} array The array to inspect.
- * @param {*} value The value to search for.
- * @param {number} [fromIndex=array.length-1] The index to search from.
- * @returns {number} Returns the index of the matched value, else `-1`.
- * @example
- *
- * _.lastIndexOf([1, 2, 1, 2], 2);
- * // => 3
- *
- * // Search from the `fromIndex`.
- * _.lastIndexOf([1, 2, 1, 2], 2, 2);
- * // => 1
- */
- function lastIndexOf(array, value, fromIndex) {
- var length = array == null ? 0 : array.length;
- if (!length) {
- return -1;
- }
- var index = length;
- if (fromIndex !== undefined) {
- index = toInteger(fromIndex);
- index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1);
- }
- return value === value
- ? strictLastIndexOf(array, value, index)
- : baseFindIndex(array, baseIsNaN, index, true);
- }
-
- /**
- * Gets the element at index `n` of `array`. If `n` is negative, the nth
- * element from the end is returned.
- *
- * @static
- * @memberOf _
- * @since 4.11.0
- * @category Array
- * @param {Array} array The array to query.
- * @param {number} [n=0] The index of the element to return.
- * @returns {*} Returns the nth element of `array`.
- * @example
- *
- * var array = ['a', 'b', 'c', 'd'];
- *
- * _.nth(array, 1);
- * // => 'b'
- *
- * _.nth(array, -2);
- * // => 'c'
- */
- function nth(array, n) {
- return (array && array.length) ? baseNth(array, toInteger(n)) : undefined;
- }
-
- /**
- * Removes all given values from `array` using
- * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
- * for equality comparisons.
- *
- * **Note:** Unlike `_.without`, this method mutates `array`. Use `_.remove`
- * to remove elements from an array by predicate.
- *
- * @static
- * @memberOf _
- * @since 2.0.0
- * @category Array
- * @param {Array} array The array to modify.
- * @param {...*} [values] The values to remove. - * @returns {Array} Returns `array`. - * @example - * - * var array = ['a', 'b', 'c', 'a', 'b', 'c']; - * - * _.pull(array, 'a', 'c'); - * console.log(array); - * // => ['b', 'b'] - */ - var pull = baseRest(pullAll); - - /** - * This method is like `_.pull` except that it accepts an array of values to remove. - * - * **Note:** Unlike `_.difference`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @returns {Array} Returns `array`. - * @example - * - * var array = ['a', 'b', 'c', 'a', 'b', 'c']; - * - * _.pullAll(array, ['a', 'c']); - * console.log(array); - * // => ['b', 'b'] - */ - function pullAll(array, values) { - return (array && array.length && values && values.length) - ? basePullAll(array, values) - : array; - } - - /** - * This method is like `_.pullAll` except that it accepts `iteratee` which is - * invoked for each element of `array` and `values` to generate the criterion - * by which they're compared. The iteratee is invoked with one argument: (value). - * - * **Note:** Unlike `_.differenceBy`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns `array`. - * @example - * - * var array = [{ 'x': 1 }, { 'x': 2 }, { 'x': 3 }, { 'x': 1 }]; - * - * _.pullAllBy(array, [{ 'x': 1 }, { 'x': 3 }], 'x'); - * console.log(array); - * // => [{ 'x': 2 }] - */ - function pullAllBy(array, values, iteratee) { - return (array && array.length && values && values.length) - ? basePullAll(array, values, getIteratee(iteratee, 2)) - : array; - } - - /** - * This method is like `_.pullAll` except that it accepts `comparator` which - * is invoked to compare elements of `array` to `values`. The comparator is - * invoked with two arguments: (arrVal, othVal). - * - * **Note:** Unlike `_.differenceWith`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 4.6.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Array} values The values to remove. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns `array`. - * @example - * - * var array = [{ 'x': 1, 'y': 2 }, { 'x': 3, 'y': 4 }, { 'x': 5, 'y': 6 }]; - * - * _.pullAllWith(array, [{ 'x': 3, 'y': 4 }], _.isEqual); - * console.log(array); - * // => [{ 'x': 1, 'y': 2 }, { 'x': 5, 'y': 6 }] - */ - function pullAllWith(array, values, comparator) { - return (array && array.length && values && values.length) - ? basePullAll(array, values, undefined, comparator) - : array; - } - - /** - * Removes elements from `array` corresponding to `indexes` and returns an - * array of removed elements. - * - * **Note:** Unlike `_.at`, this method mutates `array`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {...(number|number[])} [indexes] The indexes of elements to remove. - * @returns {Array} Returns the new array of removed elements. 
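- * (Added note:) indexes are flattened and removals are applied from the
- * highest index down, so earlier splices never shift later ones -- `[1, 3]`
- * below removes exactly 'b' and 'd'.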
- * @example - * - * var array = ['a', 'b', 'c', 'd']; - * var pulled = _.pullAt(array, [1, 3]); - * - * console.log(array); - * // => ['a', 'c'] - * - * console.log(pulled); - * // => ['b', 'd'] - */ - var pullAt = flatRest(function(array, indexes) { - var length = array == null ? 0 : array.length, - result = baseAt(array, indexes); - - basePullAt(array, arrayMap(indexes, function(index) { - return isIndex(index, length) ? +index : index; - }).sort(compareAscending)); - - return result; - }); - - /** - * Removes all elements from `array` that `predicate` returns truthy for - * and returns an array of the removed elements. The predicate is invoked - * with three arguments: (value, index, array). - * - * **Note:** Unlike `_.filter`, this method mutates `array`. Use `_.pull` - * to pull elements from an array by value. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Array - * @param {Array} array The array to modify. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new array of removed elements. - * @example - * - * var array = [1, 2, 3, 4]; - * var evens = _.remove(array, function(n) { - * return n % 2 == 0; - * }); - * - * console.log(array); - * // => [1, 3] - * - * console.log(evens); - * // => [2, 4] - */ - function remove(array, predicate) { - var result = []; - if (!(array && array.length)) { - return result; - } - var index = -1, - indexes = [], - length = array.length; - - predicate = getIteratee(predicate, 3); - while (++index < length) { - var value = array[index]; - if (predicate(value, index, array)) { - result.push(value); - indexes.push(index); - } - } - basePullAt(array, indexes); - return result; - } - - /** - * Reverses `array` so that the first element becomes the last, the second - * element becomes the second to last, and so on. - * - * **Note:** This method mutates `array` and is based on - * [`Array#reverse`](https://mdn.io/Array/reverse). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to modify. - * @returns {Array} Returns `array`. - * @example - * - * var array = [1, 2, 3]; - * - * _.reverse(array); - * // => [3, 2, 1] - * - * console.log(array); - * // => [3, 2, 1] - */ - function reverse(array) { - return array == null ? array : nativeReverse.call(array); - } - - /** - * Creates a slice of `array` from `start` up to, but not including, `end`. - * - * **Note:** This method is used instead of - * [`Array#slice`](https://mdn.io/Array/slice) to ensure dense arrays are - * returned. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to slice. - * @param {number} [start=0] The start position. - * @param {number} [end=array.length] The end position. - * @returns {Array} Returns the slice of `array`. - */ - function slice(array, start, end) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - if (end && typeof end != 'number' && isIterateeCall(array, start, end)) { - start = 0; - end = length; - } - else { - start = start == null ? 0 : toInteger(start); - end = end === undefined ? length : toInteger(end); - } - return baseSlice(array, start, end); - } - - /** - * Uses a binary search to determine the lowest index at which `value` - * should be inserted into `array` in order to maintain its sort order. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The sorted array to inspect. 
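// Usage sketch (assumes lodash is loaded as `_`): _.remove both mutates the
// input and hands back the removed elements, unlike the non-mutating _.filter.
var nums = [1, 2, 3, 4];
var odds = _.remove(nums, function(n) { return n % 2 == 1; });
// nums => [2, 4], odds => [1, 3]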
- * @param {*} value The value to evaluate. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * _.sortedIndex([30, 50], 40); - * // => 1 - */ - function sortedIndex(array, value) { - return baseSortedIndex(array, value); - } - - /** - * This method is like `_.sortedIndex` except that it accepts `iteratee` - * which is invoked for `value` and each element of `array` to compute their - * sort ranking. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * var objects = [{ 'x': 4 }, { 'x': 5 }]; - * - * _.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); - * // => 0 - * - * // The `_.property` iteratee shorthand. - * _.sortedIndexBy(objects, { 'x': 4 }, 'x'); - * // => 0 - */ - function sortedIndexBy(array, value, iteratee) { - return baseSortedIndexBy(array, value, getIteratee(iteratee, 2)); - } - - /** - * This method is like `_.indexOf` except that it performs a binary - * search on a sorted `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.sortedIndexOf([4, 5, 5, 5, 6], 5); - * // => 1 - */ - function sortedIndexOf(array, value) { - var length = array == null ? 0 : array.length; - if (length) { - var index = baseSortedIndex(array, value); - if (index < length && eq(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * This method is like `_.sortedIndex` except that it returns the highest - * index at which `value` should be inserted into `array` in order to - * maintain its sort order. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * _.sortedLastIndex([4, 5, 5, 5, 6], 5); - * // => 4 - */ - function sortedLastIndex(array, value) { - return baseSortedIndex(array, value, true); - } - - /** - * This method is like `_.sortedLastIndex` except that it accepts `iteratee` - * which is invoked for `value` and each element of `array` to compute their - * sort ranking. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The sorted array to inspect. - * @param {*} value The value to evaluate. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {number} Returns the index at which `value` should be inserted - * into `array`. - * @example - * - * var objects = [{ 'x': 4 }, { 'x': 5 }]; - * - * _.sortedLastIndexBy(objects, { 'x': 4 }, function(o) { return o.x; }); - * // => 1 - * - * // The `_.property` iteratee shorthand. 
- * _.sortedLastIndexBy(objects, { 'x': 4 }, 'x'); - * // => 1 - */ - function sortedLastIndexBy(array, value, iteratee) { - return baseSortedIndexBy(array, value, getIteratee(iteratee, 2), true); - } - - /** - * This method is like `_.lastIndexOf` except that it performs a binary - * search on a sorted `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {*} value The value to search for. - * @returns {number} Returns the index of the matched value, else `-1`. - * @example - * - * _.sortedLastIndexOf([4, 5, 5, 5, 6], 5); - * // => 3 - */ - function sortedLastIndexOf(array, value) { - var length = array == null ? 0 : array.length; - if (length) { - var index = baseSortedIndex(array, value, true) - 1; - if (eq(array[index], value)) { - return index; - } - } - return -1; - } - - /** - * This method is like `_.uniq` except that it's designed and optimized - * for sorted arrays. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.sortedUniq([1, 1, 2]); - * // => [1, 2] - */ - function sortedUniq(array) { - return (array && array.length) - ? baseSortedUniq(array) - : []; - } - - /** - * This method is like `_.uniqBy` except that it's designed and optimized - * for sorted arrays. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [iteratee] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.sortedUniqBy([1.1, 1.2, 2.3, 2.4], Math.floor); - * // => [1.1, 2.3] - */ - function sortedUniqBy(array, iteratee) { - return (array && array.length) - ? baseSortedUniq(array, getIteratee(iteratee, 2)) - : []; - } - - /** - * Gets all but the first element of `array`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to query. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.tail([1, 2, 3]); - * // => [2, 3] - */ - function tail(array) { - var length = array == null ? 0 : array.length; - return length ? baseSlice(array, 1, length) : []; - } - - /** - * Creates a slice of `array` with `n` elements taken from the beginning. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to take. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. - * @example - * - * _.take([1, 2, 3]); - * // => [1] - * - * _.take([1, 2, 3], 2); - * // => [1, 2] - * - * _.take([1, 2, 3], 5); - * // => [1, 2, 3] - * - * _.take([1, 2, 3], 0); - * // => [] - */ - function take(array, n, guard) { - if (!(array && array.length)) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - return baseSlice(array, 0, n < 0 ? 0 : n); - } - - /** - * Creates a slice of `array` with `n` elements taken from the end. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {number} [n=1] The number of elements to take. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the slice of `array`. 
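// Usage sketch (assumes lodash is loaded as `_`): _.sortedIndex and
// _.sortedLastIndex bracket the run of equal values in a sorted array, so
// their difference counts occurrences at O(log n) per lookup.
var sorted = [4, 5, 5, 5, 6];
_.sortedLastIndex(sorted, 5) - _.sortedIndex(sorted, 5); // => 3 fives
_.sortedUniq(sorted); // => [4, 5, 6] in a single linear pass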
- * @example - * - * _.takeRight([1, 2, 3]); - * // => [3] - * - * _.takeRight([1, 2, 3], 2); - * // => [2, 3] - * - * _.takeRight([1, 2, 3], 5); - * // => [1, 2, 3] - * - * _.takeRight([1, 2, 3], 0); - * // => [] - */ - function takeRight(array, n, guard) { - var length = array == null ? 0 : array.length; - if (!length) { - return []; - } - n = (guard || n === undefined) ? 1 : toInteger(n); - n = length - n; - return baseSlice(array, n < 0 ? 0 : n, length); - } - - /** - * Creates a slice of `array` with elements taken from the end. Elements are - * taken until `predicate` returns falsey. The predicate is invoked with - * three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': false } - * ]; - * - * _.takeRightWhile(users, function(o) { return !o.active; }); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.matches` iteratee shorthand. - * _.takeRightWhile(users, { 'user': 'pebbles', 'active': false }); - * // => objects for ['pebbles'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.takeRightWhile(users, ['active', false]); - * // => objects for ['fred', 'pebbles'] - * - * // The `_.property` iteratee shorthand. - * _.takeRightWhile(users, 'active'); - * // => [] - */ - function takeRightWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3), false, true) - : []; - } - - /** - * Creates a slice of `array` with elements taken from the beginning. Elements - * are taken until `predicate` returns falsey. The predicate is invoked with - * three arguments: (value, index, array). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Array - * @param {Array} array The array to query. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the slice of `array`. - * @example - * - * var users = [ - * { 'user': 'barney', 'active': false }, - * { 'user': 'fred', 'active': false }, - * { 'user': 'pebbles', 'active': true } - * ]; - * - * _.takeWhile(users, function(o) { return !o.active; }); - * // => objects for ['barney', 'fred'] - * - * // The `_.matches` iteratee shorthand. - * _.takeWhile(users, { 'user': 'barney', 'active': false }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.takeWhile(users, ['active', false]); - * // => objects for ['barney', 'fred'] - * - * // The `_.property` iteratee shorthand. - * _.takeWhile(users, 'active'); - * // => [] - */ - function takeWhile(array, predicate) { - return (array && array.length) - ? baseWhile(array, getIteratee(predicate, 3)) - : []; - } - - /** - * Creates an array of unique values, in order, from all given arrays using - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @returns {Array} Returns the new array of combined values. 
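// Usage sketch (assumes lodash is loaded as `_`): _.takeWhile stops at the
// first failing element, so it differs from _.filter on non-contiguous data.
_.takeWhile([1, 2, 3, 10, 1], function(n) { return n < 5; }); // => [1, 2, 3]
_.filter([1, 2, 3, 10, 1], function(n) { return n < 5; });    // => [1, 2, 3, 1]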
- * @example - * - * _.union([2], [1, 2]); - * // => [2, 1] - */ - var union = baseRest(function(arrays) { - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true)); - }); - - /** - * This method is like `_.union` except that it accepts `iteratee` which is - * invoked for each element of each `arrays` to generate the criterion by - * which uniqueness is computed. Result values are chosen from the first - * array in which the value occurs. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new array of combined values. - * @example - * - * _.unionBy([2.1], [1.2, 2.3], Math.floor); - * // => [2.1, 1.2] - * - * // The `_.property` iteratee shorthand. - * _.unionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x'); - * // => [{ 'x': 1 }, { 'x': 2 }] - */ - var unionBy = baseRest(function(arrays) { - var iteratee = last(arrays); - if (isArrayLikeObject(iteratee)) { - iteratee = undefined; - } - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), getIteratee(iteratee, 2)); - }); - - /** - * This method is like `_.union` except that it accepts `comparator` which - * is invoked to compare elements of `arrays`. Result values are chosen from - * the first array in which the value occurs. The comparator is invoked - * with two arguments: (arrVal, othVal). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {...Array} [arrays] The arrays to inspect. - * @param {Function} [comparator] The comparator invoked per element. - * @returns {Array} Returns the new array of combined values. - * @example - * - * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]; - * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }]; - * - * _.unionWith(objects, others, _.isEqual); - * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }] - */ - var unionWith = baseRest(function(arrays) { - var comparator = last(arrays); - comparator = typeof comparator == 'function' ? comparator : undefined; - return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), undefined, comparator); - }); - - /** - * Creates a duplicate-free version of an array, using - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * for equality comparisons, in which only the first occurrence of each element - * is kept. The order of result values is determined by the order they occur - * in the array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Array - * @param {Array} array The array to inspect. - * @returns {Array} Returns the new duplicate free array. - * @example - * - * _.uniq([2, 1, 2]); - * // => [2, 1] - */ - function uniq(array) { - return (array && array.length) ? baseUniq(array) : []; - } - - /** - * This method is like `_.uniq` except that it accepts `iteratee` which is - * invoked for each element in `array` to generate the criterion by which - * uniqueness is computed. The order of result values is determined by the - * order they occur in the array. The iteratee is invoked with one argument: - * (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Array - * @param {Array} array The array to inspect. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Array} Returns the new duplicate free array. 
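// Usage sketch (assumes lodash is loaded as `_`): _.union compares nested
// arrays by reference (SameValueZero), while _.unionWith can compare deeply.
_.union([1, [2]], [[2], 3]);                // => [1, [2], [2], 3] (distinct references)
_.unionWith([1, [2]], [[2], 3], _.isEqual); // => [1, [2], 3]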
-   * @example
-   *
-   * _.uniqBy([2.1, 1.2, 2.3], Math.floor);
-   * // => [2.1, 1.2]
-   *
-   * // The `_.property` iteratee shorthand.
-   * _.uniqBy([{ 'x': 1 }, { 'x': 2 }, { 'x': 1 }], 'x');
-   * // => [{ 'x': 1 }, { 'x': 2 }]
-   */
-  function uniqBy(array, iteratee) {
-    return (array && array.length) ? baseUniq(array, getIteratee(iteratee, 2)) : [];
-  }
-
-  /**
-   * This method is like `_.uniq` except that it accepts `comparator` which
-   * is invoked to compare elements of `array`. The order of result values is
-   * determined by the order they occur in the array. The comparator is invoked
-   * with two arguments: (arrVal, othVal).
-   *
-   * @static
-   * @memberOf _
-   * @since 4.0.0
-   * @category Array
-   * @param {Array} array The array to inspect.
-   * @param {Function} [comparator] The comparator invoked per element.
-   * @returns {Array} Returns the new duplicate free array.
-   * @example
-   *
-   * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 2 }];
-   *
-   * _.uniqWith(objects, _.isEqual);
-   * // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]
-   */
-  function uniqWith(array, comparator) {
-    comparator = typeof comparator == 'function' ? comparator : undefined;
-    return (array && array.length) ? baseUniq(array, undefined, comparator) : [];
-  }
-
-  /**
-   * This method is like `_.zip` except that it accepts an array of grouped
-   * elements and creates an array regrouping the elements to their pre-zip
-   * configuration.
-   *
-   * @static
-   * @memberOf _
-   * @since 1.2.0
-   * @category Array
-   * @param {Array} array The array of grouped elements to process.
-   * @returns {Array} Returns the new array of regrouped elements.
-   * @example
-   *
-   * var zipped = _.zip(['a', 'b'], [1, 2], [true, false]);
-   * // => [['a', 1, true], ['b', 2, false]]
-   *
-   * _.unzip(zipped);
-   * // => [['a', 'b'], [1, 2], [true, false]]
-   */
-  function unzip(array) {
-    if (!(array && array.length)) {
-      return [];
-    }
-    var length = 0;
-    array = arrayFilter(array, function(group) {
-      if (isArrayLikeObject(group)) {
-        length = nativeMax(group.length, length);
-        return true;
-      }
-    });
-    return baseTimes(length, function(index) {
-      return arrayMap(array, baseProperty(index));
-    });
-  }
-
-  /**
-   * This method is like `_.unzip` except that it accepts `iteratee` to specify
-   * how regrouped values should be combined. The iteratee is invoked with the
-   * elements of each group: (...group).
-   *
-   * @static
-   * @memberOf _
-   * @since 3.8.0
-   * @category Array
-   * @param {Array} array The array of grouped elements to process.
-   * @param {Function} [iteratee=_.identity] The function to combine
-   *  regrouped values.
-   * @returns {Array} Returns the new array of regrouped elements.
-   * @example
-   *
-   * var zipped = _.zip([1, 2], [10, 20], [100, 200]);
-   * // => [[1, 10, 100], [2, 20, 200]]
-   *
-   * _.unzipWith(zipped, _.add);
-   * // => [3, 30, 300]
-   */
-  function unzipWith(array, iteratee) {
-    if (!(array && array.length)) {
-      return [];
-    }
-    var result = unzip(array);
-    if (iteratee == null) {
-      return result;
-    }
-    return arrayMap(result, function(group) {
-      return apply(iteratee, undefined, group);
-    });
-  }
-
-  /**
-   * Creates an array excluding all given values using
-   * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
-   * for equality comparisons.
-   *
-   * **Note:** Unlike `_.pull`, this method returns a new array.
-   *
-   * @static
-   * @memberOf _
-   * @since 0.1.0
-   * @category Array
-   * @param {Array} array The array to inspect.
-   * @param {...*} [values] The values to exclude.
-   * @returns {Array} Returns the new array of filtered values.
-   * @see _.difference, _.xor
-   * @example
-   *
-   * _.without([2, 1, 2, 3], 1, 2);
-   * // => [3]
-   */
-  var without = baseRest(function(array, values) {
-    return isArrayLikeObject(array)
-      ? baseDifference(array, values)
-      : [];
-  });
-
-  /**
-   * Creates an array of unique values that is the
-   * [symmetric difference](https://en.wikipedia.org/wiki/Symmetric_difference)
-   * of the given arrays. The order of result values is determined by the order
-   * they occur in the arrays.
-   *
-   * @static
-   * @memberOf _
-   * @since 2.4.0
-   * @category Array
-   * @param {...Array} [arrays] The arrays to inspect.
-   * @returns {Array} Returns the new array of filtered values.
-   * @see _.difference, _.without
-   * @example
-   *
-   * _.xor([2, 1], [2, 3]);
-   * // => [1, 3]
-   */
-  var xor = baseRest(function(arrays) {
-    return baseXor(arrayFilter(arrays, isArrayLikeObject));
-  });
-
-  /**
-   * This method is like `_.xor` except that it accepts `iteratee` which is
-   * invoked for each element of each `arrays` to generate the criterion by
-   * which they're compared. The order of result values is determined
-   * by the order they occur in the arrays. The iteratee is invoked with one
-   * argument: (value).
-   *
-   * @static
-   * @memberOf _
-   * @since 4.0.0
-   * @category Array
-   * @param {...Array} [arrays] The arrays to inspect.
-   * @param {Function} [iteratee=_.identity] The iteratee invoked per element.
-   * @returns {Array} Returns the new array of filtered values.
-   * @example
-   *
-   * _.xorBy([2.1, 1.2], [2.3, 3.4], Math.floor);
-   * // => [1.2, 3.4]
-   *
-   * // The `_.property` iteratee shorthand.
-   * _.xorBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
-   * // => [{ 'x': 2 }]
-   */
-  var xorBy = baseRest(function(arrays) {
-    var iteratee = last(arrays);
-    if (isArrayLikeObject(iteratee)) {
-      iteratee = undefined;
-    }
-    return baseXor(arrayFilter(arrays, isArrayLikeObject), getIteratee(iteratee, 2));
-  });
-
-  /**
-   * This method is like `_.xor` except that it accepts `comparator` which is
-   * invoked to compare elements of `arrays`. The order of result values is
-   * determined by the order they occur in the arrays. The comparator is invoked
-   * with two arguments: (arrVal, othVal).
-   *
-   * @static
-   * @memberOf _
-   * @since 4.0.0
-   * @category Array
-   * @param {...Array} [arrays] The arrays to inspect.
-   * @param {Function} [comparator] The comparator invoked per element.
-   * @returns {Array} Returns the new array of filtered values.
-   * @example
-   *
-   * var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
-   * var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
-   *
-   * _.xorWith(objects, others, _.isEqual);
-   * // => [{ 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
-   */
-  var xorWith = baseRest(function(arrays) {
-    var comparator = last(arrays);
-    comparator = typeof comparator == 'function' ? comparator : undefined;
-    return baseXor(arrayFilter(arrays, isArrayLikeObject), undefined, comparator);
-  });
-
-  /**
-   * Creates an array of grouped elements, the first of which contains the
-   * first elements of the given arrays, the second of which contains the
-   * second elements of the given arrays, and so on.
-   *
-   * @static
-   * @memberOf _
-   * @since 0.1.0
-   * @category Array
-   * @param {...Array} [arrays] The arrays to process.
-   * @returns {Array} Returns the new array of grouped elements.
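// Usage sketch (assumes lodash is loaded as `_`): _.xor keeps the values
// unique to each side, whereas _.difference keeps only the left side's extras.
_.xor([2, 1], [2, 3]);        // => [1, 3]
_.difference([2, 1], [2, 3]); // => [1]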
-   * @example
-   *
-   * _.zip(['a', 'b'], [1, 2], [true, false]);
-   * // => [['a', 1, true], ['b', 2, false]]
-   */
-  var zip = baseRest(unzip);
-
-  /**
-   * This method is like `_.fromPairs` except that it accepts two arrays,
-   * one of property identifiers and one of corresponding values.
-   *
-   * @static
-   * @memberOf _
-   * @since 0.4.0
-   * @category Array
-   * @param {Array} [props=[]] The property identifiers.
-   * @param {Array} [values=[]] The property values.
-   * @returns {Object} Returns the new object.
-   * @example
-   *
-   * _.zipObject(['a', 'b'], [1, 2]);
-   * // => { 'a': 1, 'b': 2 }
-   */
-  function zipObject(props, values) {
-    return baseZipObject(props || [], values || [], assignValue);
-  }
-
-  /**
-   * This method is like `_.zipObject` except that it supports property paths.
-   *
-   * @static
-   * @memberOf _
-   * @since 4.1.0
-   * @category Array
-   * @param {Array} [props=[]] The property identifiers.
-   * @param {Array} [values=[]] The property values.
-   * @returns {Object} Returns the new object.
-   * @example
-   *
-   * _.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]);
-   * // => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } }
-   */
-  function zipObjectDeep(props, values) {
-    return baseZipObject(props || [], values || [], baseSet);
-  }
-
-  /**
-   * This method is like `_.zip` except that it accepts `iteratee` to specify
-   * how grouped values should be combined. The iteratee is invoked with the
-   * elements of each group: (...group).
-   *
-   * @static
-   * @memberOf _
-   * @since 3.8.0
-   * @category Array
-   * @param {...Array} [arrays] The arrays to process.
-   * @param {Function} [iteratee=_.identity] The function to combine
-   *  grouped values.
-   * @returns {Array} Returns the new array of grouped elements.
-   * @example
-   *
-   * _.zipWith([1, 2], [10, 20], [100, 200], function(a, b, c) {
-   *   return a + b + c;
-   * });
-   * // => [111, 222]
-   */
-  var zipWith = baseRest(function(arrays) {
-    var length = arrays.length,
-        iteratee = length > 1 ? arrays[length - 1] : undefined;
-
-    iteratee = typeof iteratee == 'function' ? (arrays.pop(), iteratee) : undefined;
-    return unzipWith(arrays, iteratee);
-  });
-
-  /*------------------------------------------------------------------------*/
-
-  /**
-   * Creates a `lodash` wrapper instance that wraps `value` with explicit method
-   * chain sequences enabled. The result of such sequences must be unwrapped
-   * with `_#value`.
-   *
-   * @static
-   * @memberOf _
-   * @since 1.3.0
-   * @category Seq
-   * @param {*} value The value to wrap.
-   * @returns {Object} Returns the new `lodash` wrapper instance.
-   * @example
-   *
-   * var users = [
-   *   { 'user': 'barney', 'age': 36 },
-   *   { 'user': 'fred', 'age': 40 },
-   *   { 'user': 'pebbles', 'age': 1 }
-   * ];
-   *
-   * var youngest = _
-   *   .chain(users)
-   *   .sortBy('age')
-   *   .map(function(o) {
-   *     return o.user + ' is ' + o.age;
-   *   })
-   *   .head()
-   *   .value();
-   * // => 'pebbles is 1'
-   */
-  function chain(value) {
-    var result = lodash(value);
-    result.__chain__ = true;
-    return result;
-  }
-
-  /**
-   * This method invokes `interceptor` and returns `value`. The interceptor
-   * is invoked with one argument: (value). The purpose of this method is to
-   * "tap into" a method chain sequence in order to modify intermediate results.
-   *
-   * @static
-   * @memberOf _
-   * @since 0.1.0
-   * @category Seq
-   * @param {*} value The value to provide to `interceptor`.
-   * @param {Function} interceptor The function to invoke.
-   * @returns {*} Returns `value`.
-   * @example
-   *
-   * _([1, 2, 3])
-   *  .tap(function(array) {
-   *    // Mutate input array.
- * array.pop(); - * }) - * .reverse() - * .value(); - * // => [2, 1] - */ - function tap(value, interceptor) { - interceptor(value); - return value; - } - - /** - * This method is like `_.tap` except that it returns the result of `interceptor`. - * The purpose of this method is to "pass thru" values replacing intermediate - * results in a method chain sequence. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Seq - * @param {*} value The value to provide to `interceptor`. - * @param {Function} interceptor The function to invoke. - * @returns {*} Returns the result of `interceptor`. - * @example - * - * _(' abc ') - * .chain() - * .trim() - * .thru(function(value) { - * return [value]; - * }) - * .value(); - * // => ['abc'] - */ - function thru(value, interceptor) { - return interceptor(value); - } - - /** - * This method is the wrapper version of `_.at`. - * - * @name at - * @memberOf _ - * @since 1.0.0 - * @category Seq - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; - * - * _(object).at(['a[0].b.c', 'a[1]']).value(); - * // => [3, 4] - */ - var wrapperAt = flatRest(function(paths) { - var length = paths.length, - start = length ? paths[0] : 0, - value = this.__wrapped__, - interceptor = function(object) { return baseAt(object, paths); }; - - if (length > 1 || this.__actions__.length || - !(value instanceof LazyWrapper) || !isIndex(start)) { - return this.thru(interceptor); - } - value = value.slice(start, +start + (length ? 1 : 0)); - value.__actions__.push({ - 'func': thru, - 'args': [interceptor], - 'thisArg': undefined - }); - return new LodashWrapper(value, this.__chain__).thru(function(array) { - if (length && !array.length) { - array.push(undefined); - } - return array; - }); - }); - - /** - * Creates a `lodash` wrapper instance with explicit method chain sequences enabled. - * - * @name chain - * @memberOf _ - * @since 0.1.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36 }, - * { 'user': 'fred', 'age': 40 } - * ]; - * - * // A sequence without explicit chaining. - * _(users).head(); - * // => { 'user': 'barney', 'age': 36 } - * - * // A sequence with explicit chaining. - * _(users) - * .chain() - * .head() - * .pick('user') - * .value(); - * // => { 'user': 'barney' } - */ - function wrapperChain() { - return chain(this); - } - - /** - * Executes the chain sequence and returns the wrapped result. - * - * @name commit - * @memberOf _ - * @since 3.2.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var array = [1, 2]; - * var wrapped = _(array).push(3); - * - * console.log(array); - * // => [1, 2] - * - * wrapped = wrapped.commit(); - * console.log(array); - * // => [1, 2, 3] - * - * wrapped.last(); - * // => 3 - * - * console.log(array); - * // => [1, 2, 3] - */ - function wrapperCommit() { - return new LodashWrapper(this.value(), this.__chain__); - } - - /** - * Gets the next value on a wrapped object following the - * [iterator protocol](https://mdn.io/iteration_protocols#iterator). - * - * @name next - * @memberOf _ - * @since 4.0.0 - * @category Seq - * @returns {Object} Returns the next iterator value. 
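// Usage sketch (assumes lodash is loaded as `_`): _.tap passes the wrapped
// value through after a side effect, while _.thru replaces it entirely.
_([1, 2, 3])
  .tap(function(arr) { arr.pop(); })              // side effect: now [1, 2]
  .thru(function(arr) { return arr.concat(99); }) // replaced by [1, 2, 99]
  .value();
// => [1, 2, 99]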
- * @example - * - * var wrapped = _([1, 2]); - * - * wrapped.next(); - * // => { 'done': false, 'value': 1 } - * - * wrapped.next(); - * // => { 'done': false, 'value': 2 } - * - * wrapped.next(); - * // => { 'done': true, 'value': undefined } - */ - function wrapperNext() { - if (this.__values__ === undefined) { - this.__values__ = toArray(this.value()); - } - var done = this.__index__ >= this.__values__.length, - value = done ? undefined : this.__values__[this.__index__++]; - - return { 'done': done, 'value': value }; - } - - /** - * Enables the wrapper to be iterable. - * - * @name Symbol.iterator - * @memberOf _ - * @since 4.0.0 - * @category Seq - * @returns {Object} Returns the wrapper object. - * @example - * - * var wrapped = _([1, 2]); - * - * wrapped[Symbol.iterator]() === wrapped; - * // => true - * - * Array.from(wrapped); - * // => [1, 2] - */ - function wrapperToIterator() { - return this; - } - - /** - * Creates a clone of the chain sequence planting `value` as the wrapped value. - * - * @name plant - * @memberOf _ - * @since 3.2.0 - * @category Seq - * @param {*} value The value to plant. - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * function square(n) { - * return n * n; - * } - * - * var wrapped = _([1, 2]).map(square); - * var other = wrapped.plant([3, 4]); - * - * other.value(); - * // => [9, 16] - * - * wrapped.value(); - * // => [1, 4] - */ - function wrapperPlant(value) { - var result, - parent = this; - - while (parent instanceof baseLodash) { - var clone = wrapperClone(parent); - clone.__index__ = 0; - clone.__values__ = undefined; - if (result) { - previous.__wrapped__ = clone; - } else { - result = clone; - } - var previous = clone; - parent = parent.__wrapped__; - } - previous.__wrapped__ = value; - return result; - } - - /** - * This method is the wrapper version of `_.reverse`. - * - * **Note:** This method mutates the wrapped array. - * - * @name reverse - * @memberOf _ - * @since 0.1.0 - * @category Seq - * @returns {Object} Returns the new `lodash` wrapper instance. - * @example - * - * var array = [1, 2, 3]; - * - * _(array).reverse().value() - * // => [3, 2, 1] - * - * console.log(array); - * // => [3, 2, 1] - */ - function wrapperReverse() { - var value = this.__wrapped__; - if (value instanceof LazyWrapper) { - var wrapped = value; - if (this.__actions__.length) { - wrapped = new LazyWrapper(this); - } - wrapped = wrapped.reverse(); - wrapped.__actions__.push({ - 'func': thru, - 'args': [reverse], - 'thisArg': undefined - }); - return new LodashWrapper(wrapped, this.__chain__); - } - return this.thru(reverse); - } - - /** - * Executes the chain sequence to resolve the unwrapped value. - * - * @name value - * @memberOf _ - * @since 0.1.0 - * @alias toJSON, valueOf - * @category Seq - * @returns {*} Returns the resolved unwrapped value. - * @example - * - * _([1, 2, 3]).value(); - * // => [1, 2, 3] - */ - function wrapperValue() { - return baseWrapperValue(this.__wrapped__, this.__actions__); - } - - /*------------------------------------------------------------------------*/ - - /** - * Creates an object composed of keys generated from the results of running - * each element of `collection` thru `iteratee`. The corresponding value of - * each key is the number of times the key was returned by `iteratee`. The - * iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. 
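// Usage sketch (assumes lodash is loaded as `_` in an environment with
// Symbol support): because the wrapper implements the iterator protocol via
// wrapperNext and wrapperToIterator above, chain sequences can be consumed
// with plain ES2015 iteration.
var squares = _([1, 2, 3]).map(function(n) { return n * n; });
Array.from(squares); // => [1, 4, 9]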
- * @param {Function} [iteratee=_.identity] The iteratee to transform keys. - * @returns {Object} Returns the composed aggregate object. - * @example - * - * _.countBy([6.1, 4.2, 6.3], Math.floor); - * // => { '4': 1, '6': 2 } - * - * // The `_.property` iteratee shorthand. - * _.countBy(['one', 'two', 'three'], 'length'); - * // => { '3': 2, '5': 1 } - */ - var countBy = createAggregator(function(result, value, key) { - if (hasOwnProperty.call(result, key)) { - ++result[key]; - } else { - baseAssignValue(result, key, 1); - } - }); - - /** - * Checks if `predicate` returns truthy for **all** elements of `collection`. - * Iteration is stopped once `predicate` returns falsey. The predicate is - * invoked with three arguments: (value, index|key, collection). - * - * **Note:** This method returns `true` for - * [empty collections](https://en.wikipedia.org/wiki/Empty_set) because - * [everything is true](https://en.wikipedia.org/wiki/Vacuous_truth) of - * elements of empty collections. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {boolean} Returns `true` if all elements pass the predicate check, - * else `false`. - * @example - * - * _.every([true, 1, null, 'yes'], Boolean); - * // => false - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': false } - * ]; - * - * // The `_.matches` iteratee shorthand. - * _.every(users, { 'user': 'barney', 'active': false }); - * // => false - * - * // The `_.matchesProperty` iteratee shorthand. - * _.every(users, ['active', false]); - * // => true - * - * // The `_.property` iteratee shorthand. - * _.every(users, 'active'); - * // => false - */ - function every(collection, predicate, guard) { - var func = isArray(collection) ? arrayEvery : baseEvery; - if (guard && isIterateeCall(collection, predicate, guard)) { - predicate = undefined; - } - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Iterates over elements of `collection`, returning an array of all elements - * `predicate` returns truthy for. The predicate is invoked with three - * arguments: (value, index|key, collection). - * - * **Note:** Unlike `_.remove`, this method returns a new array. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - * @see _.reject - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': true }, - * { 'user': 'fred', 'age': 40, 'active': false } - * ]; - * - * _.filter(users, function(o) { return !o.active; }); - * // => objects for ['fred'] - * - * // The `_.matches` iteratee shorthand. - * _.filter(users, { 'age': 36, 'active': true }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.filter(users, ['active', false]); - * // => objects for ['fred'] - * - * // The `_.property` iteratee shorthand. - * _.filter(users, 'active'); - * // => objects for ['barney'] - */ - function filter(collection, predicate) { - var func = isArray(collection) ? 
arrayFilter : baseFilter; - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Iterates over elements of `collection`, returning the first element - * `predicate` returns truthy for. The predicate is invoked with three - * arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=0] The index to search from. - * @returns {*} Returns the matched element, else `undefined`. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': true }, - * { 'user': 'fred', 'age': 40, 'active': false }, - * { 'user': 'pebbles', 'age': 1, 'active': true } - * ]; - * - * _.find(users, function(o) { return o.age < 40; }); - * // => object for 'barney' - * - * // The `_.matches` iteratee shorthand. - * _.find(users, { 'age': 1, 'active': true }); - * // => object for 'pebbles' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.find(users, ['active', false]); - * // => object for 'fred' - * - * // The `_.property` iteratee shorthand. - * _.find(users, 'active'); - * // => object for 'barney' - */ - var find = createFind(findIndex); - - /** - * This method is like `_.find` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Collection - * @param {Array|Object} collection The collection to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param {number} [fromIndex=collection.length-1] The index to search from. - * @returns {*} Returns the matched element, else `undefined`. - * @example - * - * _.findLast([1, 2, 3, 4], function(n) { - * return n % 2 == 1; - * }); - * // => 3 - */ - var findLast = createFind(findLastIndex); - - /** - * Creates a flattened array of values by running each element in `collection` - * thru `iteratee` and flattening the mapped results. The iteratee is invoked - * with three arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [n, n]; - * } - * - * _.flatMap([1, 2], duplicate); - * // => [1, 1, 2, 2] - */ - function flatMap(collection, iteratee) { - return baseFlatten(map(collection, iteratee), 1); - } - - /** - * This method is like `_.flatMap` except that it recursively flattens the - * mapped results. - * - * @static - * @memberOf _ - * @since 4.7.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [[[n, n]]]; - * } - * - * _.flatMapDeep([1, 2], duplicate); - * // => [1, 1, 2, 2] - */ - function flatMapDeep(collection, iteratee) { - return baseFlatten(map(collection, iteratee), INFINITY); - } - - /** - * This method is like `_.flatMap` except that it recursively flattens the - * mapped results up to `depth` times. 
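// Usage sketch (assumes lodash is loaded as `_`): the three flatMap variants
// differ only in how far the mapped results are flattened.
function dup(n) { return [[[n, n]]]; }
_.flatMap([1, 2], dup);         // => [[[1, 1]], [[2, 2]]] (one level)
_.flatMapDepth([1, 2], dup, 2); // => [[1, 1], [2, 2]]     (two levels)
_.flatMapDeep([1, 2], dup);     // => [1, 1, 2, 2]         (fully flattened)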
- * - * @static - * @memberOf _ - * @since 4.7.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {number} [depth=1] The maximum recursion depth. - * @returns {Array} Returns the new flattened array. - * @example - * - * function duplicate(n) { - * return [[[n, n]]]; - * } - * - * _.flatMapDepth([1, 2], duplicate, 2); - * // => [[1, 1], [2, 2]] - */ - function flatMapDepth(collection, iteratee, depth) { - depth = depth === undefined ? 1 : toInteger(depth); - return baseFlatten(map(collection, iteratee), depth); - } - - /** - * Iterates over elements of `collection` and invokes `iteratee` for each element. - * The iteratee is invoked with three arguments: (value, index|key, collection). - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * **Note:** As with other "Collections" methods, objects with a "length" - * property are iterated like arrays. To avoid this behavior use `_.forIn` - * or `_.forOwn` for object iteration. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @alias each - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - * @see _.forEachRight - * @example - * - * _.forEach([1, 2], function(value) { - * console.log(value); - * }); - * // => Logs `1` then `2`. - * - * _.forEach({ 'a': 1, 'b': 2 }, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a' then 'b' (iteration order is not guaranteed). - */ - function forEach(collection, iteratee) { - var func = isArray(collection) ? arrayEach : baseEach; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.forEach` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @alias eachRight - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array|Object} Returns `collection`. - * @see _.forEach - * @example - * - * _.forEachRight([1, 2], function(value) { - * console.log(value); - * }); - * // => Logs `2` then `1`. - */ - function forEachRight(collection, iteratee) { - var func = isArray(collection) ? arrayEachRight : baseEachRight; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * Creates an object composed of keys generated from the results of running - * each element of `collection` thru `iteratee`. The order of grouped values - * is determined by the order they occur in `collection`. The corresponding - * value of each key is an array of elements responsible for generating the - * key. The iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The iteratee to transform keys. - * @returns {Object} Returns the composed aggregate object. - * @example - * - * _.groupBy([6.1, 4.2, 6.3], Math.floor); - * // => { '4': [4.2], '6': [6.1, 6.3] } - * - * // The `_.property` iteratee shorthand. 
- * _.groupBy(['one', 'two', 'three'], 'length'); - * // => { '3': ['one', 'two'], '5': ['three'] } - */ - var groupBy = createAggregator(function(result, value, key) { - if (hasOwnProperty.call(result, key)) { - result[key].push(value); - } else { - baseAssignValue(result, key, [value]); - } - }); - - /** - * Checks if `value` is in `collection`. If `collection` is a string, it's - * checked for a substring of `value`, otherwise - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * is used for equality comparisons. If `fromIndex` is negative, it's used as - * the offset from the end of `collection`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object|string} collection The collection to inspect. - * @param {*} value The value to search for. - * @param {number} [fromIndex=0] The index to search from. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`. - * @returns {boolean} Returns `true` if `value` is found, else `false`. - * @example - * - * _.includes([1, 2, 3], 1); - * // => true - * - * _.includes([1, 2, 3], 1, 2); - * // => false - * - * _.includes({ 'a': 1, 'b': 2 }, 1); - * // => true - * - * _.includes('abcd', 'bc'); - * // => true - */ - function includes(collection, value, fromIndex, guard) { - collection = isArrayLike(collection) ? collection : values(collection); - fromIndex = (fromIndex && !guard) ? toInteger(fromIndex) : 0; - - var length = collection.length; - if (fromIndex < 0) { - fromIndex = nativeMax(length + fromIndex, 0); - } - return isString(collection) - ? (fromIndex <= length && collection.indexOf(value, fromIndex) > -1) - : (!!length && baseIndexOf(collection, value, fromIndex) > -1); - } - - /** - * Invokes the method at `path` of each element in `collection`, returning - * an array of the results of each invoked method. Any additional arguments - * are provided to each invoked method. If `path` is a function, it's invoked - * for, and `this` bound to, each element in `collection`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Array|Function|string} path The path of the method to invoke or - * the function invoked per iteration. - * @param {...*} [args] The arguments to invoke each method with. - * @returns {Array} Returns the array of results. - * @example - * - * _.invokeMap([[5, 1, 7], [3, 2, 1]], 'sort'); - * // => [[1, 5, 7], [1, 2, 3]] - * - * _.invokeMap([123, 456], String.prototype.split, ''); - * // => [['1', '2', '3'], ['4', '5', '6']] - */ - var invokeMap = baseRest(function(collection, path, args) { - var index = -1, - isFunc = typeof path == 'function', - result = isArrayLike(collection) ? Array(collection.length) : []; - - baseEach(collection, function(value) { - result[++index] = isFunc ? apply(path, value, args) : baseInvoke(value, path, args); - }); - return result; - }); - - /** - * Creates an object composed of keys generated from the results of running - * each element of `collection` thru `iteratee`. The corresponding value of - * each key is the last element responsible for generating the key. The - * iteratee is invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The iteratee to transform keys. 
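// Usage sketch (assumes lodash is loaded as `_`): a negative fromIndex
// offsets _.includes from the end, so "is this among the last two items?"
// becomes a one-liner.
_.includes([1, 2, 3, 4], 2, -2); // => false (only [3, 4] is searched)
_.includes([1, 2, 3, 4], 4, -2); // => true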
- * @returns {Object} Returns the composed aggregate object. - * @example - * - * var array = [ - * { 'dir': 'left', 'code': 97 }, - * { 'dir': 'right', 'code': 100 } - * ]; - * - * _.keyBy(array, function(o) { - * return String.fromCharCode(o.code); - * }); - * // => { 'a': { 'dir': 'left', 'code': 97 }, 'd': { 'dir': 'right', 'code': 100 } } - * - * _.keyBy(array, 'dir'); - * // => { 'left': { 'dir': 'left', 'code': 97 }, 'right': { 'dir': 'right', 'code': 100 } } - */ - var keyBy = createAggregator(function(result, value, key) { - baseAssignValue(result, key, value); - }); - - /** - * Creates an array of values by running each element in `collection` thru - * `iteratee`. The iteratee is invoked with three arguments: - * (value, index|key, collection). - * - * Many lodash methods are guarded to work as iteratees for methods like - * `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`. - * - * The guarded methods are: - * `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`, - * `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`, - * `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`, - * `template`, `trim`, `trimEnd`, `trimStart`, and `words` - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new mapped array. - * @example - * - * function square(n) { - * return n * n; - * } - * - * _.map([4, 8], square); - * // => [16, 64] - * - * _.map({ 'a': 4, 'b': 8 }, square); - * // => [16, 64] (iteration order is not guaranteed) - * - * var users = [ - * { 'user': 'barney' }, - * { 'user': 'fred' } - * ]; - * - * // The `_.property` iteratee shorthand. - * _.map(users, 'user'); - * // => ['barney', 'fred'] - */ - function map(collection, iteratee) { - var func = isArray(collection) ? arrayMap : baseMap; - return func(collection, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.sortBy` except that it allows specifying the sort - * orders of the iteratees to sort by. If `orders` is unspecified, all values - * are sorted in ascending order. Otherwise, specify an order of "desc" for - * descending or "asc" for ascending sort order of corresponding values. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Array[]|Function[]|Object[]|string[]} [iteratees=[_.identity]] - * The iteratees to sort by. - * @param {string[]} [orders] The sort orders of `iteratees`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`. - * @returns {Array} Returns the new sorted array. - * @example - * - * var users = [ - * { 'user': 'fred', 'age': 48 }, - * { 'user': 'barney', 'age': 34 }, - * { 'user': 'fred', 'age': 40 }, - * { 'user': 'barney', 'age': 36 } - * ]; - * - * // Sort by `user` in ascending order and by `age` in descending order. - * _.orderBy(users, ['user', 'age'], ['asc', 'desc']); - * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] - */ - function orderBy(collection, iteratees, orders, guard) { - if (collection == null) { - return []; - } - if (!isArray(iteratees)) { - iteratees = iteratees == null ? [] : [iteratees]; - } - orders = guard ? undefined : orders; - if (!isArray(orders)) { - orders = orders == null ? 
[] : [orders]; - } - return baseOrderBy(collection, iteratees, orders); - } - - /** - * Creates an array of elements split into two groups, the first of which - * contains elements `predicate` returns truthy for, the second of which - * contains elements `predicate` returns falsey for. The predicate is - * invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the array of grouped elements. - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': true }, - * { 'user': 'pebbles', 'age': 1, 'active': false } - * ]; - * - * _.partition(users, function(o) { return o.active; }); - * // => objects for [['fred'], ['barney', 'pebbles']] - * - * // The `_.matches` iteratee shorthand. - * _.partition(users, { 'age': 1, 'active': false }); - * // => objects for [['pebbles'], ['barney', 'fred']] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.partition(users, ['active', false]); - * // => objects for [['barney', 'pebbles'], ['fred']] - * - * // The `_.property` iteratee shorthand. - * _.partition(users, 'active'); - * // => objects for [['fred'], ['barney', 'pebbles']] - */ - var partition = createAggregator(function(result, value, key) { - result[key ? 0 : 1].push(value); - }, function() { return [[], []]; }); - - /** - * Reduces `collection` to a value which is the accumulated result of running - * each element in `collection` thru `iteratee`, where each successive - * invocation is supplied the return value of the previous. If `accumulator` - * is not given, the first element of `collection` is used as the initial - * value. The iteratee is invoked with four arguments: - * (accumulator, value, index|key, collection). - * - * Many lodash methods are guarded to work as iteratees for methods like - * `_.reduce`, `_.reduceRight`, and `_.transform`. - * - * The guarded methods are: - * `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`, - * and `sortBy` - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The initial value. - * @returns {*} Returns the accumulated value. - * @see _.reduceRight - * @example - * - * _.reduce([1, 2], function(sum, n) { - * return sum + n; - * }, 0); - * // => 3 - * - * _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { - * (result[value] || (result[value] = [])).push(key); - * return result; - * }, {}); - * // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed) - */ - function reduce(collection, iteratee, accumulator) { - var func = isArray(collection) ? arrayReduce : baseReduce, - initAccum = arguments.length < 3; - - return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEach); - } - - /** - * This method is like `_.reduce` except that it iterates over elements of - * `collection` from right to left. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The initial value. 
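// Usage sketch (assumes lodash is loaded as `_`): _.partition does in one
// pass what paired _.filter and _.reject calls would do.
_.partition([1, 2, 3, 4], function(n) { return n % 2; });
// => [[1, 3], [2, 4]] (truthy group first, then the falsey group)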
- * @returns {*} Returns the accumulated value. - * @see _.reduce - * @example - * - * var array = [[0, 1], [2, 3], [4, 5]]; - * - * _.reduceRight(array, function(flattened, other) { - * return flattened.concat(other); - * }, []); - * // => [4, 5, 2, 3, 0, 1] - */ - function reduceRight(collection, iteratee, accumulator) { - var func = isArray(collection) ? arrayReduceRight : baseReduce, - initAccum = arguments.length < 3; - - return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEachRight); - } - - /** - * The opposite of `_.filter`; this method returns the elements of `collection` - * that `predicate` does **not** return truthy for. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {Array} Returns the new filtered array. - * @see _.filter - * @example - * - * var users = [ - * { 'user': 'barney', 'age': 36, 'active': false }, - * { 'user': 'fred', 'age': 40, 'active': true } - * ]; - * - * _.reject(users, function(o) { return !o.active; }); - * // => objects for ['fred'] - * - * // The `_.matches` iteratee shorthand. - * _.reject(users, { 'age': 40, 'active': true }); - * // => objects for ['barney'] - * - * // The `_.matchesProperty` iteratee shorthand. - * _.reject(users, ['active', false]); - * // => objects for ['fred'] - * - * // The `_.property` iteratee shorthand. - * _.reject(users, 'active'); - * // => objects for ['barney'] - */ - function reject(collection, predicate) { - var func = isArray(collection) ? arrayFilter : baseFilter; - return func(collection, negate(getIteratee(predicate, 3))); - } - - /** - * Gets a random element from `collection`. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Collection - * @param {Array|Object} collection The collection to sample. - * @returns {*} Returns the random element. - * @example - * - * _.sample([1, 2, 3, 4]); - * // => 2 - */ - function sample(collection) { - var func = isArray(collection) ? arraySample : baseSample; - return func(collection); - } - - /** - * Gets `n` random elements at unique keys from `collection` up to the - * size of `collection`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Collection - * @param {Array|Object} collection The collection to sample. - * @param {number} [n=1] The number of elements to sample. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Array} Returns the random elements. - * @example - * - * _.sampleSize([1, 2, 3], 2); - * // => [3, 1] - * - * _.sampleSize([1, 2, 3], 4); - * // => [2, 3, 1] - */ - function sampleSize(collection, n, guard) { - if ((guard ? isIterateeCall(collection, n, guard) : n === undefined)) { - n = 1; - } else { - n = toInteger(n); - } - var func = isArray(collection) ? arraySampleSize : baseSampleSize; - return func(collection, n); - } - - /** - * Creates an array of shuffled values, using a version of the - * [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to shuffle. - * @returns {Array} Returns the new shuffled array. - * @example - * - * _.shuffle([1, 2, 3, 4]); - * // => [4, 1, 3, 2] - */ - function shuffle(collection) { - var func = isArray(collection) ? 
arrayShuffle : baseShuffle; - return func(collection); - } - - /** - * Gets the size of `collection` by returning its length for array-like - * values or the number of own enumerable string keyed properties for objects. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object|string} collection The collection to inspect. - * @returns {number} Returns the collection size. - * @example - * - * _.size([1, 2, 3]); - * // => 3 - * - * _.size({ 'a': 1, 'b': 2 }); - * // => 2 - * - * _.size('pebbles'); - * // => 7 - */ - function size(collection) { - if (collection == null) { - return 0; - } - if (isArrayLike(collection)) { - return isString(collection) ? stringSize(collection) : collection.length; - } - var tag = getTag(collection); - if (tag == mapTag || tag == setTag) { - return collection.size; - } - return baseKeys(collection).length; - } - - /** - * Checks if `predicate` returns truthy for **any** element of `collection`. - * Iteration is stopped once `predicate` returns truthy. The predicate is - * invoked with three arguments: (value, index|key, collection). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {boolean} Returns `true` if any element passes the predicate check, - * else `false`. - * @example - * - * _.some([null, 0, 'yes', false], Boolean); - * // => true - * - * var users = [ - * { 'user': 'barney', 'active': true }, - * { 'user': 'fred', 'active': false } - * ]; - * - * // The `_.matches` iteratee shorthand. - * _.some(users, { 'user': 'barney', 'active': false }); - * // => false - * - * // The `_.matchesProperty` iteratee shorthand. - * _.some(users, ['active', false]); - * // => true - * - * // The `_.property` iteratee shorthand. - * _.some(users, 'active'); - * // => true - */ - function some(collection, predicate, guard) { - var func = isArray(collection) ? arraySome : baseSome; - if (guard && isIterateeCall(collection, predicate, guard)) { - predicate = undefined; - } - return func(collection, getIteratee(predicate, 3)); - } - - /** - * Creates an array of elements, sorted in ascending order by the results of - * running each element in a collection thru each iteratee. This method - * performs a stable sort, that is, it preserves the original sort order of - * equal elements. The iteratees are invoked with one argument: (value). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Collection - * @param {Array|Object} collection The collection to iterate over. - * @param {...(Function|Function[])} [iteratees=[_.identity]] - * The iteratees to sort by. - * @returns {Array} Returns the new sorted array. 
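One case the `getTag` branch in `size` above covers that its examples don't show (a sketch, assuming lodash as `_`): maps and sets report their own `size` rather than a count of enumerable keys.

```
_.size(new Map([['a', 1], ['b', 2]])); // => 2
_.size(new Set([1, 2, 2, 3]));         // => 3 (duplicates collapse)
```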
- * @example - * - * var users = [ - * { 'user': 'fred', 'age': 48 }, - * { 'user': 'barney', 'age': 36 }, - * { 'user': 'fred', 'age': 40 }, - * { 'user': 'barney', 'age': 34 } - * ]; - * - * _.sortBy(users, [function(o) { return o.user; }]); - * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] - * - * _.sortBy(users, ['user', 'age']); - * // => objects for [['barney', 34], ['barney', 36], ['fred', 40], ['fred', 48]] - */ - var sortBy = baseRest(function(collection, iteratees) { - if (collection == null) { - return []; - } - var length = iteratees.length; - if (length > 1 && isIterateeCall(collection, iteratees[0], iteratees[1])) { - iteratees = []; - } else if (length > 2 && isIterateeCall(iteratees[0], iteratees[1], iteratees[2])) { - iteratees = [iteratees[0]]; - } - return baseOrderBy(collection, baseFlatten(iteratees, 1), []); - }); - - /*------------------------------------------------------------------------*/ - - /** - * Gets the timestamp of the number of milliseconds that have elapsed since - * the Unix epoch (1 January 1970 00:00:00 UTC). - * - * @static - * @memberOf _ - * @since 2.4.0 - * @category Date - * @returns {number} Returns the timestamp. - * @example - * - * _.defer(function(stamp) { - * console.log(_.now() - stamp); - * }, _.now()); - * // => Logs the number of milliseconds it took for the deferred invocation. - */ - var now = ctxNow || function() { - return root.Date.now(); - }; - - /*------------------------------------------------------------------------*/ - - /** - * The opposite of `_.before`; this method creates a function that invokes - * `func` once it's called `n` or more times. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {number} n The number of calls before `func` is invoked. - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * var saves = ['profile', 'settings']; - * - * var done = _.after(saves.length, function() { - * console.log('done saving!'); - * }); - * - * _.forEach(saves, function(type) { - * asyncSave({ 'type': type, 'complete': done }); - * }); - * // => Logs 'done saving!' after the two async saves have completed. - */ - function after(n, func) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - n = toInteger(n); - return function() { - if (--n < 1) { - return func.apply(this, arguments); - } - }; - } - - /** - * Creates a function that invokes `func`, with up to `n` arguments, - * ignoring any additional arguments. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to cap arguments for. - * @param {number} [n=func.length] The arity cap. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new capped function. - * @example - * - * _.map(['6', '8', '10'], _.ary(parseInt, 1)); - * // => [6, 8, 10] - */ - function ary(func, n, guard) { - n = guard ? undefined : n; - n = (func && n == null) ? func.length : n; - return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n); - } - - /** - * Creates a function that invokes `func`, with the `this` binding and arguments - * of the created function, while it's called less than `n` times. Subsequent - * calls to the created function return the result of the last `func` invocation. 
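A quick sketch of the stability guarantee stated in the `sortBy` doc above (assuming lodash as `_`; the data is illustrative): elements that compare equal on the iteratee keep their original relative order.

```
var rows = [
  { 'k': 1, 'tag': 'first' },
  { 'k': 1, 'tag': 'second' },
  { 'k': 0, 'tag': 'third' }
];

_.sortBy(rows, 'k');
// => objects for [['third'], ['first'], ['second']]
```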
- * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {number} n The number of calls at which `func` is no longer invoked. - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * jQuery(element).on('click', _.before(5, addContactToList)); - * // => Allows adding up to 4 contacts to the list. - */ - function before(n, func) { - var result; - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - n = toInteger(n); - return function() { - if (--n > 0) { - result = func.apply(this, arguments); - } - if (n <= 1) { - func = undefined; - } - return result; - }; - } - - /** - * Creates a function that invokes `func` with the `this` binding of `thisArg` - * and `partials` prepended to the arguments it receives. - * - * The `_.bind.placeholder` value, which defaults to `_` in monolithic builds, - * may be used as a placeholder for partially applied arguments. - * - * **Note:** Unlike native `Function#bind`, this method doesn't set the "length" - * property of bound functions. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to bind. - * @param {*} thisArg The `this` binding of `func`. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new bound function. - * @example - * - * function greet(greeting, punctuation) { - * return greeting + ' ' + this.user + punctuation; - * } - * - * var object = { 'user': 'fred' }; - * - * var bound = _.bind(greet, object, 'hi'); - * bound('!'); - * // => 'hi fred!' - * - * // Bound with placeholders. - * var bound = _.bind(greet, object, _, '!'); - * bound('hi'); - * // => 'hi fred!' - */ - var bind = baseRest(function(func, thisArg, partials) { - var bitmask = WRAP_BIND_FLAG; - if (partials.length) { - var holders = replaceHolders(partials, getHolder(bind)); - bitmask |= WRAP_PARTIAL_FLAG; - } - return createWrap(func, bitmask, thisArg, partials, holders); - }); - - /** - * Creates a function that invokes the method at `object[key]` with `partials` - * prepended to the arguments it receives. - * - * This method differs from `_.bind` by allowing bound functions to reference - * methods that may be redefined or don't yet exist. See - * [Peter Michaux's article](http://peter.michaux.ca/articles/lazy-function-definition-pattern) - * for more details. - * - * The `_.bindKey.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * @static - * @memberOf _ - * @since 0.10.0 - * @category Function - * @param {Object} object The object to invoke the method on. - * @param {string} key The key of the method. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new bound function. - * @example - * - * var object = { - * 'user': 'fred', - * 'greet': function(greeting, punctuation) { - * return greeting + ' ' + this.user + punctuation; - * } - * }; - * - * var bound = _.bindKey(object, 'greet', 'hi'); - * bound('!'); - * // => 'hi fred!' - * - * object.greet = function(greeting, punctuation) { - * return greeting + 'ya ' + this.user + punctuation; - * }; - * - * bound('!'); - * // => 'hiya fred!' - * - * // Bound with placeholders. - * var bound = _.bindKey(object, 'greet', _, '!'); - * bound('hi'); - * // => 'hiya fred!' 
- */ - var bindKey = baseRest(function(object, key, partials) { - var bitmask = WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG; - if (partials.length) { - var holders = replaceHolders(partials, getHolder(bindKey)); - bitmask |= WRAP_PARTIAL_FLAG; - } - return createWrap(key, bitmask, object, partials, holders); - }); - - /** - * Creates a function that accepts arguments of `func` and either invokes - * `func` returning its result, if at least `arity` number of arguments have - * been provided, or returns a function that accepts the remaining `func` - * arguments, and so on. The arity of `func` may be specified if `func.length` - * is not sufficient. - * - * The `_.curry.placeholder` value, which defaults to `_` in monolithic builds, - * may be used as a placeholder for provided arguments. - * - * **Note:** This method doesn't set the "length" property of curried functions. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Function - * @param {Function} func The function to curry. - * @param {number} [arity=func.length] The arity of `func`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new curried function. - * @example - * - * var abc = function(a, b, c) { - * return [a, b, c]; - * }; - * - * var curried = _.curry(abc); - * - * curried(1)(2)(3); - * // => [1, 2, 3] - * - * curried(1, 2)(3); - * // => [1, 2, 3] - * - * curried(1, 2, 3); - * // => [1, 2, 3] - * - * // Curried with placeholders. - * curried(1)(_, 3)(2); - * // => [1, 2, 3] - */ - function curry(func, arity, guard) { - arity = guard ? undefined : arity; - var result = createWrap(func, WRAP_CURRY_FLAG, undefined, undefined, undefined, undefined, undefined, arity); - result.placeholder = curry.placeholder; - return result; - } - - /** - * This method is like `_.curry` except that arguments are applied to `func` - * in the manner of `_.partialRight` instead of `_.partial`. - * - * The `_.curryRight.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for provided arguments. - * - * **Note:** This method doesn't set the "length" property of curried functions. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to curry. - * @param {number} [arity=func.length] The arity of `func`. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {Function} Returns the new curried function. - * @example - * - * var abc = function(a, b, c) { - * return [a, b, c]; - * }; - * - * var curried = _.curryRight(abc); - * - * curried(3)(2)(1); - * // => [1, 2, 3] - * - * curried(2, 3)(1); - * // => [1, 2, 3] - * - * curried(1, 2, 3); - * // => [1, 2, 3] - * - * // Curried with placeholders. - * curried(3)(1, _)(2); - * // => [1, 2, 3] - */ - function curryRight(func, arity, guard) { - arity = guard ? undefined : arity; - var result = createWrap(func, WRAP_CURRY_RIGHT_FLAG, undefined, undefined, undefined, undefined, undefined, arity); - result.placeholder = curryRight.placeholder; - return result; - } - - /** - * Creates a debounced function that delays invoking `func` until after `wait` - * milliseconds have elapsed since the last time the debounced function was - * invoked. The debounced function comes with a `cancel` method to cancel - * delayed `func` invocations and a `flush` method to immediately invoke them. 
- * Provide `options` to indicate whether `func` should be invoked on the - * leading and/or trailing edge of the `wait` timeout. The `func` is invoked - * with the last arguments provided to the debounced function. Subsequent - * calls to the debounced function return the result of the last `func` - * invocation. - * - * **Note:** If `leading` and `trailing` options are `true`, `func` is - * invoked on the trailing edge of the timeout only if the debounced function - * is invoked more than once during the `wait` timeout. - * - * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred - * until the next tick, similar to `setTimeout` with a timeout of `0`. - * - * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/) - * for details on the differences between `_.debounce` and `_.throttle`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to debounce. - * @param {number} [wait=0] The number of milliseconds to delay. - * @param {Object} [options={}] The options object. - * @param {boolean} [options.leading=false] - * Specify invoking on the leading edge of the timeout. - * @param {number} [options.maxWait] - * The maximum time `func` is allowed to be delayed before it's invoked. - * @param {boolean} [options.trailing=true] - * Specify invoking on the trailing edge of the timeout. - * @returns {Function} Returns the new debounced function. - * @example - * - * // Avoid costly calculations while the window size is in flux. - * jQuery(window).on('resize', _.debounce(calculateLayout, 150)); - * - * // Invoke `sendMail` when clicked, debouncing subsequent calls. - * jQuery(element).on('click', _.debounce(sendMail, 300, { - * 'leading': true, - * 'trailing': false - * })); - * - * // Ensure `batchLog` is invoked once after 1 second of debounced calls. - * var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 }); - * var source = new EventSource('/stream'); - * jQuery(source).on('message', debounced); - * - * // Cancel the trailing debounced invocation. - * jQuery(window).on('popstate', debounced.cancel); - */ - function debounce(func, wait, options) { - var lastArgs, - lastThis, - maxWait, - result, - timerId, - lastCallTime, - lastInvokeTime = 0, - leading = false, - maxing = false, - trailing = true; - - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - wait = toNumber(wait) || 0; - if (isObject(options)) { - leading = !!options.leading; - maxing = 'maxWait' in options; - maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait; - trailing = 'trailing' in options ? !!options.trailing : trailing; - } - - function invokeFunc(time) { - var args = lastArgs, - thisArg = lastThis; - - lastArgs = lastThis = undefined; - lastInvokeTime = time; - result = func.apply(thisArg, args); - return result; - } - - function leadingEdge(time) { - // Reset any `maxWait` timer. - lastInvokeTime = time; - // Start the timer for the trailing edge. - timerId = setTimeout(timerExpired, wait); - // Invoke the leading edge. - return leading ? invokeFunc(time) : result; - } - - function remainingWait(time) { - var timeSinceLastCall = time - lastCallTime, - timeSinceLastInvoke = time - lastInvokeTime, - timeWaiting = wait - timeSinceLastCall; - - return maxing - ?
nativeMin(timeWaiting, maxWait - timeSinceLastInvoke) - : timeWaiting; - } - - function shouldInvoke(time) { - var timeSinceLastCall = time - lastCallTime, - timeSinceLastInvoke = time - lastInvokeTime; - - // Either this is the first call, activity has stopped and we're at the - // trailing edge, the system time has gone backwards and we're treating - // it as the trailing edge, or we've hit the `maxWait` limit. - return (lastCallTime === undefined || (timeSinceLastCall >= wait) || - (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait)); - } - - function timerExpired() { - var time = now(); - if (shouldInvoke(time)) { - return trailingEdge(time); - } - // Restart the timer. - timerId = setTimeout(timerExpired, remainingWait(time)); - } - - function trailingEdge(time) { - timerId = undefined; - - // Only invoke if we have `lastArgs` which means `func` has been - // debounced at least once. - if (trailing && lastArgs) { - return invokeFunc(time); - } - lastArgs = lastThis = undefined; - return result; - } - - function cancel() { - if (timerId !== undefined) { - clearTimeout(timerId); - } - lastInvokeTime = 0; - lastArgs = lastCallTime = lastThis = timerId = undefined; - } - - function flush() { - return timerId === undefined ? result : trailingEdge(now()); - } - - function debounced() { - var time = now(), - isInvoking = shouldInvoke(time); - - lastArgs = arguments; - lastThis = this; - lastCallTime = time; - - if (isInvoking) { - if (timerId === undefined) { - return leadingEdge(lastCallTime); - } - if (maxing) { - // Handle invocations in a tight loop. - timerId = setTimeout(timerExpired, wait); - return invokeFunc(lastCallTime); - } - } - if (timerId === undefined) { - timerId = setTimeout(timerExpired, wait); - } - return result; - } - debounced.cancel = cancel; - debounced.flush = flush; - return debounced; - } - - /** - * Defers invoking the `func` until the current call stack has cleared. Any - * additional arguments are provided to `func` when it's invoked. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to defer. - * @param {...*} [args] The arguments to invoke `func` with. - * @returns {number} Returns the timer id. - * @example - * - * _.defer(function(text) { - * console.log(text); - * }, 'deferred'); - * // => Logs 'deferred' after one millisecond. - */ - var defer = baseRest(function(func, args) { - return baseDelay(func, 1, args); - }); - - /** - * Invokes `func` after `wait` milliseconds. Any additional arguments are - * provided to `func` when it's invoked. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to delay. - * @param {number} wait The number of milliseconds to delay invocation. - * @param {...*} [args] The arguments to invoke `func` with. - * @returns {number} Returns the timer id. - * @example - * - * _.delay(function(text) { - * console.log(text); - * }, 1000, 'later'); - * // => Logs 'later' after one second. - */ - var delay = baseRest(function(func, wait, args) { - return baseDelay(func, toNumber(wait) || 0, args); - }); - - /** - * Creates a function that invokes `func` with arguments reversed. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to flip arguments for. - * @returns {Function} Returns the new flipped function. 
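A sketch of the edge options the machinery above implements (assuming lodash as `_` and a timer environment such as Node; `stamp` is an illustrative helper, not part of the library):

```
function stamp() { console.log('invoked at', Date.now()); }

var onLead = _.debounce(stamp, 100, { 'leading': true, 'trailing': false });
onLead(); // invokes immediately (leading edge)
onLead(); // swallowed: still inside the 100ms window, no trailing call

var onTrail = _.debounce(stamp, 100); // trailing-only is the default
onTrail(); // schedules
onTrail(); // reschedules; `stamp` runs once, ~100ms after this call
```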
- * @example - * - * var flipped = _.flip(function() { - * return _.toArray(arguments); - * }); - * - * flipped('a', 'b', 'c', 'd'); - * // => ['d', 'c', 'b', 'a'] - */ - function flip(func) { - return createWrap(func, WRAP_FLIP_FLAG); - } - - /** - * Creates a function that memoizes the result of `func`. If `resolver` is - * provided, it determines the cache key for storing the result based on the - * arguments provided to the memoized function. By default, the first argument - * provided to the memoized function is used as the map cache key. The `func` - * is invoked with the `this` binding of the memoized function. - * - * **Note:** The cache is exposed as the `cache` property on the memoized - * function. Its creation may be customized by replacing the `_.memoize.Cache` - * constructor with one whose instances implement the - * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) - * method interface of `clear`, `delete`, `get`, `has`, and `set`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to have its output memoized. - * @param {Function} [resolver] The function to resolve the cache key. - * @returns {Function} Returns the new memoized function. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * var other = { 'c': 3, 'd': 4 }; - * - * var values = _.memoize(_.values); - * values(object); - * // => [1, 2] - * - * values(other); - * // => [3, 4] - * - * object.a = 2; - * values(object); - * // => [1, 2] - * - * // Modify the result cache. - * values.cache.set(object, ['a', 'b']); - * values(object); - * // => ['a', 'b'] - * - * // Replace `_.memoize.Cache`. - * _.memoize.Cache = WeakMap; - */ - function memoize(func, resolver) { - if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) { - throw new TypeError(FUNC_ERROR_TEXT); - } - var memoized = function() { - var args = arguments, - key = resolver ? resolver.apply(this, args) : args[0], - cache = memoized.cache; - - if (cache.has(key)) { - return cache.get(key); - } - var result = func.apply(this, args); - memoized.cache = cache.set(key, result) || cache; - return result; - }; - memoized.cache = new (memoize.Cache || MapCache); - return memoized; - } - - // Expose `MapCache`. - memoize.Cache = MapCache; - - /** - * Creates a function that negates the result of the predicate `func`. The - * `func` predicate is invoked with the `this` binding and arguments of the - * created function. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} predicate The predicate to negate. - * @returns {Function} Returns the new negated function. - * @example - * - * function isEven(n) { - * return n % 2 == 0; - * } - * - * _.filter([1, 2, 3, 4, 5, 6], _.negate(isEven)); - * // => [1, 3, 5] - */ - function negate(predicate) { - if (typeof predicate != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - return function() { - var args = arguments; - switch (args.length) { - case 0: return !predicate.call(this); - case 1: return !predicate.call(this, args[0]); - case 2: return !predicate.call(this, args[0], args[1]); - case 3: return !predicate.call(this, args[0], args[1], args[2]); - } - return !predicate.apply(this, args); - }; - } - - /** - * Creates a function that is restricted to invoking `func` once. Repeat calls - * to the function return the value of the first invocation. 
The `func` is - * invoked with the `this` binding and arguments of the created function. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to restrict. - * @returns {Function} Returns the new restricted function. - * @example - * - * var initialize = _.once(createApplication); - * initialize(); - * initialize(); - * // => `createApplication` is invoked once - */ - function once(func) { - return before(2, func); - } - - /** - * Creates a function that invokes `func` with its arguments transformed. - * - * @static - * @since 4.0.0 - * @memberOf _ - * @category Function - * @param {Function} func The function to wrap. - * @param {...(Function|Function[])} [transforms=[_.identity]] - * The argument transforms. - * @returns {Function} Returns the new function. - * @example - * - * function doubled(n) { - * return n * 2; - * } - * - * function square(n) { - * return n * n; - * } - * - * var func = _.overArgs(function(x, y) { - * return [x, y]; - * }, [square, doubled]); - * - * func(9, 3); - * // => [81, 6] - * - * func(10, 5); - * // => [100, 10] - */ - var overArgs = castRest(function(func, transforms) { - transforms = (transforms.length == 1 && isArray(transforms[0])) - ? arrayMap(transforms[0], baseUnary(getIteratee())) - : arrayMap(baseFlatten(transforms, 1), baseUnary(getIteratee())); - - var funcsLength = transforms.length; - return baseRest(function(args) { - var index = -1, - length = nativeMin(args.length, funcsLength); - - while (++index < length) { - args[index] = transforms[index].call(this, args[index]); - } - return apply(func, this, args); - }); - }); - - /** - * Creates a function that invokes `func` with `partials` prepended to the - * arguments it receives. This method is like `_.bind` except it does **not** - * alter the `this` binding. - * - * The `_.partial.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * **Note:** This method doesn't set the "length" property of partially - * applied functions. - * - * @static - * @memberOf _ - * @since 0.2.0 - * @category Function - * @param {Function} func The function to partially apply arguments to. - * @param {...*} [partials] The arguments to be partially applied. - * @returns {Function} Returns the new partially applied function. - * @example - * - * function greet(greeting, name) { - * return greeting + ' ' + name; - * } - * - * var sayHelloTo = _.partial(greet, 'hello'); - * sayHelloTo('fred'); - * // => 'hello fred' - * - * // Partially applied with placeholders. - * var greetFred = _.partial(greet, _, 'fred'); - * greetFred('hi'); - * // => 'hi fred' - */ - var partial = baseRest(function(func, partials) { - var holders = replaceHolders(partials, getHolder(partial)); - return createWrap(func, WRAP_PARTIAL_FLAG, undefined, partials, holders); - }); - - /** - * This method is like `_.partial` except that partially applied arguments - * are appended to the arguments it receives. - * - * The `_.partialRight.placeholder` value, which defaults to `_` in monolithic - * builds, may be used as a placeholder for partially applied arguments. - * - * **Note:** This method doesn't set the "length" property of partially - * applied functions. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Function - * @param {Function} func The function to partially apply arguments to. - * @param {...*} [partials] The arguments to be partially applied. 
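A sketch of the delegation in `once` above: `_.once(f)` is literally `_.before(2, f)`, so every call after the first replays the cached result (assuming lodash as `_`).

```
var init = _.once(function() { return Math.random(); });

var first = init();
var second = init();
first === second; // => true (second call replays the first result)
```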
- * @returns {Function} Returns the new partially applied function. - * @example - * - * function greet(greeting, name) { - * return greeting + ' ' + name; - * } - * - * var greetFred = _.partialRight(greet, 'fred'); - * greetFred('hi'); - * // => 'hi fred' - * - * // Partially applied with placeholders. - * var sayHelloTo = _.partialRight(greet, 'hello', _); - * sayHelloTo('fred'); - * // => 'hello fred' - */ - var partialRight = baseRest(function(func, partials) { - var holders = replaceHolders(partials, getHolder(partialRight)); - return createWrap(func, WRAP_PARTIAL_RIGHT_FLAG, undefined, partials, holders); - }); - - /** - * Creates a function that invokes `func` with arguments arranged according - * to the specified `indexes` where the argument value at the first index is - * provided as the first argument, the argument value at the second index is - * provided as the second argument, and so on. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Function - * @param {Function} func The function to rearrange arguments for. - * @param {...(number|number[])} indexes The arranged argument indexes. - * @returns {Function} Returns the new function. - * @example - * - * var rearged = _.rearg(function(a, b, c) { - * return [a, b, c]; - * }, [2, 0, 1]); - * - * rearged('b', 'c', 'a'); - * // => ['a', 'b', 'c'] - */ - var rearg = flatRest(function(func, indexes) { - return createWrap(func, WRAP_REARG_FLAG, undefined, undefined, undefined, indexes); - }); - - /** - * Creates a function that invokes `func` with the `this` binding of the - * created function and arguments from `start` and beyond provided as - * an array. - * - * **Note:** This method is based on the - * [rest parameter](https://mdn.io/rest_parameters). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to apply a rest parameter to. - * @param {number} [start=func.length-1] The start position of the rest parameter. - * @returns {Function} Returns the new function. - * @example - * - * var say = _.rest(function(what, names) { - * return what + ' ' + _.initial(names).join(', ') + - * (_.size(names) > 1 ? ', & ' : '') + _.last(names); - * }); - * - * say('hello', 'fred', 'barney', 'pebbles'); - * // => 'hello fred, barney, & pebbles' - */ - function rest(func, start) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - start = start === undefined ? start : toInteger(start); - return baseRest(func, start); - } - - /** - * Creates a function that invokes `func` with the `this` binding of the - * created function and an array of arguments much like - * [`Function#apply`](http://www.ecma-international.org/ecma-262/7.0/#sec-function.prototype.apply). - * - * **Note:** This method is based on the - * [spread operator](https://mdn.io/spread_operator). - * - * @static - * @memberOf _ - * @since 3.2.0 - * @category Function - * @param {Function} func The function to spread arguments over. - * @param {number} [start=0] The start position of the spread. - * @returns {Function} Returns the new function.
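A sketch of the `start` parameter of `rest` above (assuming lodash as `_`; `tagged` is an illustrative name): an explicit `start` gathers only the arguments from that position onward, like a native rest parameter in that slot.

```
var tagged = _.rest(function(tag, values) {
  return tag + ': ' + values.join(' + ');
}, 1);

tagged('sum', 1, 2, 3);
// => 'sum: 1 + 2 + 3'
```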
- * @example - * - * var say = _.spread(function(who, what) { - * return who + ' says ' + what; - * }); - * - * say(['fred', 'hello']); - * // => 'fred says hello' - * - * var numbers = Promise.all([ - * Promise.resolve(40), - * Promise.resolve(36) - * ]); - * - * numbers.then(_.spread(function(x, y) { - * return x + y; - * })); - * // => a Promise of 76 - */ - function spread(func, start) { - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - start = start == null ? 0 : nativeMax(toInteger(start), 0); - return baseRest(function(args) { - var array = args[start], - otherArgs = castSlice(args, 0, start); - - if (array) { - arrayPush(otherArgs, array); - } - return apply(func, this, otherArgs); - }); - } - - /** - * Creates a throttled function that only invokes `func` at most once per - * every `wait` milliseconds. The throttled function comes with a `cancel` - * method to cancel delayed `func` invocations and a `flush` method to - * immediately invoke them. Provide `options` to indicate whether `func` - * should be invoked on the leading and/or trailing edge of the `wait` - * timeout. The `func` is invoked with the last arguments provided to the - * throttled function. Subsequent calls to the throttled function return the - * result of the last `func` invocation. - * - * **Note:** If `leading` and `trailing` options are `true`, `func` is - * invoked on the trailing edge of the timeout only if the throttled function - * is invoked more than once during the `wait` timeout. - * - * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred - * until the next tick, similar to `setTimeout` with a timeout of `0`. - * - * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/) - * for details on the differences between `_.throttle` and `_.debounce`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {Function} func The function to throttle. - * @param {number} [wait=0] The number of milliseconds to throttle invocations to. - * @param {Object} [options={}] The options object. - * @param {boolean} [options.leading=true] - * Specify invoking on the leading edge of the timeout. - * @param {boolean} [options.trailing=true] - * Specify invoking on the trailing edge of the timeout. - * @returns {Function} Returns the new throttled function. - * @example - * - * // Avoid excessively updating the position while scrolling. - * jQuery(window).on('scroll', _.throttle(updatePosition, 100)); - * - * // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes. - * var throttled = _.throttle(renewToken, 300000, { 'trailing': false }); - * jQuery(element).on('click', throttled); - * - * // Cancel the trailing throttled invocation. - * jQuery(window).on('popstate', throttled.cancel); - */ - function throttle(func, wait, options) { - var leading = true, - trailing = true; - - if (typeof func != 'function') { - throw new TypeError(FUNC_ERROR_TEXT); - } - if (isObject(options)) { - leading = 'leading' in options ? !!options.leading : leading; - trailing = 'trailing' in options ? !!options.trailing : trailing; - } - return debounce(func, wait, { - 'leading': leading, - 'maxWait': wait, - 'trailing': trailing - }); - } - - /** - * Creates a function that accepts up to one argument, ignoring any - * additional arguments. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Function - * @param {Function} func The function to cap arguments for.
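A sketch of the delegation in `throttle` above: a throttle is a debounce whose `maxWait` is pinned to `wait`, so `func` can never be starved for longer than one window. Assuming lodash as `_` (and `update` as an illustrative stand-in), these two wrappers behave identically with default options:

```
function update() { /* ... */ }

var viaThrottle = _.throttle(update, 100);
var viaDebounce = _.debounce(update, 100, {
  'leading': true,
  'maxWait': 100,
  'trailing': true
});
```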
- * @returns {Function} Returns the new capped function. - * @example - * - * _.map(['6', '8', '10'], _.unary(parseInt)); - * // => [6, 8, 10] - */ - function unary(func) { - return ary(func, 1); - } - - /** - * Creates a function that provides `value` to `wrapper` as its first - * argument. Any additional arguments provided to the function are appended - * to those provided to the `wrapper`. The wrapper is invoked with the `this` - * binding of the created function. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Function - * @param {*} value The value to wrap. - * @param {Function} [wrapper=identity] The wrapper function. - * @returns {Function} Returns the new function. - * @example - * - * var p = _.wrap(_.escape, function(func, text) { - * return '<p>' + func(text) + '</p>'; - * }); - * - * p('fred, barney, & pebbles'); - * // => '<p>fred, barney, &amp; pebbles</p>' - */ - function wrap(value, wrapper) { - return partial(castFunction(wrapper), value); - } - - /*------------------------------------------------------------------------*/ - - /** - * Casts `value` as an array if it's not one. - * - * @static - * @memberOf _ - * @since 4.4.0 - * @category Lang - * @param {*} value The value to inspect. - * @returns {Array} Returns the cast array. - * @example - * - * _.castArray(1); - * // => [1] - * - * _.castArray({ 'a': 1 }); - * // => [{ 'a': 1 }] - * - * _.castArray('abc'); - * // => ['abc'] - * - * _.castArray(null); - * // => [null] - * - * _.castArray(undefined); - * // => [undefined] - * - * _.castArray(); - * // => [] - * - * var array = [1, 2, 3]; - * console.log(_.castArray(array) === array); - * // => true - */ - function castArray() { - if (!arguments.length) { - return []; - } - var value = arguments[0]; - return isArray(value) ? value : [value]; - } - - /** - * Creates a shallow clone of `value`. - * - * **Note:** This method is loosely based on the - * [structured clone algorithm](https://mdn.io/Structured_clone_algorithm) - * and supports cloning arrays, array buffers, booleans, date objects, maps, - * numbers, `Object` objects, regexes, sets, strings, symbols, and typed - * arrays. The own enumerable properties of `arguments` objects are cloned - * as plain objects. An empty object is returned for uncloneable values such - * as error objects, functions, DOM nodes, and WeakMaps. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to clone. - * @returns {*} Returns the cloned value. - * @see _.cloneDeep - * @example - * - * var objects = [{ 'a': 1 }, { 'b': 2 }]; - * - * var shallow = _.clone(objects); - * console.log(shallow[0] === objects[0]); - * // => true - */ - function clone(value) { - return baseClone(value, CLONE_SYMBOLS_FLAG); - } - - /** - * This method is like `_.clone` except that it accepts `customizer` which - * is invoked to produce the cloned value. If `customizer` returns `undefined`, - * cloning is handled by the method instead. The `customizer` is invoked with - * up to four arguments; (value [, index|key, object, stack]). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to clone. - * @param {Function} [customizer] The function to customize cloning. - * @returns {*} Returns the cloned value. - * @see _.cloneDeepWith - * @example - * - * function customizer(value) { - * if (_.isElement(value)) { - * return value.cloneNode(false); - * } - * } - * - * var el = _.cloneWith(document.body, customizer); - * - * console.log(el === document.body); - * // => false - * console.log(el.nodeName); - * // => 'BODY' - * console.log(el.childNodes.length); - * // => 0 - */ - function cloneWith(value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseClone(value, CLONE_SYMBOLS_FLAG, customizer); - } - - /** - * This method is like `_.clone` except that it recursively clones `value`. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Lang - * @param {*} value The value to recursively clone. - * @returns {*} Returns the deep cloned value.
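A contrast sketch for the clone family above (assuming lodash as `_`): `_.clone` shares nested objects, while `_.cloneDeep` copies them.

```
var objects = [{ 'a': 1 }, { 'b': 2 }];

_.clone(objects)[0] === objects[0];     // => true (shared reference)
_.cloneDeep(objects)[0] === objects[0]; // => false (fresh copy)
```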
- * @see _.clone - * @example - * - * var objects = [{ 'a': 1 }, { 'b': 2 }]; - * - * var deep = _.cloneDeep(objects); - * console.log(deep[0] === objects[0]); - * // => false - */ - function cloneDeep(value) { - return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG); - } - - /** - * This method is like `_.cloneWith` except that it recursively clones `value`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to recursively clone. - * @param {Function} [customizer] The function to customize cloning. - * @returns {*} Returns the deep cloned value. - * @see _.cloneWith - * @example - * - * function customizer(value) { - * if (_.isElement(value)) { - * return value.cloneNode(true); - * } - * } - * - * var el = _.cloneDeepWith(document.body, customizer); - * - * console.log(el === document.body); - * // => false - * console.log(el.nodeName); - * // => 'BODY' - * console.log(el.childNodes.length); - * // => 20 - */ - function cloneDeepWith(value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer); - } - - /** - * Checks if `object` conforms to `source` by invoking the predicate - * properties of `source` with the corresponding property values of `object`. - * - * **Note:** This method is equivalent to `_.conforms` when `source` is - * partially applied. - * - * @static - * @memberOf _ - * @since 4.14.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property predicates to conform to. - * @returns {boolean} Returns `true` if `object` conforms, else `false`. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * - * _.conformsTo(object, { 'b': function(n) { return n > 1; } }); - * // => true - * - * _.conformsTo(object, { 'b': function(n) { return n > 2; } }); - * // => false - */ - function conformsTo(object, source) { - return source == null || baseConformsTo(object, source, keys(source)); - } - - /** - * Performs a - * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) - * comparison between two values to determine if they are equivalent. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * var object = { 'a': 1 }; - * var other = { 'a': 1 }; - * - * _.eq(object, object); - * // => true - * - * _.eq(object, other); - * // => false - * - * _.eq('a', 'a'); - * // => true - * - * _.eq('a', Object('a')); - * // => false - * - * _.eq(NaN, NaN); - * // => true - */ - function eq(value, other) { - return value === other || (value !== value && other !== other); - } - - /** - * Checks if `value` is greater than `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than `other`, - * else `false`. - * @see _.lt - * @example - * - * _.gt(3, 1); - * // => true - * - * _.gt(3, 3); - * // => false - * - * _.gt(1, 3); - * // => false - */ - var gt = createRelationalOperation(baseGt); - - /** - * Checks if `value` is greater than or equal to `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. 
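The one-liner in `eq` above is the whole of SameValueZero: `NaN` is the only value not strictly equal to itself, so the second clause catches exactly the `NaN`/`NaN` pair. A standalone sketch:

```
// SameValueZero, as implemented by `eq` above.
function sameValueZero(a, b) {
  return a === b || (a !== a && b !== b);
}

sameValueZero(NaN, NaN); // => true
sameValueZero(0, -0);    // => true (unlike Object.is)
```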
- * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is greater than or equal to - * `other`, else `false`. - * @see _.lte - * @example - * - * _.gte(3, 1); - * // => true - * - * _.gte(3, 3); - * // => true - * - * _.gte(1, 3); - * // => false - */ - var gte = createRelationalOperation(function(value, other) { - return value >= other; - }); - - /** - * Checks if `value` is likely an `arguments` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an `arguments` object, - * else `false`. - * @example - * - * _.isArguments(function() { return arguments; }()); - * // => true - * - * _.isArguments([1, 2, 3]); - * // => false - */ - var isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) { - return isObjectLike(value) && hasOwnProperty.call(value, 'callee') && - !propertyIsEnumerable.call(value, 'callee'); - }; - - /** - * Checks if `value` is classified as an `Array` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array, else `false`. - * @example - * - * _.isArray([1, 2, 3]); - * // => true - * - * _.isArray(document.body.children); - * // => false - * - * _.isArray('abc'); - * // => false - * - * _.isArray(_.noop); - * // => false - */ - var isArray = Array.isArray; - - /** - * Checks if `value` is classified as an `ArrayBuffer` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array buffer, else `false`. - * @example - * - * _.isArrayBuffer(new ArrayBuffer(2)); - * // => true - * - * _.isArrayBuffer(new Array(2)); - * // => false - */ - var isArrayBuffer = nodeIsArrayBuffer ? baseUnary(nodeIsArrayBuffer) : baseIsArrayBuffer; - - /** - * Checks if `value` is array-like. A value is considered array-like if it's - * not a function and has a `value.length` that's an integer greater than or - * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is array-like, else `false`. - * @example - * - * _.isArrayLike([1, 2, 3]); - * // => true - * - * _.isArrayLike(document.body.children); - * // => true - * - * _.isArrayLike('abc'); - * // => true - * - * _.isArrayLike(_.noop); - * // => false - */ - function isArrayLike(value) { - return value != null && isLength(value.length) && !isFunction(value); - } - - /** - * This method is like `_.isArrayLike` except that it also checks if `value` - * is an object. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an array-like object, - * else `false`. - * @example - * - * _.isArrayLikeObject([1, 2, 3]); - * // => true - * - * _.isArrayLikeObject(document.body.children); - * // => true - * - * _.isArrayLikeObject('abc'); - * // => false - * - * _.isArrayLikeObject(_.noop); - * // => false - */ - function isArrayLikeObject(value) { - return isObjectLike(value) && isArrayLike(value); - } - - /** - * Checks if `value` is classified as a boolean primitive or object. 
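A boundary sketch for the `length` check behind `isArrayLike` above (assuming lodash as `_`): a valid length is a non-negative integer no larger than `Number.MAX_SAFE_INTEGER`.

```
_.isArrayLike({ 'length': 3 });        // => true
_.isArrayLike({ 'length': -1 });       // => false
_.isArrayLike({ 'length': 1.5 });      // => false
_.isArrayLike({ 'length': Infinity }); // => false
```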
- * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a boolean, else `false`. - * @example - * - * _.isBoolean(false); - * // => true - * - * _.isBoolean(null); - * // => false - */ - function isBoolean(value) { - return value === true || value === false || - (isObjectLike(value) && baseGetTag(value) == boolTag); - } - - /** - * Checks if `value` is a buffer. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a buffer, else `false`. - * @example - * - * _.isBuffer(new Buffer(2)); - * // => true - * - * _.isBuffer(new Uint8Array(2)); - * // => false - */ - var isBuffer = nativeIsBuffer || stubFalse; - - /** - * Checks if `value` is classified as a `Date` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a date object, else `false`. - * @example - * - * _.isDate(new Date); - * // => true - * - * _.isDate('Mon April 23 2012'); - * // => false - */ - var isDate = nodeIsDate ? baseUnary(nodeIsDate) : baseIsDate; - - /** - * Checks if `value` is likely a DOM element. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a DOM element, else `false`. - * @example - * - * _.isElement(document.body); - * // => true - * - * _.isElement('<body>'); - * // => false - */ - function isElement(value) { - return isObjectLike(value) && value.nodeType === 1 && !isPlainObject(value); - } - - /** - * Checks if `value` is an empty object, collection, map, or set. - * - * Objects are considered empty if they have no own enumerable string keyed - * properties. - * - * Array-like values such as `arguments` objects, arrays, buffers, strings, or - * jQuery-like collections are considered empty if they have a `length` of `0`. - * Similarly, maps and sets are considered empty if they have a `size` of `0`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is empty, else `false`. - * @example - * - * _.isEmpty(null); - * // => true - * - * _.isEmpty(true); - * // => true - * - * _.isEmpty(1); - * // => true - * - * _.isEmpty([1, 2, 3]); - * // => false - * - * _.isEmpty({ 'a': 1 }); - * // => false - */ - function isEmpty(value) { - if (value == null) { - return true; - } - if (isArrayLike(value) && - (isArray(value) || typeof value == 'string' || typeof value.splice == 'function' || - isBuffer(value) || isTypedArray(value) || isArguments(value))) { - return !value.length; - } - var tag = getTag(value); - if (tag == mapTag || tag == setTag) { - return !value.size; - } - if (isPrototype(value)) { - return !baseKeys(value).length; - } - for (var key in value) { - if (hasOwnProperty.call(value, key)) { - return false; - } - } - return true; - } - - /** - * Performs a deep comparison between two values to determine if they are - * equivalent. - * - * **Note:** This method supports comparing arrays, array buffers, booleans, - * date objects, error objects, maps, numbers, `Object` objects, regexes, - * sets, strings, symbols, and typed arrays. `Object` objects are compared - * by their own, not inherited, enumerable properties.
Functions and DOM - * nodes are compared by strict equality, i.e. `===`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * var object = { 'a': 1 }; - * var other = { 'a': 1 }; - * - * _.isEqual(object, other); - * // => true - * - * object === other; - * // => false - */ - function isEqual(value, other) { - return baseIsEqual(value, other); - } - - /** - * This method is like `_.isEqual` except that it accepts `customizer` which - * is invoked to compare values. If `customizer` returns `undefined`, comparisons - * are handled by the method instead. The `customizer` is invoked with up to - * six arguments: (objValue, othValue [, index|key, object, other, stack]). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if the values are equivalent, else `false`. - * @example - * - * function isGreeting(value) { - * return /^h(?:i|ello)$/.test(value); - * } - * - * function customizer(objValue, othValue) { - * if (isGreeting(objValue) && isGreeting(othValue)) { - * return true; - * } - * } - * - * var array = ['hello', 'goodbye']; - * var other = ['hi', 'goodbye']; - * - * _.isEqualWith(array, other, customizer); - * // => true - */ - function isEqualWith(value, other, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - var result = customizer ? customizer(value, other) : undefined; - return result === undefined ? baseIsEqual(value, other, undefined, customizer) : !!result; - } - - /** - * Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`, - * `SyntaxError`, `TypeError`, or `URIError` object. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an error object, else `false`. - * @example - * - * _.isError(new Error); - * // => true - * - * _.isError(Error); - * // => false - */ - function isError(value) { - if (!isObjectLike(value)) { - return false; - } - var tag = baseGetTag(value); - return tag == errorTag || tag == domExcTag || - (typeof value.message == 'string' && typeof value.name == 'string' && !isPlainObject(value)); - } - - /** - * Checks if `value` is a finite primitive number. - * - * **Note:** This method is based on - * [`Number.isFinite`](https://mdn.io/Number/isFinite). - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a finite number, else `false`. - * @example - * - * _.isFinite(3); - * // => true - * - * _.isFinite(Number.MIN_VALUE); - * // => true - * - * _.isFinite(Infinity); - * // => false - * - * _.isFinite('3'); - * // => false - */ - function isFinite(value) { - return typeof value == 'number' && nativeIsFinite(value); - } - - /** - * Checks if `value` is classified as a `Function` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a function, else `false`. 
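A coercion sketch for `isFinite` above (assuming lodash as `_` and a standard global `isFinite`): the method checks `typeof` first, so unlike the global it never coerces its argument.

```
_.isFinite('3');  // => false
isFinite('3');    // => true (global coerces '3' to 3)
_.isFinite(null); // => false
isFinite(null);   // => true (global coerces null to 0)
```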
- * @example - * - * _.isFunction(_); - * // => true - * - * _.isFunction(/abc/); - * // => false - */ - function isFunction(value) { - if (!isObject(value)) { - return false; - } - // The use of `Object#toString` avoids issues with the `typeof` operator - // in Safari 9 which returns 'object' for typed arrays and other constructors. - var tag = baseGetTag(value); - return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag; - } - - /** - * Checks if `value` is an integer. - * - * **Note:** This method is based on - * [`Number.isInteger`](https://mdn.io/Number/isInteger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an integer, else `false`. - * @example - * - * _.isInteger(3); - * // => true - * - * _.isInteger(Number.MIN_VALUE); - * // => false - * - * _.isInteger(Infinity); - * // => false - * - * _.isInteger('3'); - * // => false - */ - function isInteger(value) { - return typeof value == 'number' && value == toInteger(value); - } - - /** - * Checks if `value` is a valid array-like length. - * - * **Note:** This method is loosely based on - * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a valid length, else `false`. - * @example - * - * _.isLength(3); - * // => true - * - * _.isLength(Number.MIN_VALUE); - * // => false - * - * _.isLength(Infinity); - * // => false - * - * _.isLength('3'); - * // => false - */ - function isLength(value) { - return typeof value == 'number' && - value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER; - } - - /** - * Checks if `value` is the - * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) - * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is an object, else `false`. - * @example - * - * _.isObject({}); - * // => true - * - * _.isObject([1, 2, 3]); - * // => true - * - * _.isObject(_.noop); - * // => true - * - * _.isObject(null); - * // => false - */ - function isObject(value) { - var type = typeof value; - return value != null && (type == 'object' || type == 'function'); - } - - /** - * Checks if `value` is object-like. A value is object-like if it's not `null` - * and has a `typeof` result of "object". - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is object-like, else `false`. - * @example - * - * _.isObjectLike({}); - * // => true - * - * _.isObjectLike([1, 2, 3]); - * // => true - * - * _.isObjectLike(_.noop); - * // => false - * - * _.isObjectLike(null); - * // => false - */ - function isObjectLike(value) { - return value != null && typeof value == 'object'; - } - - /** - * Checks if `value` is classified as a `Map` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a map, else `false`. - * @example - * - * _.isMap(new Map); - * // => true - * - * _.isMap(new WeakMap); - * // => false - */ - var isMap = nodeIsMap ? 
baseUnary(nodeIsMap) : baseIsMap; - - /** - * Performs a partial deep comparison between `object` and `source` to - * determine if `object` contains equivalent property values. - * - * **Note:** This method is equivalent to `_.matches` when `source` is - * partially applied. - * - * Partial comparisons will match empty array and empty object `source` - * values against any array or object value, respectively. See `_.isEqual` - * for a list of supported value comparisons. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - * @example - * - * var object = { 'a': 1, 'b': 2 }; - * - * _.isMatch(object, { 'b': 2 }); - * // => true - * - * _.isMatch(object, { 'b': 1 }); - * // => false - */ - function isMatch(object, source) { - return object === source || baseIsMatch(object, source, getMatchData(source)); - } - - /** - * This method is like `_.isMatch` except that it accepts `customizer` which - * is invoked to compare values. If `customizer` returns `undefined`, comparisons - * are handled by the method instead. The `customizer` is invoked with five - * arguments: (objValue, srcValue, index|key, object, source). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {Object} object The object to inspect. - * @param {Object} source The object of property values to match. - * @param {Function} [customizer] The function to customize comparisons. - * @returns {boolean} Returns `true` if `object` is a match, else `false`. - * @example - * - * function isGreeting(value) { - * return /^h(?:i|ello)$/.test(value); - * } - * - * function customizer(objValue, srcValue) { - * if (isGreeting(objValue) && isGreeting(srcValue)) { - * return true; - * } - * } - * - * var object = { 'greeting': 'hello' }; - * var source = { 'greeting': 'hi' }; - * - * _.isMatchWith(object, source, customizer); - * // => true - */ - function isMatchWith(object, source, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return baseIsMatch(object, source, getMatchData(source), customizer); - } - - /** - * Checks if `value` is `NaN`. - * - * **Note:** This method is based on - * [`Number.isNaN`](https://mdn.io/Number/isNaN) and is not the same as - * global [`isNaN`](https://mdn.io/isNaN) which returns `true` for - * `undefined` and other non-number values. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `NaN`, else `false`. - * @example - * - * _.isNaN(NaN); - * // => true - * - * _.isNaN(new Number(NaN)); - * // => true - * - * isNaN(undefined); - * // => true - * - * _.isNaN(undefined); - * // => false - */ - function isNaN(value) { - // An `NaN` primitive is the only value that is not equal to itself. - // Perform the `toStringTag` check first to avoid errors with some - // ActiveX objects in IE. - return isNumber(value) && value != +value; - } - - /** - * Checks if `value` is a pristine native function. - * - * **Note:** This method can't reliably detect native functions in the presence - * of the core-js package because core-js circumvents this kind of detection. - * Despite multiple requests, the core-js maintainer has made it clear: any - * attempt to fix the detection will be obstructed. 
As a result, we're left - * with little choice but to throw an error. Unfortunately, this also affects - * packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill), - * which rely on core-js. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a native function, - * else `false`. - * @example - * - * _.isNative(Array.prototype.push); - * // => true - * - * _.isNative(_); - * // => false - */ - function isNative(value) { - if (isMaskable(value)) { - throw new Error(CORE_ERROR_TEXT); - } - return baseIsNative(value); - } - - /** - * Checks if `value` is `null`. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `null`, else `false`. - * @example - * - * _.isNull(null); - * // => true - * - * _.isNull(void 0); - * // => false - */ - function isNull(value) { - return value === null; - } - - /** - * Checks if `value` is `null` or `undefined`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is nullish, else `false`. - * @example - * - * _.isNil(null); - * // => true - * - * _.isNil(void 0); - * // => true - * - * _.isNil(NaN); - * // => false - */ - function isNil(value) { - return value == null; - } - - /** - * Checks if `value` is classified as a `Number` primitive or object. - * - * **Note:** To exclude `Infinity`, `-Infinity`, and `NaN`, which are - * classified as numbers, use the `_.isFinite` method. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a number, else `false`. - * @example - * - * _.isNumber(3); - * // => true - * - * _.isNumber(Number.MIN_VALUE); - * // => true - * - * _.isNumber(Infinity); - * // => true - * - * _.isNumber('3'); - * // => false - */ - function isNumber(value) { - return typeof value == 'number' || - (isObjectLike(value) && baseGetTag(value) == numberTag); - } - - /** - * Checks if `value` is a plain object, that is, an object created by the - * `Object` constructor or one with a `[[Prototype]]` of `null`. - * - * @static - * @memberOf _ - * @since 0.8.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a plain object, else `false`. - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * _.isPlainObject(new Foo); - * // => false - * - * _.isPlainObject([1, 2, 3]); - * // => false - * - * _.isPlainObject({ 'x': 0, 'y': 0 }); - * // => true - * - * _.isPlainObject(Object.create(null)); - * // => true - */ - function isPlainObject(value) { - if (!isObjectLike(value) || baseGetTag(value) != objectTag) { - return false; - } - var proto = getPrototype(value); - if (proto === null) { - return true; - } - var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor; - return typeof Ctor == 'function' && Ctor instanceof Ctor && - funcToString.call(Ctor) == objectCtorString; - } - - /** - * Checks if `value` is classified as a `RegExp` object. - * - * @static - * @memberOf _ - * @since 0.1.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a regexp, else `false`. 
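// A self-contained sketch of the prototype check `isPlainObject` performs
// above; `getPrototype`, `baseGetTag`, and `funcToString` are internal
// helpers, so this approximation substitutes `Object.getPrototypeOf` and
// `Function.prototype.toString` directly.
function isPlainObjectSketch(value) {
  if (value == null || typeof value != 'object' ||
      Object.prototype.toString.call(value) != '[object Object]') {
    return false;
  }
  var proto = Object.getPrototypeOf(value);
  if (proto === null) {
    return true; // covers Object.create(null)
  }
  // A plain object's prototype owns a `constructor` whose source text is
  // that of the built-in `Object` function; class instances fail this test.
  var Ctor = Object.prototype.hasOwnProperty.call(proto, 'constructor') && proto.constructor;
  return typeof Ctor == 'function' && Ctor instanceof Ctor &&
    Function.prototype.toString.call(Ctor) ==
      Function.prototype.toString.call(Object);
}
// isPlainObjectSketch({ 'x': 0 });           // => true
// isPlainObjectSketch(Object.create(null));  // => true
// isPlainObjectSketch([1, 2, 3]);            // => false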
- * @example - * - * _.isRegExp(/abc/); - * // => true - * - * _.isRegExp('/abc/'); - * // => false - */ - var isRegExp = nodeIsRegExp ? baseUnary(nodeIsRegExp) : baseIsRegExp; - - /** - * Checks if `value` is a safe integer. An integer is safe if it's an IEEE-754 - * double precision number which isn't the result of a rounded unsafe integer. - * - * **Note:** This method is based on - * [`Number.isSafeInteger`](https://mdn.io/Number/isSafeInteger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a safe integer, else `false`. - * @example - * - * _.isSafeInteger(3); - * // => true - * - * _.isSafeInteger(Number.MIN_VALUE); - * // => false - * - * _.isSafeInteger(Infinity); - * // => false - * - * _.isSafeInteger('3'); - * // => false - */ - function isSafeInteger(value) { - return isInteger(value) && value >= -MAX_SAFE_INTEGER && value <= MAX_SAFE_INTEGER; - } - - /** - * Checks if `value` is classified as a `Set` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a set, else `false`. - * @example - * - * _.isSet(new Set); - * // => true - * - * _.isSet(new WeakSet); - * // => false - */ - var isSet = nodeIsSet ? baseUnary(nodeIsSet) : baseIsSet; - - /** - * Checks if `value` is classified as a `String` primitive or object. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a string, else `false`. - * @example - * - * _.isString('abc'); - * // => true - * - * _.isString(1); - * // => false - */ - function isString(value) { - return typeof value == 'string' || - (!isArray(value) && isObjectLike(value) && baseGetTag(value) == stringTag); - } - - /** - * Checks if `value` is classified as a `Symbol` primitive or object. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. - * @example - * - * _.isSymbol(Symbol.iterator); - * // => true - * - * _.isSymbol('abc'); - * // => false - */ - function isSymbol(value) { - return typeof value == 'symbol' || - (isObjectLike(value) && baseGetTag(value) == symbolTag); - } - - /** - * Checks if `value` is classified as a typed array. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. - * @example - * - * _.isTypedArray(new Uint8Array); - * // => true - * - * _.isTypedArray([]); - * // => false - */ - var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray; - - /** - * Checks if `value` is `undefined`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is `undefined`, else `false`. - * @example - * - * _.isUndefined(void 0); - * // => true - * - * _.isUndefined(null); - * // => false - */ - function isUndefined(value) { - return value === undefined; - } - - /** - * Checks if `value` is classified as a `WeakMap` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a weak map, else `false`. 
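// A minimal sketch of the tag-based fallback the classifiers above share:
// `typeof` only catches primitives, while boxed values such as
// `new String('abc')` report 'object', so the check falls back to the
// `Object.prototype.toString` tag (standing in here for the internal
// `baseGetTag` helper).
function isStringSketch(value) {
  return typeof value == 'string' ||
    (value != null && typeof value == 'object' && !Array.isArray(value) &&
     Object.prototype.toString.call(value) == '[object String]');
}
// isStringSketch('abc');              // => true
// isStringSketch(new String('abc'));  // => true
// isStringSketch(['a', 'b']);         // => false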
- * @example - * - * _.isWeakMap(new WeakMap); - * // => true - * - * _.isWeakMap(new Map); - * // => false - */ - function isWeakMap(value) { - return isObjectLike(value) && getTag(value) == weakMapTag; - } - - /** - * Checks if `value` is classified as a `WeakSet` object. - * - * @static - * @memberOf _ - * @since 4.3.0 - * @category Lang - * @param {*} value The value to check. - * @returns {boolean} Returns `true` if `value` is a weak set, else `false`. - * @example - * - * _.isWeakSet(new WeakSet); - * // => true - * - * _.isWeakSet(new Set); - * // => false - */ - function isWeakSet(value) { - return isObjectLike(value) && baseGetTag(value) == weakSetTag; - } - - /** - * Checks if `value` is less than `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than `other`, - * else `false`. - * @see _.gt - * @example - * - * _.lt(1, 3); - * // => true - * - * _.lt(3, 3); - * // => false - * - * _.lt(3, 1); - * // => false - */ - var lt = createRelationalOperation(baseLt); - - /** - * Checks if `value` is less than or equal to `other`. - * - * @static - * @memberOf _ - * @since 3.9.0 - * @category Lang - * @param {*} value The value to compare. - * @param {*} other The other value to compare. - * @returns {boolean} Returns `true` if `value` is less than or equal to - * `other`, else `false`. - * @see _.gte - * @example - * - * _.lte(1, 3); - * // => true - * - * _.lte(3, 3); - * // => true - * - * _.lte(3, 1); - * // => false - */ - var lte = createRelationalOperation(function(value, other) { - return value <= other; - }); - - /** - * Converts `value` to an array. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Lang - * @param {*} value The value to convert. - * @returns {Array} Returns the converted array. - * @example - * - * _.toArray({ 'a': 1, 'b': 2 }); - * // => [1, 2] - * - * _.toArray('abc'); - * // => ['a', 'b', 'c'] - * - * _.toArray(1); - * // => [] - * - * _.toArray(null); - * // => [] - */ - function toArray(value) { - if (!value) { - return []; - } - if (isArrayLike(value)) { - return isString(value) ? stringToArray(value) : copyArray(value); - } - if (symIterator && value[symIterator]) { - return iteratorToArray(value[symIterator]()); - } - var tag = getTag(value), - func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values); - - return func(value); - } - - /** - * Converts `value` to a finite number. - * - * @static - * @memberOf _ - * @since 4.12.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted number. - * @example - * - * _.toFinite(3.2); - * // => 3.2 - * - * _.toFinite(Number.MIN_VALUE); - * // => 5e-324 - * - * _.toFinite(Infinity); - * // => 1.7976931348623157e+308 - * - * _.toFinite('3.2'); - * // => 3.2 - */ - function toFinite(value) { - if (!value) { - return value === 0 ? value : 0; - } - value = toNumber(value); - if (value === INFINITY || value === -INFINITY) { - var sign = (value < 0 ? -1 : 1); - return sign * MAX_INTEGER; - } - return value === value ? value : 0; - } - - /** - * Converts `value` to an integer. - * - * **Note:** This method is loosely based on - * [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. 
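// A rough sketch of the dispatch `toArray` performs above, with built-ins
// standing in for the internal copyArray/iteratorToArray/mapToArray/
// setToArray helpers; note the real version also splits strings by Unicode
// code point rather than by UTF-16 unit.
function toArraySketch(value) {
  if (!value) {
    return [];
  }
  if (typeof value.length == 'number') {
    return Array.prototype.slice.call(value); // array-likes, incl. strings
  }
  if (typeof value[Symbol.iterator] == 'function') {
    return Array.from(value); // maps, sets, other iterables
  }
  return Object.keys(value).map(function(key) { return value[key]; });
}
// toArraySketch('abc');            // => ['a', 'b', 'c']
// toArraySketch(new Set([1, 2]));  // => [1, 2]
// toArraySketch({ 'a': 1 });       // => [1]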
- * @example - * - * _.toInteger(3.2); - * // => 3 - * - * _.toInteger(Number.MIN_VALUE); - * // => 0 - * - * _.toInteger(Infinity); - * // => 1.7976931348623157e+308 - * - * _.toInteger('3.2'); - * // => 3 - */ - function toInteger(value) { - var result = toFinite(value), - remainder = result % 1; - - return result === result ? (remainder ? result - remainder : result) : 0; - } - - /** - * Converts `value` to an integer suitable for use as the length of an - * array-like object. - * - * **Note:** This method is based on - * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. - * @example - * - * _.toLength(3.2); - * // => 3 - * - * _.toLength(Number.MIN_VALUE); - * // => 0 - * - * _.toLength(Infinity); - * // => 4294967295 - * - * _.toLength('3.2'); - * // => 3 - */ - function toLength(value) { - return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0; - } - - /** - * Converts `value` to a number. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to process. - * @returns {number} Returns the number. - * @example - * - * _.toNumber(3.2); - * // => 3.2 - * - * _.toNumber(Number.MIN_VALUE); - * // => 5e-324 - * - * _.toNumber(Infinity); - * // => Infinity - * - * _.toNumber('3.2'); - * // => 3.2 - */ - function toNumber(value) { - if (typeof value == 'number') { - return value; - } - if (isSymbol(value)) { - return NAN; - } - if (isObject(value)) { - var other = typeof value.valueOf == 'function' ? value.valueOf() : value; - value = isObject(other) ? (other + '') : other; - } - if (typeof value != 'string') { - return value === 0 ? value : +value; - } - value = value.replace(reTrim, ''); - var isBinary = reIsBinary.test(value); - return (isBinary || reIsOctal.test(value)) - ? freeParseInt(value.slice(2), isBinary ? 2 : 8) - : (reIsBadHex.test(value) ? NAN : +value); - } - - /** - * Converts `value` to a plain object flattening inherited enumerable string - * keyed properties of `value` to own properties of the plain object. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {Object} Returns the converted plain object. - * @example - * - * function Foo() { - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.assign({ 'a': 1 }, new Foo); - * // => { 'a': 1, 'b': 2 } - * - * _.assign({ 'a': 1 }, _.toPlainObject(new Foo)); - * // => { 'a': 1, 'b': 2, 'c': 3 } - */ - function toPlainObject(value) { - return copyObject(value, keysIn(value)); - } - - /** - * Converts `value` to a safe integer. A safe integer can be compared and - * represented correctly. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {number} Returns the converted integer. - * @example - * - * _.toSafeInteger(3.2); - * // => 3 - * - * _.toSafeInteger(Number.MIN_VALUE); - * // => 0 - * - * _.toSafeInteger(Infinity); - * // => 9007199254740991 - * - * _.toSafeInteger('3.2'); - * // => 3 - */ - function toSafeInteger(value) { - return value - ? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER) - : (value === 0 ? value : 0); - } - - /** - * Converts `value` to a string. An empty string is returned for `null` - * and `undefined` values. The sign of `-0` is preserved. 
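// How that '-0' survives, sketched standalone: String(-0) is just '0', and
// negative zero is only observable through division (1 / -0 === -Infinity),
// which is the trick the internal `baseToString` relies on.
function toStringZeroAware(value) {
  if (value == null) {
    return '';
  }
  var result = String(value);
  return (result == '0' && 1 / value == -Infinity) ? '-0' : result;
}
// toStringZeroAware(-0);    // => '-0'
// toStringZeroAware(0);     // => '0'
// toStringZeroAware(null);  // => ''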
- * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Lang - * @param {*} value The value to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.toString(null); - * // => '' - * - * _.toString(-0); - * // => '-0' - * - * _.toString([1, 2, 3]); - * // => '1,2,3' - */ - function toString(value) { - return value == null ? '' : baseToString(value); - } - - /*------------------------------------------------------------------------*/ - - /** - * Assigns own enumerable string keyed properties of source objects to the - * destination object. Source objects are applied from left to right. - * Subsequent sources overwrite property assignments of previous sources. - * - * **Note:** This method mutates `object` and is loosely based on - * [`Object.assign`](https://mdn.io/Object/assign). - * - * @static - * @memberOf _ - * @since 0.10.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.assignIn - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * function Bar() { - * this.c = 3; - * } - * - * Foo.prototype.b = 2; - * Bar.prototype.d = 4; - * - * _.assign({ 'a': 0 }, new Foo, new Bar); - * // => { 'a': 1, 'c': 3 } - */ - var assign = createAssigner(function(object, source) { - if (isPrototype(source) || isArrayLike(source)) { - copyObject(source, keys(source), object); - return; - } - for (var key in source) { - if (hasOwnProperty.call(source, key)) { - assignValue(object, key, source[key]); - } - } - }); - - /** - * This method is like `_.assign` except that it iterates over own and - * inherited source properties. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias extend - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.assign - * @example - * - * function Foo() { - * this.a = 1; - * } - * - * function Bar() { - * this.c = 3; - * } - * - * Foo.prototype.b = 2; - * Bar.prototype.d = 4; - * - * _.assignIn({ 'a': 0 }, new Foo, new Bar); - * // => { 'a': 1, 'b': 2, 'c': 3, 'd': 4 } - */ - var assignIn = createAssigner(function(object, source) { - copyObject(source, keysIn(source), object); - }); - - /** - * This method is like `_.assignIn` except that it accepts `customizer` - * which is invoked to produce the assigned values. If `customizer` returns - * `undefined`, assignment is handled by the method instead. The `customizer` - * is invoked with five arguments: (objValue, srcValue, key, object, source). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias extendWith - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @see _.assignWith - * @example - * - * function customizer(objValue, srcValue) { - * return _.isUndefined(objValue) ? 
srcValue : objValue; - * } - * - * var defaults = _.partialRight(_.assignInWith, customizer); - * - * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var assignInWith = createAssigner(function(object, source, srcIndex, customizer) { - copyObject(source, keysIn(source), object, customizer); - }); - - /** - * This method is like `_.assign` except that it accepts `customizer` - * which is invoked to produce the assigned values. If `customizer` returns - * `undefined`, assignment is handled by the method instead. The `customizer` - * is invoked with five arguments: (objValue, srcValue, key, object, source). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @see _.assignInWith - * @example - * - * function customizer(objValue, srcValue) { - * return _.isUndefined(objValue) ? srcValue : objValue; - * } - * - * var defaults = _.partialRight(_.assignWith, customizer); - * - * defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var assignWith = createAssigner(function(object, source, srcIndex, customizer) { - copyObject(source, keys(source), object, customizer); - }); - - /** - * Creates an array of values corresponding to `paths` of `object`. - * - * @static - * @memberOf _ - * @since 1.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Array} Returns the picked values. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }, 4] }; - * - * _.at(object, ['a[0].b.c', 'a[1]']); - * // => [3, 4] - */ - var at = flatRest(baseAt); - - /** - * Creates an object that inherits from the `prototype` object. If a - * `properties` object is given, its own enumerable string keyed properties - * are assigned to the created object. - * - * @static - * @memberOf _ - * @since 2.3.0 - * @category Object - * @param {Object} prototype The object to inherit from. - * @param {Object} [properties] The properties to assign to the object. - * @returns {Object} Returns the new object. - * @example - * - * function Shape() { - * this.x = 0; - * this.y = 0; - * } - * - * function Circle() { - * Shape.call(this); - * } - * - * Circle.prototype = _.create(Shape.prototype, { - * 'constructor': Circle - * }); - * - * var circle = new Circle; - * circle instanceof Circle; - * // => true - * - * circle instanceof Shape; - * // => true - */ - function create(prototype, properties) { - var result = baseCreate(prototype); - return properties == null ? result : baseAssign(result, properties); - } - - /** - * Assigns own and inherited enumerable string keyed properties of source - * objects to the destination object for all destination properties that - * resolve to `undefined`. Source objects are applied from left to right. - * Once a property is set, additional values of the same property are ignored. - * - * **Note:** This method mutates `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. 
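// The customizer contract used by `_.assignInWith` in the example above,
// sketched without lodash: the assigner consults the customizer first and
// only copies the source value itself when the customizer yields undefined.
function assignWithSketch(object, source, customizer) {
  Object.keys(source).forEach(function(key) {
    var result = customizer(object[key], source[key], key, object, source);
    object[key] = result === undefined ? source[key] : result;
  });
  return object;
}
// A 'keep existing values' customizer yields defaults-style behavior:
var preferExisting = function(objValue, srcValue) {
  return objValue === undefined ? srcValue : objValue;
};
// assignWithSketch({ 'a': 1 }, { 'a': 3, 'b': 2 }, preferExisting);
// => { 'a': 1, 'b': 2 }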
- * @see _.defaultsDeep - * @example - * - * _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); - * // => { 'a': 1, 'b': 2 } - */ - var defaults = baseRest(function(object, sources) { - object = Object(object); - - var index = -1; - var length = sources.length; - var guard = length > 2 ? sources[2] : undefined; - - if (guard && isIterateeCall(sources[0], sources[1], guard)) { - length = 1; - } - - while (++index < length) { - var source = sources[index]; - var props = keysIn(source); - var propsIndex = -1; - var propsLength = props.length; - - while (++propsIndex < propsLength) { - var key = props[propsIndex]; - var value = object[key]; - - if (value === undefined || - (eq(value, objectProto[key]) && !hasOwnProperty.call(object, key))) { - object[key] = source[key]; - } - } - } - - return object; - }); - - /** - * This method is like `_.defaults` except that it recursively assigns - * default properties. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 3.10.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @see _.defaults - * @example - * - * _.defaultsDeep({ 'a': { 'b': 2 } }, { 'a': { 'b': 1, 'c': 3 } }); - * // => { 'a': { 'b': 2, 'c': 3 } } - */ - var defaultsDeep = baseRest(function(args) { - args.push(undefined, customDefaultsMerge); - return apply(mergeWith, undefined, args); - }); - - /** - * This method is like `_.find` except that it returns the key of the first - * element `predicate` returns truthy for instead of the element itself. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category Object - * @param {Object} object The object to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {string|undefined} Returns the key of the matched element, - * else `undefined`. - * @example - * - * var users = { - * 'barney': { 'age': 36, 'active': true }, - * 'fred': { 'age': 40, 'active': false }, - * 'pebbles': { 'age': 1, 'active': true } - * }; - * - * _.findKey(users, function(o) { return o.age < 40; }); - * // => 'barney' (iteration order is not guaranteed) - * - * // The `_.matches` iteratee shorthand. - * _.findKey(users, { 'age': 1, 'active': true }); - * // => 'pebbles' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findKey(users, ['active', false]); - * // => 'fred' - * - * // The `_.property` iteratee shorthand. - * _.findKey(users, 'active'); - * // => 'barney' - */ - function findKey(object, predicate) { - return baseFindKey(object, getIteratee(predicate, 3), baseForOwn); - } - - /** - * This method is like `_.findKey` except that it iterates over elements of - * a collection in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to inspect. - * @param {Function} [predicate=_.identity] The function invoked per iteration. - * @returns {string|undefined} Returns the key of the matched element, - * else `undefined`. - * @example - * - * var users = { - * 'barney': { 'age': 36, 'active': true }, - * 'fred': { 'age': 40, 'active': false }, - * 'pebbles': { 'age': 1, 'active': true } - * }; - * - * _.findLastKey(users, function(o) { return o.age < 40; }); - * // => returns 'pebbles' assuming `_.findKey` returns 'barney' - * - * // The `_.matches` iteratee shorthand. 
- * _.findLastKey(users, { 'age': 36, 'active': true }); - * // => 'barney' - * - * // The `_.matchesProperty` iteratee shorthand. - * _.findLastKey(users, ['active', false]); - * // => 'fred' - * - * // The `_.property` iteratee shorthand. - * _.findLastKey(users, 'active'); - * // => 'pebbles' - */ - function findLastKey(object, predicate) { - return baseFindKey(object, getIteratee(predicate, 3), baseForOwnRight); - } - - /** - * Iterates over own and inherited enumerable string keyed properties of an - * object and invokes `iteratee` for each property. The iteratee is invoked - * with three arguments: (value, key, object). Iteratee functions may exit - * iteration early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 0.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forInRight - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forIn(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed). - */ - function forIn(object, iteratee) { - return object == null - ? object - : baseFor(object, getIteratee(iteratee, 3), keysIn); - } - - /** - * This method is like `_.forIn` except that it iterates over properties of - * `object` in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forIn - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forInRight(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'c', 'b', then 'a' assuming `_.forIn` logs 'a', 'b', then 'c'. - */ - function forInRight(object, iteratee) { - return object == null - ? object - : baseForRight(object, getIteratee(iteratee, 3), keysIn); - } - - /** - * Iterates over own enumerable string keyed properties of an object and - * invokes `iteratee` for each property. The iteratee is invoked with three - * arguments: (value, key, object). Iteratee functions may exit iteration - * early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 0.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. - * @see _.forOwnRight - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forOwn(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'a' then 'b' (iteration order is not guaranteed). - */ - function forOwn(object, iteratee) { - return object && baseForOwn(object, getIteratee(iteratee, 3)); - } - - /** - * This method is like `_.forOwn` except that it iterates over properties of - * `object` in the opposite order. - * - * @static - * @memberOf _ - * @since 2.0.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns `object`. 
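// The own-vs-inherited split above, sketched with plain for...in: `forIn`
// walks every enumerable key on the prototype chain, `forOwn` filters with
// hasOwnProperty, and returning false from the iteratee stops either early.
function forInSketch(object, iteratee) {
  for (var key in Object(object)) {
    if (iteratee(object[key], key, object) === false) {
      break;
    }
  }
  return object;
}
function forOwnSketch(object, iteratee) {
  for (var key in Object(object)) {
    if (Object.prototype.hasOwnProperty.call(object, key) &&
        iteratee(object[key], key, object) === false) {
      break;
    }
  }
  return object;
}
// function Foo() { this.a = 1; }
// Foo.prototype.c = 3;
// forInSketch(new Foo, function(v, k) { console.log(k); });  // logs 'a', 'c'
// forOwnSketch(new Foo, function(v, k) { console.log(k); }); // logs 'a'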
- * @see _.forOwn - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.forOwnRight(new Foo, function(value, key) { - * console.log(key); - * }); - * // => Logs 'b' then 'a' assuming `_.forOwn` logs 'a' then 'b'. - */ - function forOwnRight(object, iteratee) { - return object && baseForOwnRight(object, getIteratee(iteratee, 3)); - } - - /** - * Creates an array of function property names from own enumerable properties - * of `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to inspect. - * @returns {Array} Returns the function names. - * @see _.functionsIn - * @example - * - * function Foo() { - * this.a = _.constant('a'); - * this.b = _.constant('b'); - * } - * - * Foo.prototype.c = _.constant('c'); - * - * _.functions(new Foo); - * // => ['a', 'b'] - */ - function functions(object) { - return object == null ? [] : baseFunctions(object, keys(object)); - } - - /** - * Creates an array of function property names from own and inherited - * enumerable properties of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to inspect. - * @returns {Array} Returns the function names. - * @see _.functions - * @example - * - * function Foo() { - * this.a = _.constant('a'); - * this.b = _.constant('b'); - * } - * - * Foo.prototype.c = _.constant('c'); - * - * _.functionsIn(new Foo); - * // => ['a', 'b', 'c'] - */ - function functionsIn(object) { - return object == null ? [] : baseFunctions(object, keysIn(object)); - } - - /** - * Gets the value at `path` of `object`. If the resolved value is - * `undefined`, the `defaultValue` is returned in its place. - * - * @static - * @memberOf _ - * @since 3.7.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to get. - * @param {*} [defaultValue] The value returned for `undefined` resolved values. - * @returns {*} Returns the resolved value. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.get(object, 'a[0].b.c'); - * // => 3 - * - * _.get(object, ['a', '0', 'b', 'c']); - * // => 3 - * - * _.get(object, 'a.b.c', 'default'); - * // => 'default' - */ - function get(object, path, defaultValue) { - var result = object == null ? undefined : baseGet(object, path); - return result === undefined ? defaultValue : result; - } - - /** - * Checks if `path` is a direct property of `object`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @returns {boolean} Returns `true` if `path` exists, else `false`. - * @example - * - * var object = { 'a': { 'b': 2 } }; - * var other = _.create({ 'a': _.create({ 'b': 2 }) }); - * - * _.has(object, 'a'); - * // => true - * - * _.has(object, 'a.b'); - * // => true - * - * _.has(object, ['a', 'b']); - * // => true - * - * _.has(other, 'a'); - * // => false - */ - function has(object, path) { - return object != null && hasPath(object, path, baseHas); - } - - /** - * Checks if `path` is a direct or inherited property of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path to check. - * @returns {boolean} Returns `true` if `path` exists, else `false`. 
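// A minimal sketch of the path walk behind `get` above; the real version
// also parses bracket strings like 'a[0].b.c' through the internal
// `castPath`, so this simplified walk accepts an array of keys only.
function getSketch(object, path, defaultValue) {
  var index = 0,
      length = path.length;

  while (object != null && index < length) {
    object = object[path[index++]];
  }
  var result = (index && index == length) ? object : undefined;
  return result === undefined ? defaultValue : result;
}
// getSketch({ 'a': [{ 'b': { 'c': 3 } }] }, ['a', '0', 'b', 'c']);  // => 3
// getSketch({ 'a': 1 }, ['a', 'b', 'c'], 'default');                // => 'default'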
- * @example - * - * var object = _.create({ 'a': _.create({ 'b': 2 }) }); - * - * _.hasIn(object, 'a'); - * // => true - * - * _.hasIn(object, 'a.b'); - * // => true - * - * _.hasIn(object, ['a', 'b']); - * // => true - * - * _.hasIn(object, 'b'); - * // => false - */ - function hasIn(object, path) { - return object != null && hasPath(object, path, baseHasIn); - } - - /** - * Creates an object composed of the inverted keys and values of `object`. - * If `object` contains duplicate values, subsequent values overwrite - * property assignments of previous values. - * - * @static - * @memberOf _ - * @since 0.7.0 - * @category Object - * @param {Object} object The object to invert. - * @returns {Object} Returns the new inverted object. - * @example - * - * var object = { 'a': 1, 'b': 2, 'c': 1 }; - * - * _.invert(object); - * // => { '1': 'c', '2': 'b' } - */ - var invert = createInverter(function(result, value, key) { - if (value != null && - typeof value.toString != 'function') { - value = nativeObjectToString.call(value); - } - - result[value] = key; - }, constant(identity)); - - /** - * This method is like `_.invert` except that the inverted object is generated - * from the results of running each element of `object` thru `iteratee`. The - * corresponding inverted value of each inverted key is an array of keys - * responsible for generating the inverted value. The iteratee is invoked - * with one argument: (value). - * - * @static - * @memberOf _ - * @since 4.1.0 - * @category Object - * @param {Object} object The object to invert. - * @param {Function} [iteratee=_.identity] The iteratee invoked per element. - * @returns {Object} Returns the new inverted object. - * @example - * - * var object = { 'a': 1, 'b': 2, 'c': 1 }; - * - * _.invertBy(object); - * // => { '1': ['a', 'c'], '2': ['b'] } - * - * _.invertBy(object, function(value) { - * return 'group' + value; - * }); - * // => { 'group1': ['a', 'c'], 'group2': ['b'] } - */ - var invertBy = createInverter(function(result, value, key) { - if (value != null && - typeof value.toString != 'function') { - value = nativeObjectToString.call(value); - } - - if (hasOwnProperty.call(result, value)) { - result[value].push(key); - } else { - result[value] = [key]; - } - }, getIteratee); - - /** - * Invokes the method at `path` of `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the method to invoke. - * @param {...*} [args] The arguments to invoke the method with. - * @returns {*} Returns the result of the invoked method. - * @example - * - * var object = { 'a': [{ 'b': { 'c': [1, 2, 3, 4] } }] }; - * - * _.invoke(object, 'a[0].b.c.slice', 1, 3); - * // => [2, 3] - */ - var invoke = baseRest(baseInvoke); - - /** - * Creates an array of the own enumerable property names of `object`. - * - * **Note:** Non-object values are coerced to objects. See the - * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) - * for more details. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.keys(new Foo); - * // => ['a', 'b'] (iteration order is not guaranteed) - * - * _.keys('hi'); - * // => ['0', '1'] - */ - function keys(object) { - return isArrayLike(object) ? 
arrayLikeKeys(object) : baseKeys(object); - } - - /** - * Creates an array of the own and inherited enumerable property names of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property names. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.keysIn(new Foo); - * // => ['a', 'b', 'c'] (iteration order is not guaranteed) - */ - function keysIn(object) { - return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object); - } - - /** - * The opposite of `_.mapValues`; this method creates an object with the - * same values as `object` and keys generated by running each own enumerable - * string keyed property of `object` thru `iteratee`. The iteratee is invoked - * with three arguments: (value, key, object). - * - * @static - * @memberOf _ - * @since 3.8.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns the new mapped object. - * @see _.mapValues - * @example - * - * _.mapKeys({ 'a': 1, 'b': 2 }, function(value, key) { - * return key + value; - * }); - * // => { 'a1': 1, 'b2': 2 } - */ - function mapKeys(object, iteratee) { - var result = {}; - iteratee = getIteratee(iteratee, 3); - - baseForOwn(object, function(value, key, object) { - baseAssignValue(result, iteratee(value, key, object), value); - }); - return result; - } - - /** - * Creates an object with the same keys as `object` and values generated - * by running each own enumerable string keyed property of `object` thru - * `iteratee`. The iteratee is invoked with three arguments: - * (value, key, object). - * - * @static - * @memberOf _ - * @since 2.4.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @returns {Object} Returns the new mapped object. - * @see _.mapKeys - * @example - * - * var users = { - * 'fred': { 'user': 'fred', 'age': 40 }, - * 'pebbles': { 'user': 'pebbles', 'age': 1 } - * }; - * - * _.mapValues(users, function(o) { return o.age; }); - * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) - * - * // The `_.property` iteratee shorthand. - * _.mapValues(users, 'age'); - * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) - */ - function mapValues(object, iteratee) { - var result = {}; - iteratee = getIteratee(iteratee, 3); - - baseForOwn(object, function(value, key, object) { - baseAssignValue(result, key, iteratee(value, key, object)); - }); - return result; - } - - /** - * This method is like `_.assign` except that it recursively merges own and - * inherited enumerable string keyed properties of source objects into the - * destination object. Source properties that resolve to `undefined` are - * skipped if a destination value exists. Array and plain object properties - * are merged recursively. Other objects and value types are overridden by - * assignment. Source objects are applied from left to right. Subsequent - * sources overwrite property assignments of previous sources. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 0.5.0 - * @category Object - * @param {Object} object The destination object. 
- * @param {...Object} [sources] The source objects. - * @returns {Object} Returns `object`. - * @example - * - * var object = { - * 'a': [{ 'b': 2 }, { 'd': 4 }] - * }; - * - * var other = { - * 'a': [{ 'c': 3 }, { 'e': 5 }] - * }; - * - * _.merge(object, other); - * // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] } - */ - var merge = createAssigner(function(object, source, srcIndex) { - baseMerge(object, source, srcIndex); - }); - - /** - * This method is like `_.merge` except that it accepts `customizer` which - * is invoked to produce the merged values of the destination and source - * properties. If `customizer` returns `undefined`, merging is handled by the - * method instead. The `customizer` is invoked with six arguments: - * (objValue, srcValue, key, object, source, stack). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The destination object. - * @param {...Object} sources The source objects. - * @param {Function} customizer The function to customize assigned values. - * @returns {Object} Returns `object`. - * @example - * - * function customizer(objValue, srcValue) { - * if (_.isArray(objValue)) { - * return objValue.concat(srcValue); - * } - * } - * - * var object = { 'a': [1], 'b': [2] }; - * var other = { 'a': [3], 'b': [4] }; - * - * _.mergeWith(object, other, customizer); - * // => { 'a': [1, 3], 'b': [2, 4] } - */ - var mergeWith = createAssigner(function(object, source, srcIndex, customizer) { - baseMerge(object, source, srcIndex, customizer); - }); - - /** - * The opposite of `_.pick`; this method creates an object composed of the - * own and inherited enumerable property paths of `object` that are not omitted. - * - * **Note:** This method is considerably slower than `_.pick`. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The source object. - * @param {...(string|string[])} [paths] The property paths to omit. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.omit(object, ['a', 'c']); - * // => { 'b': '2' } - */ - var omit = flatRest(function(object, paths) { - var result = {}; - if (object == null) { - return result; - } - var isDeep = false; - paths = arrayMap(paths, function(path) { - path = castPath(path, object); - isDeep || (isDeep = path.length > 1); - return path; - }); - copyObject(object, getAllKeysIn(object), result); - if (isDeep) { - result = baseClone(result, CLONE_DEEP_FLAG | CLONE_FLAT_FLAG | CLONE_SYMBOLS_FLAG, customOmitClone); - } - var length = paths.length; - while (length--) { - baseUnset(result, paths[length]); - } - return result; - }); - - /** - * The opposite of `_.pickBy`; this method creates an object composed of - * the own and inherited enumerable string keyed properties of `object` that - * `predicate` doesn't return truthy for. The predicate is invoked with two - * arguments: (value, key). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The source object. - * @param {Function} [predicate=_.identity] The function invoked per property. - * @returns {Object} Returns the new object. 
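// The recursive walk `merge` performs above, sketched for plain data only:
// nested arrays and plain objects merge key-by-key, anything else is
// overwritten (cycle tracking via the internal Stack and customizer support
// are omitted).
function mergeSketch(object, source) {
  Object.keys(source).forEach(function(key) {
    var objValue = object[key],
        srcValue = source[key];
    if (objValue && srcValue &&
        typeof objValue == 'object' && typeof srcValue == 'object') {
      mergeSketch(objValue, srcValue);
    } else if (srcValue !== undefined) {
      object[key] = srcValue;
    }
  });
  return object;
}
// mergeSketch({ 'a': [{ 'b': 2 }, { 'd': 4 }] },
//             { 'a': [{ 'c': 3 }, { 'e': 5 }] });
// => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] }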
- * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.omitBy(object, _.isNumber); - * // => { 'b': '2' } - */ - function omitBy(object, predicate) { - return pickBy(object, negate(getIteratee(predicate))); - } - - /** - * Creates an object composed of the picked `object` properties. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The source object. - * @param {...(string|string[])} [paths] The property paths to pick. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.pick(object, ['a', 'c']); - * // => { 'a': 1, 'c': 3 } - */ - var pick = flatRest(function(object, paths) { - return object == null ? {} : basePick(object, paths); - }); - - /** - * Creates an object composed of the `object` properties `predicate` returns - * truthy for. The predicate is invoked with two arguments: (value, key). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The source object. - * @param {Function} [predicate=_.identity] The function invoked per property. - * @returns {Object} Returns the new object. - * @example - * - * var object = { 'a': 1, 'b': '2', 'c': 3 }; - * - * _.pickBy(object, _.isNumber); - * // => { 'a': 1, 'c': 3 } - */ - function pickBy(object, predicate) { - if (object == null) { - return {}; - } - var props = arrayMap(getAllKeysIn(object), function(prop) { - return [prop]; - }); - predicate = getIteratee(predicate); - return basePickBy(object, props, function(value, path) { - return predicate(value, path[0]); - }); - } - - /** - * This method is like `_.get` except that if the resolved value is a - * function it's invoked with the `this` binding of its parent object and - * its result is returned. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @param {Array|string} path The path of the property to resolve. - * @param {*} [defaultValue] The value returned for `undefined` resolved values. - * @returns {*} Returns the resolved value. - * @example - * - * var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] }; - * - * _.result(object, 'a[0].b.c1'); - * // => 3 - * - * _.result(object, 'a[0].b.c2'); - * // => 4 - * - * _.result(object, 'a[0].b.c3', 'default'); - * // => 'default' - * - * _.result(object, 'a[0].b.c3', _.constant('default')); - * // => 'default' - */ - function result(object, path, defaultValue) { - path = castPath(path, object); - - var index = -1, - length = path.length; - - // Ensure the loop is entered when path is empty. - if (!length) { - length = 1; - object = undefined; - } - while (++index < length) { - var value = object == null ? undefined : object[toKey(path[index])]; - if (value === undefined) { - index = length; - value = defaultValue; - } - object = isFunction(value) ? value.call(object) : value; - } - return object; - } - - /** - * Sets the value at `path` of `object`. If a portion of `path` doesn't exist, - * it's created. Arrays are created for missing index properties while objects - * are created for all other missing properties. Use `_.setWith` to customize - * `path` creation. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 3.7.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @returns {Object} Returns `object`. 
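// The path-creation rule `set` documents above, sketched standalone: while
// walking, a missing step becomes an array when the next key looks like an
// index and a plain object otherwise (bracket-string parsing is omitted, so
// paths come in as arrays of keys).
function setSketch(object, path, value) {
  var nested = object;
  for (var i = 0; i < path.length - 1; i++) {
    var key = path[i];
    if (nested[key] == null) {
      nested[key] = /^\d+$/.test(path[i + 1]) ? [] : {};
    }
    nested = nested[key];
  }
  nested[path[path.length - 1]] = value;
  return object;
}
// setSketch({}, ['x', '0', 'y', 'z'], 5);
// => { 'x': [{ 'y': { 'z': 5 } }] }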
- * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.set(object, 'a[0].b.c', 4); - * console.log(object.a[0].b.c); - * // => 4 - * - * _.set(object, ['x', '0', 'y', 'z'], 5); - * console.log(object.x[0].y.z); - * // => 5 - */ - function set(object, path, value) { - return object == null ? object : baseSet(object, path, value); - } - - /** - * This method is like `_.set` except that it accepts `customizer` which is - * invoked to produce the objects of `path`. If `customizer` returns `undefined` - * path creation is handled by the method instead. The `customizer` is invoked - * with three arguments: (nsValue, key, nsObject). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {*} value The value to set. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. - * @example - * - * var object = {}; - * - * _.setWith(object, '[0][1]', 'a', Object); - * // => { '0': { '1': 'a' } } - */ - function setWith(object, path, value, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return object == null ? object : baseSet(object, path, value, customizer); - } - - /** - * Creates an array of own enumerable string keyed-value pairs for `object` - * which can be consumed by `_.fromPairs`. If `object` is a map or set, its - * entries are returned. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias entries - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the key-value pairs. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.toPairs(new Foo); - * // => [['a', 1], ['b', 2]] (iteration order is not guaranteed) - */ - var toPairs = createToPairs(keys); - - /** - * Creates an array of own and inherited enumerable string keyed-value pairs - * for `object` which can be consumed by `_.fromPairs`. If `object` is a map - * or set, its entries are returned. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @alias entriesIn - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the key-value pairs. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.toPairsIn(new Foo); - * // => [['a', 1], ['b', 2], ['c', 3]] (iteration order is not guaranteed) - */ - var toPairsIn = createToPairs(keysIn); - - /** - * An alternative to `_.reduce`; this method transforms `object` to a new - * `accumulator` object which is the result of running each of its own - * enumerable string keyed properties thru `iteratee`, with each invocation - * potentially mutating the `accumulator` object. If `accumulator` is not - * provided, a new object with the same `[[Prototype]]` will be used. The - * iteratee is invoked with four arguments: (accumulator, value, key, object). - * Iteratee functions may exit iteration early by explicitly returning `false`. - * - * @static - * @memberOf _ - * @since 1.3.0 - * @category Object - * @param {Object} object The object to iterate over. - * @param {Function} [iteratee=_.identity] The function invoked per iteration. - * @param {*} [accumulator] The custom accumulator value. - * @returns {*} Returns the accumulated value. 
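// What passing `Object` as `customizer` to `setWith` changes, sketched on
// top of the walk above: the customizer produces each missing step, so the
// numeric-index heuristic is bypassed and '0'/'1' land on plain objects
// rather than arrays (a hypothetical simplification of the real baseSet).
function setWithSketch(object, path, value, customizer) {
  var nested = object;
  for (var i = 0; i < path.length - 1; i++) {
    var key = path[i];
    if (nested[key] == null) {
      var made = customizer ? customizer(nested[key], key, nested) : undefined;
      nested[key] = made === undefined
        ? (/^\d+$/.test(path[i + 1]) ? [] : {})
        : made;
    }
    nested = nested[key];
  }
  nested[path[path.length - 1]] = value;
  return object;
}
// setWithSketch({}, ['0', '1'], 'a', Object);  // => { '0': { '1': 'a' } }
// setWithSketch({}, ['0', '1'], 'a');          // => { '0': [<1 empty item>, 'a'] }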
- * @example - * - * _.transform([2, 3, 4], function(result, n) { - * result.push(n *= n); - * return n % 2 == 0; - * }, []); - * // => [4, 9] - * - * _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { - * (result[value] || (result[value] = [])).push(key); - * }, {}); - * // => { '1': ['a', 'c'], '2': ['b'] } - */ - function transform(object, iteratee, accumulator) { - var isArr = isArray(object), - isArrLike = isArr || isBuffer(object) || isTypedArray(object); - - iteratee = getIteratee(iteratee, 4); - if (accumulator == null) { - var Ctor = object && object.constructor; - if (isArrLike) { - accumulator = isArr ? new Ctor : []; - } - else if (isObject(object)) { - accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {}; - } - else { - accumulator = {}; - } - } - (isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) { - return iteratee(accumulator, value, index, object); - }); - return accumulator; - } - - /** - * Removes the property at `path` of `object`. - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to unset. - * @returns {boolean} Returns `true` if the property is deleted, else `false`. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 7 } }] }; - * _.unset(object, 'a[0].b.c'); - * // => true - * - * console.log(object); - * // => { 'a': [{ 'b': {} }] }; - * - * _.unset(object, ['a', '0', 'b', 'c']); - * // => true - * - * console.log(object); - * // => { 'a': [{ 'b': {} }] }; - */ - function unset(object, path) { - return object == null ? true : baseUnset(object, path); - } - - /** - * This method is like `_.set` except that accepts `updater` to produce the - * value to set. Use `_.updateWith` to customize `path` creation. The `updater` - * is invoked with one argument: (value). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.6.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {Function} updater The function to produce the updated value. - * @returns {Object} Returns `object`. - * @example - * - * var object = { 'a': [{ 'b': { 'c': 3 } }] }; - * - * _.update(object, 'a[0].b.c', function(n) { return n * n; }); - * console.log(object.a[0].b.c); - * // => 9 - * - * _.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; }); - * console.log(object.x[0].y.z); - * // => 0 - */ - function update(object, path, updater) { - return object == null ? object : baseUpdate(object, path, castFunction(updater)); - } - - /** - * This method is like `_.update` except that it accepts `customizer` which is - * invoked to produce the objects of `path`. If `customizer` returns `undefined` - * path creation is handled by the method instead. The `customizer` is invoked - * with three arguments: (nsValue, key, nsObject). - * - * **Note:** This method mutates `object`. - * - * @static - * @memberOf _ - * @since 4.6.0 - * @category Object - * @param {Object} object The object to modify. - * @param {Array|string} path The path of the property to set. - * @param {Function} updater The function to produce the updated value. - * @param {Function} [customizer] The function to customize assigned values. - * @returns {Object} Returns `object`. 
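// The accumulator inference `transform` performs above, sketched in a
// simplified form: arrays get a fresh array, while other objects get an
// empty object sharing the source's prototype, so inherited methods stay
// reachable on the result.
function inferAccumulator(object) {
  if (Array.isArray(object)) {
    return [];
  }
  if (object !== null && typeof object == 'object') {
    return Object.create(Object.getPrototypeOf(object));
  }
  return {};
}
// function Counter() { this.n = 1; }
// Counter.prototype.bump = function() { return this.n; };
// var acc = inferAccumulator(new Counter);
// typeof acc.bump;          // => 'function' (prototype preserved)
// acc.hasOwnProperty('n');  // => false (own data not copied)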
- * @example - * - * var object = {}; - * - * _.updateWith(object, '[0][1]', _.constant('a'), Object); - * // => { '0': { '1': 'a' } } - */ - function updateWith(object, path, updater, customizer) { - customizer = typeof customizer == 'function' ? customizer : undefined; - return object == null ? object : baseUpdate(object, path, castFunction(updater), customizer); - } - - /** - * Creates an array of the own enumerable string keyed property values of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property values. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.values(new Foo); - * // => [1, 2] (iteration order is not guaranteed) - * - * _.values('hi'); - * // => ['h', 'i'] - */ - function values(object) { - return object == null ? [] : baseValues(object, keys(object)); - } - - /** - * Creates an array of the own and inherited enumerable string keyed property - * values of `object`. - * - * **Note:** Non-object values are coerced to objects. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category Object - * @param {Object} object The object to query. - * @returns {Array} Returns the array of property values. - * @example - * - * function Foo() { - * this.a = 1; - * this.b = 2; - * } - * - * Foo.prototype.c = 3; - * - * _.valuesIn(new Foo); - * // => [1, 2, 3] (iteration order is not guaranteed) - */ - function valuesIn(object) { - return object == null ? [] : baseValues(object, keysIn(object)); - } - - /*------------------------------------------------------------------------*/ - - /** - * Clamps `number` within the inclusive `lower` and `upper` bounds. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category Number - * @param {number} number The number to clamp. - * @param {number} [lower] The lower bound. - * @param {number} upper The upper bound. - * @returns {number} Returns the clamped number. - * @example - * - * _.clamp(-10, -5, 5); - * // => -5 - * - * _.clamp(10, -5, 5); - * // => 5 - */ - function clamp(number, lower, upper) { - if (upper === undefined) { - upper = lower; - lower = undefined; - } - if (upper !== undefined) { - upper = toNumber(upper); - upper = upper === upper ? upper : 0; - } - if (lower !== undefined) { - lower = toNumber(lower); - lower = lower === lower ? lower : 0; - } - return baseClamp(toNumber(number), lower, upper); - } - - /** - * Checks if `n` is between `start` and up to, but not including, `end`. If - * `end` is not specified, it's set to `start` with `start` then set to `0`. - * If `start` is greater than `end` the params are swapped to support - * negative ranges. - * - * @static - * @memberOf _ - * @since 3.3.0 - * @category Number - * @param {number} number The number to check. - * @param {number} [start=0] The start of the range. - * @param {number} end The end of the range. - * @returns {boolean} Returns `true` if `number` is in the range, else `false`. 
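// The argument shuffle in `clamp` above, sketched together with the bound
// handling of the internal `baseClamp`: a lone bound is treated as the
// upper one, NaN bounds collapse to 0, and a NaN number passes through.
function clampSketch(number, lower, upper) {
  if (upper === undefined) {
    upper = lower;
    lower = undefined;
  }
  if (number !== number) {
    return number;                        // NaN input is left alone
  }
  if (upper !== undefined) {
    upper = upper === upper ? upper : 0;  // NaN bound becomes 0
    number = number <= upper ? number : upper;
  }
  if (lower !== undefined) {
    lower = lower === lower ? lower : 0;
    number = number >= lower ? number : lower;
  }
  return number;
}
// clampSketch(10, 5);       // => 5 (5 acts as the upper bound)
// clampSketch(-10, -5, 5);  // => -5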
- * @see _.range, _.rangeRight - * @example - * - * _.inRange(3, 2, 4); - * // => true - * - * _.inRange(4, 8); - * // => true - * - * _.inRange(4, 2); - * // => false - * - * _.inRange(2, 2); - * // => false - * - * _.inRange(1.2, 2); - * // => true - * - * _.inRange(5.2, 4); - * // => false - * - * _.inRange(-3, -2, -6); - * // => true - */ - function inRange(number, start, end) { - start = toFinite(start); - if (end === undefined) { - end = start; - start = 0; - } else { - end = toFinite(end); - } - number = toNumber(number); - return baseInRange(number, start, end); - } - - /** - * Produces a random number between the inclusive `lower` and `upper` bounds. - * If only one argument is provided a number between `0` and the given number - * is returned. If `floating` is `true`, or either `lower` or `upper` are - * floats, a floating-point number is returned instead of an integer. - * - * **Note:** JavaScript follows the IEEE-754 standard for resolving - * floating-point values which can produce unexpected results. - * - * @static - * @memberOf _ - * @since 0.7.0 - * @category Number - * @param {number} [lower=0] The lower bound. - * @param {number} [upper=1] The upper bound. - * @param {boolean} [floating] Specify returning a floating-point number. - * @returns {number} Returns the random number. - * @example - * - * _.random(0, 5); - * // => an integer between 0 and 5 - * - * _.random(5); - * // => also an integer between 0 and 5 - * - * _.random(5, true); - * // => a floating-point number between 0 and 5 - * - * _.random(1.2, 5.2); - * // => a floating-point number between 1.2 and 5.2 - */ - function random(lower, upper, floating) { - if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) { - upper = floating = undefined; - } - if (floating === undefined) { - if (typeof upper == 'boolean') { - floating = upper; - upper = undefined; - } - else if (typeof lower == 'boolean') { - floating = lower; - lower = undefined; - } - } - if (lower === undefined && upper === undefined) { - lower = 0; - upper = 1; - } - else { - lower = toFinite(lower); - if (upper === undefined) { - upper = lower; - lower = 0; - } else { - upper = toFinite(upper); - } - } - if (lower > upper) { - var temp = lower; - lower = upper; - upper = temp; - } - if (floating || lower % 1 || upper % 1) { - var rand = nativeRandom(); - return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper); - } - return baseRandom(lower, upper); - } - - /*------------------------------------------------------------------------*/ - - /** - * Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the camel cased string. - * @example - * - * _.camelCase('Foo Bar'); - * // => 'fooBar' - * - * _.camelCase('--foo-bar--'); - * // => 'fooBar' - * - * _.camelCase('__FOO_BAR__'); - * // => 'fooBar' - */ - var camelCase = createCompounder(function(result, word, index) { - word = word.toLowerCase(); - return result + (index ? capitalize(word) : word); - }); - - /** - * Converts the first character of `string` to upper case and the remaining - * to lower case. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to capitalize. - * @returns {string} Returns the capitalized string. 
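// The floating-point branch of `random` above, isolated: the small
// parseFloat('1e-…') term widens the interval just enough that `upper`
// itself is reachable, and the closing Math.min guards against overshoot
// (parseFloat/Math.min/Math.random stand in for the free*/native* refs).
function randomFloatSketch(lower, upper) {
  var rand = Math.random();
  return Math.min(
    lower + (rand * (upper - lower + parseFloat('1e-' + ((rand + '').length - 1)))),
    upper);
}
// randomFloatSketch(1.2, 5.2);  // => a float in [1.2, 5.2], upper inclusive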
- * @example - * - * _.capitalize('FRED'); - * // => 'Fred' - */ - function capitalize(string) { - return upperFirst(toString(string).toLowerCase()); - } - - /** - * Deburrs `string` by converting - * [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table) - * and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A) - * letters to basic Latin letters and removing - * [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to deburr. - * @returns {string} Returns the deburred string. - * @example - * - * _.deburr('déjà vu'); - * // => 'deja vu' - */ - function deburr(string) { - string = toString(string); - return string && string.replace(reLatin, deburrLetter).replace(reComboMark, ''); - } - - /** - * Checks if `string` ends with the given target string. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to inspect. - * @param {string} [target] The string to search for. - * @param {number} [position=string.length] The position to search up to. - * @returns {boolean} Returns `true` if `string` ends with `target`, - * else `false`. - * @example - * - * _.endsWith('abc', 'c'); - * // => true - * - * _.endsWith('abc', 'b'); - * // => false - * - * _.endsWith('abc', 'b', 2); - * // => true - */ - function endsWith(string, target, position) { - string = toString(string); - target = baseToString(target); - - var length = string.length; - position = position === undefined - ? length - : baseClamp(toInteger(position), 0, length); - - var end = position; - position -= target.length; - return position >= 0 && string.slice(position, end) == target; - } - - /** - * Converts the characters "&", "<", ">", '"', and "'" in `string` to their - * corresponding HTML entities. - * - * **Note:** No other characters are escaped. To escape additional - * characters use a third-party library like [_he_](https://mths.be/he). - * - * Though the ">" character is escaped for symmetry, characters like - * ">" and "/" don't need escaping in HTML and have no special meaning - * unless they're part of a tag or unquoted attribute value. See - * [Mathias Bynens's article](https://mathiasbynens.be/notes/ambiguous-ampersands) - * (under "semi-related fun fact") for more details. - * - * When working with HTML you should always - * [quote attribute values](http://wonko.com/post/html-escaping) to reduce - * XSS vectors. - * - * @static - * @since 0.1.0 - * @memberOf _ - * @category String - * @param {string} [string=''] The string to escape. - * @returns {string} Returns the escaped string. - * @example - * - * _.escape('fred, barney, & pebbles'); - * // => 'fred, barney, & pebbles' - */ - function escape(string) { - string = toString(string); - return (string && reHasUnescapedHtml.test(string)) - ? string.replace(reUnescapedHtml, escapeHtmlChar) - : string; - } - - /** - * Escapes the `RegExp` special characters "^", "$", "\", ".", "*", "+", - * "?", "(", ")", "[", "]", "{", "}", and "|" in `string`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to escape. - * @returns {string} Returns the escaped string. 
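A short sketch (not from the source above) of the slice arithmetic in `endsWith`: `position` is clamped to `[0, string.length]`, then the implementation compares `string.slice(position - target.length, position)` with `target`.

```js
_.endsWith('abc', 'c');     // => true  (position defaults to the string length)
_.endsWith('abc', 'b', 2);  // => true  (compares 'abc'.slice(1, 2) with 'b')
_.endsWith('abc', 'c', 99); // => true  (99 is clamped down to 3)
```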
- * @example - * - * _.escapeRegExp('[lodash](https://lodash.com/)'); - * // => '\[lodash\]\(https://lodash\.com/\)' - */ - function escapeRegExp(string) { - string = toString(string); - return (string && reHasRegExpChar.test(string)) - ? string.replace(reRegExpChar, '\\$&') - : string; - } - - /** - * Converts `string` to - * [kebab case](https://en.wikipedia.org/wiki/Letter_case#Special_case_styles). - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the kebab cased string. - * @example - * - * _.kebabCase('Foo Bar'); - * // => 'foo-bar' - * - * _.kebabCase('fooBar'); - * // => 'foo-bar' - * - * _.kebabCase('__FOO_BAR__'); - * // => 'foo-bar' - */ - var kebabCase = createCompounder(function(result, word, index) { - return result + (index ? '-' : '') + word.toLowerCase(); - }); - - /** - * Converts `string`, as space separated words, to lower case. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the lower cased string. - * @example - * - * _.lowerCase('--Foo-Bar--'); - * // => 'foo bar' - * - * _.lowerCase('fooBar'); - * // => 'foo bar' - * - * _.lowerCase('__FOO_BAR__'); - * // => 'foo bar' - */ - var lowerCase = createCompounder(function(result, word, index) { - return result + (index ? ' ' : '') + word.toLowerCase(); - }); - - /** - * Converts the first character of `string` to lower case. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the converted string. - * @example - * - * _.lowerFirst('Fred'); - * // => 'fred' - * - * _.lowerFirst('FRED'); - * // => 'fRED' - */ - var lowerFirst = createCaseFirst('toLowerCase'); - - /** - * Pads `string` on the left and right sides if it's shorter than `length`. - * Padding characters are truncated if they can't be evenly divided by `length`. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.pad('abc', 8); - * // => ' abc ' - * - * _.pad('abc', 8, '_-'); - * // => '_-abc_-_' - * - * _.pad('abc', 3); - * // => 'abc' - */ - function pad(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? stringSize(string) : 0; - if (!length || strLength >= length) { - return string; - } - var mid = (length - strLength) / 2; - return ( - createPadding(nativeFloor(mid), chars) + - string + - createPadding(nativeCeil(mid), chars) - ); - } - - /** - * Pads `string` on the right side if it's shorter than `length`. Padding - * characters are truncated if they exceed `length`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.padEnd('abc', 6); - * // => 'abc ' - * - * _.padEnd('abc', 6, '_-'); - * // => 'abc_-_' - * - * _.padEnd('abc', 3); - * // => 'abc' - */ - function padEnd(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? 
stringSize(string) : 0; - return (length && strLength < length) - ? (string + createPadding(length - strLength, chars)) - : string; - } - - /** - * Pads `string` on the left side if it's shorter than `length`. Padding - * characters are truncated if they exceed `length`. - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to pad. - * @param {number} [length=0] The padding length. - * @param {string} [chars=' '] The string used as padding. - * @returns {string} Returns the padded string. - * @example - * - * _.padStart('abc', 6); - * // => ' abc' - * - * _.padStart('abc', 6, '_-'); - * // => '_-_abc' - * - * _.padStart('abc', 3); - * // => 'abc' - */ - function padStart(string, length, chars) { - string = toString(string); - length = toInteger(length); - - var strLength = length ? stringSize(string) : 0; - return (length && strLength < length) - ? (createPadding(length - strLength, chars) + string) - : string; - } - - /** - * Converts `string` to an integer of the specified radix. If `radix` is - * `undefined` or `0`, a `radix` of `10` is used unless `value` is a - * hexadecimal, in which case a `radix` of `16` is used. - * - * **Note:** This method aligns with the - * [ES5 implementation](https://es5.github.io/#x15.1.2.2) of `parseInt`. - * - * @static - * @memberOf _ - * @since 1.1.0 - * @category String - * @param {string} string The string to convert. - * @param {number} [radix=10] The radix to interpret `value` by. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {number} Returns the converted integer. - * @example - * - * _.parseInt('08'); - * // => 8 - * - * _.map(['6', '08', '10'], _.parseInt); - * // => [6, 8, 10] - */ - function parseInt(string, radix, guard) { - if (guard || radix == null) { - radix = 0; - } else if (radix) { - radix = +radix; - } - return nativeParseInt(toString(string).replace(reTrimStart, ''), radix || 0); - } - - /** - * Repeats the given string `n` times. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to repeat. - * @param {number} [n=1] The number of times to repeat the string. - * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`. - * @returns {string} Returns the repeated string. - * @example - * - * _.repeat('*', 3); - * // => '***' - * - * _.repeat('abc', 2); - * // => 'abcabc' - * - * _.repeat('abc', 0); - * // => '' - */ - function repeat(string, n, guard) { - if ((guard ? isIterateeCall(string, n, guard) : n === undefined)) { - n = 1; - } else { - n = toInteger(n); - } - return baseRepeat(toString(string), n); - } - - /** - * Replaces matches for `pattern` in `string` with `replacement`. - * - * **Note:** This method is based on - * [`String#replace`](https://mdn.io/String/replace). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to modify. - * @param {RegExp|string} pattern The pattern to replace. - * @param {Function|string} replacement The match replacement. - * @returns {string} Returns the modified string. - * @example - * - * _.replace('Hi Fred', 'Fred', 'Barney'); - * // => 'Hi Barney' - */ - function replace() { - var args = arguments, - string = toString(args[0]); - - return args.length < 3 ? string : string.replace(args[1], args[2]); - } - - /** - * Converts `string` to - * [snake case](https://en.wikipedia.org/wiki/Snake_case). 
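For context on the `guard` parameter of `parseInt` above (an illustrative aside): it exists so the function can be passed directly to `_.map`, where the native `parseInt` would receive each element's index as its radix.

```js
['6', '08', '10'].map(parseInt);      // => [6, NaN, 2]  (indices 0, 1, 2 are used as radixes)
_.map(['6', '08', '10'], _.parseInt); // => [6, 8, 10]   (guard makes the extra arguments harmless)
```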
- * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the snake cased string. - * @example - * - * _.snakeCase('Foo Bar'); - * // => 'foo_bar' - * - * _.snakeCase('fooBar'); - * // => 'foo_bar' - * - * _.snakeCase('--FOO-BAR--'); - * // => 'foo_bar' - */ - var snakeCase = createCompounder(function(result, word, index) { - return result + (index ? '_' : '') + word.toLowerCase(); - }); - - /** - * Splits `string` by `separator`. - * - * **Note:** This method is based on - * [`String#split`](https://mdn.io/String/split). - * - * @static - * @memberOf _ - * @since 4.0.0 - * @category String - * @param {string} [string=''] The string to split. - * @param {RegExp|string} separator The separator pattern to split by. - * @param {number} [limit] The length to truncate results to. - * @returns {Array} Returns the string segments. - * @example - * - * _.split('a-b-c', '-', 2); - * // => ['a', 'b'] - */ - function split(string, separator, limit) { - if (limit && typeof limit != 'number' && isIterateeCall(string, separator, limit)) { - separator = limit = undefined; - } - limit = limit === undefined ? MAX_ARRAY_LENGTH : limit >>> 0; - if (!limit) { - return []; - } - string = toString(string); - if (string && ( - typeof separator == 'string' || - (separator != null && !isRegExp(separator)) - )) { - separator = baseToString(separator); - if (!separator && hasUnicode(string)) { - return castSlice(stringToArray(string), 0, limit); - } - } - return string.split(separator, limit); - } - - /** - * Converts `string` to - * [start case](https://en.wikipedia.org/wiki/Letter_case#Stylistic_or_specialised_usage). - * - * @static - * @memberOf _ - * @since 3.1.0 - * @category String - * @param {string} [string=''] The string to convert. - * @returns {string} Returns the start cased string. - * @example - * - * _.startCase('--foo-bar--'); - * // => 'Foo Bar' - * - * _.startCase('fooBar'); - * // => 'Foo Bar' - * - * _.startCase('__FOO_BAR__'); - * // => 'FOO BAR' - */ - var startCase = createCompounder(function(result, word, index) { - return result + (index ? ' ' : '') + upperFirst(word); - }); - - /** - * Checks if `string` starts with the given target string. - * - * @static - * @memberOf _ - * @since 3.0.0 - * @category String - * @param {string} [string=''] The string to inspect. - * @param {string} [target] The string to search for. - * @param {number} [position=0] The position to search from. - * @returns {boolean} Returns `true` if `string` starts with `target`, - * else `false`. - * @example - * - * _.startsWith('abc', 'a'); - * // => true - * - * _.startsWith('abc', 'b'); - * // => false - * - * _.startsWith('abc', 'b', 1); - * // => true - */ - function startsWith(string, target, position) { - string = toString(string); - position = position == null - ? 0 - : baseClamp(toInteger(position), 0, string.length); - - target = baseToString(target); - return string.slice(position, position + target.length) == target; - } - - /** - * Creates a compiled template function that can interpolate data properties - * in "interpolate" delimiters, HTML-escape interpolated data properties in - * "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data - * properties may be accessed as free variables in the template. If a setting - * object is given, it takes precedence over `_.templateSettings` values. 
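An illustrative aside on the precedence rule just described (per-call options win over `_.templateSettings`); the `interpolate` override below follows the standard lodash settings API.

```js
// Change the global "interpolate" delimiter...
_.templateSettings.interpolate = /{{([\s\S]+?)}}/g;
var compiled = _.template('hello {{ user }}!');
compiled({ 'user': 'mustache' }); // => 'hello mustache!'

// ...but a per-call option still takes precedence:
var erb = _.template('hello <%= user %>!', { 'interpolate': /<%=([\s\S]+?)%>/g });
erb({ 'user': 'fred' }); // => 'hello fred!'
```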
- *
- * **Note:** In the development build `_.template` utilizes
- * [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl)
- * for easier debugging.
- *
- * For more information on precompiling templates see
- * [lodash's custom builds documentation](https://lodash.com/custom-builds).
- *
- * For more information on Chrome extension sandboxes see
- * [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval).
- *
- * @static
- * @since 0.1.0
- * @memberOf _
- * @category String
- * @param {string} [string=''] The template string.
- * @param {Object} [options={}] The options object.
- * @param {RegExp} [options.escape=_.templateSettings.escape]
- *  The HTML "escape" delimiter.
- * @param {RegExp} [options.evaluate=_.templateSettings.evaluate]
- *  The "evaluate" delimiter.
- * @param {Object} [options.imports=_.templateSettings.imports]
- *  An object to import into the template as free variables.
- * @param {RegExp} [options.interpolate=_.templateSettings.interpolate]
- *  The "interpolate" delimiter.
- * @param {string} [options.sourceURL='lodash.templateSources[n]']
- *  The sourceURL of the compiled template.
- * @param {string} [options.variable='obj']
- *  The data object variable name.
- * @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
- * @returns {Function} Returns the compiled template function.
- * @example
- *
- * // Use the "interpolate" delimiter to create a compiled template.
- * var compiled = _.template('hello <%= user %>!');
- * compiled({ 'user': 'fred' });
- * // => 'hello fred!'
- *
- * // Use the HTML "escape" delimiter to escape data property values.
- * var compiled = _.template('<b><%- value %></b>');
- * compiled({ 'value': '<script>' });
- * // => '<b>&lt;script&gt;</b>'
- {{content-for "body-footer"}}
- {{content-for "test-body-footer"}}
+ {{content-for "body-footer"}} {{content-for "test-body-footer"}}
diff --git a/ui/tests/integration/components/alert-inline-test.js b/ui/tests/integration/components/alert-inline-test.js
index 6397c165da9f..a992257ede3f 100644
--- a/ui/tests/integration/components/alert-inline-test.js
+++ b/ui/tests/integration/components/alert-inline-test.js
@@ -1,71 +1,91 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
 import { module, test } from 'qunit';
 import { setupRenderingTest } from 'ember-qunit';
 import { render, settled, find, waitUntil } from '@ember/test-helpers';
 import hbs from 'htmlbars-inline-precompile';
+const SHARED_STYLES = {
+  success: {
+    icon: 'check-circle-fill',
+    class: 'hds-alert--color-success',
+  },
+  warning: {
+    icon: 'alert-triangle-fill',
+    class: 'hds-alert--color-warning',
+  },
+};
 module('Integration | Component | alert-inline', function (hooks) {
   setupRenderingTest(hooks);

-  hooks.beforeEach(function () {
-    this.set('message', 'some very important alert');
-  });
+  test('it renders alert message for each @color arg', async function (assert) {
+    const COLORS = {
+      ...SHARED_STYLES,
+      neutral: {
+        icon: 'info-fill',
+        class: 'hds-alert--color-neutral',
+      },
+      highlight: {
+        icon: 'info-fill',
+        class: 'hds-alert--color-highlight',
+      },
+      critical: {
+        icon: 'alert-diamond-fill',
+        class: 'hds-alert--color-critical',
+      },
+    };

-  test('it renders alert message with correct class args', async function (assert) {
-    await render(hbs`
-
-    `);
+    const { neutral } = COLORS; // default color
+    await render(hbs``);
     assert.dom('[data-test-inline-error-message]').hasText('some very important alert');
-    assert
-      .dom('[data-test-inline-alert]')
-      .hasAttribute('class', 'message-inline padding-top is-marginless size-small');
-  });
-
-  test('it yields to block text', async function (assert) {
-    await render(hbs`
-
-      A much more important alert
-
-    `);
-    assert.dom('[data-test-inline-error-message]').hasText('A much more important alert');
-  });
+    assert.dom(`[data-test-icon="${neutral.icon}"]`).exists('renders default icon');
+    assert.dom('[data-test-inline-alert]').hasClass(neutral.class, 'renders default class');

-  test('it renders correctly for type=danger', async function (assert) {
-    this.set('type', 'danger');
-    await render(hbs`
-
-    `);
-    assert
-      .dom('[data-test-inline-error-message]')
-      .hasAttribute('class', 'has-text-danger', 'has danger text');
-    assert.dom('[data-test-icon="x-square-fill"]').exists('danger icon exists');
+    // assert each @color arg value renders its expected icon and class
+    for (const type in COLORS) {
+      this.color = type;
+      const color = COLORS[type];
+      await render(hbs``);
+      assert.dom(`[data-test-icon="${color.icon}"]`).exists(`@color="${type}" renders icon: ${color.icon}`);
+      assert
+        .dom('[data-test-inline-alert]')
+        .hasClass(color.class, `@color="${type}" renders class: ${color.class}`);
+    }
  });

-  test('it renders correctly for type=warning', async function (assert) {
-    this.set('type', 'warning');
-    await render(hbs`
-
-    `);
-    assert.dom('[data-test-inline-error-message]').doesNotHaveAttribute('class', 'does not have styled text');
-    assert.dom('[data-test-icon="alert-triangle-fill"]').exists('warning icon exists');
+  test('it renders alert color for each deprecated @type arg', async function (assert) {
+    const OLD_TYPES = {
+      ...SHARED_STYLES,
+      info: {
+        icon: 'info-fill',
+        class: 'hds-alert--color-highlight',
+      },
+      danger: {
+        icon: 'alert-diamond-fill',
+        class: 'hds-alert--color-critical',
+      },
+    };
+    // assert deprecated @type arg values map to expected color
+    for (const type in OLD_TYPES) {
+      this.type = type;
+      const color = OLD_TYPES[type];
+      await render(hbs``);
+      assert
+        .dom(`[data-test-icon="${color.icon}"]`)
+        .exists(`deprecated @type="${type}" renders icon: ${color.icon}`);
+      assert
+        .dom('[data-test-inline-alert]')
+        .hasClass(color.class, `deprecated @type="${type}" renders class:
${color.class}`); + } }); test('it mimics loading when message changes', async function (assert) { + this.message = 'some very important alert'; await render(hbs` - + `); assert .dom('[data-test-inline-error-message]') diff --git a/ui/tests/integration/components/app-footer-test.js b/ui/tests/integration/components/app-footer-test.js new file mode 100644 index 000000000000..cc9ee5555101 --- /dev/null +++ b/ui/tests/integration/components/app-footer-test.js @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const selectors = { + versionDisplay: '[data-test-footer-version]', + upgradeLink: '[data-test-footer-upgrade-link]', + docsLink: '[data-test-footer-documentation-link]', +}; + +module('Integration | Component | app-footer', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.versionSvc = this.owner.lookup('service:version'); + }); + + test('it renders a sane default', async function (assert) { + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault', 'Vault without version by default'); + assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); + + test('it renders for community version', async function (assert) { + this.versionSvc.version = '1.15.1'; + this.versionSvc.type = 'community'; + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1', 'Vault shows version when available'); + assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); + test('it renders for ent version', async function (assert) { + this.versionSvc.version = '1.15.1+hsm'; + this.versionSvc.type = 'enterprise'; + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1+hsm', 'shows version when available'); + assert.dom(selectors.upgradeLink).doesNotExist('upgrade link not shown'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); +}); diff --git a/ui/tests/integration/components/auth-config-form/options-test.js b/ui/tests/integration/components/auth-config-form/options-test.js index 36a9b6f9c0e4..c55b16a97c46 100644 --- a/ui/tests/integration/components/auth-config-form/options-test.js +++ b/ui/tests/integration/components/auth-config-form/options-test.js @@ -1,53 +1,201 @@ -import { resolve } from 'rsvp'; -import EmberObject from '@ember/object'; +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
 import { module, test } from 'qunit';
 import { setupRenderingTest } from 'ember-qunit';
-import { render, settled } from '@ember/test-helpers';
+import { setupMirage } from 'ember-cli-mirage/test-support';
+import { click, fillIn, render } from '@ember/test-helpers';
 import hbs from 'htmlbars-inline-precompile';
-import sinon from 'sinon';
-import { create } from 'ember-cli-page-object';
-import authConfigForm from 'vault/tests/pages/components/auth-config-form/options';
+import { SELECTORS } from 'vault/tests/helpers/general-selectors';
+import { methods } from 'vault/helpers/mountable-auth-methods';

-const component = create(authConfigForm);
+const userLockoutSupported = ['approle', 'ldap', 'userpass'];
+const userLockoutUnsupported = methods()
+  .map((m) => m.type)
+  .filter((m) => !userLockoutSupported.includes(m));

 module('Integration | Component | auth-config-form options', function (hooks) {
   setupRenderingTest(hooks);
+  setupMirage(hooks);

   hooks.beforeEach(function () {
     this.owner.lookup('service:flash-messages').registerTypes(['success']);
     this.router = this.owner.lookup('service:router');
+    this.store = this.owner.lookup('service:store');
+    this.createModel = (path, type) => {
+      this.model = this.store.createRecord('auth-method', { path, type });
+      this.model.set('config', this.store.createRecord('mount-config'));
+    };
+  });
+
+  for (const type of userLockoutSupported) {
+    test(`it submits data correctly for ${type} method (supports user_lockout_config)`, async function (assert) {
+      assert.expect(3);
+      const path = `my-${type}-auth/`;
+      this.createModel(path, type);
+
+      this.router.reopen({
+        transitionTo() {
+          return {
+            followRedirects() {
+              assert.ok(true, `saving ${type} calls transitionTo on save`);
+            },
+          };
+        },
+      });
+
+      this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => {
+        const payload = JSON.parse(req.requestBody);
+        const expected = {
+          default_lease_ttl: '30s',
+          listing_visibility: 'unauth',
+          token_type: 'default-batch',
+          user_lockout_config: {
+            lockout_threshold: '7',
+            lockout_duration: '600s',
+            lockout_counter_reset: '5s',
+            lockout_disable: true,
+          },
+        };
+        assert.propEqual(payload, expected, `${type} method payload contains tune parameters`);
+        return { payload };
+      });
+      await render(hbs``);
+
+      assert.dom('[data-test-user-lockout-section]').hasText('User lockout configuration');
+
+      await click(SELECTORS.inputByAttr('config.listingVisibility'));
+      await fillIn(SELECTORS.inputByAttr('config.tokenType'), 'default-batch');
+
+      await click(SELECTORS.ttl.toggle('Default Lease TTL'));
+      await fillIn(SELECTORS.ttl.input('Default Lease TTL'), '30');
+
+      await fillIn(SELECTORS.inputByAttr('config.lockoutThreshold'), '7');
+
+      await click(SELECTORS.ttl.toggle('Lockout duration'));
+      await fillIn(SELECTORS.ttl.input('Lockout duration'), '10');
+      await fillIn(
+        `${SELECTORS.inputByAttr('config.lockoutDuration')} ${SELECTORS.selectByAttr('ttl-unit')}`,
+        'm'
+      );
+      await click(SELECTORS.ttl.toggle('Lockout counter reset'));
+      await fillIn(SELECTORS.ttl.input('Lockout counter reset'), '5');
+
+      await click(SELECTORS.inputByAttr('config.lockoutDisable'));
+
+      await click('[data-test-save-config]');
+    });
+  }
+
+  for (const type of userLockoutUnsupported) {
+    if (type === 'token') continue; // token is covered by a separate test below because it does not include the tokenType field
+
+    test(`it submits data correctly for ${type} auth method`, async function (assert) {
+      assert.expect(7);
+
+      const path = `my-${type}-auth/`;
+
this.createModel(path, type); + + this.router.reopen({ + transitionTo() { + return { + followRedirects() { + assert.ok(true, `saving ${type} calls transitionTo on save`); + }, + }; + }, + }); + + this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => { + const payload = JSON.parse(req.requestBody); + const expected = { + default_lease_ttl: '30s', + listing_visibility: 'unauth', + token_type: 'default-batch', + }; + assert.propEqual(payload, expected, `${type} method payload contains tune parameters`); + return { payload }; + }); + await render(hbs``); + + assert + .dom('[data-test-user-lockout-section]') + .doesNotExist(`${type} method does not render user lockout section`); + + await click(SELECTORS.inputByAttr('config.listingVisibility')); + await fillIn(SELECTORS.inputByAttr('config.tokenType'), 'default-batch'); + + await click(SELECTORS.ttl.toggle('Default Lease TTL')); + await fillIn(SELECTORS.ttl.input('Default Lease TTL'), '30'); + + assert + .dom(SELECTORS.inputByAttr('config.lockoutThreshold')) + .doesNotExist(`${type} method does not render lockout threshold`); + assert + .dom(SELECTORS.ttl.toggle('Lockout duration')) + .doesNotExist(`${type} method does not render lockout duration `); + assert + .dom(SELECTORS.ttl.toggle('Lockout counter reset')) + .doesNotExist(`${type} method does not render lockout counter reset`); + assert + .dom(SELECTORS.inputByAttr('config.lockoutDisable')) + .doesNotExist(`${type} method does not render lockout disable`); + + await click('[data-test-save-config]'); + }); + } + + test('it submits data correctly for token auth method', async function (assert) { + assert.expect(8); + const type = 'token'; + const path = `my-${type}-auth/`; + this.createModel(path, type); + this.router.reopen({ transitionTo() { return { followRedirects() { - return resolve(); + assert.ok(true, `saving token calls transitionTo on save`); }, }; }, - replaceWith() { - return resolve(); - }, }); - }); - test('it submits data correctly', async function (assert) { - assert.expect(1); - const model = EmberObject.create({ - tune() { - return resolve(); - }, - config: { - serialize() { - return {}; - }, - }, - }); - sinon.spy(model.config, 'serialize'); - this.set('model', model); - await render(hbs`{{auth-config-form/options model=this.model}}`); - component.save(); - return settled().then(() => { - assert.ok(model.config.serialize.calledOnce); + this.server.post(`sys/mounts/auth/${path}/tune`, (schema, req) => { + const payload = JSON.parse(req.requestBody); + const expected = { + default_lease_ttl: '30s', + listing_visibility: 'unauth', + }; + assert.propEqual(payload, expected, `${type} method payload contains tune parameters`); + return { payload }; }); + await render(hbs``); + + assert + .dom(SELECTORS.inputByAttr('config.tokenType')) + .doesNotExist('does not render tokenType for token auth method'); + + await click(SELECTORS.inputByAttr('config.listingVisibility')); + await click(SELECTORS.ttl.toggle('Default Lease TTL')); + await fillIn(SELECTORS.ttl.input('Default Lease TTL'), '30'); + + assert.dom('[data-test-user-lockout-section]').doesNotExist('token does not render user lockout section'); + assert + .dom(SELECTORS.inputByAttr('config.lockoutThreshold')) + .doesNotExist('token method does not render lockout threshold'); + assert + .dom(SELECTORS.ttl.toggle('Lockout duration')) + .doesNotExist('token method does not render lockout duration '); + assert + .dom(SELECTORS.ttl.toggle('Lockout counter reset')) + .doesNotExist('token method does not render lockout 
counter reset'); + assert + .dom(SELECTORS.inputByAttr('config.lockoutDisable')) + .doesNotExist('token method does not render lockout disable'); + + await click('[data-test-save-config]'); }); }); diff --git a/ui/tests/integration/components/auth-form-test.js b/ui/tests/integration/components/auth-form-test.js index 9d2084931ea2..1f589229c684 100644 --- a/ui/tests/integration/components/auth-form-test.js +++ b/ui/tests/integration/components/auth-form-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { later, _cancelTimers as cancelTimers } from '@ember/runloop'; import EmberObject from '@ember/object'; import { resolve } from 'rsvp'; @@ -10,6 +15,7 @@ import sinon from 'sinon'; import Pretender from 'pretender'; import { create } from 'ember-cli-page-object'; import authForm from '../../pages/components/auth-form'; +import { validate } from 'uuid'; const component = create(authForm); @@ -37,6 +43,7 @@ module('Integration | Component | auth form', function (hooks) { hooks.beforeEach(function () { this.owner.register('service:router', routerService); this.router = this.owner.lookup('service:router'); + this.onSuccess = sinon.spy(); }); const CSP_ERR_TEXT = `Error This is a standby Vault node but can't communicate with the active node via request forwarding. Sign in at the active node to use the Vault UI.`; @@ -87,7 +94,7 @@ module('Integration | Component | auth form', function (hooks) { this.set('cluster', EmberObject.create({})); this.set('selectedAuth', 'token'); await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); - // ARG TODO research and see if adapter errors changed, but null used to be Bad Request + // returns null because test does not return details of failed network request. On the app it will return the details of the error instead of null. return component.login().then(() => { assert.strictEqual(component.errorText, 'Error Authentication failed: null'); server.shutdown(); @@ -226,7 +233,9 @@ module('Integration | Component | auth form', function (hooks) { const wrappedToken = '54321'; this.set('wrappedToken', wrappedToken); this.set('cluster', EmberObject.create({})); - await render(hbs``); + await render( + hbs`` + ); later(() => cancelTimers(), 50); await settled(); assert.strictEqual( @@ -314,4 +323,35 @@ module('Integration | Component | auth form', function (hooks) { server.shutdown(); }); + + test('it should set nonce value as uuid for okta method type', async function (assert) { + assert.expect(1); + + const server = new Pretender(function () { + this.post('/v1/auth/okta/login/foo', (req) => { + const { nonce } = JSON.parse(req.requestBody); + assert.true(validate(nonce), 'Nonce value passed as uuid for okta login'); + return [ + 200, + { 'content-type': 'application/json' }, + JSON.stringify({ + auth: { + client_token: '12345', + }, + }), + ]; + }); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + + this.set('cluster', EmberObject.create({})); + await render(hbs``); + + await component.selectMethod('okta'); + await component.username('foo'); + await component.password('bar'); + await component.login(); + + server.shutdown(); + }); }); diff --git a/ui/tests/integration/components/auth-jwt-test.js b/ui/tests/integration/components/auth-jwt-test.js index fb4e1891a737..55302a7bca7a 100644 --- a/ui/tests/integration/components/auth-jwt-test.js +++ b/ui/tests/integration/components/auth-jwt-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { _cancelTimers as cancelTimers } from '@ember/runloop'; import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; @@ -133,7 +138,7 @@ module('Integration | Component | auth jwt', function (hooks) { await component.role('okta'); // 1 for initial render, 1 for each time role changed = 3 - assert.strictEqual(this.server.handledRequests.length, 4, 'fetches the auth_url when the path changes'); + assert.strictEqual(this.server.handledRequests.length, 3, 'fetches the auth_url when the path changes'); assert.strictEqual( component.loginButtonText, 'Sign in with Okta', diff --git a/ui/tests/integration/components/autocomplete-input-test.js b/ui/tests/integration/components/autocomplete-input-test.js index 71399067cb0d..ab24c5467dd7 100644 --- a/ui/tests/integration/components/autocomplete-input-test.js +++ b/ui/tests/integration/components/autocomplete-input-test.js @@ -1,12 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { click, fillIn, triggerEvent, typeIn, render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; module('Integration | Component | autocomplete-input', function (hooks) { setupRenderingTest(hooks); test('it should render label', async function (assert) { + // TODO: make the input accessible when no label provided + setRunOptions({ + rules: { + label: { enabled: false }, + }, + }); await render( hbs` `); - - assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); - assert.dom('.tooltip').doesNotExist('tooltip does not exist when disabled is false'); - await click('[data-test-mount-type="aws"]'); - assert.ok(spy.calledOnce, 'calls the radio change function when option clicked'); - }); - - test('it renders correctly when disabled', async function (assert) { - const spy = sinon.spy(); - this.set('onRadioChange', spy); - await render(hbs``); - - assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); - assert.dom('.ember-basic-dropdown-trigger').exists('tooltip exists'); - await click('[data-test-mount-type="aws"]'); - assert.ok(spy.notCalled, 'does not call the radio change function when option is clicked'); - }); -}); diff --git a/ui/tests/integration/components/calendar-widget-test.js b/ui/tests/integration/components/calendar-widget-test.js index f10134c45c92..649200b22f8c 100644 --- a/ui/tests/integration/components/calendar-widget-test.js +++ b/ui/tests/integration/components/calendar-widget-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render, click } from '@ember/test-helpers'; @@ -5,136 +10,236 @@ import sinon from 'sinon'; import hbs from 'htmlbars-inline-precompile'; import calendarDropdown from 'vault/tests/pages/components/calendar-widget'; import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; -import { subYears } from 'date-fns'; +import { subMonths, subYears } from 'date-fns'; +import timestamp from 'core/utils/timestamp'; module('Integration | Component | calendar-widget', function (hooks) { setupRenderingTest(hooks); + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); hooks.beforeEach(function () { - const CURRENT_YEAR = new Date().getFullYear(); - const PREVIOUS_YEAR = subYears(new Date(), 1).getFullYear(); - this.set('currentYear', CURRENT_YEAR); - this.set('previousYear', PREVIOUS_YEAR); + const CURRENT_DATE = timestamp.now(); + this.set('currentDate', CURRENT_DATE); + this.set('calendarStartDate', subMonths(CURRENT_DATE, 12)); + this.set('calendarEndDate', CURRENT_DATE); + this.set('startTimestamp', subMonths(CURRENT_DATE, 12).toISOString()); + this.set('endTimestamp', CURRENT_DATE.toISOString()); this.set('handleClientActivityQuery', sinon.spy()); - this.set('handleCurrentBillingPeriod', sinon.spy()); - this.set('arrayOfMonths', ARRAY_OF_MONTHS); - this.set('endTimeFromResponse', [CURRENT_YEAR, 0]); + }); + hooks.after(function () { + timestamp.now.restore(); }); - test('it renders and can open the calendar view', async function (assert) { + test('it renders and disables correct months when start date is 12 months ago', async function (assert) { + assert.expect(14); await render(hbs` `); - + assert + .dom('[data-test-calendar-widget-trigger]') + .hasText(`Apr 2017 - Apr 2018`, 'renders and formats start and end dates'); await calendarDropdown.openCalendar(); assert.ok(calendarDropdown.showsCalendar, 'renders the calendar component'); + // assert months in current year are disabled/enabled correctly + const enabledMonths = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (enabledMonths.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); + } + }); }); - test('it does not allow a user to click to a future year but does allow a user to click to previous year', async function (assert) { + test('it renders and disables months before start timestamp', async function (assert) { await render(hbs` `); await calendarDropdown.openCalendar(); - assert.dom('[data-test-future-year]').isDisabled('Future year is disabled'); - + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); await calendarDropdown.clickPreviousYear(); - assert.dom('[data-test-display-year]').hasText(this.previousYear.toString(), 'shows the previous year'); assert - .dom('[data-test-calendar-month="January"]') - .hasClass( - 'is-readOnly', - `January ${this.previousYear} is disabled because it comes before startTimeDisplay` - ); + .dom('[data-test-display-year]') + .hasText(`${subYears(this.currentDate, 1).getFullYear()}`, 'shows the previous year'); + assert.dom('[data-test-previous-year]').isDisabled('disables previous year'); + + // assert months in previous year are disabled/enabled correctly + const disabledMonths = 
['January', 'February', 'March'];
+    ARRAY_OF_MONTHS.forEach(function (month) {
+      if (disabledMonths.includes(month)) {
+        assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`);
+      } else {
+        assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`);
+      }
+    });
   });

-  test('it disables the current month', async function (assert) {
+  test('it calls parent callback with correct arg when clicking "Current billing period"', async function (assert) {
     await render(hbs`

     `);
-    await calendarDropdown.openCalendar();
-    const month = this.arrayOfMonths[new Date().getMonth()];
-    assert
-      .dom(`[data-test-calendar-month="${month}"]`)
-      .hasClass('is-readOnly', `${month} ${this.currentYear} is disabled`);
-    // The component also disables all months after the current one, but this
-    // is tricky to test since it's based on browser time, so the behavior
-    // would be different in december than other months
+    await calendarDropdown.menuToggle();
+    await calendarDropdown.clickCurrentBillingPeriod();
+    assert.propEqual(
+      this.handleClientActivityQuery.args[0][0],
+      { dateType: 'reset' },
+      'it calls parent function with reset dateType'
+    );
   });

-  test('it allows you to reset the billing period', async function (assert) {
+  test('it calls parent callback with correct arg when clicking "Current month"', async function (assert) {
     await render(hbs`
-
-    `);
+
+    `);
     await calendarDropdown.menuToggle();
-    await calendarDropdown.clickCurrentBillingPeriod();
-    assert.ok(this.handleCurrentBillingPeriod.calledOnce, 'it calls the parents handleCurrentBillingPeriod');
+    await calendarDropdown.clickCurrentMonth();
+    assert.propEqual(
+      this.handleClientActivityQuery.args[0][0],
+      { dateType: 'currentMonth' },
+      'it calls parent function with currentMonth dateType'
+    );
   });

-  test('it passes the appropriate data to the handleCurrentBillingPeriod when a date is selected', async function (assert) {
+  test('it calls parent callback with correct arg when selecting a month', async function (assert) {
     await render(hbs`
-
-    `);
+
+    `);
+    await calendarDropdown.openCalendar();
+    await click(`[data-test-calendar-month="April"]`);
+    assert.propEqual(
+      this.handleClientActivityQuery.lastCall.lastArg,
+      {
+        dateType: 'endDate',
+        monthIdx: 3,
+        monthName: 'April',
+        year: 2018,
+      },
+      'it calls parent function with end date (current) month/year'
+    );
+    await calendarDropdown.openCalendar();
     await calendarDropdown.clickPreviousYear();
-    await click('[data-test-calendar-month="October"]'); // select endTime of October 2021
-    assert.ok(this.handleClientActivityQuery.calledOnce, 'it calls the parents handleClientActivityQuery');
-    assert.ok(
-      this.handleClientActivityQuery.calledWith(9, this.previousYear, 'endTime'),
-      'Passes the month as an index, year and date type to the parent'
+    await click(`[data-test-calendar-month="May"]`);
+    assert.propEqual(
+      this.handleClientActivityQuery.lastCall.lastArg,
+      {
+        dateType: 'endDate',
+        monthIdx: 4,
+        monthName: 'May',
+        year: 2017,
+      },
+      'it calls parent function with selected start date month/year'
     );
   });

-  test('it displays the year from endTimeDisplay when opened', async function (assert) {
-    this.set('endTimeFromResponse', [this.previousYear, 11]);
+  test('it disables correct months when start date 6 months ago', async function (assert) {
+    this.set('calendarStartDate', subMonths(this.currentDate, 6)); // Nov 3, 2017
+    this.set('startTimestamp', subMonths(this.currentDate, 6).toISOString());
     await render(hbs`
-
-    `);
+
+    `);
+
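For context, a minimal sketch of the clock-pinning these calendar assertions rely on, mirroring the hooks shown at the top of this test file: the shared `timestamp.now` util is stubbed before the module runs so the month-disabling logic is deterministic, then restored afterwards.

```js
// hooks.before: pin "now" for every test in the module
sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30'));
// hooks.after: undo the stub so other modules see the real clock
timestamp.now.restore();
```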
await calendarDropdown.openCalendar(); - assert - .dom('[data-test-display-year]') - .hasText(this.previousYear.toString(), 'Shows year from the end response'); + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); + + // Check start year disables correct months + await calendarDropdown.clickPreviousYear(); + assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); + const prevYearEnabled = ['October', 'November', 'December']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (prevYearEnabled.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is read only`); + } + }); + + // Check end year disables correct months + await click('[data-test-next-year]'); + const currYearEnabled = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (currYearEnabled.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); + } + }); + }); + + test('it disables correct months when start date 36 months ago', async function (assert) { + this.set('calendarStartDate', subMonths(this.currentDate, 36)); // April 3 2015 + this.set('startTimestamp', subMonths(this.currentDate, 36).toISOString()); + await render(hbs` + + `); + + await calendarDropdown.openCalendar(); + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); + for (const year of [2017, 2016, 2015]) { + await calendarDropdown.clickPreviousYear(); + assert.dom('[data-test-display-year]').hasText(year.toString()); + } + + assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); + assert.dom('[data-test-next-year]').isEnabled('next year is enabled'); + + const disabledMonths = ['January', 'February', 'March']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (disabledMonths.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); + } + }); + + await click('[data-test-next-year]'); + ARRAY_OF_MONTHS.forEach(function (month) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled for 2016`); + }); + await click('[data-test-next-year]'); + ARRAY_OF_MONTHS.forEach(function (month) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled for 2017`); + }); + await click('[data-test-next-year]'); + + const enabledMonths = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (enabledMonths.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); + } + }); }); }); diff --git a/ui/tests/integration/components/certificate-card-test.js b/ui/tests/integration/components/certificate-card-test.js new file mode 100644 index 000000000000..668004765494 --- /dev/null +++ b/ui/tests/integration/components/certificate-card-test.js @@ -0,0 +1,86 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { rootPem, rootDer } from 'vault/tests/helpers/pki/values'; + +const SELECTORS = { + label: '[data-test-certificate-label]', + value: '[data-test-certificate-value]', + icon: '[data-test-certificate-icon]', + copyButton: '[data-test-copy-button]', + copyIcon: '[data-test-icon="clipboard-copy"]', +}; + +module('Integration | Component | certificate-card', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + await render(hbs``); + + assert.dom(SELECTORS.label).hasNoText('There is no label because there is no value'); + assert.dom(SELECTORS.value).hasNoText('There is no value because none was provided'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyIcon).exists('The copy icon renders'); + }); + + test('it renders with an example PEM Certificate', async function (assert) { + this.certificate = rootPem; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute('data-test-copy-button', this.certificate, 'copy value is the same as data'); + }); + + test('it renders with an example DER Certificate', async function (assert) { + this.certificate = rootDer; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('DER Format', 'The label text is DER Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute('data-test-copy-button', this.certificate, 'copy value is the same as data'); + }); + + test('it renders with the PEM Format label regardless of the value provided when @isPem is true', async function (assert) { + this.certificate = 'example-certificate-text'; + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.certificate, 'The data rendered is correct'); + }); + + test('it renders with an example CA Chain', async function (assert) { + this.caChain = [ + '-----BEGIN CERTIFICATE-----\nMIIDIDCCA...\n-----END CERTIFICATE-----\n', + '-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBA...\n-----END RSA PRIVATE KEY-----\n', + ]; + + await render(hbs``); + + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(this.caChain.join(','), 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + assert + .dom(SELECTORS.copyButton) + .hasAttribute( + 'data-test-copy-button', + this.caChain.join('\n'), + 'copy value is array converted to a string' + ); + }); +}); diff --git a/ui/tests/integration/components/charts/vertical-bar-basic-test.js b/ui/tests/integration/components/charts/vertical-bar-basic-test.js new file mode 100644 index 
000000000000..fbb968fd65e2 --- /dev/null +++ b/ui/tests/integration/components/charts/vertical-bar-basic-test.js @@ -0,0 +1,119 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const EXAMPLE = [ + { + month: '7/22', + timestamp: '2022-07-01T00:00:00-07:00', + clients: null, + entity_clients: null, + non_entity_clients: null, + secret_syncs: null, + }, + { + month: '8/22', + timestamp: '2022-08-01T00:00:00-07:00', + clients: 6440, + entity_clients: 1471, + non_entity_clients: 4389, + secret_syncs: 4207, + }, + { + month: '9/22', + timestamp: '2022-09-01T00:00:00-07:00', + clients: 9583, + entity_clients: 149, + non_entity_clients: 20, + secret_syncs: 5802, + }, +]; + +module('Integration | Component | clients/charts/vertical-bar-basic', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.data = EXAMPLE; + }); + + test('it renders when some months have no data', async function (assert) { + assert.expect(10); + await render(hbs``); + assert.dom('[data-test-sync-bar-chart]').exists('renders chart container'); + assert.dom('[data-test-vertical-bar]').exists({ count: 3 }, 'renders 3 vertical bars'); + + // Tooltips + await triggerEvent('[data-test-interactive-area="9/22"]', 'mouseover'); + assert.dom('[data-test-tooltip]').exists({ count: 1 }, 'renders tooltip on mouseover'); + assert.dom('[data-test-tooltip-count]').hasText('5,802 secret syncs', 'tooltip has exact count'); + assert.dom('[data-test-tooltip-month]').hasText('September 2022', 'tooltip has humanized month and year'); + await triggerEvent('[data-test-interactive-area="9/22"]', 'mouseout'); + assert.dom('[data-test-tooltip]').doesNotExist('removes tooltip on mouseout'); + await triggerEvent('[data-test-interactive-area="7/22"]', 'mouseover'); + assert + .dom('[data-test-tooltip-count]') + .hasText('No data', 'renders tooltip with no data message when no data is available'); + // Axis + assert.dom('[data-test-x-axis]').hasText('7/22 8/22 9/22', 'renders x-axis labels'); + assert.dom('[data-test-y-axis]').hasText('0 2k 4k', 'renders y-axis labels'); + // Table + assert.dom('[data-test-underlying-data]').doesNotExist('does not render underlying data by default'); + }); + + // 0 is different than null (no data) + test('it renders when all months have 0 clients', async function (assert) { + assert.expect(9); + + this.data = [ + { + month: '6/22', + timestamp: '2022-06-01T00:00:00-07:00', + clients: 0, + entity_clients: 0, + non_entity_clients: 0, + secret_syncs: 0, + }, + { + month: '7/22', + timestamp: '2022-07-01T00:00:00-07:00', + clients: 0, + entity_clients: 0, + non_entity_clients: 0, + secret_syncs: 0, + }, + ]; + await render(hbs``); + + assert.dom('[data-test-sync-bar-chart]').exists('renders chart container'); + assert.dom('[data-test-vertical-bar]').exists({ count: 2 }, 'renders 2 vertical bars'); + assert.dom('[data-test-vertical-bar]').hasAttribute('height', '0', 'rectangles have 0 height'); + // Tooltips + await triggerEvent('[data-test-interactive-area="6/22"]', 'mouseover'); + assert.dom('[data-test-tooltip]').exists({ count: 1 }, 'renders tooltip on mouseover'); + assert.dom('[data-test-tooltip-count]').hasText('0 secret syncs', 'tooltip has exact count'); + assert.dom('[data-test-tooltip-month]').hasText('June 2022', 'tooltip has humanized month 
and year'); + await triggerEvent('[data-test-interactive-area="6/22"]', 'mouseout'); + assert.dom('[data-test-tooltip]').doesNotExist('removes tooltip on mouseout'); + // Axis + assert.dom('[data-test-x-axis]').hasText('6/22 7/22', 'renders x-axis labels'); + assert.dom('[data-test-y-axis]').hasText('0 1 2 3 4', 'renders y-axis labels'); + }); + + test('it renders underlying data', async function (assert) { + assert.expect(3); + await render( + hbs`` + ); + assert.dom('[data-test-sync-bar-chart]').exists('renders chart container'); + assert.dom('[data-test-underlying-data]').exists('renders underlying data when showTable=true'); + assert + .dom('[data-test-underlying-data] thead') + .hasText('Month Count of secret syncs', 'renders correct table headers'); + }); +}); diff --git a/ui/tests/integration/components/checkbox-grid-test.js b/ui/tests/integration/components/checkbox-grid-test.js index b1fb756b4e60..f887c29370e8 100644 --- a/ui/tests/integration/components/checkbox-grid-test.js +++ b/ui/tests/integration/components/checkbox-grid-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render, click } from '@ember/test-helpers'; diff --git a/ui/tests/integration/components/chevron-test.js b/ui/tests/integration/components/chevron-test.js index e16d0049a8e7..fc07e184431e 100644 --- a/ui/tests/integration/components/chevron-test.js +++ b/ui/tests/integration/components/chevron-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; @@ -23,8 +28,9 @@ module('Integration | Component | chevron', function (hooks) { const promise = waitForError(); render(hbs``); const err = await promise; + assert.ok( - err.message.includes('The direction property of {}); + this.set('onSubmit', () => {}); + }); + + test('it renders correctly', async function (assert) { + await render( + hbs`` + ); + + assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists'); + assert.dom(CHOOSE_PGP.description).hasText('my custom form text', 'uses custom form text'); + await click(CHOOSE_PGP.toggle); + assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled'); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled'); + await click(CHOOSE_PGP.useKeyButton); + assert + .dom(CHOOSE_PGP.confirm) + .hasText( + 'Below is the base-64 encoded PGP Key that will be used. 
Click the "Do it" button to proceed.', + 'Incorporates button text in confirmation' + ); + assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents'); + assert.dom(CHOOSE_PGP.submit).hasText('Do it', 'uses passed buttonText'); + await click(CHOOSE_PGP.submit); + }); + + test('it calls onSubmit correctly', async function (assert) { + const submitSpy = sinon.spy(); + this.set('onSubmit', submitSpy); + await render( + hbs`` + ); + + assert.dom(CHOOSE_PGP.begin).exists('PGP key selection form exists'); + assert + .dom(CHOOSE_PGP.description) + .hasText('Choose a PGP Key from your computer or paste the contents of one in the form below.'); + await click(CHOOSE_PGP.toggle); + assert.dom(CHOOSE_PGP.useKeyButton).isDisabled('use pgp button is disabled'); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + assert.dom(CHOOSE_PGP.useKeyButton).isNotDisabled('use pgp button is no longer disabled'); + await click(CHOOSE_PGP.useKeyButton); + assert + .dom(CHOOSE_PGP.confirm) + .hasText( + 'Below is the base-64 encoded PGP Key that will be used. Click the "Submit" button to proceed.', + 'Confirmation text has buttonText' + ); + assert.dom(CHOOSE_PGP.base64Output).hasText('base64-pgp-key', 'Shows PGP key contents'); + assert.dom(CHOOSE_PGP.submit).hasText('Submit', 'uses passed buttonText'); + await click(CHOOSE_PGP.submit); + assert.ok(submitSpy.calledOnceWith('base64-pgp-key')); + }); + + test('it calls cancel on cancel', async function (assert) { + const cancelSpy = sinon.spy(); + this.set('onCancel', cancelSpy); + await render( + hbs`` + ); + + await click(CHOOSE_PGP.toggle); + await fillIn(CHOOSE_PGP.pgpTextArea, 'base64-pgp-key'); + await click(CHOOSE_PGP.cancel); + assert.ok(cancelSpy.calledOnce); + }); +}); diff --git a/ui/tests/integration/components/clients/attribution-test.js b/ui/tests/integration/components/clients/attribution-test.js index 9ccbf4ed9bce..35faac9d46c9 100644 --- a/ui/tests/integration/components/clients/attribution-test.js +++ b/ui/tests/integration/components/clients/attribution-test.js @@ -1,17 +1,32 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; +import sinon from 'sinon'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; -import { formatRFC3339 } from 'date-fns'; +import { endOfMonth, formatRFC3339 } from 'date-fns'; import { click } from '@ember/test-helpers'; +import subMonths from 'date-fns/subMonths'; +import timestamp from 'core/utils/timestamp'; module('Integration | Component | clients/attribution', function (hooks) { setupRenderingTest(hooks); + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { - this.set('timestamp', formatRFC3339(new Date())); + this.csvDownloadStub = sinon.stub(this.owner.lookup('service:download'), 'csv'); + const mockNow = timestamp.now(); + this.mockNow = mockNow; + this.set('startTimestamp', formatRFC3339(subMonths(mockNow, 6))); + this.set('timestamp', formatRFC3339(mockNow)); this.set('selectedNamespace', null); - this.set('isDateRange', true); this.set('chartLegend', [ { label: 'entity clients', key: 'entity_clients' }, { label: 'non-entity clients', key: 'non_entity_clients' }, @@ -28,10 +43,14 @@ module('Integration | Component | clients/attribution', function (hooks) { ]); }); + hooks.after(function () { + timestamp.now.restore(); + this.csvDownloadStub.restore(); + }); + test('it renders empty state with no data', async function (assert) { await render(hbs` - - + `); assert.dom('[data-test-component="empty-state"]').exists(); @@ -43,14 +62,14 @@ module('Integration | Component | clients/attribution', function (hooks) { test('it renders with data for namespaces', async function (assert) { await render(hbs` - - `); @@ -71,17 +90,18 @@ module('Integration | Component | clients/attribution', function (hooks) { assert.dom('[data-test-attribution-clients]').includesText('namespace').includesText('10'); }); - test('it renders correct text for a single month', async function (assert) { - this.set('isDateRange', false); + test('it renders two charts and correct text for a single, historical month', async function (assert) { + this.start = formatRFC3339(subMonths(this.mockNow, 1)); + this.end = formatRFC3339(subMonths(endOfMonth(this.mockNow), 1)); await render(hbs` - - `); assert @@ -124,17 +144,58 @@ module('Integration | Component | clients/attribution', function (hooks) { ); }); + test('it renders single chart for current month', async function (assert) { + await render(hbs` + + `); + assert + .dom('[data-test-chart-container="single-chart"]') + .exists('renders single chart with total clients'); + assert + .dom('[data-test-attribution-subtext]') + .hasTextContaining('this month', 'renders total monthly namespace text'); + }); + + test('it renders single chart and correct text for date range', async function (assert) { + await render(hbs` + + `); + + assert + .dom('[data-test-chart-container="single-chart"]') + .exists('renders single chart with total clients'); + assert + .dom('[data-test-attribution-subtext]') + .hasTextContaining('date range', 'renders total clients text for date range'); + }); + test('it renders with data for selected namespace auth methods for a date range', async function (assert) { this.set('selectedNamespace', 'second'); await render(hbs` - - `); @@ -157,17 +218,105 @@ module('Integration | Component | clients/attribution', function (hooks) { test('it renders modal', async function (assert) { await render(hbs` - 
- `); await click('[data-test-attribution-export-button]'); - assert.dom('.modal.is-active .title').hasText('Export attribution data', 'modal appears to export csv'); - assert.dom('.modal.is-active').includesText('January 2022 - February 2022'); + assert + .dom('[data-test-export-modal-title]') + .hasText('Export attribution data', 'modal appears to export csv'); + assert.dom('[data-test-export-date-range]').includesText('June 2022 - December 2022'); + }); + + test('it downloads csv data for date range', async function (assert) { + assert.expect(2); + + await render(hbs` + + `); + await click('[data-test-attribution-export-button]'); + await click('[data-test-confirm-button]'); + const [filename, content] = this.csvDownloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_by_namespace_June 2022-December 2022', 'csv has expected filename'); + assert.strictEqual( + content, + `Namespace path,Mount path\n *namespace totals, inclusive of mount clients,Total clients,Entity clients,Non-entity clients\nsecond,*,10,7,3\nfirst,*,5,3,2`, + 'csv has expected content' + ); + }); + + test('it downloads csv data for a single month', async function (assert) { + assert.expect(2); + await render(hbs` + + `); + await click('[data-test-attribution-export-button]'); + await click('[data-test-confirm-button]'); + const [filename, content] = this.csvDownloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_by_namespace_June 2022', 'csv has single month in filename'); + assert.strictEqual( + content, + `Namespace path,Mount path\n *namespace totals, inclusive of mount clients,Total clients,Entity clients,Non-entity clients\nsecond,*,10,7,3\nfirst,*,5,3,2`, + 'csv has expected content' + ); + }); + + test('it downloads csv data when a namespace is selected', async function (assert) { + assert.expect(2); + this.selectedNamespace = 'second'; + + await render(hbs` + + `); + + await click('[data-test-attribution-export-button]'); + await click('[data-test-confirm-button]'); + const [filename, content] = this.csvDownloadStub.lastCall.args; + assert.strictEqual( + filename, + 'clients_by_mount_path_June 2022-December 2022', + 'csv has expected filename for a selected namespace' + ); + assert.strictEqual( + content, + `Namespace path,Mount path,Total clients,Entity clients,Non-entity clients\nsecond,auth1/,3,2,1\nsecond,auth2/,2,1,1`, + 'csv has expected content for a selected namespace' + ); + }); + + test('csv filename omits date if no start/end timestamp', async function (assert) { + assert.expect(1); + + await render(hbs` + + `); + + await click('[data-test-attribution-export-button]'); + await click('[data-test-confirm-button]'); + const [filename, ,] = this.csvDownloadStub.lastCall.args; + assert.strictEqual(filename, 'clients_by_namespace'); + }); }); diff --git a/ui/tests/integration/components/clients/config-test.js b/ui/tests/integration/components/clients/config-test.js index c82b08353fcd..489f56b5138b 100644 --- a/ui/tests/integration/components/clients/config-test.js +++ b/ui/tests/integration/components/clients/config-test.js @@ -1,56 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, find, click } from '@ember/test-helpers'; -import { resolve } from 'rsvp'; +import { render, find, click, fillIn } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; module('Integration | Component | client count config', function (hooks) { setupRenderingTest(hooks); - - const createAttr = (name, type, options) => { - return { - name, - type, - options, - }; - }; - - const generateModel = (overrides) => { - return { - enabled: 'On', - retentionMonths: 24, - defaultReportMonths: 12, - configAttrs: [ - createAttr('enabled', 'string', { editType: 'boolean' }), - createAttr('retentionMonths', 'number'), - ], - changedAttributes: () => ({}), - save: () => {}, - ...overrides, - }; - }; + setupMirage(hooks); hooks.beforeEach(function () { this.router = this.owner.lookup('service:router'); - this.router.reopen({ - transitionTo() { - return { - followRedirects() { - return resolve(); - }, - }; - }, - }); - const model = generateModel(); - this.model = model; + this.transitionStub = sinon.stub(this.router, 'transitionTo'); + const store = this.owner.lookup('service:store'); + this.createModel = (enabled = 'enable', reporting_enabled = false, minimum_retention_months = 24) => { + store.pushPayload('clients/config', { + modelName: 'clients/config', + id: 'foo', + data: { + enabled, + reporting_enabled, + minimum_retention_months, + retention_months: 24, + }, + }); + this.model = store.peekRecord('clients/config', 'foo'); + }; }); test('it shows the table with the correct rows by default', async function (assert) { + this.createModel(); + await render(hbs``); - assert.dom('[data-test-pricing-metrics-config-table]').exists('Pricing metrics config table exists'); + assert.dom('[data-test-clients-config-table]').exists('Clients config table exists'); const rows = document.querySelectorAll('.info-table-row'); - assert.strictEqual(rows.length, 2, 'renders 2 infotable rows'); + assert.strictEqual(rows.length, 2, 'renders 2 info table rows'); assert.ok( find('[data-test-row-value="Usage data collection"]').textContent.includes('On'), 'Enabled value matches model' @@ -61,72 +51,111 @@ module('Integration | Component | client count config', function (hooks) { ); }); - test('TODO: it shows the config edit form when mode = edit', async function (assert) { - await render(hbs` - - - `); - - assert.dom('[data-test-pricing-metrics-config-form]').exists('Pricing metrics config form exists'); - const fields = document.querySelectorAll('[data-test-field]'); - assert.strictEqual(fields.length, 2, 'renders 2 fields'); - }); + test('it should function in edit mode when reporting is disabled', async function (assert) { + assert.expect(12); - test('it shows a modal with correct messaging when disabling', async function (assert) { - // Simulates the model when enabled value has been changed from On to Off - const simModel = generateModel({ - enabled: 'Off', - changedAttributes: () => ({ enabled: ['On', 'Off'] }), + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 24 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; }); - this.set('model', simModel); + + 
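+ // Note: the Mirage handler above intercepts the PUT to /sys/internal/counters/config and asserts on the parsed request body, so the save flow is verified without a live API; createModel (defined in beforeEach) seeds the Ember Data store with a clients/config record via pushPayload.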
this.createModel('disable'); + await render(hbs` - `); - await click('[data-test-edit-metrics-config-save]'); - assert.dom('.modal.is-active').exists('Modal appears'); + assert.dom('[data-test-input="enabled"]').isNotChecked('Data collection checkbox is not checked'); + assert + .dom('label[for="enabled"]') + .hasText('Data collection is off', 'Correct label renders when data collection is off'); + assert.dom('[data-test-input="retentionMonths"]').hasValue('24', 'Retention months render'); + + await click('[data-test-input="enabled"]'); + await fillIn('[data-test-input="retentionMonths"]', -3); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be greater than or equal to 24.', + 'Validation error shows for incorrect retention period' + ); + + await fillIn('[data-test-input="retentionMonths"]', 24); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-clients-config-modal="title"]') + .hasText('Turn usage tracking on?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="on"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="continue"]'); assert.ok( - find('[data-test-modal-title]').textContent.includes('Turn usage tracking off?'), - 'Modal confirming turn tracking off' + this.transitionStub.calledWith('vault.cluster.clients.config'), + 'Route transitions correctly on save success' ); - await click('[data-test-metrics-config-cancel]'); - assert.dom('.modal.is-active').doesNotExist('Modal goes away'); + + await click('[data-test-input="enabled"]'); + await click('[data-test-clients-config-save]'); + assert.dom('[data-test-clients-config-modal]').exists('Modal renders'); + assert + .dom('[data-test-clients-config-modal="title"]') + .hasText('Turn usage tracking off?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="off"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="cancel"]'); + assert.dom('[data-test-clients-config-modal]').doesNotExist('Modal is hidden on cancel'); }); - test('it shows a modal with correct messaging when enabling', async function (assert) { - // Simulates the model when enabled value has been changed from On to Off - const simModel = generateModel({ - changedAttributes: () => ({ enabled: ['Off', 'On'] }), + test('it should be hidden in edit mode when reporting is enabled', async function (assert) { + assert.expect(4); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 48 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; }); - this.set('model', simModel); + + this.createModel('enable', true, 24); + await render(hbs` - `); - await click('[data-test-edit-metrics-config-save]'); - assert.dom('.modal.is-active').exists('Modal appears'); - assert.ok( - find('[data-test-modal-title]').textContent.includes('Turn usage tracking on?'), - 'Modal confirming turn tracking on' - ); - await click('[data-test-metrics-config-cancel]'); - assert.dom('.modal.is-active').doesNotExist('Modal goes away'); + assert.dom('[data-test-input="enabled"]').doesNotExist('Data collection input not shown '); + assert.dom('[data-test-input="retentionMonths"]').hasValue('24', 'Retention months render'); + + await 
fillIn('[data-test-input="retentionMonths"]', 5); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be greater than or equal to 24.', + 'Validation error shows for incorrect retention period' + ); + + await fillIn('[data-test-input="retentionMonths"]', 48); + await click('[data-test-clients-config-save]'); }); - test('it does not show a modal on save if enable left unchanged', async function (assert) { - // Simulates the model when something other than enabled changed - const simModel = generateModel({ - changedAttributes: () => ({ retentionMonths: [24, '48'] }), + test('it should not show modal when data collection is not changed', async function (assert) { + assert.expect(1); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 24 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; }); - this.set('model', simModel); + + this.createModel(); + await render(hbs` - `); - - await click('[data-test-edit-metrics-config-save]'); - assert.dom('.modal.is-active').doesNotExist('No modal appears'); + await fillIn('[data-test-input="retentionMonths"]', 24); + await click('[data-test-clients-config-save]'); }); }); diff --git a/ui/tests/integration/components/clients/horizontal-bar-chart-test.js b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js index 8f726b84e9a3..bea40d8c8c85 100644 --- a/ui/tests/integration/components/clients/horizontal-bar-chart-test.js +++ b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js @@ -1,6 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { findAll, render, triggerEvent } from '@ember/test-helpers'; +import { findAll, render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; module('Integration | Component | clients/horizontal-bar-chart', function (hooks) { @@ -43,12 +48,14 @@ module('Integration | Component | clients/horizontal-bar-chart', function (hooks textTotals.forEach((label, index) => { assert.dom(label).hasText(`${dataArray[index].clients}`, 'total value renders correct number'); }); - for (const [i, bar] of actionBars.entries()) { - const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); - } + + // FLAKY after adding a11y testing, skip for now + // for (const [i, bar] of actionBars.entries()) { + // const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + // } }); test('it renders data with a large range', async function (assert) { @@ -74,11 +81,12 @@ module('Integration | Component | clients/horizontal-bar-chart', function (hooks assert.strictEqual(actionBars.length, dataArray.length, 'renders correct number of hover bars'); assert.strictEqual(dataBars.length, dataArray.length * 2, 'renders correct number of data bars'); - for (const [i, bar] of actionBars.entries()) { - const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); - } + // FLAKY after adding a11y testing, skip for now + // for (const [i, bar] of actionBars.entries()) { + // const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + // } }); }); diff --git a/ui/tests/integration/components/clients/line-chart-test.js b/ui/tests/integration/components/clients/line-chart-test.js index 930b24367888..e3f76de34a52 100644 --- a/ui/tests/integration/components/clients/line-chart-test.js +++ b/ui/tests/integration/components/clients/line-chart-test.js @@ -1,39 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; +import sinon from 'sinon'; import { setupRenderingTest } from 'ember-qunit'; -import { find, render, findAll, triggerEvent } from '@ember/test-helpers'; +import { find, render, findAll } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; import { format, formatRFC3339, subMonths } from 'date-fns'; -import { formatChartDate } from 'core/utils/date-formatters'; +import timestamp from 'core/utils/timestamp'; + module('Integration | Component | clients/line-chart', function (hooks) { setupRenderingTest(hooks); - const CURRENT_DATE = new Date(); + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); hooks.beforeEach(function () { this.set('xKey', 'foo'); this.set('yKey', 'bar'); this.set('dataset', [ { - foo: 1, + foo: '2018-04-03T14:15:30', bar: 4, + expectedLabel: '4/18', }, { - foo: 2, + foo: '2018-05-03T14:15:30', bar: 8, + expectedLabel: '5/18', }, { - foo: 3, + foo: '2018-06-03T14:15:30', bar: 14, + expectedLabel: '6/18', }, { - foo: 4, + foo: '2018-07-03T14:15:30', bar: 10, + expectedLabel: '7/18', }, ]); }); + hooks.after(function () { + timestamp.now.restore(); + }); test('it renders', async function (assert) { await render(hbs`
- +
`); @@ -41,48 +57,56 @@ module('Integration | Component | clients/line-chart', function (hooks) { assert .dom('[data-test-line-chart="plot-point"]') .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); - - findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { + findAll('[data-test-x-axis] text').forEach((e, i) => { + // For some reason the first axis label is not rendered assert .dom(e) - .hasText(`${this.dataset[i][this.xKey]}`, `renders x-axis label: ${this.dataset[i][this.xKey]}`); + .hasText( + `${this.dataset[i].expectedLabel}`, + `renders x-axis label: ${this.dataset[i].expectedLabel}` + ); }); - assert.dom(find('[data-test-line-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); + assert.dom('[data-test-y-axis] text').hasText('0', `y-axis starts at 0`); }); test('it renders upgrade data', async function (assert) { + const now = timestamp.now(); this.set('dataset', [ { - foo: format(subMonths(CURRENT_DATE, 4), 'M/yy'), + foo: formatRFC3339(subMonths(now, 4)), bar: 4, + month: format(subMonths(now, 4), 'M/yy'), }, { - foo: format(subMonths(CURRENT_DATE, 3), 'M/yy'), + foo: formatRFC3339(subMonths(now, 3)), bar: 8, + month: format(subMonths(now, 3), 'M/yy'), }, { - foo: format(subMonths(CURRENT_DATE, 2), 'M/yy'), + foo: formatRFC3339(subMonths(now, 2)), bar: 14, + month: format(subMonths(now, 2), 'M/yy'), }, { - foo: format(subMonths(CURRENT_DATE, 1), 'M/yy'), + foo: formatRFC3339(subMonths(now, 1)), bar: 10, + month: format(subMonths(now, 1), 'M/yy'), }, ]); this.set('upgradeData', [ { id: '1.10.1', previousVersion: '1.9.2', - timestampInstalled: formatRFC3339(subMonths(CURRENT_DATE, 2)), + timestampInstalled: formatRFC3339(subMonths(now, 2)), }, ]); await render(hbs`
-
`); @@ -91,35 +115,44 @@ module('Integration | Component | clients/line-chart', function (hooks) { .dom('[data-test-line-chart="plot-point"]') .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); assert - .dom(find(`[data-test-line-chart="upgrade-${this.dataset[2][this.xKey]}"]`)) - .hasStyle({ opacity: '1' }, `upgrade data point ${this.dataset[2][this.xKey]} has yellow highlight`); + .dom(find(`[data-test-line-chart="upgrade-${this.dataset[2].month}"]`)) + .hasStyle( + { fill: 'rgb(253, 238, 186)' }, + `upgrade data point ${this.dataset[2].month} has yellow highlight` + ); }); test('it renders tooltip', async function (assert) { + assert.expect(1); + const now = timestamp.now(); const tooltipData = [ { - month: format(subMonths(CURRENT_DATE, 4), 'M/yy'), + month: format(subMonths(now, 4), 'M/yy'), + timestamp: formatRFC3339(subMonths(now, 4)), clients: 4, new_clients: { clients: 0, }, }, { - month: format(subMonths(CURRENT_DATE, 3), 'M/yy'), + month: format(subMonths(now, 3), 'M/yy'), + timestamp: formatRFC3339(subMonths(now, 3)), clients: 8, new_clients: { clients: 4, }, }, { - month: format(subMonths(CURRENT_DATE, 2), 'M/yy'), + month: format(subMonths(now, 2), 'M/yy'), + timestamp: formatRFC3339(subMonths(now, 2)), clients: 14, new_clients: { clients: 6, }, }, { - month: format(subMonths(CURRENT_DATE, 1), 'M/yy'), + month: format(subMonths(now, 1), 'M/yy'), + timestamp: formatRFC3339(subMonths(now, 1)), clients: 20, new_clients: { clients: 4, @@ -131,41 +164,79 @@ module('Integration | Component | clients/line-chart', function (hooks) { { id: '1.10.1', previousVersion: '1.9.2', - timestampInstalled: formatRFC3339(subMonths(CURRENT_DATE, 2)), + timestampInstalled: formatRFC3339(subMonths(now, 2)), }, ]); await render(hbs`
-
`); - const tooltipHoverCircles = findAll('[data-test-line-chart] circle.hover-circle'); - for (const [i, bar] of tooltipHoverCircles.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - const { month, clients, new_clients } = tooltipData[i]; - assert - .dom(tooltip) - .includesText( - `${formatChartDate(month)} ${clients} total clients ${new_clients.clients} new clients`, - `tooltip text is correct for ${month}` - ); - } + const tooltipHoverCircles = findAll('[data-test-hover-circle]'); + assert.strictEqual(tooltipHoverCircles.length, tooltipData.length, 'all data circles are rendered'); + + // FLAKY after adding a11y testing, skip for now + // for (const [i, bar] of tooltipHoverCircles.entries()) { + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // const { month, clients, new_clients } = tooltipData[i]; + // assert + // .dom(tooltip) + // .includesText( + // `${formatChartDate(month)} ${clients} total clients ${new_clients.clients} new clients`, + // `tooltip text is correct for ${month}` + // ); + // } + }); + + test('it fails gracefully when data is not formatted correctly', async function (assert) { + this.set('dataset', [ + { + foo: 1, + bar: 4, + }, + { + foo: 2, + bar: 8, + }, + { + foo: 3, + bar: 14, + }, + { + foo: 4, + bar: 10, + }, + ]); + await render(hbs` +
+ +
+ `); + + assert.dom('[data-test-line-chart]').doesNotExist('Chart is not rendered'); + assert + .dom('[data-test-component="empty-state"]') + .hasText('No data to display', 'Shows empty state when time data is not formatted correctly'); + }); test('it fails gracefully when upgradeData is an object', async function (assert) { this.set('upgradeData', { some: 'object' }); await render(hbs`
-
`); @@ -179,11 +250,11 @@ module('Integration | Component | clients/line-chart', function (hooks) { this.set('upgradeData', [{ incorrect: 'key names' }]); await render(hbs`
-
`); @@ -196,7 +267,7 @@ module('Integration | Component | clients/line-chart', function (hooks) { test('it renders empty state when no dataset', async function (assert) { await render(hbs`
- +
`); @@ -208,4 +279,121 @@ module('Integration | Component | clients/line-chart', function (hooks) { 'custom message renders' ); }); + + test('it updates axis when dataset updates', async function (assert) { + const datasets = { + small: [ + { + foo: '2020-04-01', + bar: 4, + month: '4/20', + }, + { + foo: '2020-05-01', + bar: 8, + month: '5/20', + }, + { + foo: '2020-06-01', + bar: 1, + }, + { + foo: '2020-07-01', + bar: 10, + }, + ], + large: [ + { + foo: '2020-08-01', + bar: 4586, + month: '8/20', + }, + { + foo: '2020-09-01', + bar: 8928, + month: '9/20', + }, + { + foo: '2020-10-01', + bar: 11948, + month: '10/20', + }, + { + foo: '2020-11-01', + bar: 16943, + month: '11/20', + }, + ], + broken: [ + { + foo: '2020-01-01', + bar: null, + month: '1/20', + }, + { + foo: '2020-02-01', + bar: 0, + month: '2/20', + }, + { + foo: '2020-03-01', + bar: 22, + month: '3/20', + }, + { + foo: '2020-04-01', + bar: null, + month: '4/20', + }, + { + foo: '2020-05-01', + bar: 70, + month: '5/20', + }, + { + foo: '2020-06-01', + bar: 50, + month: '6/20', + }, + ], + }; + this.set('dataset', datasets.small); + await render(hbs` +
+ +
+ `); + assert.dom('[data-test-y-axis]').hasText('0 2 4 6 8 10', 'y-axis renders correctly for small values'); + assert + .dom('[data-test-x-axis]') + .hasText('4/20 5/20 6/20 7/20', 'x-axis renders correctly for small values'); + + // Update to large dataset + this.set('dataset', datasets.large); + assert.dom('[data-test-y-axis]').hasText('0 5k 10k 15k', 'y-axis renders correctly for new large values'); + assert + .dom('[data-test-x-axis]') + .hasText('8/20 9/20 10/20 11/20', 'x-axis renders correctly for small values'); + + // Update to broken dataset + this.set('dataset', datasets.broken); + assert.dom('[data-test-y-axis]').hasText('0 20 40 60', 'y-axis renders correctly for new broken values'); + assert + .dom('[data-test-x-axis]') + .hasText('1/20 2/20 3/20 4/20 5/20 6/20', 'x-axis renders correctly for small values'); + assert.dom('[data-test-hover-circle]').exists({ count: 4 }, 'only render circles for non-null values'); + + assert + .dom('[data-test-hover-circle="1/20"]') + .doesNotExist('first month dot does not exist because value is null'); + assert + .dom('[data-test-hover-circle="4/20"]') + .doesNotExist('other null count month dot also does not render'); + // Note: the line should also show a gap, but this is difficult to test for + }); }); diff --git a/ui/tests/integration/components/clients/monthly-usage-test.js b/ui/tests/integration/components/clients/monthly-usage-test.js deleted file mode 100644 index ce86c4ed9829..000000000000 --- a/ui/tests/integration/components/clients/monthly-usage-test.js +++ /dev/null @@ -1,1474 +0,0 @@ -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; -import { formatRFC3339 } from 'date-fns'; -import { findAll } from '@ember/test-helpers'; -import { calculateAverage } from 'vault/utils/chart-helpers'; -import { formatNumber } from 'core/helpers/format-number'; - -module('Integration | Component | clients/monthly-usage', function (hooks) { - setupRenderingTest(hooks); - const DATASET = [ - { - month: '8/21', - timestamp: '2021-08-01T00:00:00Z', - counts: null, - namespaces: [], - new_clients: { - month: '8/21', - namespaces: [], - }, - namespaces_by_key: {}, - }, - { - month: '9/21', - clients: 19251, - entity_clients: 10713, - non_entity_clients: 8538, - namespaces: [ - { - label: 'root', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - }, - { - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - }, - { - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - }, - { - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - }, - { - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - }, - { - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 
2698, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - }, - { - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - }, - { - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - }, - { - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - }, - { - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - }, - { - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - mounts: [ - { - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - }, - { - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - }, - { - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - }, - { - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '9/21', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - new_clients: { - month: '9/21', - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - }, - }, - }, - 'test-ns-2/': { - month: '9/21', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - new_clients: { - month: '9/21', - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - 
non_entity_clients: 507, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - }, - }, - }, - 'test-ns-1/': { - month: '9/21', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - new_clients: { - month: '9/21', - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - new_clients: { - month: '9/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - 
new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - new_clients: { - month: '9/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - }, - mounts_by_key: { - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - }, - }, - }, - }, - new_clients: { - month: '9/21', - clients: 9421, - entity_clients: 3947, - non_entity_clients: 5474, - namespaces: [ - { - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - { - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - { - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - { - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - ], - }, - { - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - { - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - { - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - { - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - mounts: [ - { - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - { - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - { - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - { - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 520, - 
entity_clients: 13, - non_entity_clients: 507, - }, - { - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - { - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - { - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - mounts: [ - { - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - { - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - { - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - { - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - ], - }, - ], - }, - }, - { - month: '10/21', - clients: 19417, - entity_clients: 10105, - non_entity_clients: 9312, - namespaces: [ - { - label: 'root', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - }, - { - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - }, - { - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - }, - { - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - }, - { - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - }, - { - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - }, - { - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - }, - { - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - }, - { - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - mounts: [ - { - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - }, - { - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - }, - { - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - mounts: [ - { - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - }, - { - label: 
'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - }, - { - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - }, - { - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '10/21', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - new_clients: { - month: '10/21', - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - }, - }, - }, - 'test-ns-2/': { - month: '10/21', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - new_clients: { - month: '10/21', - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - new_clients: { - month: '10/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 
'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - new_clients: { - month: '10/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - }, - }, - }, - 'test-ns-1/': { - month: '10/21', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - new_clients: { - month: '10/21', - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 259, 
- entity_clients: 245, - non_entity_clients: 14, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - }, - }, - }, - }, - new_clients: { - month: '10/21', - clients: 7659, - entity_clients: 3555, - non_entity_clients: 4104, - namespaces: [ - { - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - mounts: [ - { - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - { - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - { - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - { - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - ], - }, - { - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - { - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - { - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - { - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - { - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - { - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - { - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - { - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - { - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - { - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - mounts: [ - { - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - { - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - { - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - { - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - ], - }, - ], - }, - }, - ]; - hooks.beforeEach(function () { - this.set('timestamp', formatRFC3339(new Date())); - this.set('isDateRange', true); - this.set('chartLegend', [ - { label: 'entity clients', key: 'entity_clients' }, - { label: 'non-entity clients', key: 'non_entity_clients' }, - ]); - 
this.set('byMonthActivityData', DATASET); - }); - - test('it renders empty state with no data', async function (assert) { - await render(hbs` - - - `); - assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); - assert.dom('[data-test-component="empty-state"]').exists(); - assert.dom('[data-test-empty-state-subtext]').hasText('No data to display'); - assert.dom('[data-test-monthly-usage-average-total] p.data-details').hasText('0', 'average total is 0'); - assert.dom('[data-test-monthly-usage-average-new] p.data-details').hasText('0', 'average new is 0'); - assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); - assert.dom('[data-test-monthly-usage-legend]').doesNotExist('legend does not exist'); - assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); - }); - - test('it renders with month over month activity data', async function (assert) { - const expectedTotal = formatNumber([calculateAverage(DATASET, 'clients')]); - const expectedNew = formatNumber([ - calculateAverage( - DATASET?.map((d) => d.new_clients), - 'clients' - ), - ]); - await render(hbs` - - - `); - assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); - assert.dom('[data-test-component="empty-state"]').doesNotExist(); - assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart displays'); - assert.dom('[data-test-monthly-usage-legend]').exists('renders vertical bar chart legend'); - assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); - - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { - assert.dom(e).hasText(`${DATASET[i].month}`, `renders x-axis label: ${DATASET[i].month}`); - }); - assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists( - { count: DATASET.filter((m) => m.counts !== null).length * 2 }, - 'renders correct number of data bars' - ); - assert - .dom('[data-test-monthly-usage-average-total] p.data-details') - .hasText(`${expectedTotal}`, `renders correct total average ${expectedTotal}`); - assert - .dom('[data-test-monthly-usage-average-new] p.data-details') - .hasText(`${expectedNew}`, `renders correct new average ${expectedNew}`); - }); -}); diff --git a/ui/tests/integration/components/clients/page/counts-test.js b/ui/tests/integration/components/clients/page/counts-test.js new file mode 100644 index 000000000000..b65b7978504e --- /dev/null +++ b/ui/tests/integration/components/clients/page/counts-test.js @@ -0,0 +1,262 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, click, settled, findAll } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { getUnixTime } from 'date-fns'; +import { SELECTORS as ts, dateDropdownSelect } from 'vault/tests/helpers/clients'; +import { selectChoose } from 'ember-power-select/test-support/helpers'; +import timestamp from 'core/utils/timestamp'; +import sinon from 'sinon'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); + +module('Integration | Component | clients | Page::Counts', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => STATIC_NOW); + }); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + this.store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.config = await this.store.queryRecord('clients/config', {}); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + this.versionHistory = []; + this.renderComponent = () => + render(hbs` + +
Yield block
+
+ `); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it should render start date label and description based on version', async function (assert) { + const versionService = this.owner.lookup('service:version'); + + await this.renderComponent(); + + assert.dom(ts.counts.startLabel).hasText('Client counting start date', 'Label renders for OSS'); + assert + .dom(ts.counts.description) + .hasText( + 'This date is when client counting starts. Without this starting point, the data shown is not reliable.', + 'Description renders for OSS' + ); + + versionService.set('type', 'enterprise'); + await settled(); + + assert.dom(ts.counts.startLabel).hasText('Billing start month', 'Label renders for Enterprise'); + assert + .dom(ts.counts.description) + .hasText( + 'This date comes from your license, and defines when client counting starts. Without this starting point, the data shown is not reliable.', + 'Description renders for Enterprise' + ); + }); + + test('it should populate start and end month displays', async function (assert) { + await this.renderComponent(); + + assert.dom(ts.counts.startMonth).hasText('July 2023', 'Start month renders'); + assert + .dom(ts.calendarWidget.trigger) + .hasText('Jul 2023 - Jan 2024', 'Start and end months render in filter bar'); + }); + + test('it should render no data empty state', async function (assert) { + this.activity = { id: 'no-data' }; + + await this.renderComponent(); + + assert + .dom(ts.emptyStateTitle) + .hasText('No data received from July 2023 to January 2024', 'No data empty state renders'); + }); + + test('it should render activity error', async function (assert) { + this.activity = null; + this.activityError = { httpStatus: 403 }; + + await this.renderComponent(); + + assert.dom(ts.emptyStateTitle).hasText('You are not authorized', 'Activity error empty state renders'); + }); + + test('it should render config disabled alert', async function (assert) { + this.config.enabled = 'Off'; + + await this.renderComponent(); + + assert.dom(ts.counts.configDisabled).hasText('Tracking is disabled', 'Config disabled alert renders'); + }); + + test('it should send correct values on start and end date change', async function (assert) { + assert.expect(4); + + let expected = { start_time: getUnixTime(new Date('2023-01-01T00:00:00Z')), end_time: END_TIME }; + this.onFilterChange = (params) => { + assert.deepEqual(params, expected, 'Correct values sent on filter change'); + this.startTimestamp = params.start_time || START_TIME; + this.endTimestamp = params.end_time || END_TIME; + }; + + await this.renderComponent(); + await dateDropdownSelect('January', '2023'); + + expected.start_time = END_TIME; + await click(ts.calendarWidget.trigger); + await click(ts.calendarWidget.currentMonth); + + expected.start_time = getUnixTime(this.config.billingStartTimestamp); + await click(ts.calendarWidget.trigger); + await click(ts.calendarWidget.currentBillingPeriod); + + expected = { end_time: getUnixTime(new Date('2023-12-31T00:00:00Z')) }; + await click(ts.calendarWidget.trigger); + await click(ts.calendarWidget.customEndMonth); + await click(ts.calendarWidget.previousYear); + await click(ts.calendarWidget.calendarMonth('December')); + }); + + test('it should render namespace and auth mount filters', async function (assert) { + assert.expect(5); + + this.namespace = 'root'; + this.mountPath = 'auth/authid0'; + + let assertion = (params) => + assert.deepEqual(params, { ns: undefined, mountPath: undefined }, 'Auth mount cleared with namespace'); + 
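// `assertion` is reassigned throughout this test so the shared onFilterChange callback below + // can run a different expectation for each interaction (or none while it is null). +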
this.onFilterChange = (params) => { + if (assertion) { + assertion(params); + } + const keys = Object.keys(params); + this.namespace = keys.includes('ns') ? params.ns : this.namespace; + this.mountPath = keys.includes('mountPath') ? params.mountPath : this.mountPath; + }; + + await this.renderComponent(); + + assert.dom(ts.counts.namespaces).includesText(this.namespace, 'Selected namespace renders'); + assert.dom(ts.counts.mountPaths).includesText(this.mountPath, 'Selected auth mount renders'); + + await click(`${ts.counts.namespaces} button`); + // this is only necessary in tests since SearchSelect does not respond to initialValue changes + // in the app the component is rerendered on query param change + assertion = null; + await click(`${ts.counts.mountPaths} button`); + + assertion = (params) => assert.true(params.ns.includes('ns/'), 'Namespace value sent on change'); + await selectChoose(ts.counts.namespaces, '.ember-power-select-option', 0); + + assertion = (params) => + assert.true(params.mountPath.includes('auth/'), 'Auth mount value sent on change'); + await selectChoose(ts.counts.mountPaths, 'auth/authid0'); + }); + + test('it should render start time discrepancy alert', async function (assert) { + this.startTimestamp = getUnixTime(new Date('2022-06-01T00:00:00Z')); + + await this.renderComponent(); + + assert + .dom(ts.counts.startDiscrepancy) + .hasText( + 'You requested data from June 2022. We only have data from July 2023, and that is what is being shown here.', + 'Start discrepancy alert renders' + ); + }); + + test('it renders alert if upgrade happened within queried activity', async function (assert) { + assert.expect(4); + this.versionHistory = await this.store.findAll('clients/version-history').then((resp) => { + return resp.map(({ version, previousVersion, timestampInstalled }) => { + return { + version, + previousVersion, + timestampInstalled, + }; + }); + }); + + await this.renderComponent(); + + assert + .dom(ts.upgradeWarning) + .hasTextContaining( + `Client count data contains 2 upgrades Vault was upgraded during this time period. Keep this in mind while looking at the data. Visit our Client count FAQ for more information.`, + 'it renders title and subtext' + ); + assert + .dom(`${ts.upgradeWarning} ul`) + .doesNotHaveTextContaining( + '1.9.1', + 'Warning does not include subsequent patch releases (e.g. 1.9.1) of the same notable upgrade.'
+ ); + const [first, second] = findAll(`${ts.upgradeWarning} li`); + assert + .dom(first) + .hasText( + `1.9.0 (upgraded on Jul 2, 2023) - We introduced changes to non-entity token and local auth mount logic for client counting in 1.9.`, + 'alert includes 1.9.0 upgrade' + ); + + assert + .dom(second) + .hasTextContaining( + `1.10.1 (upgraded on Sep 2, 2023) - We added monthly breakdowns and mount level attribution starting in 1.10.`, + 'alert includes 1.10.1 upgrade' + ); + }); + + test('it should render empty state for no start or license start time', async function (assert) { + this.startTimestamp = null; + this.config.billingStartTimestamp = null; + this.activity = {}; + + await this.renderComponent(); + + assert.dom(ts.emptyStateTitle).hasText('No start date found', 'Empty state renders'); + assert.dom(ts.counts.startDropdown).exists('Date dropdown renders when start time is not provided'); + }); + + test('it should render catch all empty state', async function (assert) { + this.activity.total = null; + + await this.renderComponent(); + + assert + .dom(ts.emptyStateTitle) + .hasText('No data received from July 2023 to January 2024', 'Empty state renders'); + }); +}); diff --git a/ui/tests/integration/components/clients/page/sync-test.js b/ui/tests/integration/components/clients/page/sync-test.js new file mode 100644 index 000000000000..623fe9d723cc --- /dev/null +++ b/ui/tests/integration/components/clients/page/sync-test.js @@ -0,0 +1,184 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, findAll } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { getUnixTime } from 'date-fns'; +import { SELECTORS } from 'vault/tests/helpers/clients'; +import { formatNumber } from 'core/helpers/format-number'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { dateFormat } from 'core/helpers/date-format'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); +const { syncTab, charts, usageStats } = SELECTORS; + +module('Integration | Component | clients | Clients::Page::Sync', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + this.store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + this.isSecretsSyncActivated = true; + + this.renderComponent = () => + render(hbs` + + `); + }); + + test('it should render with full month activity data', async function (assert) { + assert.expect(4 + this.activity.byMonth.length); + const expectedTotal = formatNumber([this.activity.total.secret_syncs]); + const expectedAvg = formatNumber([calculateAverage(this.activity.byMonth, 'secret_syncs')]); + await this.renderComponent(); + assert + .dom(syncTab.total) + .hasText( + `Total sync clients The total number of secrets synced from Vault to other destinations during this date range.
${expectedTotal}`, + `renders correct total sync stat ${expectedTotal}` + ); + assert + .dom(syncTab.average) + .hasText( + `Average sync clients per month ${expectedAvg}`, + `renders correct average sync stat ${expectedAvg}` + ); + + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(charts.timestamp).hasText(`Updated ${formattedTimestamp}`, 'renders response timestamp'); + + // assert bar chart is correct + findAll(`${charts.chart('Secrets sync usage')} ${charts.xAxisLabel}`).forEach((e, i) => { + assert + .dom(e) + .hasText( + `${this.activity.byMonth[i].month}`, + `renders x-axis labels for bar chart: ${this.activity.byMonth[i].month}` + ); + }); + + const dataBars = findAll(charts.dataBar).filter((b) => b.hasAttribute('height')); + assert.strictEqual(dataBars.length, this.activity.byMonth.filter((m) => m.counts !== null).length); + }); + + test('it should render an empty state for no monthly data', async function (assert) { + assert.expect(5); + this.activity.set('byMonth', []); + + await this.renderComponent(); + + assert.dom(charts.chart('Secrets sync usage')).doesNotExist('vertical bar chart does not render'); + assert.dom(SELECTORS.emptyStateTitle).hasText('No monthly secrets sync clients'); + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert.dom(charts.timestamp).hasText(`Updated ${formattedTimestamp}`, 'renders timestamp'); + assert.dom(syncTab.total).doesNotExist('total sync counts does not exist'); + assert.dom(syncTab.average).doesNotExist('average sync client counts does not exist'); + }); + + test('it should render stats without chart for a single month', async function (assert) { + assert.expect(4); + const activityQuery = { start_time: { timestamp: START_TIME }, end_time: { timestamp: START_TIME } }; + this.activity = await this.store.queryRecord('clients/activity', activityQuery); + const total = formatNumber([this.activity.total.secret_syncs]); + await this.renderComponent(); + + assert.dom(charts.chart('Secrets sync usage')).doesNotExist('vertical bar chart does not render'); + assert + .dom(usageStats) + .hasText( + `Secrets sync usage This data can be used to understand how many secrets sync clients have been used for this date range. Each Vault secret that is synced to at least one destination counts as one Vault client. 
Total sync clients ${total}`, + 'renders sync stats instead of chart' + ); + assert.dom(syncTab.total).doesNotExist('total sync counts does not exist'); + assert.dom(syncTab.average).doesNotExist('average sync client counts does not exist'); + }); + + test('it should render an empty state if secrets sync is not activated', async function (assert) { + this.isSecretsSyncActivated = false; + + await this.renderComponent(); + + assert.dom(SELECTORS.emptyStateTitle).hasText('No Secrets Sync clients'); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText('No data is available because Secrets Sync has not been activated.'); + assert.dom(SELECTORS.emptyStateActions).hasText('Activate Secrets Sync'); + + assert.dom(charts.chart('Secrets sync usage')).doesNotExist(); + assert.dom(syncTab.total).doesNotExist(); + assert.dom(syncTab.average).doesNotExist(); + }); + + test('it should render an empty chart if secrets sync is activated but no secrets synced', async function (assert) { + this.isSecretsSyncActivated = true; + const counts = { + clients: 10, + entity_clients: 4, + non_entity_clients: 6, + secret_syncs: 0, + }; + const monthData = { + month: '1/24', + timestamp: '2024-01-01T00:00:00-08:00', + ...counts, + namespaces: [ + { + label: 'root', + ...counts, + mounts: [], + }, + ], + }; + this.activity.byMonth = [ + { + ...monthData, + namespaces_by_key: { + root: { + ...monthData, + mounts_by_key: {}, + }, + }, + new_clients: { + ...monthData, + }, + }, + ]; + this.activity.total = counts; + await this.renderComponent(); + + assert + .dom(syncTab.total) + .hasText( + 'Total sync clients The total number of secrets synced from Vault to other destinations during this date range. 0' + ); + assert.dom(syncTab.average).doesNotExist('Does not render average if the calculation is 0'); + }); +}); diff --git a/ui/tests/integration/components/clients/page/token-test.js b/ui/tests/integration/components/clients/page/token-test.js new file mode 100644 index 000000000000..f4427906930e --- /dev/null +++ b/ui/tests/integration/components/clients/page/token-test.js @@ -0,0 +1,188 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, findAll } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import { getUnixTime } from 'date-fns'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { formatNumber } from 'core/helpers/format-number'; +import { dateFormat } from 'core/helpers/date-format'; +import { SELECTORS as ts } from 'vault/tests/helpers/clients'; + +const START_TIME = getUnixTime(LICENSE_START); +const END_TIME = getUnixTime(STATIC_NOW); + +module('Integration | Component | clients | Page::Token', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + const store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: END_TIME }, + }; + this.activity = await store.queryRecord('clients/activity', activityQuery); + this.newActivity = this.activity.byMonth.map((d) => d.new_clients); + this.versionHistory = await store + .findAll('clients/version-history') + .then((response) => { + return response.map(({ version, previousVersion, timestampInstalled }) => { + return { + version, + previousVersion, + timestampInstalled, + }; + }); + }) + .catch(() => []); + this.startTimestamp = START_TIME; + this.endTimestamp = END_TIME; + this.renderComponent = () => + render(hbs` + + `); + }); + + test('it should render monthly total chart', async function (assert) { + const getAverage = (data) => { + const average = ['entity_clients', 'non_entity_clients'].reduce((count, key) => { + return (count += calculateAverage(data, key) || 0); + }, 0); + return formatNumber([average]); + }; + const expectedTotal = getAverage(this.activity.byMonth); + const expectedNew = getAverage(this.newActivity); + const chart = ts.charts.chart('monthly total'); + + await this.renderComponent(); + + assert + .dom(ts.charts.statTextValue('Average total clients per month')) + .hasText(expectedTotal, 'renders correct total clients'); + assert + .dom(ts.charts.statTextValue('Average new clients per month')) + .hasText(expectedNew, 'renders correct new clients'); + // assert bar chart is correct + findAll(`${chart} ${ts.charts.bar.xAxisLabel}`).forEach((e, i) => { + assert + .dom(e) + .hasText( + `${this.activity.byMonth[i].month}`, + `renders x-axis labels for bar chart: ${this.activity.byMonth[i].month}` + ); + }); + assert + .dom(`${chart} ${ts.charts.bar.dataBar}`) + .exists( + { count: this.activity.byMonth.filter((m) => m.counts !== null).length * 2 }, + 'renders correct number of data bars' + ); + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert + .dom(`${chart} ${ts.charts.timestamp}`) + .hasText(`Updated ${formattedTimestamp}`, 'renders timestamp'); + assert.dom(`${chart} ${ts.charts.legendLabel(1)}`).hasText('Entity clients', 'Legend label renders'); + assert.dom(`${chart} ${ts.charts.legendLabel(2)}`).hasText('Non-entity clients', 'Legend label renders'); + }); + + test('it should render monthly new chart', async function (assert) { + const expectedNewEntity = formatNumber([calculateAverage(this.newActivity, 'entity_clients')]); + const expectedNewNonEntity 
= formatNumber([calculateAverage(this.newActivity, 'non_entity_clients')]); + const chart = ts.charts.chart('monthly new'); + + await this.renderComponent(); + + assert + .dom(ts.charts.statTextValue('Average new entity clients per month')) + .hasText(expectedNewEntity, 'renders correct new entity clients'); + assert + .dom(ts.charts.statTextValue('Average new non-entity clients per month')) + .hasText(expectedNewNonEntity, 'renders correct new non-entity clients'); + // assert bar chart is correct + findAll(`${chart} ${ts.charts.bar.xAxisLabel}`).forEach((e, i) => { + assert + .dom(e) + .hasText( + `${this.activity.byMonth[i].month}`, + `renders x-axis labels for bar chart: ${this.activity.byMonth[i].month}` + ); + }); + assert + .dom(`${chart} ${ts.charts.bar.dataBar}`) + .exists( + { count: this.activity.byMonth.filter((m) => m.counts !== null).length * 2 }, + 'renders correct number of data bars' + ); + const formattedTimestamp = dateFormat([this.activity.responseTimestamp, 'MMM d yyyy, h:mm:ss aaa'], { + withTimeZone: true, + }); + assert + .dom(`${chart} ${ts.charts.timestamp}`) + .hasText(`Updated ${formattedTimestamp}`, 'renders timestamp'); + assert.dom(`${chart} ${ts.charts.legendLabel(1)}`).hasText('Entity clients', 'Legend label renders'); + assert.dom(`${chart} ${ts.charts.legendLabel(2)}`).hasText('Non-entity clients', 'Legend label renders'); + }); + + test('it should render empty state for no new monthly data', async function (assert) { + this.activity.byMonth = this.activity.byMonth.map((d) => ({ + ...d, + new_clients: { month: d.month }, + })); + const chart = ts.charts.chart('monthly new'); + + await this.renderComponent(); + + assert.dom(`${chart} ${ts.charts.verticalBar}`).doesNotExist('Chart does not render'); + assert.dom(`${chart} ${ts.charts.legend}`).doesNotExist('Legend does not render'); + assert.dom(ts.emptyStateTitle).hasText('No new clients'); + assert.dom(ts.tokenTab.entity).doesNotExist('New client counts does not exist'); + assert.dom(ts.tokenTab.nonentity).doesNotExist('Average new client counts does not exist'); + }); + + test('it should render usage stats', async function (assert) { + assert.expect(6); + + this.activity.endTime = this.activity.startTime; + const { + total: { entity_clients, non_entity_clients }, + } = this.activity; + + const checkUsage = () => { + assert + .dom(ts.charts.statTextValue('Total clients')) + .hasText(formatNumber([entity_clients + non_entity_clients]), 'Total clients value renders'); + assert + .dom(ts.charts.statTextValue('Entity clients')) + .hasText(formatNumber([entity_clients]), 'Entity clients value renders'); + assert + .dom(ts.charts.statTextValue('Non-entity clients')) + .hasText(formatNumber([non_entity_clients]), 'Non-entity clients value renders'); + }; + + // total usage should display for single month query + await this.renderComponent(); + checkUsage(); + + // total usage should display when there is no monthly data + this.activity.byMonth = null; + await this.renderComponent(); + checkUsage(); + }); +}); diff --git a/ui/tests/integration/components/clients/running-total-test.js b/ui/tests/integration/components/clients/running-total-test.js index 4d6f46ff67aa..e8ca539693b6 100644 --- a/ui/tests/integration/components/clients/running-total-test.js +++ b/ui/tests/integration/components/clients/running-total-test.js @@ -1,1585 +1,199 @@ +/** + * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; import { render } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; -import { formatRFC3339 } from 'date-fns'; +import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; +import sinon from 'sinon'; +import { formatRFC3339, getUnixTime } from 'date-fns'; import { findAll } from '@ember/test-helpers'; -import { calculateAverage } from 'vault/utils/chart-helpers'; import { formatNumber } from 'core/helpers/format-number'; +import timestamp from 'core/utils/timestamp'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +import { SELECTORS as ts } from 'vault/tests/helpers/clients'; + +const START_TIME = getUnixTime(LICENSE_START); module('Integration | Component | clients/running-total', function (hooks) { setupRenderingTest(hooks); - const MONTHLY_ACTIVITY = [ - { - month: '8/21', - timestamp: '2021-08-01T00:00:00Z', - counts: null, - namespaces: [], - new_clients: { - month: '8/21', - namespaces: [], - }, - namespaces_by_key: {}, - }, - { - month: '9/21', - clients: 19251, - entity_clients: 10713, - non_entity_clients: 8538, - namespaces: [ - { - label: 'root', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - }, - { - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - }, - { - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - }, - { - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - }, - { - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - }, - { - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - }, - { - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - }, - { - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - }, - { - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - }, - { - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - }, - { - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - }, - ], - }, - { - label: 
'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - mounts: [ - { - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - }, - { - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - }, - { - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - }, - { - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '9/21', - clients: 4852, - entity_clients: 3108, - non_entity_clients: 1744, - new_clients: { - month: '9/21', - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1598, - entity_clients: 687, - non_entity_clients: 911, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1429, - entity_clients: 981, - non_entity_clients: 448, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 965, - entity_clients: 720, - non_entity_clients: 245, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 860, - entity_clients: 720, - non_entity_clients: 140, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - }, - }, - }, - 'test-ns-2/': { - month: '9/21', - clients: 4702, - entity_clients: 3057, - non_entity_clients: 1645, - new_clients: { - month: '9/21', - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1686, - entity_clients: 926, - non_entity_clients: 760, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1525, - entity_clients: 789, - non_entity_clients: 736, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 905, - entity_clients: 849, - non_entity_clients: 56, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 586, - entity_clients: 493, - non_entity_clients: 93, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - }, - }, - }, - 'test-ns-1/': { - month: '9/21', - clients: 4569, - entity_clients: 1871, - non_entity_clients: 2698, - new_clients: { - month: '9/21', - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - 
non_entity_clients: 1833, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 1534, - entity_clients: 619, - non_entity_clients: 915, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1528, - entity_clients: 589, - non_entity_clients: 939, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 828, - entity_clients: 612, - non_entity_clients: 216, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 679, - entity_clients: 51, - non_entity_clients: 628, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 3771, - entity_clients: 2029, - non_entity_clients: 1742, - new_clients: { - month: '9/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 1249, - entity_clients: 793, - non_entity_clients: 456, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - }, - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 1046, - entity_clients: 444, - non_entity_clients: 602, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - }, - 'path-2': { - month: '9/21', - label: 'path-2', - clients: 930, - entity_clients: 277, - non_entity_clients: 653, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 546, - entity_clients: 515, - non_entity_clients: 31, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - }, - }, - }, - 'test-ns-1-with-namespace-length-over-18-characters/': { - month: '9/21', - clients: 1357, - entity_clients: 648, - non_entity_clients: 709, - new_clients: { - month: '9/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - }, - mounts_by_key: { - 'path-1': { - month: '9/21', - label: 'path-1', - clients: 613, - entity_clients: 23, - non_entity_clients: 590, - new_clients: { - month: '9/21', - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - }, - 'path-3-with-over-18-characters': { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 543, - entity_clients: 465, - non_entity_clients: 78, - new_clients: { - month: '9/21', - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - }, - 'path-2': { - 
month: '9/21', - label: 'path-2', - clients: 146, - entity_clients: 141, - non_entity_clients: 5, - new_clients: { - month: '9/21', - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - }, - 'path-4-with-over-18-characters': { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 55, - entity_clients: 19, - non_entity_clients: 36, - new_clients: { - month: '9/21', - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - }, - }, - }, - }, - new_clients: { - month: '9/21', - clients: 9421, - entity_clients: 3947, - non_entity_clients: 5474, - namespaces: [ - { - label: 'test-ns-1/', - clients: 2712, - entity_clients: 879, - non_entity_clients: 1833, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1250, - entity_clients: 536, - non_entity_clients: 714, - }, - { - label: 'path-4-with-over-18-characters', - clients: 740, - entity_clients: 39, - non_entity_clients: 701, - }, - { - label: 'path-1', - clients: 463, - entity_clients: 283, - non_entity_clients: 180, - }, - { - label: 'path-2', - clients: 259, - entity_clients: 21, - non_entity_clients: 238, - }, - ], - }, - { - label: 'root', - clients: 2525, - entity_clients: 1315, - non_entity_clients: 1210, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1055, - entity_clients: 257, - non_entity_clients: 798, - }, - { - label: 'path-2', - clients: 791, - entity_clients: 711, - non_entity_clients: 80, - }, - { - label: 'path-1', - clients: 543, - entity_clients: 340, - non_entity_clients: 203, - }, - { - label: 'path-4-with-over-18-characters', - clients: 136, - entity_clients: 7, - non_entity_clients: 129, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 2087, - entity_clients: 902, - non_entity_clients: 1185, - mounts: [ - { - label: 'path-1', - clients: 775, - entity_clients: 349, - non_entity_clients: 426, - }, - { - label: 'path-2', - clients: 632, - entity_clients: 90, - non_entity_clients: 542, - }, - { - label: 'path-3-with-over-18-characters', - clients: 472, - entity_clients: 260, - non_entity_clients: 212, - }, - { - label: 'path-4-with-over-18-characters', - clients: 208, - entity_clients: 203, - non_entity_clients: 5, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 1537, - entity_clients: 662, - non_entity_clients: 875, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 520, - entity_clients: 13, - non_entity_clients: 507, - }, - { - label: 'path-4-with-over-18-characters', - clients: 499, - entity_clients: 197, - non_entity_clients: 302, - }, - { - label: 'path-2', - clients: 398, - entity_clients: 370, - non_entity_clients: 28, - }, - { - label: 'path-1', - clients: 120, - entity_clients: 82, - non_entity_clients: 38, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 560, - entity_clients: 189, - non_entity_clients: 371, - mounts: [ - { - label: 'path-1', - clients: 318, - entity_clients: 12, - non_entity_clients: 306, - }, - { - label: 'path-3-with-over-18-characters', - clients: 126, - entity_clients: 89, - non_entity_clients: 37, - }, - { - label: 'path-2', - clients: 76, - entity_clients: 75, - non_entity_clients: 1, - }, - { - label: 'path-4-with-over-18-characters', - clients: 40, - entity_clients: 13, - non_entity_clients: 27, - }, - ], - }, - ], - }, - }, - { - month: '10/21', - clients: 19417, - entity_clients: 10105, - non_entity_clients: 9312, - namespaces: [ - { 
- label: 'root', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - }, - { - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - }, - { - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - }, - { - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - }, - ], - }, - { - label: 'test-ns-2/', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - }, - { - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - }, - { - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - }, - { - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - }, - { - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - }, - { - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - }, - { - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - mounts: [ - { - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - }, - { - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - }, - { - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - mounts: [ - { - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - }, - { - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - }, - { - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - }, - { - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - }, - ], - }, - ], - namespaces_by_key: { - root: { - month: '10/21', - clients: 4835, - entity_clients: 2364, - non_entity_clients: 2471, - new_clients: { - month: '10/21', - label: 'root', - clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1797, - entity_clients: 883, - non_entity_clients: 914, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1501, - entity_clients: 663, - non_entity_clients: 838, - new_clients: { - month: '10/21', - label: 'path-1', - 
clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1461, - entity_clients: 800, - non_entity_clients: 661, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 76, - entity_clients: 18, - non_entity_clients: 58, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - }, - }, - }, - 'test-ns-2/': { - month: '10/21', - clients: 4027, - entity_clients: 1692, - non_entity_clients: 2335, - new_clients: { - month: '10/21', - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - }, - mounts_by_key: { - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1223, - entity_clients: 820, - non_entity_clients: 403, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1110, - entity_clients: 111, - non_entity_clients: 999, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1034, - entity_clients: 462, - non_entity_clients: 572, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 660, - entity_clients: 299, - non_entity_clients: 361, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - }, - }, - }, - 'test-ns-2-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3924, - entity_clients: 2132, - non_entity_clients: 1792, - new_clients: { - month: '10/21', - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - }, - mounts_by_key: { - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 1411, - entity_clients: 765, - non_entity_clients: 646, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 1205, - entity_clients: 382, - non_entity_clients: 823, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - }, - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 884, - entity_clients: 850, - non_entity_clients: 34, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 424, - entity_clients: 135, - non_entity_clients: 289, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - }, - }, - }, - 
'test-ns-1-with-namespace-length-over-18-characters/': { - month: '10/21', - clients: 3639, - entity_clients: 2314, - non_entity_clients: 1325, - new_clients: { - month: '10/21', - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1062, - entity_clients: 781, - non_entity_clients: 281, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1021, - entity_clients: 609, - non_entity_clients: 412, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 849, - entity_clients: 426, - non_entity_clients: 423, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 707, - entity_clients: 498, - non_entity_clients: 209, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - }, - }, - }, - 'test-ns-1/': { - month: '10/21', - clients: 2992, - entity_clients: 1603, - non_entity_clients: 1389, - new_clients: { - month: '10/21', - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - }, - mounts_by_key: { - 'path-1': { - month: '10/21', - label: 'path-1', - clients: 1140, - entity_clients: 480, - non_entity_clients: 660, - new_clients: { - month: '10/21', - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - }, - 'path-4-with-over-18-characters': { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 1058, - entity_clients: 651, - non_entity_clients: 407, - new_clients: { - month: '10/21', - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - }, - 'path-2': { - month: '10/21', - label: 'path-2', - clients: 575, - entity_clients: 416, - non_entity_clients: 159, - new_clients: { - month: '10/21', - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - }, - 'path-3-with-over-18-characters': { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 219, - entity_clients: 56, - non_entity_clients: 163, - new_clients: { - month: '10/21', - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - }, - }, - }, - }, - new_clients: { - month: '10/21', - clients: 7659, - entity_clients: 3555, - non_entity_clients: 4104, - namespaces: [ - { - label: 'test-ns-2/', - clients: 2301, - entity_clients: 678, - non_entity_clients: 1623, - mounts: [ - { - label: 'path-1', - clients: 980, - entity_clients: 454, - non_entity_clients: 526, - }, - { - label: 'path-4-with-over-18-characters', - clients: 602, - entity_clients: 212, - non_entity_clients: 390, - }, - { - label: 'path-3-with-over-18-characters', - clients: 440, - entity_clients: 7, - non_entity_clients: 433, - }, - { - label: 'path-2', - clients: 279, - entity_clients: 5, - non_entity_clients: 274, - }, - ], - }, - { - label: 'root', - 
clients: 1732, - entity_clients: 586, - non_entity_clients: 1146, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 907, - entity_clients: 192, - non_entity_clients: 715, - }, - { - label: 'path-2', - clients: 502, - entity_clients: 189, - non_entity_clients: 313, - }, - { - label: 'path-1', - clients: 276, - entity_clients: 202, - non_entity_clients: 74, - }, - { - label: 'path-4-with-over-18-characters', - clients: 47, - entity_clients: 3, - non_entity_clients: 44, - }, - ], - }, - { - label: 'test-ns-2-with-namespace-length-over-18-characters/', - clients: 1561, - entity_clients: 1225, - non_entity_clients: 336, - mounts: [ - { - label: 'path-3-with-over-18-characters', - clients: 948, - entity_clients: 660, - non_entity_clients: 288, - }, - { - label: 'path-2', - clients: 305, - entity_clients: 289, - non_entity_clients: 16, - }, - { - label: 'path-1', - clients: 230, - entity_clients: 207, - non_entity_clients: 23, - }, - { - label: 'path-4-with-over-18-characters', - clients: 78, - entity_clients: 69, - non_entity_clients: 9, - }, - ], - }, - { - label: 'test-ns-1-with-namespace-length-over-18-characters/', - clients: 1245, - entity_clients: 710, - non_entity_clients: 535, - mounts: [ - { - label: 'path-4-with-over-18-characters', - clients: 440, - entity_clients: 323, - non_entity_clients: 117, - }, - { - label: 'path-2', - clients: 339, - entity_clients: 308, - non_entity_clients: 31, - }, - { - label: 'path-1', - clients: 288, - entity_clients: 63, - non_entity_clients: 225, - }, - { - label: 'path-3-with-over-18-characters', - clients: 178, - entity_clients: 16, - non_entity_clients: 162, - }, - ], - }, - { - label: 'test-ns-1/', - clients: 820, - entity_clients: 356, - non_entity_clients: 464, - mounts: [ - { - label: 'path-2', - clients: 259, - entity_clients: 245, - non_entity_clients: 14, - }, - { - label: 'path-4-with-over-18-characters', - clients: 256, - entity_clients: 63, - non_entity_clients: 193, - }, - { - label: 'path-1', - clients: 239, - entity_clients: 30, - non_entity_clients: 209, - }, - { - label: 'path-3-with-over-18-characters', - clients: 66, - entity_clients: 18, - non_entity_clients: 48, - }, - ], - }, - ], - }, - }, - ]; - const NEW_ACTIVITY = MONTHLY_ACTIVITY.map((d) => d.new_clients); - const TOTAL_USAGE_COUNTS = { - clients: 38668, - entity_clients: 20818, - non_entity_clients: 17850, - }; - hooks.beforeEach(function () { - this.set('timestamp', formatRFC3339(new Date())); + setupMirage(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => STATIC_NOW); + }); + + hooks.beforeEach(async function () { + clientsHandler(this.server); + const store = this.owner.lookup('service:store'); + const activityQuery = { + start_time: { timestamp: START_TIME }, + end_time: { timestamp: getUnixTime(timestamp.now()) }, + }; + const activity = await store.queryRecord('clients/activity', activityQuery); + this.byMonthActivity = activity.byMonth; + this.newActivity = this.byMonthActivity.map((d) => d.new_clients); + this.totalUsageCounts = activity.total; + this.set('timestamp', formatRFC3339(timestamp.now())); this.set('chartLegend', [ { label: 'entity clients', key: 'entity_clients' }, { label: 'non-entity clients', key: 'non_entity_clients' }, ]); + this.isSecretsSyncActivated = true; + this.isHistoricalMonth = false; + + this.renderComponent = async () => { + await render(hbs` + + `); + }; + // Fails on #ember-testing-container + setRunOptions({ + rules: { + 'scrollable-region-focusable': { enabled: false }, + }, + 
}); + }); + + hooks.after(function () { + timestamp.now.restore(); }); test('it renders with full monthly activity data', async function (assert) { - this.set('byMonthActivityData', MONTHLY_ACTIVITY); - this.set('byMonthNewClients', NEW_ACTIVITY); - this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); - const expectedTotalEntity = formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); - const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); - const expectedNewEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'entity_clients')]); - const expectedNewNonEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'non_entity_clients')]); + const expectedTotalEntity = formatNumber([this.totalUsageCounts.entity_clients]); + const expectedTotalNonEntity = formatNumber([this.totalUsageCounts.non_entity_clients]); + const expectedTotalSync = formatNumber([this.totalUsageCounts.secret_syncs]); - await render(hbs` - - - `); + await this.renderComponent(); - assert.dom('[data-test-running-total]').exists('running total component renders'); - assert.dom('[data-test-line-chart]').exists('line chart renders'); - assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart renders'); - assert.dom('[data-test-running-total-legend]').exists('legend renders'); - assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); - assert - .dom('[data-test-running-total-entity] p.data-details') - .hasText(`${expectedTotalEntity}`, `renders correct total average ${expectedTotalEntity}`); + assert.dom(ts.charts.chart('running total')).exists('running total component renders'); + assert.dom(ts.charts.lineChart).exists('line chart renders'); assert - .dom('[data-test-running-total-nonentity] p.data-details') - .hasText(`${expectedTotalNonEntity}`, `renders correct new average ${expectedTotalNonEntity}`); + .dom(ts.charts.statTextValue('Entity clients')) + .hasText(`${expectedTotalEntity}`, `renders correct total entity average ${expectedTotalEntity}`); assert - .dom('[data-test-running-new-entity] p.data-details') - .hasText(`${expectedNewEntity}`, `renders correct total average ${expectedNewEntity}`); + .dom(ts.charts.statTextValue('Non-entity clients')) + .hasText( + `${expectedTotalNonEntity}`, + `renders correct total nonentity average ${expectedTotalNonEntity}` + ); assert - .dom('[data-test-running-new-nonentity] p.data-details') - .hasText(`${expectedNewNonEntity}`, `renders correct new average ${expectedNewNonEntity}`); + .dom(ts.charts.statTextValue('Secrets sync clients')) + .hasText(`${expectedTotalSync}`, `renders correct total sync ${expectedTotalSync}`); // assert line chart is correct - findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { + findAll(ts.charts.line.xAxisLabel).forEach((e, i) => { assert .dom(e) .hasText( - `${MONTHLY_ACTIVITY[i].month}`, - `renders x-axis labels for line chart: ${MONTHLY_ACTIVITY[i].month}` + `${this.byMonthActivity[i].month}`, + `renders x-axis labels for line chart: ${this.byMonthActivity[i].month}` ); }); assert - .dom('[data-test-line-chart="plot-point"]') + .dom(ts.charts.line.plotPoint) .exists( - { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length }, + { count: this.byMonthActivity.filter((m) => m.counts !== null).length }, 'renders correct number of plot points' ); - - // assert bar chart is correct - findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { - assert - .dom(e) - .hasText( - `${MONTHLY_ACTIVITY[i].month}`, - `renders x-axis labels for bar 
chart: ${MONTHLY_ACTIVITY[i].month}` - ); - }); - assert - .dom('[data-test-vertical-chart="data-bar"]') - .exists( - { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length * 2 }, - 'renders correct number of data bars' - ); }); test('it renders with no new monthly data', async function (assert) { - this.set('byMonthActivityData', MONTHLY_ACTIVITY); - this.set('byMonthNewClients', NEW_ACTIVITY); - this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); - const expectedTotalEntity = formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); - const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); + this.byMonthActivity = this.byMonthActivity.map((d) => ({ + ...d, + new_clients: { month: d.month }, + })); - await render(hbs` - - - `); + const expectedTotalEntity = formatNumber([this.totalUsageCounts.entity_clients]); + const expectedTotalNonEntity = formatNumber([this.totalUsageCounts.non_entity_clients]); + const expectedTotalSync = formatNumber([this.totalUsageCounts.secret_syncs]); + + await this.renderComponent(); + + assert.dom(ts.charts.chart('running total')).exists('running total component renders'); + assert.dom(ts.charts.lineChart).exists('line chart renders'); - assert.dom('[data-test-running-total]').exists('running total component renders'); - assert.dom('[data-test-line-chart]').exists('line chart renders'); - assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); - assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render'); - assert.dom('[data-test-component="empty-state"]').exists('renders empty state'); - assert.dom('[data-test-empty-state-title]').hasText('No new clients'); - assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); - assert - .dom('[data-test-running-total-entity] p.data-details') - .hasText(`${expectedTotalEntity}`, `renders correct total average ${expectedTotalEntity}`); assert - .dom('[data-test-running-total-nonentity] p.data-details') - .hasText(`${expectedTotalNonEntity}`, `renders correct new average ${expectedTotalNonEntity}`); + .dom(ts.charts.statTextValue('Entity clients')) + .hasText(`${expectedTotalEntity}`, `renders correct total entity average ${expectedTotalEntity}`); assert - .dom('[data-test-running-new-entity] p.data-details') - .hasText('0', 'renders 0 average new entity clients'); + .dom(ts.charts.statTextValue('Non-entity clients')) + .hasText( + `${expectedTotalNonEntity}`, + `renders correct total nonentity average ${expectedTotalNonEntity}` + ); assert - .dom('[data-test-running-new-nonentity] p.data-details') - .hasText('0', 'renders 0 average entity clients'); + .dom(ts.charts.statTextValue('Secrets sync clients')) + .hasText(`${expectedTotalSync}`, `renders correct total sync ${expectedTotalSync}`); }); test('it renders with single historical month data', async function (assert) { - const singleMonth = MONTHLY_ACTIVITY[MONTHLY_ACTIVITY.length - 1]; - const singleMonthNew = NEW_ACTIVITY[NEW_ACTIVITY.length - 1]; - this.set('singleMonth', [singleMonth]); - this.set('singleMonthNew', [singleMonthNew]); + const singleMonth = this.byMonthActivity[this.byMonthActivity.length - 1]; + const singleMonthNew = this.newActivity[this.newActivity.length - 1]; + const expectedTotalClients = formatNumber([singleMonth.clients]); const expectedTotalEntity = formatNumber([singleMonth.entity_clients]); const expectedTotalNonEntity = formatNumber([singleMonth.non_entity_clients]); + const expectedTotalSync = 
formatNumber([singleMonth.secret_syncs]);
 const expectedNewClients = formatNumber([singleMonthNew.clients]);
 const expectedNewEntity = formatNumber([singleMonthNew.entity_clients]);
 const expectedNewNonEntity = formatNumber([singleMonthNew.non_entity_clients]);
+ const expectedNewSyncs = formatNumber([singleMonthNew.secret_syncs]);
+ const { statTextValue } = ts.charts;
- await render(hbs`
-
- `);
- assert.dom('[data-test-running-total]').exists('running total component renders');
- assert.dom('[data-test-line-chart]').doesNotExist('line chart does not render');
- assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render');
- assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render');
- assert.dom('[data-test-running-total-timestamp]').doesNotExist('renders timestamp');
- assert.dom('[data-test-stat-text-container]').exists({ count: 6 }, 'renders stat text containers');
+ this.byMonthActivity = [singleMonth];
+ this.isHistoricalMonth = true;
+
+ await this.renderComponent();
+
+ assert.dom(ts.charts.lineChart).doesNotExist('line chart does not render');
+ assert.dom(statTextValue()).exists({ count: 8 }, 'renders 8 stat text containers');
 assert
- .dom('[data-test-new] [data-test-stat-text-container="New clients"] div.stat-value')
+ .dom(`[data-test-new] ${statTextValue('New clients')}`)
 .hasText(`${expectedNewClients}`, `renders correct total new clients: ${expectedNewClients}`);
 assert
- .dom('[data-test-new] [data-test-stat-text-container="Entity clients"] div.stat-value')
+ .dom(`[data-test-new] ${statTextValue('Entity clients')}`)
 .hasText(`${expectedNewEntity}`, `renders correct total new entity: ${expectedNewEntity}`);
 assert
- .dom('[data-test-new] [data-test-stat-text-container="Non-entity clients"] div.stat-value')
+ .dom(`[data-test-new] ${statTextValue('Non-entity clients')}`)
 .hasText(`${expectedNewNonEntity}`, `renders correct total new non-entity: ${expectedNewNonEntity}`);
 assert
- .dom('[data-test-total] [data-test-stat-text-container="Total monthly clients"] div.stat-value')
+ .dom(`[data-test-new] ${statTextValue('Secrets sync clients')}`)
+ .hasText(`${expectedNewSyncs}`, `renders correct total new syncs: ${expectedNewSyncs}`);
+ assert
+ .dom(`[data-test-total] ${statTextValue('Total monthly clients')}`)
 .hasText(`${expectedTotalClients}`, `renders correct total clients: ${expectedTotalClients}`);
 assert
- .dom('[data-test-total] [data-test-stat-text-container="Entity clients"] div.stat-value')
+ .dom(`[data-test-total] ${statTextValue('Entity clients')}`)
 .hasText(`${expectedTotalEntity}`, `renders correct total entity: ${expectedTotalEntity}`);
 assert
- .dom('[data-test-total] [data-test-stat-text-container="Non-entity clients"] div.stat-value')
+ .dom(`[data-test-total] ${statTextValue('Non-entity clients')}`)
 .hasText(`${expectedTotalNonEntity}`, `renders correct total non-entity: ${expectedTotalNonEntity}`);
+ assert
+ .dom(`[data-test-total] ${statTextValue('Secrets sync clients')}`)
+ .hasText(`${expectedTotalSync}`, `renders correct total sync: ${expectedTotalSync}`);
+ });
+
+ test('it hides secret sync totals when feature is not activated', async function (assert) {
+ this.isSecretsSyncActivated = false;
+
+ await this.renderComponent();
+
+ assert.dom(ts.charts.chart('running total')).exists('running total component renders');
+ assert.dom(ts.charts.lineChart).exists('line chart renders');
+ assert.dom(ts.charts.statTextValue('Entity clients')).exists();
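+ // Entity and non-entity stats should render whether or not the sync feature is activated; only the
+ // sync stat is gated below. For orientation, the statTextValue helper is assumed to build a selector
+ // roughly like (hypothetical sketch, not the actual helper):
+ //   statTextValue: (label) => (label ? `[data-test-stat-text="${label}"] .stat-value` : '[data-test-stat-text]')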
assert.dom(ts.charts.statTextValue('Non-entity clients')).exists(); + assert.dom(ts.charts.statTextValue('Secrets sync clients')).doesNotExist('does not render secret syncs'); }); }); diff --git a/ui/tests/integration/components/clients/usage-stats-test.js b/ui/tests/integration/components/clients/usage-stats-test.js index d4471024a9d4..38f55d551631 100644 --- a/ui/tests/integration/components/clients/usage-stats-test.js +++ b/ui/tests/integration/components/clients/usage-stats-test.js @@ -1,3 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { render } from '@ember/test-helpers'; @@ -6,41 +11,84 @@ import { hbs } from 'ember-cli-htmlbars'; module('Integration | Component | clients/usage-stats', function (hooks) { setupRenderingTest(hooks); + hooks.beforeEach(function () { + this.isSecretsSyncActivated = false; + this.counts = {}; + + this.renderComponent = async () => + await render( + hbs`` + ); + }); + test('it renders defaults', async function (assert) { - await render(hbs``); + await this.renderComponent(); assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts even with no data passed'); assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); - assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('0', 'Value defaults to zero'); + assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('-', 'renders dash when no data'); assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); - assert.dom('[data-test-stat-text="entity-clients"] .stat-value').hasText('0', 'Value defaults to zero'); + assert + .dom('[data-test-stat-text="entity-clients"] .stat-value') + .hasText('-', 'renders dash when no data'); assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); assert .dom('[data-test-stat-text="non-entity-clients"] .stat-value') - .hasText('0', 'Value defaults to zero'); - assert.dom('a').hasAttribute('href', 'https://learn.hashicorp.com/tutorials/vault/usage-metrics'); + .hasText('-', 'renders dash when no data'); + assert + .dom('a') + .hasAttribute('href', 'https://developer.hashicorp.com/vault/tutorials/monitoring/usage-metrics'); }); - test('it renders with data', async function (assert) { - this.set('counts', { + test('it renders with token data', async function (assert) { + this.counts = { clients: 17, entity_clients: 7, non_entity_clients: 10, - }); - await render(hbs``); + }; - assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts even with no data passed'); - assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); + await this.renderComponent(); + + assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts'); assert .dom('[data-test-stat-text="total-clients"] .stat-value') .hasText('17', 'Total clients shows passed value'); - assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); assert .dom('[data-test-stat-text="entity-clients"] .stat-value') .hasText('7', 'entity clients shows passed value'); - assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); assert .dom('[data-test-stat-text="non-entity-clients"] .stat-value') .hasText('10', 'non entity clients shows passed value'); }); + + module('it renders with full totals data', function (hooks) { + 
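+ // Nested module: reuses the outer hooks (including the renderComponent helper); the beforeEach below
+ // overrides this.counts with a payload that includes secret_syncs, so both feature-flag branches can
+ // assert against the same totals.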
hooks.beforeEach(function () { + this.counts = { + clients: 22, + entity_clients: 7, + non_entity_clients: 10, + secret_syncs: 5, + }; + }); + + test('with secrets sync activated', async function (assert) { + this.isSecretsSyncActivated = true; + + await this.renderComponent(); + + assert.dom('[data-test-stat-text]').exists({ count: 4 }, 'Renders 4 Stat texts'); + assert + .dom('[data-test-stat-text="secret-syncs"] .stat-value') + .hasText('5', 'secrets sync clients shows passed value'); + }); + + test('with secrets sync NOT activated', async function (assert) { + this.isSecretsSyncActivated = false; + + await this.renderComponent(); + + assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts'); + assert.dom('[data-test-stat-text="secret-syncs"] .stat-value').doesNotExist(); + }); + }); }); diff --git a/ui/tests/integration/components/clients/vertical-bar-chart-test.js b/ui/tests/integration/components/clients/vertical-bar-chart-test.js index 58831c8725da..678d5a563a00 100644 --- a/ui/tests/integration/components/clients/vertical-bar-chart-test.js +++ b/ui/tests/integration/components/clients/vertical-bar-chart-test.js @@ -1,6 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, findAll, find, triggerEvent } from '@ember/test-helpers'; +import { render, findAll, find } from '@ember/test-helpers'; import { hbs } from 'ember-cli-htmlbars'; module('Integration | Component | clients/vertical-bar-chart', function (hooks) { @@ -19,15 +24,14 @@ module('Integration | Component | clients/vertical-bar-chart', function (hooks) ]; this.set('barChartData', barChartData); - await render(hbs` + await render(hbs`
-
`); - const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); assert .dom('[data-test-vertical-chart="data-bar"]') @@ -38,16 +42,18 @@ module('Integration | Component | clients/vertical-bar-chart', function (hooks) assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); }); - for (const [i, bar] of tooltipHoverBars.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert - .dom(tooltip) - .includesText( - `${barChartData[i].clients} total clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, - 'tooltip text is correct' - ); - } + // FLAKY after adding a11y testing, skip for now + // const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); + // for (const [i, bar] of tooltipHoverBars.entries()) { + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert + // .dom(tooltip) + // .includesText( + // `${barChartData[i].clients} total clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, + // 'tooltip text is correct' + // ); + // } }); test('it renders chart and tooltip for new clients', async function (assert) { @@ -57,16 +63,15 @@ module('Integration | Component | clients/vertical-bar-chart', function (hooks) ]; this.set('barChartData', barChartData); - await render(hbs` + await render(hbs`
-
`); - const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); assert .dom('[data-test-vertical-chart="data-bar"]') @@ -77,16 +82,18 @@ module('Integration | Component | clients/vertical-bar-chart', function (hooks) assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); }); - for (const [i, bar] of tooltipHoverBars.entries()) { - await triggerEvent(bar, 'mouseover'); - const tooltip = document.querySelector('.ember-modal-dialog'); - assert - .dom(tooltip) - .includesText( - `${barChartData[i].clients} new clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, - 'tooltip text is correct' - ); - } + // FLAKY after adding a11y testing, skip for now + // const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); + // for (const [i, bar] of tooltipHoverBars.entries()) { + // await triggerEvent(bar, 'mouseover'); + // const tooltip = document.querySelector('.ember-modal-dialog'); + // assert + // .dom(tooltip) + // .includesText( + // `${barChartData[i].clients} new clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, + // 'tooltip text is correct' + // ); + // } }); test('it renders empty state when no dataset', async function (assert) { diff --git a/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js b/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js new file mode 100644 index 000000000000..be01689cff7c --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/create-and-edit-test.js @@ -0,0 +1,254 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render, click, fillIn } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { datetimeLocalStringFormat } from 'core/utils/date-formatters'; +import { format, addDays, startOfDay } from 'date-fns'; +import { PAGE } from 'vault/tests/helpers/config-ui/message-selectors'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | messages/page/create-and-edit', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.context = { owner: this.engine }; + this.store = this.owner.lookup('service:store'); + this.message = this.store.createRecord('config-ui/message'); + }); + + test('it should display all the create form fields and default radio button values', async function (assert) { + assert.expect(17); + + await render(hbs``, { + owner: this.engine, + }); + + assert.dom(PAGE.title).hasText('Create message'); + assert.dom(PAGE.radio('authenticated')).exists(); + assert.dom(PAGE.radio('unauthenticated')).exists(); + assert.dom(PAGE.radio('authenticated')).isChecked(); + assert.dom(PAGE.radio('unauthenticated')).isNotChecked(); + assert.dom(PAGE.radio('banner')).exists(); + assert.dom(PAGE.radio('modal')).exists(); + assert.dom(PAGE.radio('banner')).isChecked(); + assert.dom(PAGE.radio('modal')).isNotChecked(); + assert.dom(PAGE.field('title')).exists(); + assert.dom(PAGE.field('message')).exists(); + assert.dom('[data-test-kv-key="0"]').exists(); + assert.dom('[data-test-kv-value="0"]').exists(); + assert.dom(PAGE.input('startTime')).exists(); + assert + .dom(PAGE.input('startTime')) + .hasValue(format(addDays(startOfDay(timestamp.now()), 1), datetimeLocalStringFormat)); + assert.dom(PAGE.input('endTime')).exists(); + assert.dom(PAGE.input('endTime')).hasValue(''); + }); + + test('it should display validation errors for invalid form fields', async function (assert) { + assert.expect(8); + await render(hbs``, { + owner: this.engine, + }); + + await fillIn(PAGE.input('startTime'), '2024-01-20T00:00'); + await fillIn(PAGE.input('endTime'), '2024-01-01T00:00'); + await click(PAGE.button('create-message')); + assert.dom(PAGE.input('title')).hasClass('has-error-border'); + assert.dom(`${PAGE.fieldValidation('title')} ${PAGE.inlineErrorMessage}`).hasText('Title is required.'); + assert.dom(PAGE.input('message')).hasClass('has-error-border'); + assert + .dom(`${PAGE.fieldValidation('message')} ${PAGE.inlineErrorMessage}`) + .hasText('Message is required.'); + assert.dom(PAGE.input('startTime')).hasClass('has-error-border'); + assert + .dom(`${PAGE.fieldValidation('startTime')} ${PAGE.inlineErrorMessage}`) + .hasText('Start time is after end time.'); + assert.dom(PAGE.input('endTime')).hasClass('has-error-border'); + assert + .dom(`${PAGE.fieldValidation('endTime')} ${PAGE.inlineErrorMessage}`) + .hasText('End time is before start time.'); + }); + + test('it should create new message', async function (assert) { + assert.expect(1); + + this.server.post('/sys/config/ui/custom-messages', () => { + assert.ok(true, 'POST request made to create message'); + }); + + await render(hbs``, { + owner: this.engine, + }); + await fillIn(PAGE.input('title'), 'Awesome custom message title'); + await fillIn( + 
PAGE.input('message'),
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.'
+ );
+ await fillIn(
+ PAGE.input('startTime'),
+ format(addDays(startOfDay(new Date('2023-12-12')), 1), datetimeLocalStringFormat)
+ );
+ await click('#specificDate');
+ await fillIn(
+ PAGE.input('endTime'),
+ format(addDays(startOfDay(new Date('2023-12-12')), 10), datetimeLocalStringFormat)
+ );
+ await fillIn('[data-test-kv-key="0"]', 'Learn more');
+ await fillIn('[data-test-kv-value="0"]', 'www.learn.com');
+ await click(PAGE.button('create-message'));
+ });
+
+ test('it should have form validations', async function (assert) {
+ assert.expect(4);
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ await click(PAGE.button('create-message'));
+ assert.dom(PAGE.input('title')).hasClass('has-error-border', 'show error border for title field');
+ assert.dom(`${PAGE.fieldValidation('title')} ${PAGE.inlineErrorMessage}`).hasText('Title is required.');
+ assert.dom(PAGE.input('message')).hasClass('has-error-border', 'show error border for message field');
+ assert
+ .dom(`${PAGE.fieldValidation('message')} ${PAGE.inlineErrorMessage}`)
+ .hasText('Message is required.');
+ });
+
+ test('it should prepopulate form when in edit mode', async function (assert) {
+ assert.expect(13);
+ this.store.pushPayload('config-ui/message', {
+ modelName: 'config-ui/message',
+ id: 'hhhhh-iiii-lllll-dddd',
+ type: 'modal',
+ authenticated: false,
+ title: 'Hello world',
+ message: 'Blah blah blah. Some super long message.',
+ start_time: '2023-12-12T08:00:00.000Z',
+ end_time: '2023-12-21T08:00:00.000Z',
+ link: { 'Learn more': 'www.learnmore.com' },
+ });
+ this.message = this.store.peekRecord('config-ui/message', 'hhhhh-iiii-lllll-dddd');
+ await render(hbs``, {
+ owner: this.engine,
+ });
+
+ assert.dom(PAGE.title).hasText('Edit message');
+ assert.dom(PAGE.radio('authenticated')).exists();
+ assert.dom(PAGE.radio('unauthenticated')).isChecked();
+ assert.dom(PAGE.radio('modal')).exists();
+ assert.dom(PAGE.radio('modal')).isChecked();
+ assert.dom(PAGE.input('title')).hasValue('Hello world');
+ assert.dom(PAGE.input('message')).hasValue('Blah blah blah. Some super long message.');
+ assert.dom('[data-test-kv-key="0"]').exists();
+ assert.dom('[data-test-kv-key="0"]').hasValue('Learn more');
+ assert.dom('[data-test-kv-value="0"]').exists();
+ assert.dom('[data-test-kv-value="0"]').hasValue('www.learnmore.com');
+ await click('#specificDate');
+ assert
+ .dom(PAGE.input('startTime'))
+ .hasValue(format(new Date(this.message.startTime), datetimeLocalStringFormat));
+ assert
+ .dom(PAGE.input('endTime'))
+ .hasValue(format(new Date(this.message.endTime), datetimeLocalStringFormat));
+ });
+
+ test('it should show a preview image modal when preview is clicked', async function (assert) {
+ assert.expect(6);
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ await fillIn(PAGE.input('title'), 'Awesome custom message title');
+ await fillIn(
+ PAGE.input('message'),
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.'
+ );
+ await click(PAGE.button('preview'));
+ assert.dom(PAGE.modal('preview modal')).doesNotExist();
+ assert.dom(PAGE.modal('preview image')).exists();
+ assert.dom(PAGE.alertTitle('Awesome custom message title')).hasText('Awesome custom message title');
+ assert
+ .dom(PAGE.alertDescription('Awesome custom message title'))
+ .hasText(
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.'
+ );
+ assert.dom('img').hasAttribute('src', '/ui/images/custom-messages-dashboard.png');
+ await click(PAGE.modalButton('Close'));
+ await click('#unauthenticated');
+ await click(PAGE.button('preview'));
+ assert.dom('img').hasAttribute('src', '/ui/images/custom-messages-login.png');
+ });
+
+ test('it should show a preview modal when preview is clicked', async function (assert) {
+ assert.expect(4);
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ await click(PAGE.radio('modal'));
+ await fillIn(PAGE.input('title'), 'Preview modal title');
+ await fillIn(PAGE.input('message'), "Some preview modal message that's super long.");
+ await click(PAGE.button('preview'));
+ assert.dom(PAGE.modal('preview modal')).exists();
+ assert.dom(PAGE.modal('preview image')).doesNotExist();
+ assert.dom(PAGE.modalTitle('Preview modal title')).hasText('Preview modal title');
+ assert.dom(PAGE.modalBody('Preview modal title')).hasText("Some preview modal message that's super long.");
+ });
+
+ test('it should warn when more than one modal message is active', async function (assert) {
+ assert.expect(2);
+
+ this.store.pushPayload('config-ui/message', {
+ modelName: 'config-ui/message',
+ id: '01234567-89ab-cdef-0123-456789abcdef',
+ active: true,
+ type: 'modal',
+ authenticated: true,
+ title: 'Message title 1',
+ message: 'Some long long long message',
+ link: { here: 'www.example.com' },
+ startTime: '2021-08-01T00:00:00Z',
+ endTime: '',
+ });
+ this.store.pushPayload('config-ui/message', {
+ modelName: 'config-ui/message',
+ id: '01234567-89ab-vvvv-0123-456789abcdef',
+ active: true,
+ type: 'modal',
+ authenticated: false,
+ title: 'Message title 2',
+ message: 'Some long long long message',
+ link: { here: 'www.example.com' },
+ startTime: '2021-08-01T00:00:00Z',
+ endTime: '2090-08-01T00:00:00Z',
+ });
+
+ this.messages = this.store.peekAll('config-ui/message');
+
+ await render(
+ hbs``,
+ {
+ owner: this.engine,
+ }
+ );
+ await fillIn(PAGE.input('title'), 'Awesome custom message title');
+ await fillIn(
+ PAGE.input('message'),
+ 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Pulvinar mattis nunc sed blandit libero volutpat sed cras ornare.'
+ );
+ await click(PAGE.radio('modal'));
+ await click(PAGE.button('create-message'));
+ assert.dom(PAGE.modalTitle('Warning: more than one modal')).exists();
+ assert
+ .dom(PAGE.modalBody('Warning: more than one modal'))
+ .hasText(
+ 'You have an active modal configured after the user logs in and are trying to create another one. It is recommended to avoid having more than one modal at once as it can be intrusive for users. Would you like to continue creating your message? Click “Confirm” to continue.'
+ ); + await click(PAGE.modalButton('confirm')); + }); +}); diff --git a/ui/tests/integration/components/config-ui/messages/page/details-test.js b/ui/tests/integration/components/config-ui/messages/page/details-test.js new file mode 100644 index 000000000000..84473f1dc6c7 --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/details-test.js @@ -0,0 +1,92 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { dateFormat } from 'core/helpers/date-format'; + +const allFields = [ + { label: 'Active', key: 'active' }, + { label: 'Type', key: 'type' }, + { label: 'Authenticated', key: 'authenticated' }, + { label: 'Title', key: 'title' }, + { label: 'Message', key: 'message' }, + { label: 'Start time', key: 'startTime' }, + { label: 'End time', key: 'endTime' }, + { label: 'Link', key: 'link' }, +]; + +module('Integration | Component | messages/page/details', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.context = { owner: this.engine }; + this.store = this.owner.lookup('service:store'); + + this.server.post('/sys/capabilities-self', () => ({ + data: { + capabilities: ['root'], + }, + })); + + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '01234567-89ab-cdef-0123-456789abcdef', + active: true, + type: 'banner', + authenticated: true, + title: 'Message title 1', + message: 'Some long long long message', + link: { here: 'www.example.com' }, + start_time: '2021-08-01T00:00:00Z', + end_time: '', + canDeleteCustomMessages: true, + canEditCustomMessages: true, + }); + }); + + test('it should show the message details', async function (assert) { + this.message = await this.store.peekRecord('config-ui/message', '01234567-89ab-cdef-0123-456789abcdef'); + + await render(hbs``, { + owner: this.engine, + }); + assert.dom('[data-test-page-title]').hasText('Message title 1'); + assert + .dom('[data-test-component="info-table-row"]') + .exists({ count: allFields.length }, 'Correct number of filtered fields render'); + + allFields.forEach((field) => { + assert + .dom(`[data-test-row-label="${field.label}"]`) + .hasText(field.label, `${field.label} label renders`); + if (field.key === 'startTime' || field.key === 'endTime') { + const formattedDate = dateFormat([this.message[field.key], 'MMM d, yyyy hh:mm aaa'], { + withTimeZone: true, + }); + assert + .dom(`[data-test-row-value="${field.label}"]`) + .hasText(formattedDate || 'Never', `${field.label} value renders`); + } else if (field.key === 'authenticated' || field.key === 'active') { + assert + .dom(`[data-test-value-div="${field.label}"]`) + .hasText(this.message[field.key] ? 
'Yes' : 'No', `${field.label} value renders`); + } else if (field.key === 'link') { + assert.dom('[data-test-value-div="Link"]').exists(); + assert.dom('[data-test-value-div="Link"] [data-test-link="message link"]').hasText('here'); + } else { + assert + .dom(`[data-test-row-value="${field.label}"]`) + .hasText(this.message[field.key], `${field.label} value renders`); + } + }); + }); +}); diff --git a/ui/tests/integration/components/config-ui/messages/page/list-test.js b/ui/tests/integration/components/config-ui/messages/page/list-test.js new file mode 100644 index 000000000000..94e9bb355b3d --- /dev/null +++ b/ui/tests/integration/components/config-ui/messages/page/list-test.js @@ -0,0 +1,144 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { setupEngine } from 'ember-engines/test-support'; +import { render, click } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { PAGE } from 'vault/tests/helpers/config-ui/message-selectors'; +import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs'; + +const META = { + currentPage: 1, + lastPage: 1, + nextPage: 1, + prevPage: 1, + total: 3, + pageSize: 15, +}; + +module('Integration | Component | messages/page/list', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'config-ui'); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub()); + this.store = this.owner.lookup('service:store'); + + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '0', + active: true, + type: 'banner', + authenticated: true, + title: 'Message title 1', + message: 'Some long long long message', + link: { title: 'here', href: 'www.example.com' }, + start_time: '2021-08-01T00:00:00Z', + end_time: '', + }); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '1', + active: false, + type: 'modal', + authenticated: true, + title: 'Message title 2', + message: 'Some long long long message blah blah blah', + link: { title: 'here', href: 'www.example2.com' }, + start_time: '2023-07-01T00:00:00Z', + end_time: '2023-08-01T00:00:00Z', + }); + this.store.pushPayload('config-ui/message', { + modelName: 'config-ui/message', + id: '2', + active: false, + type: 'banner', + authenticated: false, + title: 'Message title 3', + message: 'Some long long long message', + link: { title: 'here', href: 'www.example.com' }, + }); + }); + + test('it should show the messages empty state', async function (assert) { + this.messages = []; + + await render(hbs``, { + owner: this.engine, + }); + + assert.dom('[data-test-empty-state-title]').hasText('No messages yet'); + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'Add a custom message for all users after they log into Vault. Create message to get started.' 
+ );
+ });
+
+ test('it should show the list of custom messages', async function (assert) {
+ this.messages = this.store.peekAll('config-ui/message');
+ this.messages.meta = META;
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ assert.dom('[data-test-icon="message-circle"]').exists();
+ for (const message of this.messages) {
+ assert.dom(PAGE.listItem(message.title)).exists();
+ assert.dom(`[data-linked-block-title="${message.id}"]`).hasText(message.title);
+ }
+ });
+
+ test('it should show max message warning modal', async function (assert) {
+ for (let i = 0; i < 97; i++) {
+ this.store.pushPayload('config-ui/message', {
+ modelName: 'config-ui/message',
+ id: `${i}-a`,
+ active: true,
+ type: 'banner',
+ authenticated: false,
+ title: `Message title ${i}`,
+ message: 'Some long long long message',
+ link: { title: 'here', href: 'www.example.com' },
+ start_time: '2021-08-01T00:00:00Z',
+ });
+ }
+
+ this.messages = this.store.peekAll('config-ui/message');
+ this.messages.meta = {
+ currentPage: 1,
+ lastPage: 1,
+ nextPage: 1,
+ prevPage: 1,
+ total: this.messages.length,
+ pageSize: 100,
+ };
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ await click(PAGE.button('create message'));
+ assert.dom(PAGE.modalTitle('maximum-message-modal')).hasText('Maximum number of messages reached');
+ assert
+ .dom(PAGE.modalBody('maximum-message-modal'))
+ .hasText(
+ 'Vault can only store up to 100 messages. To create a message, delete one of your messages to clear up space.'
+ );
+ await click(PAGE.modalButton('maximum-message-modal'));
+ });
+
+ test('it should show the correct badge colors based on badge status', async function (assert) {
+ this.messages = this.store.peekAll('config-ui/message');
+ this.messages.meta = META;
+ await render(hbs``, {
+ owner: this.engine,
+ });
+ assert.dom(PAGE.badge('0')).hasClass('hds-badge--color-success');
+ assert.dom(PAGE.badge('1')).hasClass('hds-badge--color-neutral');
+ assert.dom(PAGE.badge('2')).hasClass('hds-badge--color-highlight');
+ });
+});
diff --git a/ui/tests/integration/components/confirm-action-test.js b/ui/tests/integration/components/confirm-action-test.js
index c665e6ae1b29..2bf6bbc492b3 100644
--- a/ui/tests/integration/components/confirm-action-test.js
+++ b/ui/tests/integration/components/confirm-action-test.js
@@ -1,53 +1,149 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1 + */ + import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; -import { render, click } from '@ember/test-helpers'; +import { render, click, find } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; import sinon from 'sinon'; +import { setRunOptions } from 'ember-a11y-testing/test-support'; +const SELECTORS = { + modalToggle: '[data-test-confirm-action-trigger]', + title: '[data-test-confirm-action-title]', + message: '[data-test-confirm-action-message]', + confirm: '[data-test-confirm-button]', + cancel: '[data-test-confirm-cancel-button]', +}; module('Integration | Component | confirm-action', function (hooks) { setupRenderingTest(hooks); - test('it renders and on click shows the correct icon', async function (assert) { - const confirmAction = sinon.spy(); - this.set('onConfirm', confirmAction); + hooks.beforeEach(function () { + this.onConfirm = sinon.spy(); + }); + + test('it renders defaults and calls onConfirmAction', async function (assert) { await render(hbs` - DELETE - + /> `); - assert.dom('[data-test-icon="chevron-down"]').exists('Icon is pointing down'); - await click('[data-test-confirm-action-trigger="true"]'); - assert.dom('[data-test-icon="chevron-up"]').exists('Icon is now pointing up'); - assert.dom('[data-test-confirm-action-title]').hasText('Delete this?'); + + assert.dom(SELECTORS.modalToggle).hasText('DELETE', 'renders button text'); + await click(SELECTORS.modalToggle); + // hasClass assertion wasn't working so this is the workaround + assert.strictEqual( + find('#confirm-action-modal').className, + 'hds-modal hds-modal--size-small hds-modal--color-critical has-text-left', + 'renders critical modal color by default' + ); + assert.dom(SELECTORS.confirm).hasClass('hds-button--color-critical', 'renders critical confirm button'); + assert.dom(SELECTORS.title).hasText('Are you sure?', 'renders default title'); + assert + .dom(SELECTORS.message) + .hasText('You will not be able to recover it later.', 'renders default body text'); + await click(SELECTORS.cancel); + assert.false(this.onConfirm.called, 'does not call the action when Cancel is clicked'); + await click(SELECTORS.modalToggle); + await click(SELECTORS.confirm); + assert.true(this.onConfirm.called, 'calls the action when Confirm is clicked'); + assert.dom(SELECTORS.title).doesNotExist('modal closes after confirm is clicked'); }); - test('it closes the confirmation modal on successful delete', async function (assert) { - const confirmAction = sinon.spy(); - this.set('onConfirm', confirmAction); + test('it renders isInDropdown defaults and calls onConfirmAction', async function (assert) { + setRunOptions({ + rules: { + // this component breaks this rule because it expects to be rendered within